Background

Log normal

# Intercept-only Bayesian model for GPT with a log-normal likelihood.
# NOTE(review): `d` and the meaning of GPT are defined elsewhere — confirm.
f = brm(
  bf(GPT~1), data = d, family = lognormal(),
  chains = 4, cores = 4
)
## Compiling the C++ model
## Start sampling
# Posterior predictive check: density of 100 simulated datasets vs observed.
# NOTE(review): `nsamples` is deprecated in newer brms (use `ndraws`) — left
# as-is to match the recorded output.
pp_check(f, type="dens_overlay", nsamples = 100)

# Posterior predictive histograms (11 draws) as a coarser visual check.
pp_check(f, type="hist", nsamples=11, binwidth=7.5)

# Print the fitted log-normal model; recorded summary follows as ## output.
f
##  Family: lognormal 
##   Links: mu = identity; sigma = identity 
## Formula: GPT ~ 1 
##    Data: d (Number of observations: 117) 
## Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup samples = 4000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## Intercept     3.62      0.03     3.55     3.68       3442 1.00
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## sigma     0.38      0.02     0.33     0.43       2833 1.00
## 
## Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample 
## is a crude measure of effective sample size, and Rhat is the potential 
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Trace and marginal density plots of the posterior draws.
plot(f)

Gamma

# Same intercept-only model, but with a Gamma likelihood and log link on mu,
# to compare against the log-normal fit above.
f_gamma = brm(
  bf(GPT~1), data = d, family = Gamma(link="log"),
  chains = 4, cores = 4
)
## Compiling the C++ model
## Start sampling
# Posterior predictive density overlay for the gamma model.
pp_check(f_gamma, type="dens_overlay", nsamples = 100)

# Posterior predictive histograms, matching the log-normal check settings.
pp_check(f_gamma, type="hist", nsamples=11, binwidth=7.5)

# Print the fitted gamma model; recorded summary follows as ## output.
f_gamma
##  Family: gamma 
##   Links: mu = log; shape = identity 
## Formula: GPT ~ 1 
##    Data: d (Number of observations: 117) 
## Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup samples = 4000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## Intercept     3.68      0.03     3.62     3.75       3691 1.00
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## shape     7.52      0.98     5.73     9.59       3118 1.00
## 
## Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample 
## is a crude measure of effective sample size, and Rhat is the potential 
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Trace and marginal density plots of the posterior draws.
plot(f_gamma)

QQ Plots

# Point estimates for the gamma fit: the intercept is on the log scale
# (log link on mu), so exponentiate to get the mean; then convert the
# (mean, shape) parameterization to the (shape, scale) one used by qgamma().
mu_gamma    <- exp(fixef(f_gamma)[, 1])
shape_gamma <- summary(f_gamma)$spec_pars[, 1]
scale_gamma <- mu_gamma / shape_gamma

# The log-normal fit uses identity links, so the intercept is meanlog directly.
mu_lnorm <- fixef(f)[, 1]
sd_lnorm <- summary(f)$spec_pars[, 1]

# QQ plots: sorted observed GPT vs theoretical quantiles from each fitted
# distribution; points on the red 1:1 line indicate a good distributional fit.
d %>%
  select(GPT) %>%
  arrange(GPT) %>%
  mutate(
    lognorm = qlnorm(ppoints(n()), meanlog = mu_lnorm, sdlog = sd_lnorm),
    gamma   = qgamma(ppoints(n()), shape = shape_gamma, scale = scale_gamma)
  ) %>%
  # pivot_longer() replaces the superseded tidyr::gather()
  tidyr::pivot_longer(c(lognorm, gamma), names_to = "dist", values_to = "emp_q") %>%
  ggplot(aes(x = emp_q, y = GPT)) +
    geom_abline(intercept = 0, slope = 1, color = 'red', alpha = 0.5) +
    geom_point() +
    facet_grid(~dist)

# Overlay the fitted gamma and log-normal densities on the empirical density
# of GPT to compare overall distributional fit on a common grid.
# tibble() replaces the deprecated data_frame().
tibble(
  t = seq(0, 100, length.out = 1000)
) %>%
  mutate(
    gamma   = dgamma(t, shape = shape_gamma, scale = scale_gamma),
    lognorm = dlnorm(t, meanlog = mu_lnorm, sdlog = sd_lnorm)
  ) %>%
  # pivot_longer() replaces the superseded tidyr::gather()
  tidyr::pivot_longer(c(gamma, lognorm), names_to = "model", values_to = "density") %>%
  ggplot() +
  geom_line(aes(x = t, y = density, color = model)) +
  geom_density(data = d, aes(x = GPT))

Gamma Fixed- and Random-Effects Models, by Follow-up (`follow`)

# Fixed-effects model: a separate mean (mu, log link) and shape parameter for
# each level of `follow`, using cell-means coding (`-1` drops the intercept).
f_gamma_fe = brm(
  bf(GPT~follow-1, shape~follow-1), data = d, family = Gamma(link="log"),
  chains = 2, cores = 2, iter = 6000, thin = 3
)
## Compiling the C++ model
## Start sampling
# Random-effects (partial pooling) counterpart: group-level intercepts for
# both mu and shape by `follow`.
# NOTE(review): brms group terms are conventionally written (1 | follow);
# the recorded summary below shows this parsed as intended, so left as-is.
f_gamma_re = brm(
  bf(GPT~1|follow, shape~1|follow), data = d, family = Gamma(link="log"),
  chains = 2, cores = 2, iter = 6000, thin = 3
)
## Compiling the C++ model
## Start sampling
## Warning: There were 32 divergent transitions after warmup. Increasing adapt_delta above 0.8 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Warning: Examine the pairs() plot to diagnose sampling problems
# Posterior predictive checks for the fixed-effects model: overall and per
# follow-up level, arranged in a 2-column grid via patchwork.
# FIX: the F4 panel was mislabeled "F3" — title now matches its filter.
pp_check(f_gamma_fe, type="dens_overlay", nsamples = 25) + labs(title = "All") +
pp_check(f_gamma_fe, type="dens_overlay", nsamples = 25, newdata = filter(d, follow=="F1")) + labs(title = "F1") +
pp_check(f_gamma_fe, type="dens_overlay", nsamples = 25, newdata = filter(d, follow=="F3")) + labs(title = "F3") +
pp_check(f_gamma_fe, type="dens_overlay", nsamples = 25, newdata = filter(d, follow=="F4")) + labs(title = "F4") +
  patchwork::plot_layout(ncol = 2)
## Warning: contrasts dropped from factor follow due to missing levels

## Warning: contrasts dropped from factor follow due to missing levels

## Warning: contrasts dropped from factor follow due to missing levels

# Posterior predictive checks for the random-effects model, same layout as
# the fixed-effects grid.
# FIX: the F4 panel was mislabeled "F3" — title now matches its filter.
pp_check(f_gamma_re, type="dens_overlay", nsamples = 25) + labs(title = "All") +
pp_check(f_gamma_re, type="dens_overlay", nsamples = 25, newdata = filter(d, follow=="F1")) + labs(title = "F1") +
pp_check(f_gamma_re, type="dens_overlay", nsamples = 25, newdata = filter(d, follow=="F3")) + labs(title = "F3") +
pp_check(f_gamma_re, type="dens_overlay", nsamples = 25, newdata = filter(d, follow=="F4")) + labs(title = "F4") +
  patchwork::plot_layout(ncol = 2)

# Print the random-effects model; recorded summary follows as ## output.
# Note the redisplayed divergent-transition warning — these results should be
# treated with caution (or refit with a higher adapt_delta).
f_gamma_re
## Warning: There were 32 divergent transitions after warmup. Increasing adapt_delta above 0.8 may help.
## See http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
##  Family: gamma 
##   Links: mu = log; shape = log 
## Formula: GPT ~ 1 | follow 
##          shape ~ 1 | follow
##    Data: d (Number of observations: 117) 
## Samples: 2 chains, each with iter = 6000; warmup = 3000; thin = 3;
##          total post-warmup samples = 2000
## 
## Group-Level Effects: 
## ~follow (Number of levels: 3) 
##                     Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## sd(Intercept)           0.19      0.27     0.01     0.94        460 1.01
## sd(shape_Intercept)     1.16      1.19     0.10     4.65        802 1.01
## 
## Population-Level Effects: 
##                 Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## Intercept           3.68      0.15     3.41     4.00        756 1.00
## shape_Intercept     2.12      0.77     0.46     3.81        750 1.00
## 
## Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample 
## is a crude measure of effective sample size, and Rhat is the potential 
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Group-level (random-effect) deviations from the population intercepts,
# per follow-up level, for both mu and shape.
ranef(f_gamma_re)
## $follow
## , , Intercept
## 
##       Estimate Est.Error       Q2.5     Q97.5
## F1 -0.05038701 0.1509464 -0.3971992 0.2301831
## F3  0.05449363 0.1483103 -0.2253610 0.3627002
## F4 -0.01704961 0.1465528 -0.3443097 0.2652144
## 
## , , shape_Intercept
## 
##      Estimate Est.Error      Q2.5    Q97.5
## F1 -0.0537577 0.7749785 -1.687076 1.588671
## F3 -0.3590714 0.7873882 -2.104507 1.176276
## F4  0.4380442 0.7980429 -1.181194 2.177701
# Print the fixed-effects model for comparison with the pooled estimates.
f_gamma_fe
##  Family: gamma 
##   Links: mu = log; shape = log 
## Formula: GPT ~ follow - 1 
##          shape ~ follow - 1
##    Data: d (Number of observations: 117) 
## Samples: 2 chains, each with iter = 6000; warmup = 3000; thin = 3;
##          total post-warmup samples = 2000
## 
## Population-Level Effects: 
##                Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## followF1           3.62      0.05     3.52     3.72       2010 1.00
## followF3           3.77      0.07     3.65     3.90       1921 1.00
## followF4           3.66      0.05     3.56     3.76       1997 1.00
## shape_followF1     2.07      0.20     1.65     2.45       2023 1.00
## shape_followF3     1.70      0.22     1.24     2.11       2093 1.00
## shape_followF4     2.68      0.28     2.08     3.17       1825 1.00
## 
## Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample 
## is a crude measure of effective sample size, and Rhat is the potential 
## scale reduction factor on split chains (at convergence, Rhat = 1).