Making a matrix from lsmeans contrast output - R

To create the data frame:
library(lme4)      # glmer()
library(lsmeans)   # lsmeans()
num <- sample(1:25, 20)
x <- data.frame("Day_eclosion" = num,
                "Developmental" = c("AP", "MA", "JU", "L"),
                "Replicate" = 1:5)
model <- glmer(Day_eclosion ~ Developmental + (1 | Replicate),
               family = "poisson", data = x)
I get this return from:
a <- lsmeans(model, pairwise~Developmental, adjust = "tukey")
a$contrasts
contrast estimate SE df z.ratio p.value
AP - JU 0.2051 0.0168 Inf 12.172 <.0001
AP - L 0.3009 0.0212 Inf 14.164 <.0001
AP - MA 0.3889 0.0209 Inf 18.631 <.0001
JU - L 0.0958 0.0182 Inf 5.265 <.0001
JU - MA 0.1839 0.0177 Inf 10.387 <.0001
L - MA 0.0881 0.0222 Inf 3.964 0.0004
I am looking for a simple way to turn this output (just p values) into:
AP MA JU L
AP - <.0001 <.0001 <.0001
MA - - <.0001 0.0004
JU - - - <.0001
L - - -
I have about 20 sets of these that I need to turn into tables, so the simpler and more general the better.
Bonus points if the output is tab-delimited, etc., so that I can easily paste into Word/Excel.
Thanks!

Here's a function that works...
pvmat = function(emm, ...) {
    emm = update(emm, by = NULL)    # need to work harder otherwise
    pv = test(pairs(emm, reverse = TRUE, ...))$p.value
    fmtpv = sprintf("%6.4f", pv)
    fmtpv[pv < 0.0001] = "<.0001"
    lbls = do.call(paste, emm@grid[emm@misc$pri.vars])
    n = length(lbls)
    mat = matrix("", nrow = n, ncol = n, dimnames = list(lbls, lbls))
    mat[upper.tri(mat)] = fmtpv
    idx = seq_len(n - 1)
    mat[idx, 1 + idx]    # trim off last row and 1st col
}
Illustration:
require(emmeans)
> warp.lm = lm(breaks ~ wool * tension, data = warpbreaks)
> warp.emm = emmeans(warp.lm, ~ wool * tension)
> warp.emm
wool tension emmean SE df lower.CL upper.CL
A L 44.6 3.65 48 37.2 51.9
B L 28.2 3.65 48 20.9 35.6
A M 24.0 3.65 48 16.7 31.3
B M 28.8 3.65 48 21.4 36.1
A H 24.6 3.65 48 17.2 31.9
B H 18.8 3.65 48 11.4 26.1
Confidence level used: 0.95
> pm = pvmat(warp.emm, adjust = "none")
> print(pm, quote=FALSE)
B L A M B M A H B H
A L 0.0027 0.0002 0.0036 0.0003 <.0001
B L 0.4170 0.9147 0.4805 0.0733
A M 0.3589 0.9147 0.3163
B M 0.4170 0.0584
A H 0.2682
Notes
As provided, this does not support by variables. Accordingly, the first line of the function disables them.
Using pairs(..., reverse = TRUE) generates the P values in the correct order needed later for upper.tri().
You can pass arguments to test() via the ... argument.
To create a tab-delimited version, use the clipr package:
clipr::write_clip(pm)
What you need is now in the clipboard and ready to paste into a spreadsheet.
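If you would rather not depend on clipr, a base-R alternative (a minimal sketch, assuming the pm matrix from above) is to write a tab-delimited file that Word/Excel can open, or that you can copy from:
## base-R alternative: tab-delimited file; col.names = NA keeps a blank corner cell for the row labels
write.table(pm, file = "pvalues.txt", sep = "\t", quote = FALSE, col.names = NA)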
Addendum
Answering this question inspired me to add a new function pwpm() to the emmeans package. It will appear in the next CRAN release, and is available now from the GitHub site. It displays means and differences as well as P values, but the user may select which to include.
> pwpm(warp.emm)
wool = A
L M H
L [44.6] 0.0007 0.0009
M 20.556 [24.0] 0.9936
H 20.000 -0.556 [24.6]
wool = B
L M H
L [28.2] 0.9936 0.1704
M -0.556 [28.8] 0.1389
H 9.444 10.000 [18.8]
Row and column labels: tension
Upper triangle: P values   adjust = "tukey"
Diagonal: [Estimates] (emmean)
Lower triangle: Comparisons (estimate)   earlier vs. later
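If you want only the P-value matrix from pwpm(), a minimal sketch (assuming the means and diffs arguments behave as documented) is:
> pwpm(warp.emm, means = FALSE, diffs = FALSE)   # suppress the diagonal estimates and lower-triangle differences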

Related

How to only test select pairwise comparisons using emmeans?

I have seen several examples of how it might be possible to select desired pairwise comparisons, but unfortunately I do not know how to apply that to my data.
Here is my abbreviated data set: https://www.dropbox.com/s/x9xjc9o0222rg0w/df.csv?dl=0
# FIXED effects: age and brain_area
df$age <- factor(df$age)
df$brain_area <- factor(df$brain_area)
# RANDOM effects: subject_ID and section
df$subject_ID <- factor(df$subject_ID)
df$section <- factor(df$section)
# dependent variable: DV
# ___________________ mixed TWO-way ANOVA
require(lme4)
require(lmerTest)
require(emmeans)
model = lmer(DV ~ age * brain_area + (1 | subject_ID), data = df)
anova(model) # significant interaction and both main effects
# ____________________ ALL pairwise comparisons
emmeans(model, pairwise~brain_area|age, adj='fdr')
# ____________________ I marked below comparisons that I would like to exclude (but keep all others)
$contrasts
age = old:
contrast estimate SE df t.ratio p.value
a - b 0.0412 0.0158 174 2.603 0.0125
a - c -0.0566 0.0158 174 -3.572 0.0007
a - control 0.3758 0.0158 174 23.736 <.0001 # exclude
a - d -0.0187 0.0158 174 -1.182 0.2387
b - c -0.0978 0.0158 174 -6.175 <.0001
b - control 0.3346 0.0158 174 21.132 <.0001 # exclude
b - d -0.0599 0.0158 174 -3.786 0.0004
c - control 0.4324 0.0158 174 27.308 <.0001
c - d 0.0378 0.0158 174 2.389 0.0199
control - d -0.3946 0.0158 174 -24.918 <.0001 # exclude
age = young:
contrast estimate SE df t.ratio p.value
a - b 0.0449 0.0147 174 3.063 0.0032
a - c -0.0455 0.0147 174 -3.105 0.0032
a - control 0.2594 0.0147 174 17.694 <.0001 # exclude
a - d 0.0202 0.0147 174 1.377 0.1702
b - c -0.0904 0.0147 174 -6.169 <.0001
b - control 0.2145 0.0147 174 14.631 <.0001 # exclude
b - d -0.0247 0.0147 174 -1.686 0.1040
c - control 0.3049 0.0147 174 20.799 <.0001
c - d 0.0657 0.0147 174 4.483 <.0001
control - d -0.2392 0.0147 174 -16.317 <.0001 # exclude
# ____________________ The line below seems to work BUT completely excludes 'control' level from factor 'brain_area'. I do not wish to completely exclude it...
emmeans(model, specs = pairwise ~ brain_area | age,
        at = list(brain_area = c("a", "b", "c", "d")), adj = 'fdr')
You need to provide the contrast coefficients manually. In this case, it's fairly simple to obtain all of them, then remove the ones you don't want; something like this:
EMM <- emmeans(model, ~ brain_area | age)
EMM # show the means
coef <- emmeans:::pairwise.emmc(levels(EMM)[["brain_area"]])
coef <- coef[-c(3, 6, 10)]
contrast(EMM, coef, adjust = "fdr")
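If positional indices like -c(3, 6, 10) feel fragile, here is a sketch of the same idea that selects by the contrast labels instead (assuming they follow the "level1 - level2" pattern shown in the output above):
coef <- emmeans:::pairwise.emmc(levels(EMM)[["brain_area"]])
names(coef)    # contrast labels, e.g. "a - b", "a - control", "control - d", ...
keep <- !(names(coef) %in% c("a - control", "b - control", "control - d"))
contrast(EMM, coef[keep], adjust = "fdr")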

lme4 deviation/treatment contrast coding with interactions in R - levels are missing

I have a mixed-effects model (with lme4) with a 2-way interaction term, each factor having multiple levels (4 each), and I would like to investigate their effects in reference to the grand mean. I present an example here using the Cars93 data set and omit the error term since it is not necessary for this example:
## shorten data frame for simplicity
df=Cars93[c(1:15),]
df=Cars93[is.element(Cars93$Make,c('Acura Integra', 'Audi 90','BMW 535i','Subaru Legacy')),]
df$Make=drop.levels(df$Make)
df$Model=drop.levels(df$Model)
## define contrasts (every factor has 4 levels)
contrasts(df$Make) = contr.treatment(4)
contrasts(df$Model) = contr.treatment(4)
## model
m1 <- lm(Price ~ Model*Make,data=df)
summary(m1)
As you can see, the first levels are omitted in the interaction term. I would like to have all 4 levels in the output, referenced to the grand mean (often referred to as deviation coding). These are the sources I looked at: https://marissabarlaz.github.io/portfolio/contrastcoding/#coding-schemes and How to change contrasts to compare with mean of all levels rather than reference level (R, lmer)?. The last reference does not cover interactions, though.
The simple answer is that what you want is not possible directly. You have to use a slightly different approach.
In a model with interactions, you want to use contrasts that sum to zero (i.e., are centered on the mean) rather than contrasts referenced to a specific level. Otherwise, the lower-order effects (i.e., main effects) are not main effects but simple effects (evaluated when the other factor is at its reference level). This is explained in more detail in my chapter on mixed models:
http://singmann.org/download/publications/singmann_kellen-introduction-mixed-models.pdf
To get what you want, you have to fit the model in a reasonable manner and then pass it to emmeans to compare against the intercept (i.e., the unweighted grand mean). This also works for interactions, as shown below (since your code did not work, I use warpbreaks).
afex::set_sum_contrasts() ## uses contr.sum globally
library("emmeans")
## model
m1 <- lm(breaks ~ wool * tension,data=warpbreaks)
car::Anova(m1, type = 3)
coef(m1)[1]
# (Intercept)
# 28.14815
## both CIs include grand mean:
emmeans(m1, "wool")
# wool emmean SE df lower.CL upper.CL
# A 31.0 2.11 48 26.8 35.3
# B 25.3 2.11 48 21.0 29.5
#
# Results are averaged over the levels of: tension
# Confidence level used: 0.95
## same using test
emmeans(m1, "wool", null = coef(m1)[1], infer = TRUE)
# wool emmean SE df lower.CL upper.CL null t.ratio p.value
# A 31.0 2.11 48 26.8 35.3 28.1 1.372 0.1764
# B 25.3 2.11 48 21.0 29.5 28.1 -1.372 0.1764
#
# Results are averaged over the levels of: tension
# Confidence level used: 0.95
emmeans(m1, "tension", null = coef(m1)[1], infer = TRUE)
# tension emmean SE df lower.CL upper.CL null t.ratio p.value
# L 36.4 2.58 48 31.2 41.6 28.1 3.196 0.0025
# M 26.4 2.58 48 21.2 31.6 28.1 -0.682 0.4984
# H 21.7 2.58 48 16.5 26.9 28.1 -2.514 0.0154
#
# Results are averaged over the levels of: wool
# Confidence level used: 0.95
emmeans(m1, c("tension", "wool"), null = coef(m1)[1], infer = TRUE)
# tension wool emmean SE df lower.CL upper.CL null t.ratio p.value
# L A 44.6 3.65 48 37.2 51.9 28.1 4.499 <.0001
# M A 24.0 3.65 48 16.7 31.3 28.1 -1.137 0.2610
# H A 24.6 3.65 48 17.2 31.9 28.1 -0.985 0.3295
# L B 28.2 3.65 48 20.9 35.6 28.1 0.020 0.9839
# M B 28.8 3.65 48 21.4 36.1 28.1 0.173 0.8636
# H B 18.8 3.65 48 11.4 26.1 28.1 -2.570 0.0133
#
# Confidence level used: 0.95
Note that for coef() you probably want to use fixef() for lme4 models.
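For example, here is a minimal sketch of the same idea for an lmer() fit, using the cake data shipped with lme4 (this model formula is just an illustration, not part of the original answer):
library(lme4)
afex::set_sum_contrasts()   # sum-to-zero contrasts, as above
m2 <- lmer(angle ~ recipe * temperature + (1 | recipe:replicate), data = cake)
emmeans(m2, "recipe", null = fixef(m2)[1], infer = TRUE)   # fixef(), not coef(), for lme4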

Estimating the standard deviation from mean and confidence intervals with a gamma distribution in R

I have the following problem I'd like to solve in R and apply to a larger workflow. I need to estimate the standard deviation from a gamma distribution where the mean and 95% confidence intervals are known.
state = c("group1", "group2", "group3")
mean = c(0.970, 0.694, 0.988)
lowers = c(0.527, 0.381, 0.536)
uppers = c(1.87, 1.37, 1.90)
df = data.frame(state = state, mean = mean, lower = lowers, upper = uppers)
Using Excel and the "solver" tool I can adjust the standard deviation to minimize the sum of squared differences between the target 2.5 (lowers) and 97.5 (uppers) percentiles of the distribution and the actuals. The challenge is that this needs to be scaled up to a rather large set of data and operationalized in my R data frame workflow. Any ideas how to solve this?
I think this problem is ultimately an optimization problem, dealing with one row of data at a time. Since you want to scale it, though, here's an approximation for finding the distribution's core parameters.
This process is not an optimization: it sweeps a defined range of possible k (shape) parameters and finds the shape/scale combination (given your mean) that most closely reproduces your upper and lower quantiles. You control the granularity of k, which is as close as you're going to get to having a tolerance (which would be appropriate for optimizations).
As such, this process will be imperfect. I hope it gets you a fast-enough process with good-enough accuracy.
I'm going to first demonstrate a process that operates one row at a time, as guesser1. I'll then expand it to do the same operation on an arbitrary number of mean, lower, and upper values.
Data with Known Answers
But first, I want to generate my own samples so that we have known "truth" for this guesser.
library(dplyr)
library(purrr)   # map2(), map_dbl()
set.seed(42)
n <- 4
randks <- tibble(
    k = sample(1:10, size = n, replace = TRUE),
    scale = sample(seq(0.5, 2.5, by = 0.5), size = n, replace = TRUE)
) %>%
    mutate(
        samp = map2(k, scale, ~ rgamma(1000, shape = .x, scale = .y)),
        theor_mu = k * scale,
        mu = map_dbl(samp, ~ mean(.x)),
        lwr = map_dbl(samp, ~ quantile(.x, 0.025)),
        upr = map_dbl(samp, ~ quantile(.x, 0.975))
    ) %>%
    select(-samp)
randks
# # A tibble: 4 x 6
# k scale theor_mu mu lwr upr
# <int> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 10 2 20 19.9 9.47 33.7
# 2 10 1.5 15 15.1 7.36 25.0
# 3 3 2 6 5.85 1.08 14.5
# 4 9 0.5 4.5 4.51 1.99 7.72
Guesser1
Single row at a time:
guesser1 <- function(mu, lwr, upr, k.max = 10, k.by = 0.01) {
    stopifnot(length(mu) == 1, length(lwr) == 1, length(upr) == 1)
    ks <- seq(0, k.max, by = k.by)[-1]
    L <- sapply(ks, function(k) qgamma(0.025, shape = k, scale = mu / k))
    U <- sapply(ks, function(k) qgamma(0.975, shape = k, scale = mu / k))
    dists <- sqrt((L - lwr)^2 + (U - upr)^2)
    ind <- which.min(dists)
    data.frame(
        k = ks[ind],
        scale = mu / ks[ind],
        dist = min(dists),
        lwr = L[ind],
        upr = U[ind]
    )
}
In action:
out1 <- do.call(rbind, Map(guesser1, randks$mu, randks$lwr, randks$upr))
cbind(subset(randks, select = -theor_mu), out1)
# k scale mu lwr upr k scale dist lwr upr
# 1 10 2.0 19.88 9.47 33.67 10.00 1.988 0.304 9.53 33.97
# 2 10 1.5 15.06 7.36 25.02 10.00 1.506 0.727 7.22 25.73
# 3 3 2.0 5.85 1.08 14.50 2.76 2.120 0.020 1.10 14.50
# 4 9 0.5 4.51 1.99 7.72 9.55 0.472 0.142 2.12 7.79
### \____ randks __________/ \____ guessed ____________/
There are certainly some differences, underscoring my original assertion that this is imperfect.
Guessers
All rows at once. This is a little more work in the function, since it deals with matrices instead of just vectors. Not a problem, I just wanted to prove it one-at-a-time before going for the gusto.
guessers <- function(mu, lwr, upr, k.max = 10, k.by = 0.01, include.size = FALSE) {
    stopifnot(length(mu) == length(lwr), length(mu) == length(upr))
    ks <- seq(0, k.max, by = k.by)[-1]
    # 'ks' dims: [mu]
    L <- outer(mu, ks, function(m, k) qgamma(0.025, shape = k, scale = m / k))
    U <- outer(mu, ks, function(m, k) qgamma(0.975, shape = k, scale = m / k))
    # 'L' & 'U' dims: [mu, ks]
    dists <- sqrt((L - lwr)^2 + (U - upr)^2)
    inds <- apply(dists, 1, which.min)
    mindists <- apply(dists, 1, min)
    i <- seq_along(mu)
    out <- data.frame(
        k = ks[inds],
        scale = mu / ks[inds],
        dist = mindists,
        lwr = L[cbind(i, inds)],
        upr = U[cbind(i, inds)]
    )
    if (include.size) {
        message("guessers memory: ",
                object.size(list(ks, L, U, dists, inds, mindists, i, out)))
    }
    out
}
In action:
outs <- guessers(randks$mu, randks$lwr, randks$upr, include.size = TRUE)
# guessers memory: 106400
cbind(subset(randks, select = -theor_mu), outs)
# k scale mu lwr upr k scale dist lwr upr
# 1 10 2.0 19.88 9.47 33.67 10.00 1.988 0.304 9.53 33.97
# 2 10 1.5 15.06 7.36 25.02 10.00 1.506 0.727 7.22 25.73
# 3 3 2.0 5.85 1.08 14.50 2.76 2.120 0.020 1.10 14.50
# 4 9 0.5 4.51 1.99 7.72 9.55 0.472 0.142 2.12 7.79
### \____ randks __________/ \____ guessed (same) _____/
(I included a memory message in there just to track how much this can scale. It's not bad now, and that argument should definitely not be used in production. FYI.)
Comparison
Using microbenchmark, we repeat each operation a few times and compare their run times.
microbenchmark::microbenchmark(
    g1 = Map(guesser1, randks$mu, randks$lwr, randks$upr),
    gs = guessers(randks$mu, randks$lwr, randks$upr)
)
# Unit: milliseconds
# expr min lq mean median uq max neval
# g1 27.3 28.8 33.9 29.7 33.0 131.1 100
# gs 13.3 13.6 14.4 13.8 14.6 20.3 100
Not too surprisingly, the all-at-once guessers is a bit faster. When will this not be the case? When the number of rows gets so big that memory consumption becomes a problem. I don't know where that threshold is.
Let's try the same thing, but increasing randks from 4 rows to 1000 and repeating the benchmark.
n <- 1000
# randks <- ...
nrow(randks)
# [1] 1000
microbenchmark::microbenchmark(
    g1 = Map(guesser1, randks$mu, randks$lwr, randks$upr),
    gs = guessers(randks$mu, randks$lwr, randks$upr),
    times = 10
)
# Unit: seconds
# expr min lq mean median uq max neval
# g1 8.50 8.99 9.59 9.31 9.64 11.72 10
# gs 3.35 3.44 3.61 3.63 3.77 3.93 10
So it's definitely faster. The median run-time for 1000 estimations is 3.63 seconds, so it finishes roughly 275 estimations per second.
guessers memory: 24066176
(about 24 MB) Actually, that doesn't seem bad at all. Decrease k.by to increase your accuracy, or increase k.by to speed this up.
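If you would rather do a true per-row optimization than the grid sweep, here is a hypothetical sketch with optimize() (my own variation, not part of the approach above); it minimizes the same squared-quantile distance over the shape parameter k:
## hypothetical alternative: optimize over k one row at a time
guess_opt <- function(mu, lwr, upr, k.max = 50) {
    obj <- function(k) {
        (qgamma(0.025, shape = k, scale = mu / k) - lwr)^2 +
            (qgamma(0.975, shape = k, scale = mu / k) - upr)^2
    }
    fit <- optimize(obj, interval = c(1e-3, k.max))
    data.frame(k = fit$minimum, scale = mu / fit$minimum, dist = sqrt(fit$objective))
}
do.call(rbind, Map(guess_opt, randks$mu, randks$lwr, randks$upr))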

How to get absolute difference estimate and confidence intervals from log(x+1) variable with emmeans

I have a mixed-effects model with a log(x+1) transformed response variable. The output from emmeans with type = "response" provides the mean and confidence intervals for both groups that I am comparing. However, what I want is the mean and CI of the difference between the groups (i.e. the estimate). emmeans only provides the ratio (with type = "response") or the log ratio (with type = "link"), and I am unsure how to change this into absolute values. If you run the model without the log(x+1) transformation, then emmeans provides the estimated difference and CI around this difference, not the ratios. How can I also do this when my response variable is log(x+1) transformed?
bmnameF.lme2 = lme(log(bm+1) ~ TorC*name, random = ~ TorC | site,
                   data = matched.cases3F, method = 'REML')
emmeans(bmnameF.lme2, pairwise ~ TorC, type = 'response') %>%
    confint(OmeanFHR[[2]]) %>% as.data.frame
emmeans.TorC  emmeans.emmean  emmeans.SE  emmeans.df  emmeans.lower.CL  emmeans.upper.CL
Managed             376.5484    98.66305          25          219.5120          645.9267
Open                111.0722    43.15374          25           49.8994          247.2381

contrasts.contrast  contrasts.estimate  contrasts.SE  contrasts.df  contrasts.lower.CL  contrasts.upper.CL
Managed - Open                3.390123      1.068689           217            1.821298             6.310297
Managed - Open                3.390123      1.068689           217            1.821298             6.310297
Let me show a different example so the results are reproducible to all viewers:
mod = lm(log(breaks+1) ~ wool*tension, data = warpbreaks)
As you see, with a log transformation, comparisons/contrasts are expressed as ratios by default. But this can be changed by specifying transform instead of type in the emmeans() call:
> emmeans(mod, pairwise ~ tension|wool, transform = "response")
$emmeans
wool = A:
tension response SE df lower.CL upper.CL
L 42.3 5.06 48 32.1 52.4
M 23.6 2.83 48 17.9 29.3
H 23.7 2.83 48 18.0 29.4
wool = B:
tension response SE df lower.CL upper.CL
L 27.7 3.32 48 21.0 34.4
M 28.4 3.40 48 21.6 35.3
H 19.3 2.31 48 14.6 23.9
Confidence level used: 0.95
$contrasts
wool = A:
contrast estimate SE df t.ratio p.value
L - M 18.6253 5.80 48 3.213 0.0065
L - H 18.5775 5.80 48 3.204 0.0067
M - H -0.0479 4.01 48 -0.012 0.9999
wool = B:
contrast estimate SE df t.ratio p.value
L - M -0.7180 4.75 48 -0.151 0.9875
L - H 8.4247 4.04 48 2.086 0.1035
M - H 9.1426 4.11 48 2.224 0.0772
P value adjustment: tukey method for comparing a family of 3 estimates
Or, you can do this later via the regrid() function:
emm1 = emmeans(mod, ~ tension | wool)
emm2 = regrid(emm1)
emm2 # estimates
pairs(emm2) # comparisons
regrid() creates a new emmGrid object where everything is already back-transformed, thus side-stepping the behavior that happens with contrasts of log-transformed results. (In the previous illustration, the transform argument just calls regrid after it constructs the reference grid.)
But there is another subtle thing going on: The transformation is auto-detected as log; the +1 part is ignored. Thus, the back-transformed estimates are all too large by 1. To get this right, you need to use the make.tran() function to create this generalization of the log transformation:
> emm3 = update(emmeans(mod, ~ tension | wool), tran = make.tran("genlog", 1))
> str(emm3)
'emmGrid' object with variables:
tension = L, M, H
wool = A, B
Transformation: “log(mu + 1)”
> regrid(emm3)
wool = A:
tension response SE df lower.CL upper.CL
L 41.3 5.06 48 31.1 51.4
M 22.6 2.83 48 16.9 28.3
H 22.7 2.83 48 17.0 28.4
wool = B:
tension response SE df lower.CL upper.CL
L 26.7 3.32 48 20.0 33.4
M 27.4 3.40 48 20.6 34.3
H 18.3 2.31 48 13.6 22.9
Confidence level used: 0.95
The comparisons will come out the same as shown earlier, because offsetting all the means by 1 doesn't affect the pairwise differences.
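As a quick check (assuming the emm3 object created above), the back-transformed comparisons can be obtained directly and should match the $contrasts shown earlier:
pairs(regrid(emm3))   # differences on the back-transformed scale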
See vignette("transformations", "emmeans") or https://cran.r-project.org/web/packages/emmeans/vignettes/transformations.html for more details.

Specifying prior distribution on matrix in rstan

I am having trouble getting a Bayesian mixed-effects model to yield stationary and well-mixed chains. I have created my own data so I know what parameters should be retrieved by the model. Unfortunately, because the effective sample sizes are so low and the Rhat values so high, the parameter estimates are complete nonsense.
The data is designed so there are 60 subjects, split into three groups (g1, g2, g3) of 20 subjects each. Each subject is exposed to 3 conditions (cond1, cond2, cond3). I designed the data so there is no difference among the groups, but there are differences among the conditions, with cond1 scoring 100 on average, cond2 scoring 75 on average, and cond3 scoring 125.
df <- data.frame(id = factor(rep(1:60, 3)),
                 group = factor(rep(c("g1", "g2", "g3"), each = 20, length.out = 180)),
                 condition = factor(rep(c("cond1", "cond2", "cond3"), each = 60)),
                 score = c(ceiling(rnorm(60, 100, 15)),
                           ceiling(rnorm(60, 75, 15)),
                           ceiling(rnorm(60, 125, 15))))
Here are the descriptives
library(dplyr)
df %>% group_by(group, condition) %>% summarise(m = mean(score), sd = sd(score))
# group condition m sd
# <fct> <fct> <dbl> <dbl>
# 1 g1 cond1 108 12.4
# 2 g1 cond2 79.4 13.1
# 3 g1 cond3 128 11.5
# 4 g2 cond1 105 15.5
# 5 g2 cond2 71.6 10.6
# 6 g2 cond3 127 17.7
# 7 g3 cond1 106 13.3
# 8 g3 cond2 75.8 17.6
# 9 g3 cond3 124 14.5
Everything looks to be correct, the differences between conditions are preserved nicely across groups.
Now for the model. The model I am running has a grand mean, a parameter for group, a parameter for condition, a parameter for the group x condition interaction, and a subject parameter.
Here is the data list
##### Step 1: put data into a list
mixList <- list(N = nrow(df),
                nSubj = nlevels(df$id),
                nGroup = nlevels(df$group),
                nCond = nlevels(df$condition),
                nGxC = nlevels(df$group) * nlevels(df$condition),
                sIndex = as.integer(df$id),
                gIndex = as.integer(df$group),
                cIndex = as.integer(df$condition),
                score = df$score)
Now to build the model in rstan, saving the string as a .stan file using the cat() function
###### Step 2: build model
cat("
data{
int<lower=1> N;
int<lower=1> nSubj;
int<lower=1> nGroup;
int<lower=1> nCond;
int<lower=1,upper=nSubj> sIndex[N];
int<lower=1,upper=nGroup> gIndex[N];
int<lower=1,upper=nCond> cIndex[N];
real score[N];
}
parameters{
real a0;
vector[nGroup] bGroup;
vector[nCond] bCond;
vector[nSubj] bSubj;
matrix[nGroup,nCond] bGxC;
real<lower=0> sigma_s;
real<lower=0> sigma_g;
real<lower=0> sigma_c;
real<lower=0> sigma_gc;
real<lower=0> sigma;
}
model{
vector[N] mu;
bCond ~ normal(100, sigma_c);
bGroup ~ normal(100, sigma_g);
bSubj ~ normal(0, sigma_s);
sigma ~ cauchy(0,2)T[0,];
for (i in 1:N){
mu[i] = a0 + bGroup[gIndex[i]] + bCond[cIndex[i]] + bSubj[sIndex[i]] + bGxC[gIndex[i],cIndex[i]];
}
score ~ normal(mu, sigma);
}
", file = "mix.stan")
Next is to generate the chains in rstan
##### Step 3: generate the chains
mix <- stan(file = "mix.stan",
            data = mixList,
            iter = 2e3,
            warmup = 1e3,
            cores = 1,
            chains = 1)
And here is the output
###### Step 4: Diagnostics
print(mix, pars = c("a0", "bGroup", "bCond", "bGxC", "sigma"), probs = c(.025,.975))
# mean se_mean sd 2.5% 97.5% n_eff Rhat
# a0 -1917.21 776.69 2222.64 -5305.69 1918.58 8 1.02
# bGroup[1] 2368.36 2083.48 3819.06 -2784.04 9680.78 3 1.54
# bGroup[2] 7994.87 446.06 1506.31 4511.22 10611.46 11 1.00
# bGroup[3] 7020.78 2464.68 4376.83 81.18 14699.90 3 1.91
# bCond[1] -3887.06 906.99 1883.45 -7681.24 -247.48 4 1.60
# bCond[2] 4588.50 676.28 1941.92 -594.56 7266.09 8 1.10
# bCond[3] 73.91 1970.28 3584.74 -5386.96 5585.99 3 2.13
# bGxC[1,1] 3544.02 799.91 1819.18 -1067.27 6327.68 5 1.26
# bGxC[1,2] -4960.08 1942.57 3137.33 -10078.84 317.07 3 2.66
# bGxC[1,3] -396.35 418.34 1276.44 -2865.39 2543.45 9 1.42
# bGxC[2,1] -2085.90 1231.36 2439.58 -5769.81 3689.38 4 1.46
# bGxC[2,2] -10594.89 1206.58 2560.42 -14767.50 -5074.33 5 1.02
# bGxC[2,3] -6024.75 2417.43 4407.09 -12002.87 4651.14 3 1.71
# bGxC[3,1] -1111.81 1273.66 2853.08 -4843.38 5572.87 5 1.48
# bGxC[3,2] -9616.85 2314.56 4020.02 -15775.40 -4262.64 3 2.98
# bGxC[3,3] -5054.27 828.77 2245.68 -8666.01 -321.74 7 1.00
# sigma 13.81 0.14 0.74 12.36 15.17 27 1.00
The low number of effective samples and high Rhats tell me I am doing something terribly wrong here, but what?
Is it not specifying a prior on bGxC?
How does one specify a prior on a matrix?
Matrices are inefficient in Stan; it's better to use an array of vectors:
vector[nCond] bGxC[nGroup];
And to set a prior:
for (i in 1:nGroup) {
  bGxC[i] ~ normal(0, sigma_gc);
}
And:
for (i in 1:N) {
  mu[i] = a0 + bGroup[gIndex[i]] + bCond[cIndex[i]] + bSubj[sIndex[i]] + bGxC[gIndex[i]][cIndex[i]];
}
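Putting these pieces together, here is a minimal sketch of the full revised model, written with the same cat() pattern as above (the file name mix2.stan is just a placeholder; only the bGxC declaration, its prior, and the indexing differ from the original):
cat("
data{
  int<lower=1> N;
  int<lower=1> nSubj;
  int<lower=1> nGroup;
  int<lower=1> nCond;
  int<lower=1,upper=nSubj> sIndex[N];
  int<lower=1,upper=nGroup> gIndex[N];
  int<lower=1,upper=nCond> cIndex[N];
  real score[N];
}
parameters{
  real a0;
  vector[nGroup] bGroup;
  vector[nCond] bCond;
  vector[nSubj] bSubj;
  vector[nCond] bGxC[nGroup];       // array of vectors instead of a matrix
  real<lower=0> sigma_s;
  real<lower=0> sigma_g;
  real<lower=0> sigma_c;
  real<lower=0> sigma_gc;
  real<lower=0> sigma;
}
model{
  vector[N] mu;
  bCond ~ normal(100, sigma_c);
  bGroup ~ normal(100, sigma_g);
  bSubj ~ normal(0, sigma_s);
  for (i in 1:nGroup){
    bGxC[i] ~ normal(0, sigma_gc);  // explicit prior on the interaction parameters
  }
  sigma ~ cauchy(0,2)T[0,];
  for (i in 1:N){
    mu[i] = a0 + bGroup[gIndex[i]] + bCond[cIndex[i]] + bSubj[sIndex[i]] + bGxC[gIndex[i]][cIndex[i]];
  }
  score ~ normal(mu, sigma);
}
", file = "mix2.stan")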

Resources