I have run a multiple imputation (m = 45, 10 iterations) using the mice package and want to calculate Cronbach's alpha for a number of ordinal scales in the data. Is there a function in R that could assist me in calculating the alpha coefficient across the imputed datasets in a manner that would satisfy Rubin's rules for pooling estimates?
We can use pool.scalar() from the mice package, which performs pooling of univariate estimates according to Rubin's rules.
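For reference, for a scalar estimand Rubin's rules combine the m completed-data estimates Q[i] and their within-imputation variances U[i] as follows (in R notation):

Qbar <- mean(Q)               # pooled point estimate
Ubar <- mean(U)               # average within-imputation variance
B    <- var(Q)                # between-imputation variance
T    <- Ubar + (1 + 1/m) * B  # total variance of the pooled estimate

pool.scalar() returns these components as qbar, ubar, b, and t.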
Since you have not provided a reproducible example yourself, I will provide one.
set.seed(123)
# sample survey responses
df <- data.frame(
  x1 = c(1,2,2,3,2,2,3,3,2,3,
         1,2,2,3,2,2,3,3,2,3,
         1,2,2,3,2,2,3,3,2,3),
  x2 = c(1,1,1,2,3,3,2,3,3,3,
         1,1,1,2,3,3,2,3,3,3,
         1,2,2,3,2,2,3,3,2,3),
  x3 = c(1,1,2,1,2,3,3,3,2,3,
         1,1,2,1,2,3,3,3,2,3,
         1,2,2,3,2,2,3,3,2,3)
)
# function to generate missing values column-wise (MCAR)
create_missings <- function(data, prob) {
  x <- replicate(ncol(data), rbinom(nrow(data), 1, prob))
  for (k in 1:ncol(data)) {
    data[, k] <- ifelse(x[, k] == 1, NA, data[, k])
  }
  data
}
df <- create_missings(df, prob = 0.2)
# multiple imputation ----------------------------------
library(mice)
imp <- mice(df, m = 10, maxit = 20)
# extract the completed data in long format
implong <- complete(imp, 'long')
We need a function to compute Cronbach's alpha and to obtain an estimate of the standard error of alpha, which can be used in a call to pool.scalar() later on. Since no closed-form estimator of the standard error of alpha is readily available here, we use a non-parametric bootstrap to estimate it.
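For reference, the estimator implemented below is the usual sample Cronbach's alpha,

alpha = (p / (p - 1)) * (1 - sum of the item variances / variance of the total score),

where p is the number of items and the total score is the row sum over the items.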
The function cronbach_fun() takes the following arguments:
list_compl_data: a data frame containing one completed data set from a mids object (here, one .imp block of the long-format data).
boot: a logical indicating whether a non-parametric bootstrap should be conducted.
B: an integer specifying the number of bootstrap samples to be taken.
ci: a logical indicating whether a percentile bootstrap confidence interval around alpha should be estimated (requires boot = TRUE).
cronbach_fun <- function(list_compl_data, boot = TRUE, B = 1e4, ci = FALSE) {
  n <- nrow(list_compl_data); p <- ncol(list_compl_data)
  total_variance <- var(rowSums(list_compl_data))
  item_variance <- sum(apply(list_compl_data, 2, sd)^2)
  alpha <- (p/(p - 1)) * (1 - (item_variance/total_variance))
  out <- list(alpha = alpha)
  boot_alpha <- numeric(B)
  if (boot) {
    for (i in seq_len(B)) {
      boot_dat <- list_compl_data[sample(seq_len(n), replace = TRUE), ]
      total_variance <- var(rowSums(boot_dat))
      item_variance <- sum(apply(boot_dat, 2, sd)^2)
      boot_alpha[i] <- (p/(p - 1)) * (1 - (item_variance/total_variance))
    }
    out$var <- var(boot_alpha)
  }
  if (ci) {  # percentile bootstrap CI; only meaningful when boot = TRUE
    out$ci <- quantile(boot_alpha, c(.025, .975))
  }
  return(out)
}
Now that we have our function to do the 'heavy lifting', we can run it on all m completed data sets, after which we can obtain Q and U (which are required for the pooling of the estimates). Consult ?pool.scalar for more information.
m <- length(unique(implong$.imp))
boot_alpha <- rep(list(NA), m)
for (i in seq_len(m)) {
  set.seed(i)  # fix the random number generator
  sub <- implong[implong$.imp == i, -c(1, 2)]
  boot_alpha[[i]] <- cronbach_fun(sub)
}
# obtain Q and U (see ?pool.scalar)
Q <- sapply(boot_alpha, function(x) x$alpha)
U <- sapply(boot_alpha, function(x) x$var)
# pooled estimates
pool_estimates <- function(x) {
  out <- c(
    alpha = x$qbar,
    lwr = x$qbar - qt(0.975, x$df) * sqrt(x$t),
    upr = x$qbar + qt(0.975, x$df) * sqrt(x$t)
  )
  return(out)
}
Output
# Pooled estimate of alpha (95% CI)
> pool_estimates(pool.scalar(Q, U))
    alpha       lwr       upr
0.7809977 0.5776041 0.9843913
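Since the question mentions several ordinal scales, the same machinery can be applied per scale. A minimal sketch (the scale names and item sets below are hypothetical placeholders):

# hypothetical scale definitions; replace with your own item sets
scales <- list(scale_A = c("x1", "x2"), scale_B = c("x2", "x3"))
sapply(scales, function(items) {
  res <- lapply(seq_len(m), function(i) {
    set.seed(i)
    cronbach_fun(implong[implong$.imp == i, items])
  })
  pool_estimates(pool.scalar(sapply(res, `[[`, "alpha"),
                             sapply(res, `[[`, "var")))
})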
I am trying to figure out how to sample from a custom density in rJAGS but am running into issues. Having searched the site, I saw that there is a zeros (or ones) trick that can be employed based on BUGS code, but I am having a hard time with its implementation in rJAGS. I think I am doing it correctly but keep getting the following error:
Error in jags.model(model1.spec, data = list(x = x, N = N), n.chains = 4, :
Error in node dpois(lambda)
Length mismatch in Node::setValue
Here is my rJAGS code for reproducibility:
library(rjags)
set.seed(4)
N = 100
x = rexp(N, 3)
L = quantile(x, prob = 1) # Censoring point
censor = ifelse(x <= L, 1, 0) # Censoring indicator
x[censor == 1] <- L
model1.string <-"
model {
for (i in 1:N){
x[i] ~ dpois(lambda)
lambda <- -N*log(1-exp(-(1/mu)))
}
mu ~ dlnorm(mup, taup)
mup <- log(.0001)
taup <- 1/49
R <- 1 - exp(-(1/mu) * .0001)
}
"
model1.spec <- textConnection(model1.string)
jags <- jags.model(model1.spec,
                   data = list('x' = x,
                               'N' = N),
                   n.chains = 4,
                   n.adapt = 100)
Here, my negative log likelihood of the density I am interested in is -N*log(1-exp(-(1/mu))). Is there an obvious mistake in the code?
Using the zeros trick, the variable on the left-hand side of the dpois() relationship has to be an N-length vector of zeros, and the data x should enter the likelihood through the Poisson mean.
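The trick works because a Poisson variable observed at zero contributes exp(-phi[i]) to the likelihood, so setting

phi[i] = -log(L[i]) + C

where L[i] is the likelihood contribution of observation i, makes each zero contribute L[i] up to the constant factor exp(-C); the constant C is only there to keep the Poisson mean positive. Here is an example using the normal distribution.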
set.seed(519)
N <- 100
x <- rnorm(100, mean=3)
z <- rep(0, N)
C <- 10
pi <- pi
model1.string <-"
model {
for (i in 1:N){
lambda[i] <- pow(2*pi*sig2, -0.5) * exp(-.5*pow(x[i]-mu, 2)/sig2)
loglam[i] <- log(lambda[i]) + C
z[i] ~ dpois(loglam[i])
}
mu ~ dnorm(0,.1)
tau ~ dgamma(1,.1)
sig2 <- pow(tau, -1)
sumLL <- sum(log(lambda[]))
}
"
model1.spec <- textConnection(model1.string)
set.seed(519)
jags <- jags.model(model1.spec,
                   data = list('x' = x,
                               'z' = z,
                               'N' = N,
                               'C' = C,
                               'pi' = pi),
                   inits = function() list(tau = 1, mu = 3),
                   n.chains = 4,
                   n.adapt = 100)
samps1 <- coda.samples(jags, c("mu", "sig2"), n.iter=1000)
summary(samps1)
Iterations = 101:1100
Thinning interval = 1
Number of chains = 4
Sample size per chain = 1000

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

       Mean     SD Naive SE Time-series SE
mu    4.493 2.1566 0.034100         0.1821
sig2  1.490 0.5635 0.008909         0.1144

2. Quantiles for each variable:

       2.5%   25%   50%   75% 97.5%
mu   0.6709 3.541 5.218 5.993 7.197
sig2 0.7909 0.999 1.357 1.850 2.779
For my thesis I have to fit some GLMs with MLEs that R does not have built in. I was doing fine for the models with closed-form likelihoods, but now I have to use the Gaussian CDF, so I decided to fit a simple probit model.
This is the code:
Data:
set.seed(123)
x <- matrix(rnorm(50, 2, 4), 50, 1)
m <- matrix(runif(50, 2, 4), 50, 1)
t <- matrix(rpois(50, 0.5), 50, 1)
z <- (1 + exp(-((x - mean(x)/sd(x)))))^-1 + runif(50)
y <- ifelse(z < 1.186228, 0, 1)
data1 <- as.data.frame(cbind(y, x, m, t))
myprobit <- function(formula, data) {
  mf <- model.frame(formula, data)
  y <- model.response(mf, "numeric")
  X <- model.matrix(formula, data = data)
  if (any(is.na(cbind(y, X))))
    stop("Some data are missing.")
  loglik <- function(betas, X, y, sigma) {  # log-likelihood
    p <- length(betas)
    beta <- betas[-p]
    eta <- X %*% beta
    sigma <- 1  # because of identification, sigma must be equal to 1
    G <- pnorm(y, mean = eta, sd = sigma)
    sum(y*log(G) + (1 - y)*log(1 - G))
  }
  ls.reg <- lm(y ~ X - 1)  # starting values using OLS; X already contains a constant
  start <- coef(ls.reg)
  fit <- optim(start, loglik, X = X, y = y, control = list(fnscale = -1),
               method = "BFGS", hessian = TRUE)  # optimize
  if (fit$convergence > 0) {  # verify convergence
    print(fit)
    stop("optim failed to converge!")
  }
  return(fit)
}
myprobit(y ~ x + m + t,data = data1)
And I get: Error in X %*% beta : non-conformable arguments. If I change start <- coef(ls.reg) to start <- c(coef(ls.reg), 1), I get wrong estimates compared with:
probit <- glm(y ~ x + m + t,data = data1 , family = binomial(link = "probit"))
What am I doing wrong?
Is it possible to correctly fit this model using pnorm? If not, what algorithm should I use to approximate the Gaussian CDF? Thanks!
The line of code responsible for your error is the following:
eta <- X %*% beta
Note that "%*%" is the matrix multiplication operator. By reproducing your code I noticed that X is a matrix with 50 rows and 4 columns. Hence, for matrix multiplication to be possible your "beta" needs to have 4 rows. But when you run "betas[-p]" you subset the betas vector by removing its last element, leaving only three elements instead of the four you need for matrix multiplication to be defined. If you remove [-p] the code will work.
I'm trying to estimate a linear model with a log-normally distributed error term. I already have working code for a linear model with normally distributed errors:
library(Ecdat)
library(assertthat)
library(maxLik)
# Load the data
data(Wages1)
# Check what R says
summary(lm(wage ~ school + exper + sex, data = Wages1))
# Use maxLik from package maxLik
# The likelihood function
my_log_lik_pos <- function(theta, data){
  y <- data[, 1]
  x <- data[, -1]
  beta <- head(theta, -1)
  sigma <- tail(theta, 1)
  xb <- x %*% beta
  are_equal(dim(xb), c(nrow(my_data), 1))
  return(sum(log(dnorm(y, mean = xb, sd = sigma))))
}
# Bind the data
my_data <- cbind(Wages1$wage, 1, Wages1$school, Wages1$exper, Wages1$sex)
my_problem <- maxLik(my_log_lik_pos, data = my_data,
start = rep(1,5), method = "BFGS")
summary(my_problem)
I get approximately the same results. Now I try to do the same, but using the log-normal likelihood. For this, I have to first simulate some data:
true_beta <- c(0.1, 0.2, 0.3, 0.4, 0.5)
ys <- my_data[, -1] %*% head(true_beta, -1) +
rlnorm(nrow(my_data), 0, tail(true_beta, 1))
my_data_2 <- cbind(ys, my_data[, -1])
And the log-likelihood function:
my_log_lik_lognorm <- function(theta, data){
  y <- data[, 1]
  x <- data[, -1]
  beta <- head(theta, -1)
  sigma <- tail(theta, 1)
  xb <- x %*% beta
  are_equal(dim(xb), c(nrow(data), 1))
  return(sum(log(dlnorm(y, mean = xb, sd = sigma))))
}
my_problem2 <- maxLik(my_log_lik_lognorm, data = my_data_2,
start = rep(0.2,5), method = "BFGS")
summary(my_problem2)
The estimated parameters should be around the values of true_beta, but for some reason I find completely different values. I tried different methods and different starting values, but to no avail. I'm sure that I'm missing something obvious, but I don't see what.
Am I right to assume that the log-likelihood of the log-normal distribution is:
sum(log(dlnorm(y, mean = ..., sd = ...)))
Unless I'm mistaken, this is the definition of the log-likelihood (the sum of the logs of the densities).
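For reference, the log-normal density with log-scale parameters mu and sigma is

f(y) = 1/(y * sigma * sqrt(2*pi)) * exp(-(log(y) - mu)^2 / (2*sigma^2)), for y > 0,

so dlnorm parameterizes the distribution of log(y), not of y itself. (Its actual arguments are meanlog and sdlog; the mean = and sd = in the code above resolve to them by partial matching.)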
I found the issue: it seems the problem is not my log-likelihood function. When I try to estimate the model with glm:
summary(glm(ys ~ school + exper + sex, family=gaussian(link="log"), data=Wages1))
I get the same result as with maxLik and my log-likelihood. It would seem the problem comes from how I simulated the data:
ys <- my_data[, -1] %*% head(true_beta, -1) +
rlnorm(nrow(my_data), 0, tail(true_beta, 1))
The original code added log-normal noise to the linear predictor, whereas dlnorm assumes that log(y) itself is normal with mean xb. The correct way to simulate the data:
ys <- rlnorm(nrow(my_data), my_data[, -1] %*% head(true_beta, -1), tail(true_beta, 1))
Now everything works!
I am trying to create a linear mixed model (LMM) that allows for spatial correlation between points (I have lat/long for each point). I would like the spatial correlation to be based upon the great-circle distance between points.
The package ramps includes a correlation structure that computes the 'haversine' distance, although I am having trouble implementing it. I have previously used other correlation structures (corGaus, corExp) without any difficulties. I am assuming corRGaus with the 'haversine' metric can be implemented in the same way.
I am able to successfully create an LMM with spatial correlation calculated on a planar distance using the lme function.
I am also able to create a linear model (not mixed) with spatial correlation calculated using great-circle distance, although there are errors with the correlation structure when using the gls command.
When trying to use the gls command for a linear model with great-circle distance, I get the following errors:
x = runif(20, 1,50)
y = runif(20, 1,50)
gls(x ~ y, cor = corRGaus(form = ~ x + y))
Generalized least squares fit by REML
Model: x ~ y
Data: NULL
Log-restricted-likelihood: -78.44925
Coefficients:
(Intercept) y
24.762656602 0.007822469
Correlation Structure: corRGaus
Formula: ~x + y
Parameter estimate(s):
Error in attr(object, "fixed") && unconstrained :
invalid 'x' type in 'x && y'
When I increase the size of the data there are memory allocation errors (still a very small dataset):
x = runif(100, 1, 50)
y = runif(100, 1, 50)
lat = runif(100, -90, 90)
long = runif(100, -180, 180)
gls(x ~ y, cor = corRGaus(form = ~ x + y))
Error in glsEstimate(glsSt, control = glsEstControl) :
'Calloc' could not allocate memory (18446744073709551616 of 8 bytes)
When trying to run a mixed model using the lme command and corRGaus from the ramps package, I get the following:
x = runif(100, 1, 50)
y = runif(100, 1, 50)
LC = c(rep(1, 50) , rep(2, 50))
lat = runif(100, -90, 90)
long = runif(100, -180, 180)
lme(x ~ y,random = ~ y|LC, cor = corRGaus(form = ~ long + lat))
Error in `coef<-.corSpatial`(`*tmp*`, value = value[parMap[, i]]) :
NA/NaN/Inf in foreign function call (arg 1)
In addition: Warning messages:
1: In nlminb(c(coef(lmeSt)), function(lmePars) -logLik(lmeSt, lmePars), :
NA/NaN function evaluation
2: In nlminb(c(coef(lmeSt)), function(lmePars) -logLik(lmeSt, lmePars), :
NA/NaN function evaluation
I am unsure about how to proceed with this method. The haversine function is what I want to use in my models, but I am having trouble implementing it. There are very few questions anywhere about the ramps package, and I have seen very few implementations. Any help would be greatly appreciated.
I have previously attempted to modify the nlme package and was unable to do so. I posted a question about this, where I was recommended to use the ramps package.
I am using R 3.0.0 on a Windows 8 computer.
OK, here is an option that implements various spatial correlation structures in gls/nlme with haversine distance.
The various corSpatial-type classes already have machinery in place to construct a correlation matrix from spatial covariates, given a distance metric. Unfortunately, dist does not implement haversine distance, and dist is the function called by corSpatial to compute a distance matrix from the spatial covariates.
The distance matrix computations are performed in getCovariate.corSpatial. A modified form of this method will pass the appropriate distance to other methods, and the majority of methods will not need to be modified.
Here, I create a new corStruct class, corHaversine, and modify only getCovariate and one other method (Dim) that determines which correlation function is used. Those methods that do not need modification are copied from the equivalent corSpatial methods. The (new) mimic argument in corHaversine takes the name of the spatial class with the correlation function of interest: by default, it is set to "corSpher".
Caveat: beyond ensuring that this code runs for spherical and Gaussian correlation functions, I haven't really done a lot of checking.
#### corHaversine - spatial correlation with haversine distance

# Calculates the geodesic distance between two points, specified by radian
# longitude (x) and latitude (y), using the haversine formula; output in km.
haversine <- function(x0, x1, y0, y1) {
  a <- sin((y1 - y0)/2)^2 + cos(y0) * cos(y1) * sin((x1 - x0)/2)^2
  v <- 2 * asin(min(1, sqrt(a)))
  6371 * v  # mean Earth radius in km
}

# Computes the geodesic haversine distance matrix given a two-column matrix of
# longitude/latitude; input is assumed to be in decimal degrees if radians = FALSE.
# Note that fields::rdist.earth is more efficient.
haversineDist <- function(xy, radians = FALSE) {
  if (ncol(xy) > 2) stop("Input must have two columns (longitude and latitude)")
  if (!radians) xy <- xy * pi/180
  hMat <- matrix(NA, ncol = nrow(xy), nrow = nrow(xy))
  for (i in 1:nrow(xy)) {
    for (j in i:nrow(xy)) {
      hMat[j, i] <- haversine(xy[i, 1], xy[j, 1], xy[i, 2], xy[j, 2])
    }
  }
  as.dist(hMat)
}
## for most methods, machinery from corSpatial will work without modification
Initialize.corHaversine <- nlme:::Initialize.corSpatial
recalc.corHaversine <- nlme:::recalc.corSpatial
Variogram.corHaversine <- nlme:::Variogram.corSpatial
corFactor.corHaversine <- nlme:::corFactor.corSpatial
corMatrix.corHaversine <- nlme:::corMatrix.corSpatial
coef.corHaversine <- nlme:::coef.corSpatial
"coef<-.corHaversine" <- nlme:::"coef<-.corSpatial"
## Constructor for the corHaversine class
corHaversine <- function(value = numeric(0), form = ~ 1, mimic = "corSpher",
                         nugget = FALSE, fixed = FALSE) {
  spClass <- "corHaversine"
  attr(value, "formula") <- form
  attr(value, "nugget") <- nugget
  attr(value, "fixed") <- fixed
  attr(value, "function") <- mimic
  class(value) <- c(spClass, "corStruct")
  value
}  # end corHaversine constructor
environment(corHaversine) <- asNamespace("nlme")
Dim.corHaversine <- function(object, groups, ...) {
  if (missing(groups)) return(attr(object, "Dim"))
  val <- Dim.corStruct(object, groups)
  val[["start"]] <- c(0, cumsum(val[["len"]] * (val[["len"]] - 1)/2)[-val[["M"]]])
  ## use the third component of the Dim list for spClass
  names(val)[3] <- "spClass"
  val[[3]] <- match(attr(object, "function"),
                    c("corSpher", "corExp", "corGaus", "corLin", "corRatio"), 0)
  val
}
environment(Dim.corHaversine) <- asNamespace("nlme")
## getCovariate method for corHaversine class
getCovariate.corHaversine <- function(object, form = formula(object), data) {
  if (is.null(covar <- attr(object, "covariate"))) {  # if object lacks a covariate attribute
    if (missing(data)) {  # if object lacks data
      stop("need data to calculate covariate")
    }
    covForm <- getCovariateFormula(form)
    if (length(all.vars(covForm)) > 0) {  # if covariates are present
      if (attr(terms(covForm), "intercept") == 1) {  # if formula includes an intercept
        covForm <- eval(parse(text = paste("~", deparse(covForm[[2]]), "-1", sep = "")))  # remove intercept
      }
      # can only take covariates with the correct names
      if (length(all.vars(covForm)) > 2) stop("corHaversine can only take two covariates, 'lon' and 'lat'")
      if (!all(all.vars(covForm) %in% c("lon", "lat"))) stop("covariates must be named 'lon' and 'lat'")
      covar <- as.data.frame(unclass(model.matrix(covForm,
                 model.frame(covForm, data, drop.unused.levels = TRUE))))
      covar <- covar[, order(colnames(covar), decreasing = TRUE)]  # order as lon ... lat
    }
    else {
      covar <- NULL
    }
    if (!is.null(getGroupsFormula(form))) {  # if groups are in the formula, extract covar by groups
      grps <- getGroups(object, data = data)
      if (is.null(covar)) {
        covar <- lapply(split(grps, grps), function(x) as.vector(dist(1:length(x))))  # filler?
      }
      else {
        giveDist <- function(el) {
          el <- as.matrix(el)
          if (nrow(el) > 1) as.vector(haversineDist(el))
          else numeric(0)
        }
        covar <- lapply(split(covar, grps), giveDist)
      }
      covar <- covar[sapply(covar, length) > 0]  # drop 1-observation groups
    }
    else {  # if no groups in the formula, extract distances directly
      if (is.null(covar)) {
        covar <- as.vector(dist(1:nrow(data)))
      }
      else {
        covar <- as.vector(haversineDist(as.matrix(covar)))
      }
    }
    if (any(unlist(covar) == 0)) {  # check that no distances are zero
      stop("cannot have zero distances in \"corHaversine\"")
    }
  }
  covar
}  # end method getCovariate
environment(getCovariate.corHaversine) <- asNamespace("nlme")
To test that this runs, given a range parameter of 1000:
## test that corHaversine runs with spherical correlation (not testing that it WORKS ...)
library(MASS)
set.seed(1001)
sample_data <- data.frame(lon = -121:-22, lat = -50:49)
ran <- 1000 # 'range' parameter for spherical correlation
dist_matrix <- as.matrix(haversineDist(sample_data)) # haversine distance matrix
# set up correlation matrix of response
corr_matrix <- 1-1.5*(dist_matrix/ran)+0.5*(dist_matrix/ran)^3
corr_matrix[dist_matrix > ran] = 0
diag(corr_matrix) <- 1
# set up covariance matrix of response
sigma <- 2 # residual standard deviation
cov_matrix <- (diag(100)*sigma) %*% corr_matrix %*% (diag(100)*sigma) # correlated response
# generate response
sample_data$y <- mvrnorm(1, mu = rep(0, 100), Sigma = cov_matrix)
# fit model
gls_haversine <- gls(y ~ 1, correlation = corHaversine(form=~lon+lat, mimic="corSpher"), data = sample_data)
summary(gls_haversine)
# Correlation Structure: corHaversine
# Formula: ~lon + lat
# Parameter estimate(s):
# range
# 1426.818
#
# Coefficients:
# Value Std.Error t-value p-value
# (Intercept) 0.9397666 0.7471089 1.257871 0.2114
#
# Standardized residuals:
# Min Q1 Med Q3 Max
# -2.1467696 -0.4140958 0.1376988 0.5484481 1.9240042
#
# Residual standard error: 2.735971
# Degrees of freedom: 100 total; 99 residual
Testing that it runs with Gaussian correlation, with range parameter = 100:
## test that corHaversine runs with Gaussian correlation
ran = 100 # parameter for Gaussian correlation
corr_matrix_gauss <- exp(-(dist_matrix/ran)^2)
diag(corr_matrix_gauss) <- 1
# set up covariance matrix of response
cov_matrix_gauss <- (diag(100)*sigma) %*% corr_matrix_gauss %*% (diag(100)*sigma) # correlated response
# generate response
sample_data$y_gauss <- mvrnorm(1, mu = rep(0, 100), Sigma = cov_matrix_gauss)
# fit model
gls_haversine_gauss <- gls(y_gauss ~ 1, correlation = corHaversine(form=~lon+lat, mimic = "corGaus"), data = sample_data)
summary(gls_haversine_gauss)
With lme:
## runs with lme
# set up data with group effects
group_y <- as.vector(sapply(1:5, function(.) mvrnorm(1, mu = rep(0, 100), Sigma = cov_matrix_gauss)))
group_effect <- rep(-2:2, each = 100)
group_y = group_y + group_effect
group_name <- factor(group_effect)
lme_dat <- data.frame(y = group_y, group = group_name, lon = sample_data$lon, lat = sample_data$lat)
# fit model
lme_haversine <- lme(y ~ 1, random = ~ 1|group, correlation = corHaversine(form=~lon+lat, mimic = "corGaus"), data = lme_dat, control=lmeControl(opt = "optim") )
summary(lme_haversine)
# Correlation Structure: corHaversine
# Formula: ~lon + lat | group
# Parameter estimate(s):
# range
# 106.3482
# Fixed effects: y ~ 1
# Value Std.Error DF t-value p-value
# (Intercept) -0.0161861 0.6861328 495 -0.02359033 0.9812
#
# Standardized Within-Group Residuals:
# Min Q1 Med Q3 Max
# -3.0393708 -0.6469423 0.0348155 0.7132133 2.5921573
#
# Number of Observations: 500
# Number of Groups: 5
See if this answer on R-Help is useful: http://markmail.org/search/?q=list%3Aorg.r-project.r-help+winsemius+haversine#query:list%3Aorg.r-project.r-help%20winsemius%20haversine+page:1+mid:ugecbw3jjwphu2pb+state:results
I just checked, and it does not appear that the ramps or nlme packages have been modified to incorporate the changes suggested by Malcolm Fairbrother, so you will need to do some hacking. I don't want to be considered for the bounty, since I am not posting a tested solution and I did not dream it up either.