How can I solve a dimension mismatch in a JAGS model? - r

I'm very new to Bayesian analysis and I'm trying to practice with an example of classic capture-recapture models: Mh2.
This is my code:
nind <- dim(venados)[1]
K <- 43
ntraps <- 13
M <- 150
nz <- M - nind
Yaug <- array(0, dim = c(M, ntraps, K))
Yaug[1:nind,,] <- venados
y <- apply(Yaug, c(1,3), sum)
y[y > 1] <- 1
# Bundle data
data1 <- list(y = y, nz = nz, nind = nind, K = K, sup = Buffer)
# Model JAGS
sink("Mh2_jags.txt")
cat("
model{
# Priors
p0 ~ dunif(0,1)
mup <- log(p0/(1-p0))
sigmap ~ dunif(0,10)
taup <- 1/(sigmap*sigmap)
psi ~ dunif(0,1)
# Likelihood
for (i in 1:(nind+nz)) {
z[i] ~ dbern(psi)
lp[i] ~ dnorm(mup,taup)
logit(p[i]) <- lp[i]
y[i] ~ dbin(mu[i],K)
} # i
N <- sum(z[1:(nind+nz)])
D <- N/sup*100
} # modelo
",fill = TRUE)
sink()
# Initial values
inits <- function(){list(z = as.numeric(y >= 1), psi = 0.6, p0 = runif(1), sigmap = runif(1, 0.7, 1.2), lp = rnorm(M, -0.2))}
params1 <- c("p0","sigmap","psi","N","D")
# MCMC
ni <- 10000; nt <- 1; nb <- 1000; nc <- 3
# JAGS and posteriors
fM2 <- jags(data1, inits, params1, "Mh2_jags.txt", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb)
I received this error message:
Processing function input.......
Done.
Compiling model graph
Resolving undeclared variables
Deleting model
Error in jags.model(file = model.file, data = data, inits = inits, n.chains = n.chains, :
RUNTIME ERROR:
Compilation error on line 16.
Dimension mismatch in subset expression of y
I have read that some letters such as s and n have to be changed. However, I do not know what to do. Any advice would be appreciated.
Thank you very much.

The issue is that y is two-dimensional but the model assumes it is one-dimensional. If you are assuming that the secondary surveys are i.i.d. Bernoulli trials (and each session had K trials), then you just need to take the row sums of the y matrix. Assuming this is the case, you only need to modify a couple of lines at the top of the script.
nind <- dim(venados)[1]
K <- 43
ntraps <- 13
M <- 150
nz <- M - nind
Yaug <- array(0, dim = c(M, ntraps, K))
Yaug[1:nind,,] <- venados
y <- apply(Yaug, c(1,3), sum)
y[y > 1] <- 1
# Take the rowSum
y_vector <- rowSums(y)
# Use y_vector instead of y
data1 <- list(y = y_vector, nz = nz, nind = nind, K = K, sup = Buffer)
Conversely, if you wanted to include covariates for the observational process (and those covariates vary by survey) you would use the matrix y and modify the model.
sink("Mh2_jags_Kloop.txt")
cat("
model{
# Priors
p0 ~ dunif(0,1)
mup <- log(p0/(1-p0))
sigmap ~ dunif(0,10)
taup <- 1/(sigmap*sigmap)
psi ~ dunif(0,1)
# Likelihood
for (i in 1:(nind+nz)) {
z[i] ~ dbern(psi)
lp[i] ~ dnorm(mup,taup)
logit(p[i]) <- lp[i]
# Loop over K surveys
for(j in 1:K){
y[i,j] ~ dbern(p[i]*z[i])
}
} # i
N <- sum(z[1:(nind+nz)])
D <- N/sup*100
} # modelo
",fill = TRUE)
sink()
Finally, you don't specify what mu is within the model. I think you want it to be p, but you also need to link the latent state model to the observation model (if z = 0 then that individual cannot be sampled). In this case you would interpret psi as the probability that one of the nind+nz individuals is at your site.
# Model JAGS
sink("Mh2_jags.txt")
cat("
model{
# Priors
p0 ~ dunif(0,1)
mup <- log(p0/(1-p0))
sigmap ~ dunif(0,10)
taup <- 1/(sigmap*sigmap)
psi ~ dunif(0,1)
# Likelihood
for (i in 1:(nind+nz)) {
z[i] ~ dbern(psi)
lp[i] ~ dnorm(mup,taup)
logit(p[i]) <- lp[i]
y[i] ~ dbin(p[i] * z[i],K)
} # i
N <- sum(z[1:(nind+nz)])
D <- N/sup*100
} # modelo
",fill = TRUE)
sink()
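Putting this together, here is a sketch of an end-to-end call with the corrected model (a hypothetical run mirroring the original script; note it must use the one-dimensional y_vector, since y[i] ~ dbin(p[i] * z[i], K) indexes y by individual only):
# Bundle data with the one-dimensional y_vector
data1 <- list(y = y_vector, nz = nz, nind = nind, K = K, sup = Buffer)
# Initial values: z is now derived from y_vector rather than the y matrix
inits <- function(){list(z = as.numeric(y_vector >= 1), psi = 0.6, p0 = runif(1), sigmap = runif(1, 0.7, 1.2), lp = rnorm(M, -0.2))}
params1 <- c("p0","sigmap","psi","N","D")
fM2 <- jags(data1, inits, params1, "Mh2_jags.txt", n.chains = 3, n.thin = 1, n.iter = 10000, n.burnin = 1000)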

Related

Bayesian in R: Dimension mismatch in values supplied for betaA

I am working with a matrix that is 35 rows and 16 columns. I am trying to run a Bayesian multistate model, but something in my model code prevents it from working. When I run the code in R, I get the error message:
Error in checkForRemoteErrors(val) :
3 nodes produced errors; first error: RUNTIME ERROR:
Dimension mismatch in values supplied for betaA
Any help is appreciated and my code is below:
# psi = movement probability
# phi = apparent survival
# p = detection probability
# o = occurrence probability
# load libraries
library(jagsUI)
library(lattice)
library(coda)
library("R2WinBUGS")
library("R2jags")
library(zoo)
devtools::install_github("bstaton1/postpack")
# initializing functions####
known.state.ms <- function(ms, notseen){
#notseen: label for 'not seen'
state <- ms
state[state==notseen] <- NA
for (i in 1:dim(ms)[1]){
m <- min(which(!is.na(state[i,])))
state[i,m] <- NA
}
return(state)
}
#i = 1
#ch = CHY[i,]
#first = f[i]
z_inits = function(ch, first) {
nt = length(ch)
to_fill = which(ch == 4 & 1:nt >= first)
to_keep = which(ch != 4 & 1:nt >= first)
known = ch; known[to_fill] = NA
unknown = rep(NA, nt)
known_alive = rep(NA, nt)
unknown[to_fill] = 2
for (t in 1:nt) {
known_alive[t] = ifelse(any(!is.na(known[t:nt])), 1, 0)
}
last_known_alive = max(which(known_alive == 1))
if (last_known_alive < 16) {
dead = rep(0, nt)
for (t in (last_known_alive + 1):nt) {
dead[t] = sample(c(0,1), size = 1, prob = matrix(c(0.9, 0.1, 0, 1), 2,
2, byrow = T)[dead[t-1] + 1,])
}
unknown[dead == 1] = 4
}
unknown
}
# import data
dat <- read.csv("bass_encounter_history_0.csv")
covs <- read.csv("depth.csv")
depth = covs[,1]
histories <- unlist(lapply(dat$history, function(x) strsplit(x,split="")))
CH <- t(matrix(histories,nrow=16,ncol=35))
CH <- gsub("0",4,CH)
CH <- gsub("A",1,CH)
CH <- gsub("B",2,CH)
CH <- gsub("C",3,CH)
CH <- matrix(as.numeric(CH),nrow=35,ncol=16)
# Built the model####
nind= nrow(CH)
n.occasions = ncol(CH)
f=c(1,1,1,2,2,2,2,2,3,3,3,4,4,4,4,4,4,1,1,1,3,1,1,1,3,3,3,3,5,3,5,6,6,6,6) # initial tagging week
jags_model = function() {
# -------------------------------------------------
# Parameters:
# phiA: survival probability at site A
# phiB: survival probability at site B
# phiC: survival probability at site C
# psiA[1,t]: probability of staying in site A
# psiA[2,t]: movement probability from site A to site B
# psiB[1,t]: movement probability from site B to site A
# psiB[2,t]: probability of staying in site B
# psiB[3,t]: movement probability from site B to site C
# psiC[1,t]: probability of staying in site C
# psiC[2,t]: movement probability from site C to site B
# betaA[i]: the effect of standardized flow on movement probabilities at site A
# betaB[i]: the effect of standardized flow on movement probabilities at site B
# betaC[i]: the effect of standardized flow on movement probabilities at site C
# wA,B,C: the variable weight of the betas, 1 = essential, 0 = insignificant
# pA: recapture probability at site A
# pB: recapture probability at site B
# pC: recapture probability at site C
# -------------------------------------------------
# States (S):
# 1 alive at A
# 2 alive at B
# 3 alive at C
# 4 dead
# Observations (O):
# 1 seen at A
# 2 seen at B
# 3 seen at C
# 4 not seen
# -------------------------------------------------
# Priors and constraints
# Survival and recapture: uniform
phiA ~ dunif(0, 1)
phiB ~ dunif(0, 1)
phiC ~ dunif(0, 1)
pA ~ dunif(0, 1)
pB ~ dunif(0, 1)
pC ~ dunif(0, 1)
wA ~ dbern(.5)
for(i in 1:3){
wB[i] ~ dbern(.5)
}
wC ~ dbern(.5)
for(t in 1:(n.occasions-1)){
logit(psiA[1,t]) <- muA + wA*betaA*x[t]
psiA[2,t] <- 1 - psiA[1,t]
logit(psiC[1,t]) <- muC + wC*betaC*x[t]
psiC[2,t] <- 1 - psiC[1,t]
for(i in 1:3){
b[i,t] <- exp(muB[i] + wB[i]*betaB[i]*x[t])
psiB[i,t] <- b[i,t]/sum(b[,t])
}
}
muA ~ dt(0, 1/1.566^2, 7.763)
muC ~ dt(0, 1/1.566^2, 7.763)
mean.psiA <- 1/(1+exp(-muA))
#it's not really the mean - it's the probability of staying in A at mean value of x (only b/c x is z- transformed)
mean.psiC <- 1/(1+exp(-muC))
betaA ~ dt(0, 1/1.566^2, 7.763)
betaC ~ dt(0, 1/1.566^2, 7.763)
for(i in 1:2){
muB[i] ~ dt(0, 1/1.566^2, 7.763)
betaB[i] ~ dt(0, 1/1.566^2, 7.763)
}
muB[3] <- 0
betaB[3] <- 0
# PREDICTED TRANSITION PROBS FOR PLOTTING
for(r in 1:n.depth){
for(i in 1:3){
pred.b[i,r] <- exp(muB[i] + wB[i]*betaB[i]*depthseq[r])
pred.psiB[i,r] <- pred.b[i,r]/sum(pred.b[,r])
}
logit(pred.psiA[1,r]) <- muA + wA*betaA*depthseq[r]
pred.psiA[2,r] <- 1 - pred.psiA[1,r]
logit(pred.psiC[1,r]) <- muC + wC*betaC*depthseq[r]
pred.psiC[2,r] <- 1 - pred.psiC[1,r]
}
# Define probabilities of state S(t+1) given S(t)
for (t in 1:(n.occasions-1)){
ps[1,t,1] <- phiA * psiA[1,t]
ps[1,t,2] <- phiA * psiA[2,t]
ps[1,t,3] <- 0
ps[1,t,4] <- 1-phiA
ps[2,t,1] <- phiB * psiB[1,t]
ps[2,t,2] <- phiB * psiB[2,t]
ps[2,t,3] <- phiB * psiB[3,t]
ps[2,t,4] <- 1-phiB
ps[3,t,1] <- 0
ps[3,t,2] <- phiC * psiC[2,t]
ps[3,t,3] <- phiC * psiC[1,t] # switch these so the coefs talk about prob(stay in C)
ps[3,t,4] <- 1-phiC
ps[4,t,1] <- 0
ps[4,t,2] <- 0
ps[4,t,3] <- 0
ps[4,t,4] <- 1
# Define probabilities of O(t) given S(t)
po[1,t,1] <- pA
po[1,t,2] <- 0
po[1,t,3] <- 0
po[1,t,4] <- 1-pA
po[2,t,1] <- 0
po[2,t,2] <- pB
po[2,t,3] <- 0
po[2,t,4] <- 1-pB
po[3,t,1] <- 0
po[3,t,2] <- 0
po[3,t,3] <- pC
po[3,t,4] <- 1-pC
po[4,t,1] <- 0
po[4,t,2] <- 0
po[4,t,3] <- 0
po[4,t,4] <- 1
} #t
# Likelihood
for (i in 1:nind){
# Define latent state at first capture
z[i,f[i]] <- y[i,f[i]]
for (t in (f[i]+1):n.occasions){
# State process: draw S(t) given S(t-1)
z[i,t] ~ dcat(ps[z[i,t-1], t-1,])
# Observation process: draw O(t) given S(t)
y[i,t] ~ dcat(po[z[i,t], t-1,])
} #t
} #i
}
jags_file = "invasiondepthmodel.txt"
postpack::write_model(jags_model, jags_file)
# Configure the model settings and initial values ####
depthseq = seq(min(depth),max(depth),length.out=100)
n.depth=length(depthseq)
#compile jags data object
jags_data <- list(y = CH, x= depth, depthseq=depthseq, n.depth=n.depth, f = f,
n.occasions = n.occasions, nind = nind, z = known.state.ms(CH, 4))
#specify initial values
jags_inits <- function(i){list(
muA = runif(1,-1,1),
muB = c(runif(2,-1,1),NA),
muC = runif(1,-1,1),
wA= rbinom(3, 1, 0.5),
wB= rbinom(3, 1, 0.5),
wC= rbinom(3, 1, 0.5),
betaA = runif(2,-1,1),
betaB = c(runif(2,-1,1),NA),
betaC = runif(2,-1,1),
phiA = runif(1, 0.5, 1),
phiB = runif(1, 0.5, 1),
phiC = runif(1, 0.5, 1),
pA = runif(1, 0.5, 1),
pB = runif(1, 0.5, 1),
pC = runif(1, 0.5, 1),
z = t(sapply(1:nind, function(i) z_inits(CH[i,], f[i])))
)
}
# Parameters monitored
jags_params <- c("phiA","phiB","phiC",
"psiA","psiB","psiC",
"wA","wB","wC",
"muA","muB","muC",
"betaA","betaB","betaC",
"pA","pB","pC",
"pred.psiA","pred.psiB","pred.psiC")
jags_dims = c(
na = 10000, # number of samples in adapting phase
ni = 40000, # number of post-burn-in samples per chain
nb = 40000, # number of burn-in samples
nt = 20, # thinning rate
nc = 3, # number of chains,
parallel = T # run chains in parallel?
); with(as.list(jags_dims), ni/nt * nc)
inits = lapply(1:jags_dims["nc"], jags_inits)
# Run the model in JAGS #####
starttime = Sys.time()
cat("MCMC Started: ", format(starttime), "\n")
post = jagsUI::jags.basic(
data = jags_data,
model.file = jags_file,
inits = inits,
parameters.to.save = jags_params,
n.adapt = jags_dims["na"],
n.iter = sum(jags_dims[c("ni", "nb")]),
n.thin = jags_dims["nt"],
n.burnin = jags_dims["nb"],
n.chains = jags_dims["nc"],
parallel = jags_dims["parallel"],
verbose = F
)
I was expecting the dimensions of the matrix and the betaA values to match up. However, it seems they do not.
In the model, betaA is a scalar: in the model code you have betaA*x[t], and in the prior betaA ~ dt(0, 1/1.566^2, 7.763), both indicating a single value. However, in the initial values it is a vector of length 2: betaA = runif(2,-1,1). You either need to define it as a vector in the model or pass a single value in the inits.
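For example, a minimal sketch of corrected initial values, keeping betaA as the scalar the model declares (betaC, wA, and wC have the same scalar declarations, so the analogous fixes are shown too; everything else is unchanged from your code):
jags_inits <- function(i){list(
muA = runif(1,-1,1),
muB = c(runif(2,-1,1),NA),
muC = runif(1,-1,1),
wA = rbinom(1, 1, 0.5), # scalar, matching wA ~ dbern(.5)
wB = rbinom(3, 1, 0.5), # length 3, matching wB[1:3]
wC = rbinom(1, 1, 0.5), # scalar, matching wC ~ dbern(.5)
betaA = runif(1,-1,1), # scalar, matching betaA ~ dt(0, 1/1.566^2, 7.763)
betaB = c(runif(2,-1,1),NA), # betaB[3] is fixed at 0 in the model
betaC = runif(1,-1,1), # scalar, matching betaC ~ dt(0, 1/1.566^2, 7.763)
phiA = runif(1, 0.5, 1),
phiB = runif(1, 0.5, 1),
phiC = runif(1, 0.5, 1),
pA = runif(1, 0.5, 1),
pB = runif(1, 0.5, 1),
pC = runif(1, 0.5, 1),
z = t(sapply(1:nind, function(i) z_inits(CH[i,], f[i])))
)
}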

How to measure the robustness to outliers of some regression models

I am computing the variances of the model coefficients and then returning the means of the sums.
I simply want to check which regression method is more robust to outliers. I will examine many scenarios.
However, my code tells me that ordinary least squares is the best, which is not the expected result, since MM estimation and Huber estimation are called robust regression methods.
Am I doing something wrong in my code?
#####################################
rmn <- function(n, mu) {
p <- length(mu)
matrix(rnorm(n*p, mean = mu), ncol = p)
}
#####################################
RI<-function(y,x,a,mu,R=30,t=1000){
x <- as.matrix(x)
dm <- dim(x)
n <- dm[1]
bias1 <- bias2 <- bias3 <- numeric(t)
b1 <- b2<- b3 <- numeric(R)
### Outliers in X ######
for (j in 1:t) {
for (i in 1:R) {
id <- sample(n, a * n)
z <- x
z[id, ] <- rmn(length(id), mu)
b1[i] <- var(coef(lm(y ~., data = data.frame(z))))
b2[i] <- var(coef(rlm(y ~ ., data = data.frame(z), maxit = 2000, method = "MM")))
b3[i] <- var(coef(rlm(y ~ ., data = data.frame(z), psi = psi.huber,maxit = 300)))
}
bias1[j] <- sum(b1); bias2[j] <- sum(b2); bias3[j] <- sum(b3)
}
bias <- cbind("lm" = bias1,"MM-rlm" = bias2, "H-rlm" = bias3)
colMeans(bias)
}
#####################################
p <- 5
n <- 300
x<- matrix(rnorm(n * p), ncol = p)
y<-rnorm(n)
a=0.2
mu <-colMeans(x)+10
#####################################
RI(y,x,a,mu)
#####################################
UPDATE
I changed the idea of measuring Robustness, due to the first provided answer.
I measured robustness by calculating the mean absolute difference between the coefficients when the data are uncontaminated and when they are contaminated. I introduce outliers first in y and then in x. I still have a problem.
############ R CODE ##############
rmn <- function(n, mu, seed = TRUE) {
 if (seed) set.seed(12345)
 p <- length(mu)
 matrix( rnorm(n * p, mean = mu), ncol = p)
}
##################################
out.cv <- function(y, x, a, mu, R = 500, seed = TRUE) {
 ## y: response variable
 ## x: independent variables
 ## a: percent of outliers
 ## mu: how far should the outliers be. A vector if outliers in x,
 ## or a single number if outliers in y
 ## R: how many times to repeat this process
 x <- as.matrix(x)
 dm <- dim(x)
 n <- dm[1] ; d <- dm[2] + 1
 b1 <- b2<- b3 <- numeric(R)
 be <- coef( lm(y ~., data = as.data.frame(x[,-1]) ) )
####################################
 ### Outliers in Y ######
 if ( length(mu) == 1 ) {
 for (i in 1:R) {
 if (seed) set.seed(12345)
 id <- sample(n, a * n)
 z <- y
 if (seed) set.seed(12345)
 z[id] <- rnorm(length(id), mu) ## mu has to be a single number here
 ## mean absolute difference between coefficients of clean data
 ## and coefficients with contaminated data
 b1[i] <- mean( abs( coef( lm(z ~., data = as.data.frame(x[,-1])) ) - be) )
 b2[i] <- mean( abs( coef( rlm(z ~ ., data = data.frame(x[,-1]), maxit = 2000, method = "MM") ) - be ) )
 b3[i] <- mean( abs( coef( rlm(z ~ ., data = data.frame(x[,-1]), psi = psi.huber,maxit = 300) ) - be ) )
 }
########################
##### Outliers in X #########
 } else {
 for (i in 1:R) {
 if (seed) set.seed(12345)
 id <- sample(n, a * n)
 z <- x
 z[id, ] <- rmn( length(id), mu, seed ) ## mu must be a vector
 b1[i] <- mean( abs( coef( lm(y ~., data = as.data.frame(z[,-1])) )- be) )
 b2[i] <- mean( abs( coef( rlm(y ~ ., data = data.frame(z[,-1]), maxit = 2000, method = "MM") ) - be ) )
 b3[i] <- mean( abs( coef( rlm(y ~ ., data = data.frame(z[,-1]), psi = psi.huber,maxit = 300) ) - be ) )
 }
 }
 bias1 <- mean(b1) ; bias2 <- mean(b2); bias3 <- mean(b3)
 bias <- c(bias1, bias2, bias3)
 names(bias) <- c("lm", "MM-rlm","Huber-rlm")
 bias
}
################################
p <- 5
n <- 200
##############################
# Independent X and Y ####
#set.seed(12345)
#x<- matrix( rnorm(n * p), ncol = p)
#y<-rnorm(n)
## Related X and Y ####
set.seed(12345)
x <- rmn(n, numeric(p))
ber <- rnorm(p)
m <- x %*% ber
y <- rnorm(n, m, 1)
############################
a <- 0.2 #outliers 10%
mu <- 15 ## outliers in y
out.cv(y, x, a, mu)
###########################
mu <-colMeans(x)+15 ## outliers in x
out.cv(y, x, a, mu)
###################
First of all, I do not see you generating a sample from a long-tailed distribution. Please use rt(n, 3) (Student's t with very small df) to get such a distribution, or play with other ones like the log-normal; definitely do not use rnorm. The contamination-injection procedure you use also seems overcomplicated.
Another thing is that the specification of MASS::rlm is not trivial.
In my opinion, start with quantreg::rq, which is quantile regression, and treat it as a robust benchmark method.
Additionally, your sampling procedure does not look valid: you generate new observations at each iteration, which are not known a priori. I would expect bootstrapping on a train or test set.
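To illustrate, a minimal sketch of that kind of setup, assuming MASS and quantreg are installed: errors drawn from rt(n, 3) for long tails, with quantreg::rq as the robust benchmark (toy data, not your simulation):
library(MASS) # rlm
library(quantreg) # rq, the suggested robust benchmark
set.seed(1)
n <- 300; p <- 5
x <- matrix(rnorm(n * p), ncol = p)
b_true <- rnorm(p)
y <- drop(x %*% b_true) + rt(n, df = 3) # heavy-tailed errors instead of rnorm
dat <- data.frame(y = y, x)
fit_lm <- lm(y ~ ., data = dat)
fit_mm <- rlm(y ~ ., data = dat, method = "MM", maxit = 2000)
fit_rq <- rq(y ~ ., data = dat, tau = 0.5) # median regression
# compare the slope estimates against the truth
rbind(lm = coef(fit_lm)[-1], MM = coef(fit_mm)[-1], rq = coef(fit_rq)[-1], truth = b_true)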

Censoring in rjags - Invalid parent values

I'm having trouble reimplementing a WinBUGS model in rjags. I'm getting the Invalid parent values error, which is the error you get when censoring is not set up correctly, but I can't see my mistake.
This is the original model on WinBugs:
model {
for(i in 1 : N) {
times[i] ~ dweib(v, lambda[i]) T(censor[i],)
lambda[i] <- exp(beta0 + beta1*type[i])
S[i] <- exp(-lambda[i]*pow(times[i],v));
f[i] <- lambda[i]*v*pow(times[i],v-1)*S[i]
h[i] <- f[i]/S[i]
}
beta0 ~ dnorm(0.0, 0.0001)
beta1 ~ dnorm(0.0, 0.0001)
v ~ dexp(0.001)
median0 <- pow(log(2) * exp(-beta0), 1/v)
median1 <- pow(log(2) * exp(-beta0-beta1), 1/v)
}
Setting up a reproducible example:
type <- as.factor(c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0))
censor <- c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,882,892,1031,
1033,1306,1335,0,1452,1472,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,381,0,0,0,0,0,0,0,0,0,529,0,
0,0,0,0,0,0,0,0,945,0,0,1180,0,0,1277,1397,1512,1519)
times <-c (17,42,44,48,60,72,74,95,103,108,122,144,167,170,183,185,193,195,197,208,234,235,254,307,315,401,
445,464,484,528,542,567,577,580,795,855,NA,NA,NA,NA,NA,NA,1366,NA,NA,1,63,105,129,182,216,250,262,
301,301,342,354,356,358,380,NA,383,383,388,394,408,460,489,499,524,NA,535,562,675,676,748,748,778,
786,797,NA,955,968,NA,1245,1271,NA,NA,NA,NA)
library(dplyr) # for tibble() and the pipe
df <- tibble(type = type, censor = censor, time = times) %>%
mutate(censor_limit = replace(censor, censor == 0, max(times, na.rm = TRUE))) %>%
mutate(is_censored = ifelse(is.na(time), 1, 0)) %>%
mutate(time_init = ifelse(is_censored == 1, censor_limit + 1, NA))
df$censor <- NULL
head(df)
And this is the rjags part:
library(rjags)
m <- textConnection("model {
for(i in 1 : N) {
isCensored[i] ~ dinterval(times[i], censorLimit[i])
times[i] ~ dweib(v, lambda[i])
lambda[i] <- exp(beta0 + beta1*type[i])
S[i] <- exp(-lambda[i]*pow(times[i],v));
f[i] <- lambda[i]*v*pow(times[i],v-1)*S[i]
h[i] <- f[i]/S[i]
}
beta0 ~ dnorm(0.0, 0.0001)
beta1 ~ dnorm(0.0, 0.0001)
v ~ dexp(0.001)
# Median survival time
median0 <- pow(log(2) * exp(-beta0), 1/v)
median1 <- pow(log(2) * exp(-beta0-beta1), 1/v)
}")
d <- list(N = nrow(df), times = df$time, type = df$type, isCensored = df$is_censored,
censorLimit = df$censor_limit)
inits1 = function() {
inits = list(v = 1, beta0 = 0, beta1=0, times = df$time_init)
}
mod <- jags.model(m, data = d, inits = inits1, n.chains = 3)
update(mod, 1e3)
mod_sim <- coda.samples(model = mod, variable.names = c("lambda", "median0", "median1"), n.iter = 5e3)
mod_csim <- as.mcmc(do.call(rbind, mod_sim))
Output:
Compiling model graph
Resolving undeclared variables
Allocating nodes
Graph information:
Observed stochastic nodes: 164
Unobserved stochastic nodes: 19
Total graph size: 910
Initializing model
Deleting model
Error in jags.model(m, data = d, inits = inits1, n.chains = 3): Error in node h[35]
Invalid parent values
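For reference, the standard dinterval pattern can be checked against a minimal, self-contained sketch like the one below (a generic right-censoring example with made-up data, not a fix for the model above). The key points are that censored observations must be NA in the data, the censoring indicator must be consistent with the data, and the censored nodes must be initialized above their limits:
library(rjags)
set.seed(1)
t_true <- rnorm(20, 5, 1)
lim <- 6
isCens <- as.numeric(t_true > lim) # 1 = right-censored at lim
yobs <- ifelse(isCens == 1, NA, t_true) # censored values enter the data as NA
mm <- textConnection("model {
for (i in 1:N) {
isCens[i] ~ dinterval(y[i], lim)
y[i] ~ dnorm(mu, 1)
}
mu ~ dnorm(0, 0.0001)
}")
dd <- list(N = 20, y = yobs, isCens = isCens, lim = lim)
ii <- function() list(mu = 0, y = ifelse(isCens == 1, lim + 1, NA))
mod0 <- jags.model(mm, data = dd, inits = ii, n.chains = 1)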

Plotting Newton-Raphson/Fisher scoring iterations in R

Is there a package in R for plotting the Newton-Raphson/Fisher scoring iterations when fitting a GLM (from the stats package)?
I answered a very similar question yesterday. In your case, however, things are a little simpler.
Note that when you call glm, it eventually calls glm.fit (or any other method argument you specify to glm) which computes the solution path in the loop from lines 78 to 170. The current iteration's value of the coefficients is computed on line 97 using a .Call to a C function C_Cdqrls. As a hack, you can extract the current value of the coefficients to the global environment (fit$coefficients), within this loop, by modifying the glm.fit function like so:
glm.fit.new = function (x, y, weights = rep(1, nobs), start = NULL, etastart = NULL,
mustart = NULL, offset = rep(0, nobs), family = gaussian(),
control = list(), intercept = TRUE) {
control <- do.call("glm.control", control)
x <- as.matrix(x)
xnames <- dimnames(x)[[2L]]
ynames <- if (is.matrix(y))
rownames(y)
else names(y)
conv <- FALSE
nobs <- NROW(y)
nvars <- ncol(x)
EMPTY <- nvars == 0
if (is.null(weights))
weights <- rep.int(1, nobs)
if (is.null(offset))
offset <- rep.int(0, nobs)
variance <- family$variance
linkinv <- family$linkinv
if (!is.function(variance) || !is.function(linkinv))
stop("'family' argument seems not to be a valid family object",
call. = FALSE)
dev.resids <- family$dev.resids
aic <- family$aic
mu.eta <- family$mu.eta
unless.null <- function(x, if.null) if (is.null(x))
if.null
else x
valideta <- unless.null(family$valideta, function(eta) TRUE)
validmu <- unless.null(family$validmu, function(mu) TRUE)
if (is.null(mustart)) {
eval(family$initialize)
}
else {
mukeep <- mustart
eval(family$initialize)
mustart <- mukeep
}
if (EMPTY) {
eta <- rep.int(0, nobs) + offset
if (!valideta(eta))
stop("invalid linear predictor values in empty model",
call. = FALSE)
mu <- linkinv(eta)
if (!validmu(mu))
stop("invalid fitted means in empty model", call. = FALSE)
dev <- sum(dev.resids(y, mu, weights))
w <- ((weights * mu.eta(eta)^2)/variance(mu))^0.5
residuals <- (y - mu)/mu.eta(eta)
good <- rep_len(TRUE, length(residuals))
boundary <- conv <- TRUE
coef <- numeric()
iter <- 0L
}
else {
coefold <- NULL
eta <- if (!is.null(etastart))
etastart
else if (!is.null(start))
if (length(start) != nvars)
stop(gettextf("length of 'start' should equal %d and correspond to initial coefs for %s",
nvars, paste(deparse(xnames), collapse = ", ")),
domain = NA)
else {
coefold <- start
offset + as.vector(if (NCOL(x) == 1L)
x * start
else x %*% start)
}
else family$linkfun(mustart)
mu <- linkinv(eta)
if (!(validmu(mu) && valideta(eta)))
stop("cannot find valid starting values: please specify some",
call. = FALSE)
devold <- sum(dev.resids(y, mu, weights))
boundary <- conv <- FALSE
# EDIT: counter to create track of iterations
i <<- 1
for (iter in 1L:control$maxit) {
good <- weights > 0
varmu <- variance(mu)[good]
if (anyNA(varmu))
stop("NAs in V(mu)")
if (any(varmu == 0))
stop("0s in V(mu)")
mu.eta.val <- mu.eta(eta)
if (any(is.na(mu.eta.val[good])))
stop("NAs in d(mu)/d(eta)")
good <- (weights > 0) & (mu.eta.val != 0)
if (all(!good)) {
conv <- FALSE
warning(gettextf("no observations informative at iteration %d",
iter), domain = NA)
break
}
z <- (eta - offset)[good] + (y - mu)[good]/mu.eta.val[good]
w <- sqrt((weights[good] * mu.eta.val[good]^2)/variance(mu)[good])
fit <- .Call(stats:::C_Cdqrls, x[good, , drop = FALSE] *
w, z * w, min(1e-07, control$epsilon/1000), check = FALSE)
#======================================================
# EDIT: assign the coefficients to variables in the global namespace
#======================================================
assign(paste0("iteration_x_", i), fit$coefficients,
envir = .GlobalEnv)
i <<- i + 1 # increase the counter
if (any(!is.finite(fit$coefficients))) {
conv <- FALSE
warning(gettextf("non-finite coefficients at iteration %d",
iter), domain = NA)
break
}
if (nobs < fit$rank)
stop(sprintf(ngettext(nobs, "X matrix has rank %d, but only %d observation",
"X matrix has rank %d, but only %d observations"),
fit$rank, nobs), domain = NA)
start[fit$pivot] <- fit$coefficients
eta <- drop(x %*% start)
mu <- linkinv(eta <- eta + offset)
dev <- sum(dev.resids(y, mu, weights))
if (control$trace)
cat("Deviance = ", dev, " Iterations - ", iter,
"\n", sep = "")
boundary <- FALSE
if (!is.finite(dev)) {
if (is.null(coefold))
stop("no valid set of coefficients has been found: please supply starting values",
call. = FALSE)
warning("step size truncated due to divergence",
call. = FALSE)
ii <- 1
while (!is.finite(dev)) {
if (ii > control$maxit)
stop("inner loop 1; cannot correct step size",
call. = FALSE)
ii <- ii + 1
start <- (start + coefold)/2
eta <- drop(x %*% start)
mu <- linkinv(eta <- eta + offset)
dev <- sum(dev.resids(y, mu, weights))
}
boundary <- TRUE
if (control$trace)
cat("Step halved: new deviance = ", dev, "\n",
sep = "")
}
if (!(valideta(eta) && validmu(mu))) {
if (is.null(coefold))
stop("no valid set of coefficients has been found: please supply starting values",
call. = FALSE)
warning("step size truncated: out of bounds",
call. = FALSE)
ii <- 1
while (!(valideta(eta) && validmu(mu))) {
if (ii > control$maxit)
stop("inner loop 2; cannot correct step size",
call. = FALSE)
ii <- ii + 1
start <- (start + coefold)/2
eta <- drop(x %*% start)
mu <- linkinv(eta <- eta + offset)
}
boundary <- TRUE
dev <- sum(dev.resids(y, mu, weights))
if (control$trace)
cat("Step halved: new deviance = ", dev, "\n",
sep = "")
}
if (abs(dev - devold)/(0.1 + abs(dev)) < control$epsilon) {
conv <- TRUE
coef <- start
break
}
else {
devold <- dev
coef <- coefold <- start
}
}
if (!conv)
warning("glm.fit: algorithm did not converge", call. = FALSE)
if (boundary)
warning("glm.fit: algorithm stopped at boundary value",
call. = FALSE)
eps <- 10 * .Machine$double.eps
if (family$family == "binomial") {
if (any(mu > 1 - eps) || any(mu < eps))
warning("glm.fit: fitted probabilities numerically 0 or 1 occurred",
call. = FALSE)
}
if (family$family == "poisson") {
if (any(mu < eps))
warning("glm.fit: fitted rates numerically 0 occurred",
call. = FALSE)
}
if (fit$rank < nvars)
coef[fit$pivot][seq.int(fit$rank + 1, nvars)] <- NA
xxnames <- xnames[fit$pivot]
residuals <- (y - mu)/mu.eta(eta)
fit$qr <- as.matrix(fit$qr)
nr <- min(sum(good), nvars)
if (nr < nvars) {
Rmat <- diag(nvars)
Rmat[1L:nr, 1L:nvars] <- fit$qr[1L:nr, 1L:nvars]
}
else Rmat <- fit$qr[1L:nvars, 1L:nvars]
Rmat <- as.matrix(Rmat)
Rmat[row(Rmat) > col(Rmat)] <- 0
names(coef) <- xnames
colnames(fit$qr) <- xxnames
dimnames(Rmat) <- list(xxnames, xxnames)
}
names(residuals) <- ynames
names(mu) <- ynames
names(eta) <- ynames
wt <- rep.int(0, nobs)
wt[good] <- w^2
names(wt) <- ynames
names(weights) <- ynames
names(y) <- ynames
if (!EMPTY)
names(fit$effects) <- c(xxnames[seq_len(fit$rank)], rep.int("",
sum(good) - fit$rank))
wtdmu <- if (intercept)
sum(weights * y)/sum(weights)
else linkinv(offset)
nulldev <- sum(dev.resids(y, wtdmu, weights))
n.ok <- nobs - sum(weights == 0)
nulldf <- n.ok - as.integer(intercept)
rank <- if (EMPTY)
0
else fit$rank
resdf <- n.ok - rank
aic.model <- aic(y, n, mu, weights, dev) + 2 * rank
list(coefficients = coef, residuals = residuals, fitted.values = mu,
effects = if (!EMPTY) fit$effects, R = if (!EMPTY) Rmat,
rank = rank, qr = if (!EMPTY) structure(fit[c("qr", "rank",
"qraux", "pivot", "tol")], class = "qr"), family = family,
linear.predictors = eta, deviance = dev, aic = aic.model,
null.deviance = nulldev, iter = iter, weights = wt, prior.weights = weights,
df.residual = resdf, df.null = nulldf, y = y, converged = conv,
boundary = boundary)
}
Note that this is a hack for a couple of reasons:
1. The function C_Cdqrls is not exported by the package stats, and so we have to look for it within namespace:package:stats.
2. This pollutes your global environment with the iteration values via a side effect of the call to glm.fit.new, creating one vector per iteration. Side effects are generally frowned upon in functional languages like R. You could clean up the multiple-objects part by creating a single matrix or data.frame and assigning into that.
However, once you have the iteration values extracted, you can do whatever you want with them, including plotting them.
Here is what a call to glm with the newly defined glm.fit.new method would look like:
counts = c(18,17,15,20,10,20,25,13,12)
outcome = gl(3,1,9)
treatment = gl(3,3)
d.AD = data.frame(treatment, outcome, counts)
print(d.AD)
glm.D93 = glm(counts ~ outcome + treatment, family = poisson(),
control = list(trace = TRUE, epsilon = 1e-16), method = "glm.fit.new")
You can check that the iteration parameter values have indeed been populated in the global environment:
> ls(pattern = "iteration_x_")
[1] "iteration_x_1" "iteration_x_10" "iteration_x_11" "iteration_x_2"
[5] "iteration_x_3" "iteration_x_4" "iteration_x_5" "iteration_x_6"
[9] "iteration_x_7" "iteration_x_8" "iteration_x_9"

How to deal with perfect fit linear model

The data I'm dealing with occasionally produce a "perfectly fitting" linear model. For each regression I run, I need to extract the r.squared value, which I've been doing with summary(mymodel)$r.squared, but this fails in the case of a perfectly fitting model (see below).
df <- data.frame(x = c(1,2,3,4,5), y = c(1,1,1,1,1))
mymodel <- lm(y ~ x, data = df)
summary(mymodel)$r.squared #This raises a warning
# [1] 0.5294
How can I handle these cases? Basically, I think I want to do something like
If(mymodel is a perfect fit)
rsquared = 1
else
rsquared = summary(mymodel)$r.squared
You can use tryCatch
df <- data.frame(x = c(1,2,3,4,5), y = c(1,1,1,1,1))
mymodel <- lm(y ~ x, data = df)
summary(mymodel)$r.squared #This raises a warning
tryCatch(summary(mymodel)$r.squared, warning = function(w) return(1))
# [1] 1
And with an added conditional to catch specific warnings
df <- data.frame(x = c(1,2,3,4,5), y = c(1,1,1,1,1))
mymodel <- lm(y ~ x, data = df)
summary(mymodel)$r.squared #This raises a warning
f <- function(expr) {
tryCatch(expr,
warning = function(w) {
if (grepl('perfect fit', w))
return(1)
else return(w)
})
}
f(TRUE)
# [1] TRUE
f(sum(1:5))
# [1] 15
f(summary(mymodel)$r.squared)
# [1] 1
f(warning('this is not a fit warning'))
# <simpleWarning in doTryCatch(return(expr), name, parentenv, handler): this is not a fit warning>
If you want to make sure that everything works perfectly, you can slightly modify the source code (type summary.lm to see the original code):
df <- data.frame(x = c(1,2,3,4,5), y = c(1,1,1,1,1))
mymodel <- lm(y ~ x, data = df)
This is how I modified it. Everything is the same as the original summary function apart from the bit near the bottom of the function.
summary2 <- function (object, correlation = FALSE, symbolic.cor = FALSE,
...)
{
z <- object
p <- z$rank
rdf <- z$df.residual
if (p == 0) {
r <- z$residuals
n <- length(r)
w <- z$weights
if (is.null(w)) {
rss <- sum(r^2)
}
else {
rss <- sum(w * r^2)
r <- sqrt(w) * r
}
resvar <- rss/rdf
ans <- z[c("call", "terms", if (!is.null(z$weights)) "weights")]
class(ans) <- "summary.lm"
ans$aliased <- is.na(coef(object))
ans$residuals <- r
ans$df <- c(0L, n, length(ans$aliased))
ans$coefficients <- matrix(NA, 0L, 4L)
dimnames(ans$coefficients) <- list(NULL, c("Estimate",
"Std. Error", "t value", "Pr(>|t|)"))
ans$sigma <- sqrt(resvar)
ans$r.squared <- ans$adj.r.squared <- 0
return(ans)
}
if (is.null(z$terms))
stop("invalid 'lm' object: no 'terms' component")
if (!inherits(object, "lm"))
warning("calling summary.lm(<fake-lm-object>) ...")
Qr <- qr(object)
n <- NROW(Qr$qr)
if (is.na(z$df.residual) || n - p != z$df.residual)
warning("residual degrees of freedom in object suggest this is not an \"lm\" fit")
r <- z$residuals
f <- z$fitted.values
w <- z$weights
if (is.null(w)) {
mss <- if (attr(z$terms, "intercept"))
sum((f - mean(f))^2)
else sum(f^2)
rss <- sum(r^2)
}
else {
mss <- if (attr(z$terms, "intercept")) {
m <- sum(w * f/sum(w))
sum(w * (f - m)^2)
}
else sum(w * f^2)
rss <- sum(w * r^2)
r <- sqrt(w) * r
}
resvar <- rss/rdf
p1 <- 1L:p
R <- chol2inv(Qr$qr[p1, p1, drop = FALSE])
se <- sqrt(diag(R) * resvar)
est <- z$coefficients[Qr$pivot[p1]]
tval <- est/se
ans <- z[c("call", "terms", if (!is.null(z$weights)) "weights")]
ans$residuals <- r
ans$coefficients <- cbind(est, se, tval, 2 * pt(abs(tval),
rdf, lower.tail = FALSE))
dimnames(ans$coefficients) <- list(names(z$coefficients)[Qr$pivot[p1]],
c("Estimate", "Std. Error", "t value", "Pr(>|t|)"))
ans$aliased <- is.na(coef(object))
ans$sigma <- sqrt(resvar)
ans$df <- c(p, rdf, NCOL(Qr$qr))
if (p != attr(z$terms, "intercept")) {
df.int <- if (attr(z$terms, "intercept"))
1L
else 0L
ans$r.squared <- mss/(mss + rss)
ans$adj.r.squared <- 1 - (1 - ans$r.squared) * ((n -
df.int)/rdf)
ans$fstatistic <- c(value = (mss/(p - df.int))/resvar,
numdf = p - df.int, dendf = rdf)
}
else ans$r.squared <- ans$adj.r.squared <- 0
ans$cov.unscaled <- R
dimnames(ans$cov.unscaled) <- dimnames(ans$coefficients)[c(1,
1)]
# Below is the only change to the code: where the original issued the
# "essentially perfect fit" warning, we set ans$r.squared <- 1 instead.
if (is.finite(resvar) && resvar < (mean(f)^2 + var(f)) *
1e-30) {
ans$r.squared <- 1 # the original code issued the warning here
}
# The check was moved lower in the function so as not to affect the rest
# of the code; I checked it and it seems to work properly.
if (correlation) {
ans$correlation <- (R * resvar)/outer(se, se)
dimnames(ans$correlation) <- dimnames(ans$cov.unscaled)
ans$symbolic.cor <- symbolic.cor
}
if (!is.null(z$na.action))
ans$na.action <- z$na.action
class(ans) <- "summary.lm"
ans
}
Run the new function and see that it now works without any warnings. No other if or else if conditions are required.
> summary2(mymodel)$r.squared
[1] 1
One option to catch a perfect fit is to determine the residuals: if it is a perfect fit, the sum of residuals will be zero.
x = 1:5
# generate 3 sets of y values, last set is random values
y = matrix(data = c(rep(1,5),1:5,rnorm(5)), nrow = 5)
tolerance = 0.0001
r.sq = array(NA,ncol(y))
# check fit for three sets
for (i in 1:ncol(y)){
fit = lm(y[,i]~x)
# determine sum of residuals
if (sum(abs(resid(fit))) < tolerance) {
# perfect fit case
r.sq[i] = 1 } else {
# non-perfect fit case
r.sq[i] = summary(fit)$r.squared
}
}
print(r.sq)
# [1] 1.0000000 1.0000000 0.7638879
