library(GLMsData)
data(fluoro)
attach(fluoro)
lambda <- seq(-1, 1, 0.2)
SS <- cbind()
FV <- cbind()
lm.out <- list()
for (i in 1:length(lambda)) {
  if (lambda[i] != 0) {
    y <- (Dose^lambda[i] - 1)/lambda[i]
  } else {
    y <- log(Dose)
  }
  # Fit models for each value of lambda.
  lm.out[[i]] <- lm(y ~ Time, data=fluoro, na.action=na.exclude)
  # extracting standardized residuals and fitted values
  SS <- sapply(lm.out, rstandard)
  FV <- sapply(lm.out, fitted)
  #layout_matrix_1 <- matrix(1:12, ncol = 3) # Define position matrix
  #layout(layout_matrix_1) #, widths=1:2, heights=1:2)
  scatter.smooth(
    SS[, i] ~ FV[, i], col="grey",
    las=1, ylab="Standardized residuals", xlab="Fitted values",
    main=bquote("Plot of residuals versus fitted values for"~
                  lambda == ~ .(lambda[i]))) +
    facet_wrap(facets = vars(lambda)) # plotting the residuals plots
}
I've been able to plot residuals vs fitted values for each value of lambda. I want my residual plots all on a single page.
Since you're using scatter.smooth from the stats package, which comes with base R, ggplot2::facet_wrap is the wrong function: ggplot2 is built on the grid graphics system, whereas base graphics isn't. You want to set mfrow=c(<nrow>, <ncol>) in the graphical parameters instead.
I noticed you already started with layout() which is also possible.
library(GLMsData)
data(fluoro)
lambda <- seq(-1, 1, 0.2)
SS <- cbind()
FV <- cbind()
op <- par(mfrow=c(4, 3)) ## set new pars, backup old into `op`
lm.out <- list()
for (i in 1:length(lambda)) {
  if (lambda[i] != 0) {
    y <- with(fluoro, (Dose^lambda[i] - 1)/lambda[i])
  } else {
    y <- with(fluoro, log(Dose))
  }
  # Fit models for each value of lambda.
  lm.out[[i]] <- lm(y ~ Time, data=fluoro, na.action=na.exclude)
  # extracting standardized residuals and fitted values
  SS <- sapply(lm.out, rstandard)
  FV <- sapply(lm.out, fitted)
  scatter.smooth(
    SS[, i] ~ FV[, i], col="grey",
    las=1, ylab="Standardized residuals", xlab="Fitted values",
    main=bquote("Plot of residuals versus fitted values for"~
                  lambda == ~ .(lambda[i])))
}
par(op) ## restore old pars
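If you prefer layout(), which you had already started with, an equivalent setup could look like this (just a sketch; the 4 x 3 grid mirrors the mfrow call above):
## Alternative to par(mfrow = ...): the same 4 x 3 grid via layout()
layout(matrix(1:12, nrow = 4, ncol = 3, byrow = TRUE))
## ... run the plotting loop from above ...
layout(1)  # reset to a single panel afterwards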
Note: In case you get an "Error in plot.new() : figure margins too large" error, you need to expand the plot pane in RStudio; a much better option, however, is to save the plot to disk. Moreover, I used with() instead of attach() because the latter is considered bad practice; see this answer for the reasons why.
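For saving to disk, one possibility is to open a file device around the loop; this is only a sketch, and the file name and dimensions are arbitrary choices:
png("residuals_vs_fitted_by_lambda.png", width = 9, height = 12, units = "in", res = 300)
op <- par(mfrow = c(4, 3))
## ... run the plotting loop from above ...
par(op)
dev.off()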
For my thesis I have to fit some GLM models with MLEs that R doesn't have. I was doing fine for the models with closed-form expressions, but now I have to use the Gaussian CDF, so I decided to fit a simple probit model.
This is the code:
Data:
set.seed(123)
x <- matrix(rnorm(50, 2, 4), 50, 1)
m <- matrix(runif(50, 2, 4), 50, 1)
t <- matrix(rpois(50, 0.5), 50, 1)
z <- (1 + exp(-((x - mean(x)/sd(x)))))^-1 + runif(50)
y <- ifelse(z < 1.186228, 0, 1)
data1 <- as.data.frame(cbind(y, x, m, t))
myprobit <- function(formula, data) {
  mf <- model.frame(formula, data)
  y <- model.response(mf, "numeric")
  X <- model.matrix(formula, data = data)
  if (any(is.na(cbind(y, X))))
    stop("Some data are missing.")
  loglik <- function(betas, X, y, sigma) { # log-likelihood
    p <- length(betas)
    beta <- betas[-p]
    eta <- X %*% beta
    sigma <- 1 # because of identification, sigma must be equal to 1
    G <- pnorm(y, mean = eta, sd = sigma)
    sum(y*log(G) + (1 - y)*log(1 - G))
  }
  ls.reg <- lm(y ~ X - 1) # starting values using OLS; X already contains a constant
  start <- coef(ls.reg)
  fit <- optim(start, loglik, X = X, y = y, control = list(fnscale = -1),
               method = "BFGS", hessian = TRUE) # optimise
  if (fit$convergence > 0) {
    print(fit)
    stop("optim failed to converge!") # verify convergence
  }
  return(fit)
}
myprobit(y ~ x + m + t,data = data1)
And I get: Error in X %*% beta : non-conformable arguments. If I change start <- coef(ls.reg) to start <- c(coef(ls.reg), 1), I get wrong estimates compared with:
probit <- glm(y ~ x + m + t,data = data1 , family = binomial(link = "probit"))
What am I doing wrong?
Is it possible to fit this model correctly using pnorm? If not, what algorithm should I use to approximate the Gaussian CDF? Thanks!!
The line of code responsible for your error is the following:
eta <- X %*% beta
Note that "%*%" is the matrix multiplication operator. By reproducing your code I noticed that X is a matrix with 50 rows and 4 columns. Hence, for matrix multiplication to be possible your "beta" needs to have 4 rows. But when you run "betas[-p]" you subset the betas vector by removing its last element, leaving only three elements instead of the four you need for matrix multiplication to be defined. If you remove [-p] the code will work.
I have an array of outputs from hundreds of segmented linear models (made using the segmented package in R). I want to be able to use these outputs on new data, using the predict function. To be clear, I do not have the segmented linear model objects in my workspace; I just saved and reimported the relevant outputs (e.g. the coefficients and breakpoints). For this reason I can't simply use the predict.segmented function from the segmented package.
Below is a toy example based on this link that seems promising, but does not match the output of the predict.segmented function.
library(segmented)
set.seed(12)
xx <- 1:100
zz <- runif(100)
yy <- 2 + 1.5*pmax(xx-35,0) - 1.5*pmax(xx-70,0) +
15*pmax(zz-0.5,0) + rnorm(100,0,2)
dati <- data.frame(x=xx,y=yy,z=zz)
out.lm <- lm(y ~ x, data = dati)
o <- segmented(out.lm, seg.Z = ~x, psi = list(x = c(30, 60)),
               control = seg.control(display = FALSE))
# Note that coefficients with U in the name are differences in slopes, not slopes.
# Compare:
slope(o)
coef(o)[2] + coef(o)[3]
coef(o)[2] + coef(o)[3] + coef(o)[4]
# prediction
pred <- data.frame(x = 1:100)
pred$dummy1 <- pmax(pred$x - o$psi[1,2], 0)
pred$dummy2 <- pmax(pred$x - o$psi[2,2], 0)
pred$dummy3 <- I(pred$x > o$psi[1,2]) * (coef(o)[2] + coef(o)[3])
pred$dummy4 <- I(pred$x > o$psi[2,2]) * (coef(o)[2] + coef(o)[3] + coef(o)[4])
names(pred)[-1]<- names(model.frame(o))[-c(1,2)]
# compute the prediction, using standard predict function
# computing confidence intervals further
# suppose that the breakpoints are fixed
pred <- data.frame(pred, predict(o, newdata= pred,
interval="confidence"))
# Try prediction using the predict.segment version to compare
test <- predict.segmented(o)
plot(pred$fit, test, ylim = c(0, 100))
abline(0,1, col = "red")
# At least one segment not being predicted correctly?
Can I use the base R predict() function (not the predict.segmented() function) with the coefficients and breakpoints saved from segmented linear models?
UPDATE
I figured out that the code above has issues (don't use it). Through some reverse engineering of the predict.segmented() function, I produced the design matrix and used that to predict values instead of directly using the predict() function. I do not consider this a full answer to the original question yet, because predict() can also produce confidence intervals for the prediction and I have not yet implemented that; the question is still open for someone to add confidence intervals.
library(segmented)
## Define function for making matrix of dummy variables (this is based on code from predict.segmented())
dummy.matrix <- function(x.values, x_names, psi.est = TRUE, nameU, nameV, diffSlope, est.psi) {
  # This function creates a model matrix with dummy variables for a segmented lm with two breakpoints.
  # Inputs:
  #   x.values: the x values of the segmented lm
  #   x_names: the name of the column of x values
  #   psi.est: this is legacy from the predict.segmented function, leave it set to 'TRUE'
  #   (obj in the examples below refers to the segmented lm object the pieces were extracted from)
  #   nameU: names (class character) of 3rd and 4th coef, which are "U1.x" "U2.x" for lm with two breaks. Example: names(c(obj$coef[3], obj$coef[4]))
  #   nameV: names (class character) of 5th and 6th coef, which are "psi1.x" "psi2.x" for lm with two breaks. Example: names(c(obj$coef[5], obj$coef[6]))
  #   diffSlope: the coefficients (class numeric) with the slope differences; called U1.x and U2.x for lm with two breaks. Example: c(obj$coef[3], obj$coef[4])
  #   est.psi: the estimated break points (class numeric); these are the estimated breakpoints from segmented.lm. Example: c(obj$psi[1,2], obj$psi[2,2])
  #
  n <- length(x.values)
  k <- length(est.psi)
  PSI <- matrix(rep(est.psi, rep(n, k)), ncol = k)
  newZ <- matrix(x.values, nrow = n, ncol = k, byrow = FALSE)
  dummy1 <- pmax(newZ - PSI, 0)
  if (psi.est) {
    V <- ifelse(newZ > PSI, -1, 0)
    dummy2 <- if (k == 1) V * diffSlope else V %*% diag(diffSlope)
    newd <- cbind(x.values, dummy1, dummy2)
    colnames(newd) <- c(x_names, nameU, nameV)
  } else {
    newd <- cbind(x.values, dummy1)
    colnames(newd) <- c(x_names, nameU)
  }
  # if (!x_names %in% names(coef(obj.seg)))
  #   newd <- newd[, -1, drop = FALSE]
  return(newd)
}
## Test dummy matrix function----------------------------------------------
set.seed(12)
xx <- 1:100
zz <- runif(100)
yy <- 2 + 1.5*pmax(xx-35, 0) - 1.5*pmax(xx-70, 0) + 15*pmax(zz-0.5, 0) + rnorm(100, 0, 2)
dati <- data.frame(x = xx, y = yy, z = zz)
out.lm <- lm(y ~ x, data = dati)
# 1 segmented variable, 2 breakpoints: you have to specify starting values (vector) for psi:
o <- segmented(out.lm, seg.Z = ~x, psi = c(30, 60),
               control = seg.control(display = FALSE))
slope(o)
plot.segmented(o)
summary(o)
# Test dummy matrix fn with the same dataset
newdata <- dati
nameU1 <- c("U1.x", "U2.x")
nameV1 <- c("psi1.x", "psi2.x")
diffSlope1 <- c(o$coef[3], o$coef[4])
est.psi1 <- c(o$psi[1,2], o$psi[2,2])
test <- dummy.matrix(x.values = newdata$x, x_names = "x", psi.est = TRUE,
nameU = nameU1, nameV = nameV1, diffSlope = diffSlope1, est.psi = est.psi1)
# Predict response variable using matrix multiplication
col1 <- matrix(1, nrow = dim(test)[1])
test <- cbind(col1, test) # Now test is the same as model.matrix(o)
predY <- coef(o) %*% t(test)
plot(predY[1,])
lines(predict.segmented(o), col = "blue") # good, predict.segmented gives same answer
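Since the original goal was to use only saved outputs on new data, here is a sketch of that last step with the same toy fit. The names saved_coef, saved_psi and new_x are mine, and the assumption is that the coefficients and breakpoints are the only pieces that were saved and re-imported:
# Predict at new x values using only the saved coefficients and breakpoints
saved_coef <- coef(o)                      # (Intercept), x, U1.x, U2.x, psi1.x, psi2.x
saved_psi  <- c(o$psi[1, 2], o$psi[2, 2])  # estimated breakpoints
new_x <- seq(0, 100, by = 5)
newd <- dummy.matrix(x.values = new_x, x_names = "x", psi.est = TRUE,
                     nameU = c("U1.x", "U2.x"), nameV = c("psi1.x", "psi2.x"),
                     diffSlope = saved_coef[3:4], est.psi = saved_psi)
pred_new <- as.numeric(cbind(1, newd) %*% saved_coef)
plot(new_x, pred_new, type = "l", xlab = "x", ylab = "Predicted y")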
I am puzzled by a simple question in R JAGS. I have, for example, 10 parameters: d[1], d[2], ..., d[10]. It is intuitive from the data that they should be increasing, so I want to put a constraint on them.
Here is what I tried to do, but it gives an error message saying "Node inconsistent with parents":
model{
  ...
  for (j in 1:10){
    d.star[j] ~ dnorm(0, 0.0001)
  }
  d = sort(d.star)
}
Then I tried this:
d[1] ~ dnorm(0, 0.0001)
for (j in 2:10){
  d[j] ~ dnorm(0, 0.0001) I(d[j-1],)
}
This worked, but I don't know if this is the correct way to do it. Could you share your thoughts?
Thanks!
If you are ever uncertain about something like this, it is best to just simulate some data to determine if the model structure you suggest works (spoiler alert: it does).
Here is the model that I used:
cat('model{
  d[1] ~ dnorm(0, 0.0001)  # intercept
  d[2] ~ dnorm(0, 0.0001)
  for(j in 3:11){
    d[j] ~ dnorm(0, 0.0001) I(d[j-1],)
  }
  for(i in 1:200){
    y[i] ~ dnorm(mu[i], tau)
    mu[i] <- inprod(d, x[i,])
  }
  tau ~ dgamma(0.01, 0.01)
}',
file = "model_example.R")
And here are the data I simulated to use with this model.
library(runjags)    # provides run.jags()
library(mcmcplots)
# intercept with sorted betas
set.seed(161)
betas <- c(1, sort(runif(10, -5, 5)))
# make covariates, 1 for intercept
x <- cbind(1,matrix(rnorm(2000), nrow = 200, ncol = 10))
# deterministic part of model
y_det <- x %*% betas
# add noise
y <- rnorm(length(y_det), y_det, 1)
data_list <- list(y = as.numeric(y), x = x)
# fit the model
mout <- run.jags('model_example.R',monitor = c("d", "tau"), data = data_list)
Following this, we can plot out the estimates and overlay the true parameter values
caterplot(mout, "d", reorder = FALSE)
points(rev(c(1:11)) ~ betas, pch = 18,cex = 0.9)
The black points are the true parameter values; the blue points and lines are the estimates. It looks like this setup does fine as long as there are enough data to estimate all of those parameters.
It looks like there is a syntax error in the first implementation. Just try:
model{
  ...
  for (j in 1:10){
    d.star[j] ~ dnorm(0, 0.0001)
  }
  d[1:10] <- sort(d.star) # notice d is indexed
}
and compare the results with those of the second implementation. According to the documentation, these are both correct, but it is advised to use the function sort.
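For that comparison, an untested sketch of the sort() version fitted to the same simulated data as in the other answer could look like this; the intercept is kept out of the sorted block so it matches how betas was generated, and data_list is reused from above:
cat('model{
  d[1] ~ dnorm(0, 0.0001)       # intercept, not constrained
  for(j in 1:10){
    d.star[j] ~ dnorm(0, 0.0001)
  }
  d[2:11] <- sort(d.star)       # sorted slopes
  for(i in 1:200){
    y[i] ~ dnorm(mu[i], tau)
    mu[i] <- inprod(d, x[i,])
  }
  tau ~ dgamma(0.01, 0.01)
}',
file = "model_sort.R")
mout_sort <- run.jags("model_sort.R", monitor = c("d", "tau"), data = data_list)
caterplot(mout_sort, "d", reorder = FALSE)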
I'm trying to apply lasso regression to my data, using the lars package for R. With the coef function I get the coefficients of the lasso model, and using them I plot the fitted model. But this model is always wrong by a constant (blue color).
FX <- cbind(1, X, X^2, X^3, X^4, X^5,X^6, X^7)
lasso <- lars(FX, Y, type='lasso')
alpha <- coef(lasso, s=1, mode='lambda')
tr_x <- (1:100)/100
y0 <- getFuncByParam(tr_x, alpha)
lines(x,y0, col='blue', lwd=2)
But when I use the predict function, I get the correct model (pink color):
Ftest <- cbind(1, tr_x , tr_x^2, tr_x^3, tr_x^4, tr_x^5, tr_x^6, tr_x^7)
y0 <- predict(lasso, Ftest, 1, type='fit', mode='lambda')
UPD
getFuncByParam <- function(x, a){
  n <- length(a)
  res <- 0
  for(i in 1:n){
    res <- res + a[i]*x^(i - 1)
  }
  return(res)
}
I'm trying to estimate a linear model with a log-normally distributed error term. I already have working code for a linear model with normally distributed errors:
library(Ecdat)
library(assertthat)
library(maxLik)
# Load the data
data(Wages1)
# Check what R says
summary(lm(wage ~ school + exper + sex, data = Wages1))
# Use maxLik from package maxLik
# The likelihood function
my_log_lik_pos <- function(theta, data){
  y <- data[, 1]
  x <- data[, -1]
  beta <- head(theta, -1)
  sigma <- tail(theta, 1)
  xb <- x %*% beta
  are_equal(dim(xb), c(nrow(my_data), 1))
  return(sum(log(dnorm(y, mean = xb, sd = sigma))))
}
# Bind the data
my_data <- cbind(Wages1$wage, 1, Wages1$school, Wages1$exper, Wages1$sex)
my_problem <- maxLik(my_log_lik_pos, data = my_data,
start = rep(1,5), method = "BFGS")
summary(my_problem)
I get approximately the same results. Now I try to do the same, but using the log-normal likelihood. For this, I have to first simulate some data:
true_beta <- c(0.1, 0.2, 0.3, 0.4, 0.5)
ys <- my_data[, -1] %*% head(true_beta, -1) +
rlnorm(nrow(my_data), 0, tail(true_beta, 1))
my_data_2 <- cbind(ys, my_data[, -1])
And the log-likelihood function:
my_log_lik_lognorm <- function(theta, data){
  y <- data[, 1]
  x <- data[, -1]
  beta <- head(theta, -1)
  sigma <- tail(theta, 1)
  xb <- x %*% beta
  are_equal(dim(xb), c(nrow(data), 1))
  return(sum(log(dlnorm(y, mean = xb, sd = sigma))))
}
my_problem2 <- maxLik(my_log_lik_lognorm, data = my_data_2,
start = rep(0.2,5), method = "BFGS")
summary(my_problem2)
The estimated parameters should be around the values of true_beta, but for some reason I find completely different values. I tried different methods and different starting values, but to no avail. I'm sure I'm missing something obvious, but I don't see what.
Am I right to assume that the log-likelihood of the log-normal distribution is:
sum(log(dlnorm(y, mean = .., sd = ...)))
Unless I'm mistaken, this is the definition of the log-likelihood (sum of the logs of the densities).
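A quick sanity check of that identity (my own illustration; note that dlnorm() also has a log argument, which is numerically safer than wrapping log() around the density):
y_check <- rlnorm(5, meanlog = 1, sdlog = 0.5)
all.equal(sum(log(dlnorm(y_check, meanlog = 1, sdlog = 0.5))),
          sum(dlnorm(y_check, meanlog = 1, sdlog = 0.5, log = TRUE)))  # TRUE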
I found the issue: it seems the problem is not my log-likelihood function. When I try to estimate the model with glm:
summary(glm(ys ~ school + exper + sex, family=gaussian(link="log"), data=Wages1))
I get the same result as with maxLik and my log-likelihood. It seems the problem comes from the way I tried to simulate the data:
ys <- my_data[, -1] %*% head(true_beta, -1) +
rlnorm(nrow(my_data), 0, tail(true_beta, 1))
The correct way to simulate the data:
ys <- rlnorm(nrow(my_data), my_data[, -1] %*% head(true_beta, -1), tail(true_beta, 1))
Now everything works!
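For what it's worth, a small sketch of why the first simulation was off (the values below are arbitrary): rlnorm(n, meanlog, sdlog) draws exp(Normal(meanlog, sdlog)), so the linear predictor belongs in meanlog rather than being added to a log-normal draw:
mu <- 0.7; s <- 0.5                                       # arbitrary illustration values
a <- { set.seed(42); rlnorm(3, meanlog = mu, sdlog = s) }
b <- { set.seed(42); exp(rnorm(3, mean = mu, sd = s)) }
all.equal(a, b)   # TRUE: rlnorm() draws exp(normal), it does not add log-normal noise to mu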