How to calculate lambda.1se from iterative cross validation? - r

I have the following code to choose a value of lambda based on the lowest resulting mean squared error (MSE) after iterated cross validation.
library(glmnet)
set.seed(3)
IV1 <- data.frame(IV1 = rnorm(100))
IV2 <- data.frame(IV2 = rnorm(100))
IV3 <- data.frame(IV3 = rnorm(100))
IV4 <- data.frame(IV4 = rnorm(100))
IV5 <- data.frame(IV5 = rnorm(100))
DV <- data.frame(DV = rnorm(100))
data <- data.frame(IV1,IV2,IV3,IV4,IV5,DV)
x <- model.matrix(DV~.-IV5 , data)[ , -1]
y <- data$DV
AB <- glmnet(x=x, y=y, alpha=1)
plot(AB,xvar="lambda")
lambdas <- NULL
for (i in 1:100) {
  fit <- cv.glmnet(x, y)
  errors <- data.frame(fit$lambda, fit$cvm)
  lambdas <- rbind(lambdas, errors)
}
lambdas <- aggregate(lambdas[, 2], list(lambdas$fit.lambda), mean)
bestindex <- which(lambdas[2] == min(lambdas[2]))
bestlambda <- lambdas[bestindex, 1]
How would I modify this to select lambda.1se (i.e., the largest λ at which the MSE is within one standard error of the minimal MSE)?
Edit:
How about this?
library(plotrix) # for std.error()
lambdas <- NULL # initialize
n.fits <- 100
r2 <- numeric(n.fits)
for (i in 1:n.fits) {
  fit <- cv.glmnet(x, y)
  errors <- data.frame(fit$lambda, fit$cvm)
  lambdas <- rbind(lambdas, errors)
  r2[i] <- max(1 - fit$cvm / var(y))
}
# take mean cvm for each lambda
lambdas <- aggregate(lambdas[, 2], list(lambdas$fit.lambda), mean)
lambdas <- as.data.frame(lambdas)
# find subset with mse within 1 se of the minimum
onese <- std.error(lambdas[, 2])
min <- min(lambdas[2])
low <- min - onese
high <- min + onese
lambdas <- subset(lambdas, x > low)
lambdas <- subset(lambdas, x < high)
# choose highest lambda among those
bestindex <- which(lambdas[1] == max(lambdas[1]))
bestlambda <- lambdas[bestindex, 1]

If you decide to use cv.glmnet, the following might be what you are looking for. (P.S. I also cleaned up your simulation code a bit; note that I did not make use of the AB object from glmnet, which is obviously not the same as cv.glmnet.)
library(glmnet)
## Simulate data:
set.seed(3)
x <- data.frame(
  IV1 = rnorm(100),
  IV2 = rnorm(100),
  IV3 = rnorm(100),
  IV4 = rnorm(100),
  IV5 = rnorm(100)
)
x <- as.matrix(x)
y <- rnorm(100) # target or response
## Iteratively fit models
lambdas <- NULL # initialize
n.fits <- 100
for (i in 1:n.fits) {
  fit <- cv.glmnet(x, y, family = "gaussian")
  df <- data.frame(fit$lambda.1se, mean(fit$cvm)) # can use median for cvm also
  lambdas <- rbind(lambdas, df)
}
## Select best lambda:
bestindex <- which.min(lambdas[, 2]) # the way you had it was way too complicated
bestlambda <- lambdas[bestindex, 1]
bestlambda
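If you would rather keep the pooling approach from your question, here is a hedged sketch of applying the one-standard-error rule to the aggregated curves yourself. Variable names follow your loop; note that the spread of the mean CV error across repeats is used as the "standard error", which is a reasonable choice but not identical to the fold-based cvsd that cv.glmnet uses for its own lambda.1se.
lambdas <- NULL
n.fits <- 100
for (i in 1:n.fits) {
  fit <- cv.glmnet(x, y)
  lambdas <- rbind(lambdas, data.frame(lambda = fit$lambda, cvm = fit$cvm))
}
## mean and standard error of the CV error at each lambda, across repeats
agg <- aggregate(cvm ~ lambda, data = lambdas,
                 FUN = function(v) c(mean = mean(v), se = sd(v) / sqrt(length(v))))
agg <- data.frame(lambda = agg$lambda, mean = agg$cvm[, "mean"], se = agg$cvm[, "se"])
## one-SE rule: largest lambda whose mean CV error is within one SE of the minimum
i.min <- which.min(agg$mean)
threshold <- agg$mean[i.min] + agg$se[i.min]
lambda.1se <- max(agg$lambda[agg$mean <= threshold])
lambda.1se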

Related

How to have output from lm() include std. error and others without using summary() for stargazer

I'm fitting several linear models in R in the following way:
library(stargazer)
library(purrr)    # map()
library(magrittr) # %>%
set.seed(12345)
n = 100
x1 = rnorm(n)
x2 = rnorm(n) + 0.1
y = x1 + rnorm(n)
df <- data.frame(x1, x2, y)
x_str <- c("x1", "x1+x2")
regf_lm <- function(df, y_var, x_str) {
  frmla <- formula(paste0(y_var, " ~ ", x_str))
  fit <- lm(frmla, data = df)
  summary(fit) #fit
}
gbind_lm <- function(vv) {
  n <- vv %>% length()
  fits <- list()
  coefs <- list()
  ses <- list()
  for (i in 1:n) {
    coefs[[i]] <- vv[[i]]$coefficients[, 1]
    ses[[i]] <- vv[[i]]$coefficients[, 2]
    fits[[i]] <- vv[[i]]
  }
  list("fits" = fits, "coefs" = coefs, "ses" = ses)
}
stargazer_lm <- function(mylist, fname, title_str, m_type = "html", ...) {
  stargazer(mylist$fits, coef = mylist$coefs,
            se = mylist$ses,
            type = m_type, title = title_str,
            out = paste0("~/projects/outputs", fname), single.row = T, ...)
}
p_2 <- map(x_str,
           ~ regf_lm(df = df,
                     y_var = "y", x_str = .))
m_all <- do.call(c, list(p_2)) %>% gbind_lm()
stargazer_lm(m_all, "name.html", "My model", m_type = "html")
In regf_lm, if I use summary(fit) on the last line, I'm able to generate regression output with columns for the estimated coefficients, standard errors, etc. But stargazer() does not work with summary(lm()) (it returns the error "$ operator is invalid for atomic vectors"). However, if I just use fit on the last line of regf_lm, the output shows only the estimated coefficients and not the standard errors, R squared, etc., and gbind_lm() won't work because I cannot extract the ses or the fits.
Any advice is greatly appreciated.
You can directly export model statistics in tidy format with the broom package:
library(broom)
library(dplyr)     # select()
library(stargazer)
set.seed(12345)
n = 100
x1 = rnorm(n)
x2 = rnorm(n) + 0.1
y = x1 + rnorm(n)
df <- data.frame(x1, x2, y)
x_str <- c("x1", "x1+x2")
regf_lm <- function(df, y_var, x_str) {
  frmla <- formula(paste0(y_var, " ~ ", x_str))
  fit <- lm(frmla, data = df)
  return(list(fit, select(broom::tidy(fit), std.error))) #fit
}
exm_model <- regf_lm(iris,'Sepal.Width','Sepal.Length')
stargazer(exm_model[[1]], coef = exm_model[[2]], title = 'x_model',
          out = 'abc', single.row = T)
This code ran on my machine without problems; I think you can adapt it to your workflow.
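If you want to keep the multi-model map() workflow from the question, here is a hedged sketch of how the broom-based extraction might slot into it (object names follow the question's code; adjust the type, title, and output options as needed):
library(broom)
library(purrr)
library(stargazer)
fits <- map(x_str, ~ lm(formula(paste0("y ~ ", .x)), data = df))
ses  <- map(fits, ~ broom::tidy(.x)$std.error)
stargazer(fits, se = ses, type = "text", title = "My models", single.row = TRUE)
Passing the fitted lm objects (rather than their summaries) lets stargazer compute R squared and the other model statistics itself, while the se argument supplies the standard errors explicitly.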

How to measure the robustness to outliers of some regression models

I am adding the variances of model coefficients and then returning the means of the sums.
I simply want to check which regression method is more robust to outliers, and I will examine many scenarios.
However, my code indicates that ordinary least squares is the best, which is not the expected result, since MM estimation and Huber estimation are considered robust regression methods.
Am I doing something wrong in my code?
#####################################
library(MASS) # rlm()
rmn <- function(n, mu) {
  p <- length(mu)
  matrix(rnorm(n * p, mean = mu), ncol = p)
}
#####################################
RI <- function(y, x, a, mu, R = 30, t = 1000) {
  x <- as.matrix(x)
  dm <- dim(x)
  n <- dm[1]
  bias1 <- bias2 <- bias3 <- numeric(t)
  b1 <- b2 <- b3 <- numeric(R)
  ### Outliers in X ######
  for (j in 1:t) {
    for (i in 1:R) {
      id <- sample(n, a * n)
      z <- x
      z[id, ] <- rmn(length(id), mu)
      b1[i] <- var(coef(lm(y ~ ., data = data.frame(z))))
      b2[i] <- var(coef(rlm(y ~ ., data = data.frame(z), maxit = 2000, method = "MM")))
      b3[i] <- var(coef(rlm(y ~ ., data = data.frame(z), psi = psi.huber, maxit = 300)))
    }
    bias1[j] <- sum(b1); bias2[j] <- sum(b2); bias3[j] <- sum(b3)
  }
  bias <- cbind("lm" = bias1, "MM-rlm" = bias2, "H-rlm" = bias3)
  colMeans(bias)
}
#####################################
p <- 5
n <- 300
x <- matrix(rnorm(n * p), ncol = p)
y <- rnorm(n)
a <- 0.2
mu <- colMeans(x) + 10
#####################################
RI(y,x,a,mu)
#####################################
UPDATE
Following the first answer, I changed how I measure robustness.
I now measure robustness by calculating the mean absolute difference between the coefficients from uncontaminated data and those from contaminated data. I introduce outliers first in y and then in x. I still have a problem.
############ R CODE ##############
rmn <- function(n, mu, seed = TRUE) {
  if (seed) set.seed(12345)
  p <- length(mu)
  matrix(rnorm(n * p, mean = mu), ncol = p)
}
##################################
out.cv <- function(y, x, a, mu, R = 500, seed = TRUE) {
  ## y: response variable
  ## x: independent variables
  ## a: percent of outliers
  ## mu: how far the outliers should be. A vector if outliers in x,
  ##     or a single number if outliers in y
  ## R: how many times to repeat this process
  x <- as.matrix(x)
  dm <- dim(x)
  n <- dm[1] ; d <- dm[2] + 1
  b1 <- b2 <- b3 <- numeric(R)
  be <- coef( lm(y ~ ., data = as.data.frame(x[, -1])) )
  ####################################
  ### Outliers in Y ######
  if ( length(mu) == 1 ) {
    for (i in 1:R) {
      if (seed) set.seed(12345)
      id <- sample(n, a * n)
      z <- y
      if (seed) set.seed(12345)
      z[id] <- rnorm(length(id), mu) ## mu has to be a single number here
      ## mean absolute difference between coefficients of clean data
      ## and coefficients with contaminated data
      b1[i] <- mean( abs( coef( lm(z ~ ., data = as.data.frame(x[, -1])) ) - be ) )
      b2[i] <- mean( abs( coef( rlm(z ~ ., data = data.frame(x[, -1]), maxit = 2000, method = "MM") ) - be ) )
      b3[i] <- mean( abs( coef( rlm(z ~ ., data = data.frame(x[, -1]), psi = psi.huber, maxit = 300) ) - be ) )
    }
  ########################
  ##### Outliers in X #########
  } else {
    for (i in 1:R) {
      if (seed) set.seed(12345)
      id <- sample(n, a * n)
      z <- x
      z[id, ] <- rmn( length(id), mu, seed ) ## mu must be a vector
      b1[i] <- mean( abs( coef( lm(y ~ ., data = as.data.frame(z[, -1])) ) - be ) )
      b2[i] <- mean( abs( coef( rlm(y ~ ., data = data.frame(z[, -1]), maxit = 2000, method = "MM") ) - be ) )
      b3[i] <- mean( abs( coef( rlm(y ~ ., data = data.frame(z[, -1]), psi = psi.huber, maxit = 300) ) - be ) )
    }
  }
  bias1 <- mean(b1) ; bias2 <- mean(b2) ; bias3 <- mean(b3)
  bias <- c(bias1, bias2, bias3)
  names(bias) <- c("lm", "MM-rlm", "Huber-rlm")
  bias
}
################################
p <- 5
n <- 200
##############################
# Independent X and Y ####
#set.seed(12345)
#x<- matrix( rnorm(n * p), ncol = p)
#y<-rnorm(n)
## Related X and Y ####
set.seed(12345)
x <- rmn(n, numeric(p))
ber <- rnorm(p)
m <- x %*% ber
y <- rnorm(n, m, 1)
############################
a <- 0.2 # 20% outliers
mu <- 15 ## outliers in y
out.cv(y, x, a, mu)
###########################
mu <- colMeans(x) + 15 ## outliers in x
out.cv(y, x, a, mu)
###################
First of all, I do not see you generating a sample from a long-tailed distribution. Use rt(n, 3) (Student's t with very few degrees of freedom) to get such a distribution, or play with others such as the log-normal; do not use rnorm. The injection procedure you use also seems overcomplicated.
Another thing is that the specification of MASS::rlm is not trivial.
In my opinion, start with quantreg::rq, which is quantile regression, and treat it as a robust benchmark method.
Additionally, your sampling procedure does not look valid: you generate new observations at each iteration that are not known a priori. I would expect bootstrapping on the train or test set.
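To make these suggestions concrete, here is a minimal sketch (my own illustrative setup, not the poster's) that uses heavy-tailed t errors and compares OLS, MM estimation, and a quantile-regression benchmark:
library(MASS)      # rlm
library(quantreg)  # rq
set.seed(1)
n <- 300
x <- rnorm(n)
y <- 1 + 2 * x + rt(n, df = 3)       # long-tailed errors instead of rnorm noise
coef(lm(y ~ x))                      # ordinary least squares
coef(rlm(y ~ x, method = "MM"))      # MM estimation
coef(rq(y ~ x, tau = 0.5))           # median (quantile) regression benchmark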

Calculate stderr, t-value, p-value, predict value for linear regression

I'm fitting linear models with MatrixModels:::lm.fit.sparse and MatrixModels::glm4 (also sparse).
However, these functions return only the coefficients, residuals, and fitted values.
What's the fastest and easiest way to get or calculate other values, such as standard errors, t-values, p-values, and predicted values?
I use the data from the MatrixModels:::lm.fit.sparse example.
I built a custom function summary_sparse to perform a summary for this model.
All matrix operations are performed with Matrix package.
The results are compared with those of the dense model.
Note that lm.fit.sparse has to be evaluated with method = "chol" to get proper results.
Functions:
summary_sparse <- function(l, X) {
  XXinv <- Matrix::chol2inv(Matrix::chol(Matrix::crossprod(X)))
  se <- sqrt(Matrix::diag(XXinv * sum(l$residuals^2) / (nrow(X) - ncol(X))))
  ts <- l$coef / se
  pvals <- 2 * c(1 - pnorm(abs(ts)))
  list(coef = l$coef, se = se, t = ts, p = pvals)
}
predict_sparse <- function(X, coef) {
  X %*% coef
}
Application:
dd <- expand.grid(a = as.factor(1:3),
                  b = as.factor(1:4),
                  c = as.factor(1:2),
                  d = as.factor(1:8))
n <- nrow(dd <- dd[rep(seq_len(nrow(dd)), each = 10), ])
set.seed(17)
dM <- cbind(dd, x = round(rnorm(n), 1))
## randomly drop some
n <- nrow(dM <- dM[-sample(n, 50), ])
dM <- within(dM, {
  A <- c(2, 5, 10)[a]
  B <- c(-10, -1, 3:4)[b]
  C <- c(-8, 8)[c]
  D <- c(10 * (-5:-2), 20 * c(0, 3:5))[d]
  Y <- A + B + A*B + C + D + A*D + C*x + rnorm(n) / 10
  wts <- sample(1:10, n, replace = TRUE)
  rm(A, B, C, D)
})
X <- Matrix::sparse.model.matrix(~ (a + b + c + d)^2 + c*x, data = dM)
Xd <- as(X, "matrix")
fmDense <- lm(dM[, "Y"] ~ Xd - 1)
ss <- summary(fmDense)
r1 <- MatrixModels:::lm.fit.sparse(X, y = dM[, "Y"], method = "chol")
f <- summary_sparse(r1, X)
all.equal(do.call(cbind, f), ss$coefficients, check.attributes = FALSE)
#TRUE
all.equal(predict_sparse(X, r1$coef)@x, predict(fmDense),
          check.attributes = FALSE, check.names = FALSE)
#TRUE

Simulate data for regression with standardized Y

Based on this topic, I have created a function that returns a dataset with variables related to the outcome (y) through specified linear coefficients.
simulate_data_regression <- function(sample = 10, coefs = 0, error = 0) {
  n_var <- length(coefs)
  X <- matrix(0, ncol = n_var, nrow = sample)
  beta <- as.matrix(coefs)
  for (i in 1:n_var) {
    X[, i] <- scale(rnorm(sample, 0, 1))
  }
  y <- X %*% beta
  if (error != 0) {
    y <- y + rnorm(sample, 0, error)
  }
  data <- data.frame(X = X)
  names(data) <- paste0("V", 1:n_var)
  data$y <- as.vector(y)
  return(data)
}
data <- simulate_data_regression(sample=50, coefs=c(0.1, 0.8), error=0)
summary(data)
sd(data$V1)
sd(data$y)
It works great. However, I would need to have a standardized y (mean 0 and SD 1). But when I try to scale it, the coefficients change:
data <- simulate_data_regression(sample=50, coefs=c(0.1, 0.8), error=0)
data$y <- as.vector(scale(data$y))
coef(lm(y ~ ., data=data))
Is it possible to do such a thing? Thank you very much!
Edit
In other words, I would like the specified coefs to be standardized coefs (expressed in units of the outcome's SD).
Scaling y a posteriori changes the coefs by 1/sd(y). However, I can't think of any way to change the betas before generating y so that they return to their specified values after y is scaled.
Edit 2: Failed attempt
I've tried running the function twice, first extracting sd(y) and scaling the coefficients with it, in the hope that those scaled coefficients would change to the specified ones once I scale y. But it doesn't work, which is expected, as sd(y) changes when I change the coefs :'(
Here's the failed attempt:
simulate_data_regression <- function(sample = 10, coefs = 0, error = 0, standardized = TRUE) {
  stuff <- .simulate_data_regression(sample = sample, coefs = coefs, error = error)
  if (standardized == TRUE) {
    y_sd <- sd(stuff$data$y)
    data <- .simulate_data_regression(sample = sample, coefs = y_sd * coefs, error = error, X = stuff$X)$data
    data$y <- as.vector(scale(data$y))
  } else {
    data <- stuff$data
  }
  return(data)
}
.simulate_data_regression <- function(sample = 10, coefs = 0, error = 0, X = NULL, y = NULL) {
  n_var <- length(coefs)
  if (is.null(X)) {
    X <- matrix(0, ncol = n_var, nrow = sample)
    for (i in 1:n_var) {
      X[, i] <- scale(rnorm(sample, 0, 1))
    }
  }
  beta <- as.matrix(coefs)
  y <- X %*% beta
  if (error != 0) {
    y <- y + rnorm(sample, 0, error)
  }
  data <- data.frame(X = X)
  names(data) <- paste0("V", 1:n_var)
  data$y <- as.vector(y)
  return(list(X = X, y = y, data = data))
}
If you scale y, the inference will be the same; only the p-value of the intercept changes, not the p-values of the coefficients.
In this example I have set error = 1.
set.seed(1234) # Make the results reproducible
data <- simulate_data_regression(sample = 50, coefs = c(0.1, 0.8), error = 1)
data2 <- data
data2$y <- scale(data2$y)
fit <- lm(y ~ ., data)
fit2 <- lm(y ~ ., data2)
summary(fit)
summary(fit2)
As you can see, the p-values of the coefficients are exactly the same even though the coefficients themselves are different. That is to be expected: scaling y divides it by its standard deviation, so the coefficients and their standard errors are divided by that same standard deviation, and the t statistics (and hence the p-values) are unchanged.
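For illustration (this check is mine, not part of the original answer), the scaling relationship can be verified directly:
## slope coefficients are divided by sd(y) when y is scaled; should return TRUE
all.equal(coef(fit2)[-1], coef(fit)[-1] / sd(data$y))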
The version of your function below has an argument, which, that lets you specify which regressors to scale. Its default is to scale all of them.
simulate_data_regression2 <- function(sample = 10, coefs = 0, error = 0, which = seq_along(coefs)) {
  n_var <- length(coefs)
  X <- matrix(0, ncol = n_var, nrow = sample)
  beta <- as.matrix(coefs)
  for (i in 1:n_var) {
    X[, i] <- rnorm(sample, 0, 1)
    if (i %in% which) X[, i] <- scale(X[, i])
  }
  y <- X %*% beta
  if (error != 0) {
    y <- y + rnorm(sample, 0, error)
  }
  data <- data.frame(X = X)
  names(data) <- paste0("V", 1:n_var)
  data$y <- as.vector(y)
  data
}
Now test the function.
set.seed(1234) # Make the results reproducible
data <- simulate_data_regression2(sample=50, coefs=c(0.1, 0.8), error=1)
set.seed(1234) # Reproduce the data generation process
data2 <- simulate_data_regression2(sample=50, coefs=c(0.1, 0.8), error=1, which = 2)
fit <- lm(y ~ ., data)
fit2 <- lm(y ~ ., data2)
As you can see, the coefficients of V2 are equal:
coef(fit)
# (Intercept)         V1         V2
#  0.01997809 0.19851020 0.96310013
coef(fit2)
# (Intercept)         V1         V2
#  0.07040538 0.21130549 0.96310013
The p-values of the V2 coefficient estimates are also equal:
summary(fit)
summary(fit2)
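For illustration (not part of the original answer), the equality of the V2 p-values can also be checked directly:
## should return TRUE, matching the statement above
all.equal(summary(fit)$coefficients["V2", "Pr(>|t|)"],
          summary(fit2)$coefficients["V2", "Pr(>|t|)"])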

Creating function arguments from a named list (with an application to stats4::mle)

I should start by saying what I'm trying to do: I want to use the mle function without having to re-write my log likelihood function each time I want to try a different model specification. Because mle is expecting a named list of starting values, you apparently cannot just write the log-likelihood function as taking a vector of parameters. A simple example:
Suppose I want to fit a linear regression model via maximum likelihood and at first, I'm ignoring one of my predictors:
n <- 100
df <- data.frame(x1 = runif(n), x2 = runif(n), y = runif(n))
Y <- df$y
X <- model.matrix(lm(y ~ x1, data = df))
# define log-likelihood function
ll <- function(beta0, beta1, sigma) {
  beta <- matrix(NA, nrow = 2, ncol = 1)
  beta[, 1] <- c(beta0, beta1)
  -sum(log(dnorm(Y - X %*% beta, 0, sigma)))
}
library(stats4)
mle(ll, start = list(beta0 = 0.1, beta1 = 0.2, sigma = 1))
Now, if I want to fit a different model, say:
m <- lm(y ~ x1 + x2, data = df)
I cannot re-use my log-likelihood function; I'd have to re-write it to include the beta2 parameter. What I'd like to do is something like:
ll.flex <- function(theta){
# theta is a vector that I can use directly
...
}
if I could then somehow adjust the start argument in mle to account for my now vector-input log-likelihood function, or, barring that, have a function that constructs the log-likelihood function at run-time, say by constructing the named list of arguments and then using it to define the function, e.g., something like this:
X <- model.matrix(lm(y ~ x1 + x2, data = df))
arguments <- rep(NA, dim(X)[2])
names(arguments) <- colnames(X)
ll.magic <- function(bring.this.to.life.as.function.arguments(arguments)){...}
Update:
I ended up writing a helper function that can add an arbitrary number of named arguments x1, x2, x3... to a passed function f.
add.arguments <- function(f, n) {
  # adds n arguments to a function f; returns that new function
  t <- paste("arg <- alist(",
             paste(sapply(1:n, function(i) paste("x", i, "=", sep = "")), collapse = ","),
             ")", sep = "")
  formals(f) <- eval(parse(text = t))
  f
}
It's ugly, but it got the job done, letting me re-factor my log-likelihood function on the fly.
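As a quick illustration of what the helper produces (my example, not from the original post):
f <- function() NULL
g <- add.arguments(f, 3)
names(formals(g))
# [1] "x1" "x2" "x3"
The new formals have no defaults, which is all stats4::mle needs in order to match the names in its start list.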
You can use the mle2 function from the bbmle package, which allows you to pass vectors as parameters. Here is some sample code.
# REDEFINE LOG LIKELIHOOD
library(bbmle)
ll2 <- function(params) {
  beta <- matrix(NA, nrow = length(params) - 1, ncol = 1)
  beta[, 1] <- params[-length(params)]
  sigma <- params[[length(params)]]
  minusll <- -sum(log(dnorm(Y - X %*% beta, 0, sigma)))
  return(minusll)
}
# REGRESS Y ON X1
X <- model.matrix(lm(y ~ x1, data = df))
mle2(ll2, start = c(beta0 = 0.1, beta1 = 0.2, sigma = 1),
     vecpar = TRUE, parnames = c('beta0', 'beta1', 'sigma'))
# REGRESS Y ON X1 + X2
X <- model.matrix(lm(y ~ x1 + x2, data = df))
mle2(ll2, start = c(beta0 = 0.1, beta1 = 0.2, beta2 = 0.1, sigma = 1),
     vecpar = TRUE, parnames = c('beta0', 'beta1', 'beta2', 'sigma'))
This gives you
Call:
mle2(minuslogl = ll2, start = c(beta0 = 0.1, beta1 = 0.2, beta2 = 0.1,
sigma = 1), vecpar = TRUE, parnames = c("beta0", "beta1",
"beta2", "sigma"))
Coefficients:
beta0 beta1 beta2 sigma
0.5526946 -0.2374106 0.1277266 0.2861055
It might be easier to use optim directly; that's what mle is using anyway.
ll2 <- function(par, X, Y) {
  beta <- matrix(c(par[-1]), ncol = 1)
  -sum(log(dnorm(Y - X %*% beta, 0, par[1])))
}
getp <- function(X, sigma = 1, beta = 0.1) {
  p <- c(sigma, rep(beta, ncol(X)))
  names(p) <- c("sigma", paste("beta", 0:(ncol(X) - 1), sep = ""))
  p
}
set.seed(5)
n <- 100
df <- data.frame(x1 = runif(n), x2 = runif(n), y = runif(n))
Y <- df$y
X1 <- model.matrix(y ~ x1, data = df)
X2 <- model.matrix(y ~ x1 + x2, data = df)
optim(getp(X1), ll2, X=X1, Y=Y)$par
optim(getp(X2), ll2, X=X2, Y=Y)$par
With the output of
> optim(getp(X1), ll2, X=X1, Y=Y)$par
sigma beta0 beta1
0.30506139 0.47607747 -0.04478441
> optim(getp(X2), ll2, X=X2, Y=Y)$par
sigma beta0 beta1 beta2
0.30114079 0.39452726 -0.06418481 0.17950760
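If you also want standard errors from the optim route (an illustrative extension, not part of the answer above), you can ask optim for the Hessian of the negative log-likelihood and invert it:
opt <- optim(getp(X2), ll2, X = X2, Y = Y, hessian = TRUE)
sqrt(diag(solve(opt$hessian)))   # approximate standard errors of sigma, beta0, beta1, beta2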
It might not be what you're looking for, but I would do this as follows:
mle2(y ~ dnorm(mu, sigma), parameters = list(mu ~ x1 + x2), data = df,
     start = list(mu = 1, sigma = 1))
mle2(y ~ dnorm(mu, sigma), parameters = list(mu ~ x1), data = df,
     start = list(mu = 1, sigma = 1))
You might be able to adapt this formulation for a multinomial, although dmultinom might not work; you might need to write a Dmultinom() that takes a matrix of multinomial samples and returns a (log) probability.
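A hypothetical sketch of such a Dmultinom() (the name and interface are only illustrative): it takes a matrix with one multinomial sample per row and returns the joint (log) probability by summing the row-wise dmultinom values.
Dmultinom <- function(x, prob, log = FALSE) {
  x <- as.matrix(x)
  ll <- sum(apply(x, 1, function(row) dmultinom(row, prob = prob, log = TRUE)))
  if (log) ll else exp(ll)
}
## e.g. joint log-probability of three independent draws
counts <- rbind(c(2, 1, 0), c(1, 1, 1), c(0, 0, 3))
Dmultinom(counts, prob = c(0.5, 0.3, 0.2), log = TRUE)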
The R code that Ramnath provided can also be applied to the optim function, because optim also takes vectors as parameters.
