I am using the following R code to run several linear regression models and extract the results into a data frame:
library(tidyverse)
library(broom)

data <- mtcars
outcomes <- c("wt", "mpg", "hp", "disp")
exposures <- c("gear", "vs", "am")

models <- expand.grid(outcomes, exposures) %>%
  group_by(Var1) %>%
  rowwise() %>%
  summarise(frm = paste0(Var1, "~factor(", Var2, ")")) %>%
  group_by(model_id = row_number(), frm) %>%
  do(tidy(lm(.$frm, data = data))) %>%
  mutate(lci = estimate - (1.96 * std.error),
         uci = estimate + (1.96 * std.error))
How can I modify my code to use robust standard errors, similar to Stata's?
* example of using robust standard errors in STATA
regress y x, robust
There is a comprehensive discussion of robust standard errors in lm models on Stack Exchange. You can update your code as follows:
library(sandwich)

models <- expand.grid(outcomes, exposures) %>%
  group_by(Var1) %>%
  rowwise() %>%
  summarise(frm = paste0(Var1, "~factor(", Var2, ")")) %>%
  group_by(model_id = row_number(), frm) %>%
  do(cbind(
    tidy(lm(.$frm, data = data)),
    robSE = sqrt(diag(vcovHC(lm(.$frm, data = data), type = "HC1")))
  )) %>%
  mutate(
    lci = estimate - (1.96 * std.error),
    uci = estimate + (1.96 * std.error),
    lciR = estimate - (1.96 * robSE),
    uciR = estimate + (1.96 * robSE)
  )
The important line is this:
sqrt(diag(vcovHC(lm(.$frm, data = data), type = "HC1")))
The vcovHC() function returns the heteroskedasticity-consistent covariance matrix of the coefficient estimates. You extract the variances from its diagonal with diag() and take the square root with sqrt() to get the robust standard errors.
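If you only need robust inference for one model at a time, the same pieces combine with coeftest() from the lmtest package; a minimal sketch (the lmtest dependency is my addition, not part of your original code):

library(lmtest)
library(sandwich)

fit <- lm(mpg ~ factor(gear), data = mtcars)
# HC1 is the small-sample correction that matches Stata's "robust" option
coeftest(fit, vcov = vcovHC(fit, type = "HC1"))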
I would like to replicate the plot.lda() plot method using ggplot2 and tidymodels. Is there an elegant way to get the plot?
I think I can fake the augment() function, which does not have an lda method, by using predict() and binding the result onto the original data.
Here is an example with base R and tidymodels code:
library(ISLR2)
library(MASS)

# First base R
train <- Smarket$Year < 2005
lda.fit <-
  lda(
    Direction ~ Lag1 + Lag2,
    data = Smarket,
    subset = train
  )
plot(lda.fit)

# Next tidymodels
library(tidyverse)
library(tidymodels)
library(discrim)

lda_spec <- discrim_linear() %>%
  set_mode("classification") %>%
  set_engine("MASS")

the_rec <- recipe(
  Direction ~ Lag1 + Lag2,
  data = Smarket
)

the_workflow <- workflow() %>%
  add_recipe(the_rec) %>%
  add_model(lda_spec)

Smarket_train <- Smarket %>%
  filter(Year != 2005)

the_workflow_fit_lda_fit <-
  fit(the_workflow, data = Smarket_train) %>%
  extract_fit_parsnip()

# now my attempt to do the plot
predictions <- predict(the_workflow_fit_lda_fit,
                       new_data = Smarket_train,
                       type = "raw")[[3]] %>%
  as.vector()

bind_cols(Smarket_train, .fitted = predictions) %>%
  ggplot(aes(x = .fitted)) +
  geom_histogram(aes(y = stat(density)), binwidth = .5) +
  scale_x_continuous(breaks = seq(-4, 4, by = 2)) +
  facet_grid(vars(Direction)) +
  xlab("") +
  ylab("Density")
There must be a better way to do this.... thoughts?
You can do this by using a combination of extract_fit_*() and parsnip::repair_call(). The plot.lda() method uses the $call object in the LDA fit, which we need to adjust, since the call object produced via tidymodels differs from the one produced by calling lda() directly.
library(ISLR2)
library(MASS)

# First base R
train <- Smarket$Year < 2005
lda.fit <-
  lda(
    Direction ~ Lag1 + Lag2,
    data = Smarket,
    subset = train
  )

# Next tidymodels
library(tidyverse)
library(tidymodels)
library(discrim)

lda_spec <- discrim_linear() %>%
  set_mode("classification") %>%
  set_engine("MASS")

the_rec <- recipe(
  Direction ~ Lag1 + Lag2,
  data = Smarket
)

the_workflow <- workflow() %>%
  add_recipe(the_rec) %>%
  add_model(lda_spec)

Smarket_train <- Smarket %>%
  filter(Year != 2005)

the_workflow_fit_lda_fit <-
  fit(the_workflow, data = Smarket_train)
After fitting both models, we can inspect the $call objects and see that they differ.
lda.fit$call
#> lda(formula = Direction ~ Lag1 + Lag2, data = Smarket, subset = train)
extract_fit_engine(the_workflow_fit_lda_fit)$call
#> lda(formula = ..y ~ ., data = data)
The parsnip::repair_call() function will replace data with the data we pass in. Additionally, we will rename the response of the data to ..y to match the call.
the_workflow_fit_lda_fit %>%
  extract_fit_parsnip() %>%
  parsnip::repair_call(rename(Smarket_train, ..y = Direction)) %>%
  extract_fit_engine() %>%
  plot()
Created on 2021-11-12 by the reprex package (v2.0.1)
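If you want to confirm the repair before plotting, you can print the repaired call; a minimal sketch reusing the pipeline above (the exact printed call can vary with the parsnip version):

repaired_fit <- the_workflow_fit_lda_fit %>%
  extract_fit_parsnip() %>%
  parsnip::repair_call(rename(Smarket_train, ..y = Direction)) %>%
  extract_fit_engine()

# the data argument should now reference the renamed training data
repaired_fit$call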
I am trying to run a multiple linear regression, but I am getting the same coefficients for all my grouped variables.
names <- rep(LETTERS[1:25], each = 20)
daysp <- runif(500, 1, 500)
startdate <- sample(seq(as.Date('1999/01/01'), as.Date('2020/01/01'), by = "day"), 500)
enddate <- sample(seq(as.Date('2010/01/01'), as.Date('2020/01/01'), by = "day"), 500)
class <- rep(LETTERS[1:4], each = 125)
amt <- runif(500, 10000, 500000)
class2 <- rep(LETTERS[5:8], each = 125)  # renamed from 2ndclass: R names cannot start with a digit

df <- data.frame(names, daysp, startdate, enddate, class, amt, class2)
I changed class and class2 to factors, then fit:
fitted_models <- df %>%
  group_by(names) %>%
  do(model = lm(daysp ~ startdate + enddate + class + class2 + amt, data = df))
fitted_models$model
How can I run the regressions and get different coefficients for each group?
data = df explicitly uses the entire data frame df, ignoring any grouping. Use . to refer to the data that is piped in, which lets do() use the groups. See the example at the bottom of ?do for reference:
## From ?do
by_cyl <- mtcars %>% group_by(cyl)
models <- by_cyl %>% do(mod = lm(mpg ~ disp, data = .))
Though versions of dplyr >= 1.0 prefer nest_by() (also demonstrated on the ?do help page):
models <- mtcars %>%
  nest_by(cyl) %>%
  mutate(mod = list(lm(mpg ~ disp, data = data)))
models %>% summarise(broom::tidy(mod))
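Applied to your data, the only change needed is data = . inside do(); a minimal sketch (I drop class and class2 here because, in this simulated data, they are constant within each names group, and lm() errors on single-level factors):

fitted_models <- df %>%
  group_by(names) %>%
  do(model = lm(daysp ~ startdate + enddate + amt, data = .))
fitted_models$model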
It is fairly straightforward to use the augment() function from the broom package in R to add predictions back into a tibble, viz.
df <- iris %>%
  nest(data = everything()) %>%
  mutate(model = map(data, function(x) lm(Sepal.Length ~ Sepal.Width, data = x)),
         pred = map2(model, data, ~augment(.x, newdata = .y))) %>%
  unnest(pred)
However, when I take a linear model trained on one set of data and try to predict on new data, I receive the following error.
mod <- lm(Sepal.Length ~ Sepal.Width, data = iris)

df2 <- iris %>%
  mutate(Sepal.Width = Sepal.Width + rnorm(1)) %>%
  nest(data = everything()) %>%
  mutate(pred = map2(mod, data, ~augment(.x, newdata = .y)))
# Error: Problem with `mutate()` input `pred`.
# x No augment method for objects of class numeric
# i Input `pred` is `map2(mod, data, ~augment(.x, newdata = .y))`.
How should I use augment to fit new data? Is using an external model object (in the example above this is mod) the best practice or is there a more elegant way?
Since there is only one model we can do this without using map.
library(dplyr)

df1 <- iris %>%
  mutate(Sepal.Width = Sepal.Width + rnorm(1)) %>%
  tidyr::nest(data = everything()) %>%
  summarise(pred = broom::augment(mod, newdata = data[[1]]),
            mod = list(mod),
            data = data)
Having just posted the question, I think I have an answer. I won't accept the answer for 48 hours just in case someone contradicts or provides a more comprehensive one.
In the example, map2() expects mod to be a vector or list, but it is a bare model object. Because the nested tibble has a single row, putting mod into the tibble as a length-one list-column suppresses the error and correctly calculates the predictions.
mod <- lm(Sepal.Length ~ Sepal.Width, data = iris)

df2 <- iris %>%
  mutate(Sepal.Width = Sepal.Width + rnorm(1)) %>%
  nest(data = everything()) %>%
  mutate(mod = list(mod)) %>% #! this is the additional step
  mutate(pred = map2(mod, data, ~augment(.x, newdata = .y))) %>%
  unnest(pred)
Alternatively, coerce the external model object to a list inline:
...
mutate(pred = map2(list(mod), data, ~augment(.x, newdata = .y))) %>%
...
I know that glmnet does not regularize the intercept by default, but I would like to do it anyway. I took a look at this question and tried what whuber suggested (adding a constant variable and setting the intercept parameter to FALSE), but as a result glmnet does not fit the added constant either.
library(dplyr)
library(glmnet)

X <- mtcars %>%
  mutate(intercept = 1) %>%
  select(-c(mpg)) %>%
  as.matrix()

y <- mtcars %>%
  select(mpg) %>%
  as.matrix()

model <- glmnet(X, y, intercept = FALSE, alpha = 0, lambda = 0)
coef(model)
I'd like to perform linear regression; however, instead of using RMSE as my error function, I'd like to use MAE (mean absolute error).
Is there a package that would allow me to do this?
You can use the caret and Metrics packages.
library(caret)
data("mtcars")

# custom summary function returning MAE on the held-out predictions
maeSummary <- function(data, lev = NULL, model = NULL) {
  require(Metrics)
  out <- mae(data$obs, data$pred)
  names(out) <- "MAE"
  out
}

mControl <- trainControl(summaryFunction = maeSummary)

set.seed(123)
lm_model <- train(mpg ~ wt,
                  data = mtcars,
                  method = "lm",
                  metric = "MAE",
                  maximize = FALSE,
                  trControl = mControl)
> lm_model$metric
[1] "MAE"
Probably late to the party, but here is a solution using the CVXR package for convex optimisation.
library(CVXR)

# defining variables to be tuned during optimisation
coefficient <- Variable(1)
intercept <- Variable(1)

# defining the objective, i.e. minimising the sum of absolute differences
# (equivalent to minimising the MAE)
objective <- Minimize(sum(abs(mtcars$disp - (mtcars$hp * coefficient) - intercept)))

# optimisation
problem <- Problem(objective)
result <- solve(problem)

# results
result$status
mae_coefficient <- result$getValue(coefficient)
mae_intercept <- result$getValue(intercept)
lm_coeff_intrc <- lm(formula = disp ~ hp, data = mtcars)$coefficients

library(tidyverse)

ggplot(mtcars, aes(hp, disp)) +
  geom_point() +
  geom_abline(
    slope = lm_coeff_intrc["hp"],
    intercept = lm_coeff_intrc["(Intercept)"],
    color = "red"
  ) +
  geom_abline(
    slope = mae_coefficient,
    intercept = mae_intercept,
    color = "blue"
  )
# compare the total absolute error of the two fits; the CVXR fit should be
# no larger, since it minimises this criterion directly
df <- mtcars %>%
  select(disp, hp) %>%
  rownames_to_column() %>%
  mutate(
    mae = disp - hp * mae_coefficient - mae_intercept,
    lm = disp - hp * lm_coeff_intrc["hp"] - lm_coeff_intrc["(Intercept)"]
  )

df %>%
  select(mae, lm) %>%
  pivot_longer(cols = 1:2) %>%
  group_by(name) %>%
  summarise(
    sum_abs_error = sum(abs(value))
  )