I have a function, taken from a textbook, that runs several linear regressions and then saves the p-value for each regression.
I would also like to save the t-statistic, but I am having a hard time finding the right syntax for the select() call.
Here is the current function.
models <- lapply(paste(factors, ' ~ a + b + c + d + e + f + g + h + j -', factors),
                 function(f){ lm(as.formula(f), data = df) %>%  # Call lm(.)
                     summary() %>%                              # Gather the output
                     "$"(coef) %>%                              # Keep only the coefs
                     data.frame() %>%                           # Convert to dataframe
                     filter(rownames(.) == "(Intercept)") %>%   # Keep only the Intercept
                     dplyr::select(Estimate, `Pr...t..`)})      # Keep the coef & p-value
I know that I have to change the very last part of the function, dplyr::select(Estimate, `Pr...t..`), but after all my research and trial and error I am still stuck.
Here is a reproducible example using the mtcars data.
library(dplyr)
df <- mtcars
df <- df %>%
  select(1, 2, 3, 4, 5, 6, 7)
factors <- c("mpg", "cyl", "disp", "hp", "drat", "wt")
models <- lapply(paste(factors, ' ~ mpg + cyl + disp + hp + drat + wt -', factors),
                 function(f){ lm(as.formula(f), data = df) %>%  # Call lm(.)
                     summary() %>%                              # Gather the output
                     "$"(coef) %>%                              # Keep only the coefs
                     data.frame() %>%                           # Convert to dataframe
                     filter(rownames(.) == "(Intercept)") %>%   # Keep only the Intercept
                     dplyr::select(Estimate, `Pr...t..`)}       # Keep the coef & p-value
)
final <- matrix(unlist(models), ncol = 2, byrow = TRUE) %>%  # Switch from list to dataframe
  data.frame(row.names = factors)
Your example works for me. You can make this a little bit more "tidy" as follows:
library(broom)
sumfun <- function(f) {
  lm(as.formula(f), data = df) %>%
    tidy() %>%
    filter(term == "(Intercept)") %>%
    dplyr::select(estimate, p.value)
}
pp <- paste(factors, ' ~ mpg + cyl + disp + hp + drat + wt -',factors)
names(pp) <- factors
final <- purrr::map_dfr(pp, sumfun, .id = "factor")
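Since the original question also asked for the t-statistic: broom::tidy() already returns it as the statistic column, so a small extension of the answer above (my own addition, not tested against your real data) is to keep it in the select:
sumfun <- function(f) {
  lm(as.formula(f), data = df) %>%
    tidy() %>%
    filter(term == "(Intercept)") %>%
    dplyr::select(estimate, statistic, p.value)  # statistic is the intercept's t-value
}
final <- purrr::map_dfr(pp, sumfun, .id = "factor")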
The code below first prints the lm for mpg ~ disp and then for mpg ~ disp + wt. I would like to create another loop over the models (note that the second lm is my personalized model; for simplicity, we can assume it is lm). How can I loop over different models?
data("mtcars")
formulas <- list(
mpg ~ disp,
mpg ~ disp + wt
)
models <- list(lm, lm)
res <- vector("list", length = length(formulas))
for(i in seq_along(formulas)){
  res[[i]] <- lm(formulas[[i]], data = mtcars)
}
res
or
lapply(formulas, lm, data = mtcars)
You may use nested lapply -
lapply(models, function(x) lapply(formulas, function(y) x(y, data = mtcars)))
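As a quick usage note of my own (not part of the answer), the nested result is indexed model-first, formula-second:
res <- lapply(models, function(x) lapply(formulas, function(y) x(y, data = mtcars)))
# res[[i]][[j]] holds model i fitted with formula j, e.g.:
summary(res[[1]][[2]])  # first model function, second formula (mpg ~ disp + wt)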
I like to use tidyverse's purrr for such multi-model approaches:
pacman::p_load(dplyr, purrr, tidyr)  # tidyr supplies crossing()
data("mtcars")
d <- crossing(formula = c(mpg ~ disp, mpg ~ disp + wt),
              model   = list("lm", "glm")) %>%
  mutate(result = pmap(.l = list(model, formula),
                       .f = function(m, f) do.call(m, args = list(formula = f, data = substitute(mtcars)))))
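Each row of d then carries one fitted model in the result list column; for example (my own usage note):
d                        # one row per formula/model combination
summary(d$result[[1]])   # inspect the model stored in the first row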
We could use outer in base R, and it should be fast:
out <- c(outer(models, formulas, Vectorize(function(x, y) list(x(y, data = mtcars)))))
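out is then a flat list with one fit per model/formula combination (my own note):
length(out)        # 2 model functions x 2 formulas = 4 fits
summary(out[[1]])  # inspect the first fit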
I have a dataframe that I want to run linear models on by group, then use the broom package to extract the slope and r-squared for each model. So far I have tried this:
library(tidyverse)
library(broom)
#read in the dataset
data(mtcars)
#add a group variable
mtcars <- mtcars %>% as_tibble() %>% mutate(LC = 1)
#create a second group
mtcars2 <- mtcars
mtcars2 <- mtcars2 %>% mutate(LC = 2)
#bind together
mtcars <- rbind(mtcars, mtcars2)
#group by and run regressions
all_regress <- mtcars %>% group_by(LC) %>%
  do(mod1 = lm(mpg ~ disp, data = .),
     mod2 = lm(mpg ~ wt, data = .))
#use broom to extract the slope and rsq per group
glance <- all_regress %>% mutate(tidy = map(mod1, broom::tidy),
                                 glance = map(mod1, broom::glance),
                                 augment = map(mod1, broom::augment),
                                 rsq = glance %>% map_dbl('r.squared'),
                                 slope = tidy %>% map_dbl(function(x) x$estimate[2]))
but this fails with:
Error: Problem with `mutate()` input `tidy`.
x No tidy method for objects of class qr
ℹ Input `tidy` is `map(mod1, broom::tidy)`.
ℹ The error occurred in row 1.
If I do this without groups such as:
#read in the dataset
data(mtcars)
mtcars <- mtcars %>% as_tibble()
#run regressions
all_regress <- mtcars %>%
  do(mod1 = lm(mpg ~ disp, data = .),
     mod2 = lm(mpg ~ wt, data = .))
#use broom to extract the slope and rsq per group
glance <- all_regress %>% mutate(tidy = map(mod1, broom::tidy),
                                 glance = map(mod1, broom::glance),
                                 augment = map(mod1, broom::augment),
                                 rsq = glance %>% map_dbl('r.squared'),
                                 slope = tidy %>% map_dbl(function(x) x$estimate[2]))
there is no error.
I think simply adding ungroup() achieves what you need:
all_regress <- mtcars %>% group_by(LC) %>%
  do(mod1 = lm(mpg ~ disp, data = .),
     mod2 = lm(mpg ~ wt, data = .)) %>% ungroup()
#use broom to extract the slope and rsq per group
glance <- all_regress %>% mutate(tidy = map(mod1, broom::tidy),
                                 glance = map(mod1, broom::glance),
                                 augment = map(mod1, broom::augment),
                                 rsq = glance %>% map_dbl('r.squared'),
                                 slope = tidy %>% map_dbl(function(x) x$estimate[2]))
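With ungroup() in place, the per-group numbers can then be pulled out directly, for example (my own usage note):
glance %>% dplyr::select(LC, rsq, slope)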
I used this approach; it's longer, but I think there's more control in the individual steps. Finally, I created a tibble with list columns containing each model.
library(tidyverse)
library(broom)
#read in the dataset
data(mtcars)
#add a group variable
mtcars <- mtcars %>% as_tibble() %>% dplyr::select(-c(vs, am, gear, carb, cyl)) %>% mutate(LC = 1)
#create a second group
mtcars2 <- mtcars
mtcars2 <- mtcars2 %>% mutate(LC = 2)
#bind together
mtcars <- bind_rows(mtcars2, mtcars)
#group_split and run regressions
all_regress <- mtcars %>% group_split(LC) %>%
  map(~ list(mod1 = lm(mpg ~ disp, data = .),
             mod2 = lm(mpg ~ wt, data = .)))
# example <- all_regress[[2]][[1]] %>% glance()
#the list has 2 levels with 2 models each
data <- all_regress %>%
  map(~
    map(.x, function(model){
      # list columns are needed because each function outputs a different kind of object
      tibble(mod = list(model),
             tidy = list(broom::tidy(model)),
             glance = list(broom::glance(model)),
             augment = list(broom::augment(model))) %>%
        mutate(
          rsq = list(glance[[1]]$r.squared),
          slope = list(tidy[[1]]$estimate[2]))
    }))
data_final <-
  data %>% map2(unique(mtcars$LC), ~
    map2(.x, .y, function(each_model, lc){
      mutate(each_model, LC = lc)
    }))
# because of the list structure, I need to bind the two datasets in each level and then bind them again
final_format <- map(data_final, ~ reduce(.x, rbind)) %>% reduce(rbind)
#access the data
final_format[1, 1][[1]]
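If only the group, slope, and r-squared are needed, the list columns can be unnested at the end (my own usage note, assuming tidyr is available via library(tidyverse)):
final_format %>%
  dplyr::select(LC, rsq, slope) %>%
  tidyr::unnest(cols = c(rsq, slope))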
The gtsummary package in R has a new neat function add_significance_stars() which adds significance stars to coefficient estimates with small p-values in regression models. However, this function can only operate on tbl_regression or tbl_uvregression objects.
Is there a similar method that can be applied to a tbl_summary object so that p-value stars mark significant summary statistics?
library(tidyverse)
library(gtsummary)
This is a tbl_summary object with p-values displayed in a column.
mtcars %>%
select(gear, mpg, disp, hp, wt) %>%
tbl_summary(by = "gear") %>%
add_p()
tbl_summary object
This is a tbl_regression object with p-values displayed in the desired fashion in the footer
mtcars %>%
select(gear, mpg, disp, hp, wt) %>%
lm(formula = gear ~ mpg + disp + hp + wt) %>%
tbl_regression(intercept = TRUE) %>%
add_significance_stars()
tbl_regression object
The purpose of add_significance_stars() is to replace the p-values with stars on the coefficient estimates. If you'd like to add stars to p-values in a tbl_summary(), you can define a function that appends stars to significant p-values. Example below!
library(gtsummary)
packageVersion("gtsummary")
#> [1] '1.4.0'
fmt_pvalue_with_stars <- function(x) {
  dplyr::case_when(
    x < 0.001 ~ paste0(style_pvalue(x), "***"),
    x < 0.01 ~ paste0(style_pvalue(x), "**"),
    x < 0.05 ~ paste0(style_pvalue(x), "*"),
    TRUE ~ style_pvalue(x)
  )
}
tbl <-
mtcars %>%
select(am, hp, cyl) %>%
tbl_summary(by = am) %>%
add_p(pvalue_fun = fmt_pvalue_with_stars) %>%
modify_footnote(p.value ~ "*p<0.05; **p<0.01; ***p<0.001")
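As a quick sanity check (my own note, not part of the original answer), the formatter can also be called directly on a numeric vector of p-values before wiring it into add_p():
fmt_pvalue_with_stars(c(0.0004, 0.03, 0.4))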
I build lots of GLMs. Usually on large data sets with many model parameters. This means that base R's glm() function isn't really useful because it won't cope with the size/complexity, so I usually use revoScaleR::rxGlm() instead.
However I'd like to be able to do ANOVA tests on pairs of nested models, and I haven't found a way to do this with the model objects that rxGlm() creates, because R's anova() function won't work with them. revoScaleR provides an as.glm() function which converts an rxGlm() object to a glm() object - sort of - but it doesn't work here.
For example:
library(dplyr)
data(mtcars)
# don't like having named rows
mtcars <- mtcars %>%
  mutate(veh_name = rownames(.)) %>%
  select(veh_name, everything())
# fit a GLM: mpg ~ everything else
glm_a1 <- glm(mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + gear + carb,
              data = mtcars,
              family = gaussian(link = "identity"),
              trace = TRUE)
summary(glm_a1)
# fit another GLM where gear is removed
glm_a2 <- glm(mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + carb,
              data = mtcars,
              family = gaussian(link = "identity"),
              trace = TRUE)
summary(glm_a2)
# F test on difference
anova(glm_a1, glm_a2, test = "F")
works fine, but if instead I do:
library(dplyr)
data(mtcars)
# don't like having named rows
mtcars <- mtcars %>%
  mutate(veh_name = rownames(.)) %>%
  select(veh_name, everything())
glm_b1 <- rxGlm(mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + gear + carb,
                data = mtcars,
                family = gaussian(link = "identity"),
                verbose = 1)
summary(glm_b1)
# fit another GLM where gear is removed
glm_b2 <- rxGlm(mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + carb,
                data = mtcars,
                family = gaussian(link = "identity"),
                verbose = 1)
summary(glm_b2)
# F test on difference
anova(as.glm(glm_b1), as.glm(glm_b2), test = "F")
I see the error message:
Error in qr.lm(object) : lm object does not have a proper 'qr'
component. Rank zero or should not have used lm(.., qr=FALSE)
The same problem cropped up on a previous SO posting: Error converting rxGlm to GLM but doesn't seem to have been solved.
Can anyone help, please? If as.glm() isn't going to help here, is there some other way? Could I write a custom function to do this (stretching my coding abilities to their limit, I suspect)?
Also, is SO the best forum, or would one of the other StackExchange forums be a better place to look for guidance?
Thank you.
Partial solution...
my_anova <- function(model_1, model_2, test_type)
{
  # only applies for nested GLMs. How do I test for this?
  cat("\n")
  if(test_type != "F")
  {
    cat("Invalid function call")
  }
  else
  {
    # display model formulae (use the arguments, not the global objects)
    cat("Model 1:", format(model_1$formula), "\n")
    cat("Model 2:", format(model_2$formula), "\n")
    if(test_type == "F")
    {
      if (model_1$df[2] < model_2$df[2]) # model 1 is big, model 2 is small
      {
        dev_s <- model_2$deviance
        df_s  <- model_2$df[2]
        dev_b <- model_1$deviance
        df_b  <- model_1$df[2]
      }
      else # model 2 is big, model 1 is small
      {
        dev_s <- model_1$deviance
        df_s  <- model_1$df[2]
        dev_b <- model_2$deviance
        df_b  <- model_2$df[2]
      }
      F <- (dev_s - dev_b) / ((df_s - df_b) * dev_b / df_b)
    }
    # still need to calculate the F tail probability however
    # df of F: numerator: df_s - df_b
    # df of F: denominator: df_b
    F_test <- pf(F, df_s - df_b, df_b, lower.tail = FALSE)
    cat("\n")
    cat("F:     ", round(F, 4), "\n")
    cat("Pr(>F):", round(F_test, 4))
  }
}
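Usage would then be, reusing the rxGlm fits from the question (my own note):
my_anova(glm_b1, glm_b2, "F")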
A slightly changed example from the R help for do():
by_cyl <- group_by(mtcars, cyl)
models <- by_cyl %>% do(mod = lm(mpg ~ disp, data = .))
coefficients <- models %>% do(data.frame(coef = coef(.$mod)[[1]]))
In the dataframe coefficients, there is the first coefficient of the linear model for each cyl group. My question is: how can I produce a dataframe that contains not only a column with the coefficients but also a column with the grouping variable?
Edit: I am extending the example to make my problem clearer.
Let's suppose that I want to extract the coefficients of the model and some prediction. I can do this:
by_cyl <- group_by(mtcars, cyl)
getpars <- function(df){
  fit <- lm(mpg ~ disp, data = df)
  data.frame(intercept = coef(fit)[1], slope = coef(fit)[2])
}
getprediction <- function(df){
  fit <- lm(mpg ~ disp, data = df)
  x <- df$disp
  y <- predict(fit, data.frame(disp = x), type = "response")
  data.frame(x, y)
}
pars <- by_cyl %>% do(getpars(.))
prediction <- by_cyl %>% do(getprediction(.))
The problem is that the code is redundant because I am fitting the model twice. My idea was to build a function that returns a list with all the information:
getAll <- function(df){
  results <- list()
  fit <- lm(mpg ~ disp, data = df)
  x <- df$disp
  y <- predict(fit, data.frame(disp = x), type = "response")
  results$pars <- data.frame(intercept = coef(fit)[1], slope = coef(fit)[2])
  results$prediction <- data.frame(x, y)
  results
}
The problem is that I don't know how to use do() with the function getAll to obtain, for example, just a dataframe with the parameters (like the dataframe pars).
Like this?
coefficients <- models %>% do(data.frame(coef = coef(.$mod)[[1]], group = .[[1]]))
yielding
      coef group
1 40.87196     4
2 19.08199     6
3 22.03280     8
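A clarifying note of my own: inside do(), `.` is each row of models, so `.[[1]]` picks up its first column, which here is cyl. Writing that explicitly reads a little more clearly:
coefficients <- models %>% do(data.frame(cyl = .$cyl, coef = coef(.$mod)[[1]]))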
Using the approach of Hadley Wickham in this video:
library(dplyr)
library(purrr)
library(broom)
fitmodel <- function(d) lm(mpg ~ disp, data = d)
by_cyl <- mtcars %>%
  group_by(cyl) %>%
  nest() %>%
  mutate(mod = map(data, fitmodel),
         pars = map(mod, tidy),
         pred = map(mod, augment))
pars <- by_cyl %>% unnest(pars)
prediction <- by_cyl %>% unnest(pred)
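To get one row per group with just the grouping variable and the slope (my own follow-up, using the pars object above):
pars %>%
  filter(term == "disp") %>%
  dplyr::select(cyl, estimate)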