Running regressions and extracting model estimates to a dataframe in R

I have 3 exposure variables x1-x3, 10 outcome variables y1-y10 and 3 covariates cv1-cv3.
I would like to regress each outcome on each exposure, adjusted for all covariates, and then place the model estimates (beta, SE, p-value) in a dataframe. Is there a way to automate this in R? Thank you!
The models I want to run look like this:
y1 ~ x1+cv1+cv2+cv3 ... y10 ~ x1+cv1+cv2+cv3
y1 ~ x2+cv1+cv2+cv3 ... y10 ~ x2+cv1+cv2+cv3
y1 ~ x3+cv1+cv2+cv3 ... y10 ~ x3+cv1+cv2+cv3

Without data and a reproducible example, it is hard to help you, but here's an example with simulated data. First, create a fake dataset, called data:
library(tidyverse)

make_df <- function(y_i) {
  tibble(y_var = y_i, y_i = rnorm(100),
         x1 = rnorm(100), x2 = rnorm(100), x3 = rnorm(100),
         cv1 = runif(100), cv2 = runif(100), cv3 = runif(100))
}
ys <- paste0("Y_", sprintf("%02d", 1:10))
ys
#> [1] "Y_01" "Y_02" "Y_03" "Y_04" "Y_05" "Y_06" "Y_07" "Y_08" "Y_09" "Y_10"
data <- ys %>%
  map_dfr(make_df)
data
#> # A tibble: 1,000 x 8
#> y_var y_i x1 x2 x3 cv1 cv2 cv3
#> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 Y_01 0.504 0.892 -0.806 -1.56 0.145 0.436 0.701
#> 2 Y_01 0.967 1.24 -1.19 0.920 0.866 0.00100 0.567
#> 3 Y_01 -0.824 -0.729 -0.0855 -1.06 0.0665 0.780 0.471
#> 4 Y_01 0.294 2.37 -0.514 -0.955 0.397 0.0462 0.209
#> 5 Y_01 -0.893 0.0298 0.0369 0.0787 0.640 0.709 0.0485
#> 6 Y_01 0.670 -0.347 1.56 2.11 0.843 0.542 0.793
#> 7 Y_01 -1.59 1.04 0.228 0.573 0.185 0.151 0.558
#> 8 Y_01 -2.04 0.289 -0.435 -0.113 0.833 0.0898 0.653
#> 9 Y_01 -0.637 0.818 -0.454 0.606 0.294 0.378 0.315
#> 10 Y_01 -1.61 -0.628 -2.75 1.06 0.353 0.0863 0.332
#> # ... with 990 more rows
At this point, you have options. One way is to use the group_by() %>% do(tidy(...)) recipe:
data %>%
  gather(x_var, x_value, -c(y_var, y_i, cv1:cv3)) %>%
  group_by(y_var, x_var) %>%
  do(broom::tidy(lm(y_i ~ x_value + cv1 + cv2 + cv3, data = .)))
#> # A tibble: 150 x 7
#> # Groups: y_var, x_var [30]
#> y_var x_var term estimate std.error statistic p.value
#> <chr> <chr> <chr> <dbl> <dbl> <dbl> <dbl>
#> 1 Y_01 x1 (Intercept) -0.111 0.344 -0.324 0.747
#> 2 Y_01 x1 x_value -0.0440 0.111 -0.396 0.693
#> 3 Y_01 x1 cv1 0.286 0.372 0.769 0.444
#> 4 Y_01 x1 cv2 0.0605 0.379 0.160 0.873
#> 5 Y_01 x1 cv3 -0.0690 0.378 -0.182 0.856
#> 6 Y_01 x2 (Intercept) -0.146 0.336 -0.434 0.665
#> 7 Y_01 x2 x_value 0.117 0.105 1.12 0.265
#> 8 Y_01 x2 cv1 0.287 0.362 0.793 0.430
#> 9 Y_01 x2 cv2 0.0564 0.376 0.150 0.881
#> 10 Y_01 x2 cv3 0.0125 0.379 0.0330 0.974
#> # ... with 140 more rows
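If you prefer to avoid do() (it is superseded in recent dplyr), a nest()/map() version of the same idea should give the same table; a sketch, assuming the same simulated data:
data %>%
  # one row per (observation, exposure) pair, keeping the covariates wide
  pivot_longer(x1:x3, names_to = "x_var", values_to = "x_value") %>%
  group_by(y_var, x_var) %>%
  nest() %>%
  # fit one adjusted model per outcome/exposure combination and tidy it
  mutate(fit = map(data, ~ lm(y_i ~ x_value + cv1 + cv2 + cv3, data = .x)),
         res = map(fit, broom::tidy)) %>%
  select(y_var, x_var, res) %>%
  unnest(res)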
Another approach is to use a split variable and then a map function from purrr:
data %>%
  gather(x_var, x_value, -c(y_var, y_i, cv1:cv3)) %>%
  mutate(y_var_x_var = paste0(y_var, x_var)) %>%
  split(.$y_var_x_var) %>%
  map(~ lm(y_i ~ x_value + cv1 + cv2 + cv3, data = .))
#> $Y_01x1
#>
#> Call:
#> lm(formula = y_i ~ x_value + cv1 + cv2 + cv3, data = .)
#>
#> Coefficients:
#> (Intercept) x_value cv1 cv2 cv3
#> -0.11144 -0.04396 0.28585 0.06051 -0.06896
#>
#>
#> $Y_01x2
#>
#> Call:
#> lm(formula = y_i ~ x_value + cv1 + cv2 + cv3, data = .)
#>
#> Coefficients:
#> (Intercept) x_value cv1 cv2 cv3
#> -0.14562 0.11732 0.28726 0.05642 0.01249
#>
#>
# ...and so on...
#>
#>
#> $Y_10x2
#>
#> Call:
#> lm(formula = y_i ~ x_value + cv1 + cv2 + cv3, data = .)
#>
#> Coefficients:
#> (Intercept) x_value cv1 cv2 cv3
#> -0.45689 -0.02530 0.61375 0.34377 -0.02357
#>
#>
#> $Y_10x3
#>
#> Call:
#> lm(formula = y_i ~ x_value + cv1 + cv2 + cv3, data = .)
#>
#> Coefficients:
#> (Intercept) x_value cv1 cv2 cv3
#> -0.44423 -0.18377 0.64739 0.27688 -0.02013
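If you go this route and still want a single dataframe of estimates, you should be able to collapse the named list of models with map_dfr() and broom::tidy(), for example:
models <- data %>%
  gather(x_var, x_value, -c(y_var, y_i, cv1:cv3)) %>%
  mutate(y_var_x_var = paste0(y_var, x_var)) %>%
  split(.$y_var_x_var) %>%
  map(~ lm(y_i ~ x_value + cv1 + cv2 + cv3, data = .))

# collapse the list into one dataframe of estimates; .id records which model
# (e.g. "Y_01x1") each row came from
map_dfr(models, broom::tidy, .id = "model")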

Related

edit string text in dataframe variable

I want to tidy up a dataframe and automate the process. Given the following data.frame:
library(survival)
library(rms)
library(broom)
library(tidyverse)
res.cox <- coxph(Surv(time, status) ~ rcs(age, 3) + sex + ph.ecog +
                   rcs(meal.cal, 4), data = lung)
output <- tidy(res.cox)
output
# term estimate std.error statistic p.value
# <chr> <dbl> <dbl> <dbl> <dbl>
# 1 rcs(age, 3)age -0.00306 0.0219 -0.140 0.889
# 2 rcs(age, 3)age' 0.0154 0.0261 0.592 0.554
# 3 sex -0.525 0.192 -2.74 0.00620
# 4 ph.ecog 0.421 0.131 3.22 0.00128
# 5 rcs(meal.cal, 4)meal.cal -0.000416 0.00104 -0.400 0.689
# 6 rcs(meal.cal, 4)meal.cal' 0.00118 0.00232 0.509 0.611
# 7 rcs(meal.cal, 4)meal.cal'' -0.00659 0.0114 -0.577 0.564
I want to remove the rcs-spline information from term variable and be left with:
# term estimate std.error statistic p.value
# <chr> <dbl> <dbl> <dbl> <dbl>
# 1 s1 age -0.00306 0.0219 -0.140 0.889
# 2 s2 age 0.0154 0.0261 0.592 0.554
# 3 sex -0.525 0.192 -2.74 0.00620
# 4 ph.ecog 0.421 0.131 3.22 0.00128
# 5 s1 meal.cal -0.000416 0.00104 -0.400 0.689
# 6 s2 meal.cal 0.00118 0.00232 0.509 0.611
# 7 s3 meal.cal -0.00659 0.0114 -0.577 0.564
I want the solution to work easily for other cases too, so that when you increase the number of knots:
res.cox2 <- coxph(Surv(time, status) ~ rcs(age, 4) + rcs(meal.cal, 6) +
                    sex + ph.ecog, data = lung)
output2 <- tidy(res.cox2)
output2
# term estimate std.error statistic p.value
# <chr> <dbl> <dbl> <dbl> <dbl>
# 1 rcs(age, 4)age 0.0419 0.0403 1.04 0.298
# 2 rcs(age, 4)age' -0.101 0.0806 -1.26 0.208
# 3 rcs(age, 4)age'' 0.569 0.388 1.47 0.142
# 4 rcs(meal.cal, 6)meal.cal -0.000974 0.00155 -0.631 0.528
# 5 rcs(meal.cal, 6)meal.cal' 0.00751 0.0115 0.655 0.512
# 6 rcs(meal.cal, 6)meal.cal'' -0.0217 0.0358 -0.607 0.544
# 7 rcs(meal.cal, 6)meal.cal''' 0.0614 0.123 0.501 0.616
# 8 rcs(meal.cal, 6)meal.cal'''' -0.0775 0.163 -0.475 0.634
# 9 sex -0.552 0.195 -2.83 0.00465
# 10 ph.ecog 0.440 0.132 3.34 0.000835
you would be left with:
# term estimate std.error statistic p.value
# <chr> <dbl> <dbl> <dbl> <dbl>
# 1 s1 age 0.0419 0.0403 1.04 0.298
# 2 s2 age -0.101 0.0806 -1.26 0.208
# 3 s3 age 0.569 0.388 1.47 0.142
# 4 s1 meal.cal -0.000974 0.00155 -0.631 0.528
# 5 s2 meal.cal 0.00751 0.0115 0.655 0.512
# 6 s3 meal.cal -0.0217 0.0358 -0.607 0.544
# 7 s4 meal.cal 0.0614 0.123 0.501 0.616
# 8 s5 meal.cal -0.0775 0.163 -0.475 0.634
# 9 sex -0.552 0.195 -2.83 0.00465
# 10 ph.ecog 0.440 0.132 3.34 0.000835
etc...
My attempt so far gets me some of the way, but I am not sure of the best way to deal with the ' and '' suffixes (note that the first spline term does not contain a '):
output %>%
  mutate(rcs_indicator = str_detect(term, fixed("rcs(")),
         term = str_replace_all(term, "rcs\\(.+?\\)", ""))
# term estimate std.error statistic p.value rcs_indicator
# <chr> <dbl> <dbl> <dbl> <dbl> <lgl>
# 1 age -0.00306 0.0219 -0.140 0.889 TRUE
# 2 age' 0.0154 0.0261 0.592 0.554 TRUE
# 3 sex -0.525 0.192 -2.74 0.00620 FALSE
# 4 ph.ecog 0.421 0.131 3.22 0.00128 FALSE
# 5 meal.cal -0.000416 0.00104 -0.400 0.689 TRUE
# 6 meal.cal' 0.00118 0.00232 0.509 0.611 TRUE
# 7 meal.cal'' -0.00659 0.0114 -0.577 0.564 TRUE
It might be useful to just work with the terms I need to change directly:
unique(str_subset(output$term, fixed("rcs(")) %>%
         str_replace_all("'", ""))
# [1] "rcs(age, 3)age" "rcs(meal.cal, 4)meal.cal"
I feel there is a simpler way to do this than the steps I am taking.
Any suggestions?
Thanks
This one is clunky but it should work:
library(dplyr)
library(stringr)

output %>%
  group_by(group = str_extract(term, 'rcs\\(.')) %>%
  mutate(row = row_number()) %>%
  mutate(term = str_replace_all(term, 'rcs\\(', paste0("s", row, " "))) %>%
  mutate(term = ifelse(str_detect(term, 's\\d'),
                       str_extract(term, '.\\d\\s.*\\s'), term)) %>%
  mutate(term = str_trim(term)) %>%
  mutate(term = str_replace_all(term, '\\,', '')) %>%
  ungroup() %>%
  select(-c(group, row))
term estimate std.error statistic p.value
<chr> <dbl> <dbl> <dbl> <dbl>
1 s1 age -0.00306 0.0219 -0.140 0.889
2 s2 age 0.0154 0.0261 0.592 0.554
3 sex -0.525 0.192 -2.74 0.00620
4 ph.ecog 0.421 0.131 3.22 0.00128
5 s1 meal.cal -0.000416 0.00104 -0.400 0.689
6 s2 meal.cal 0.00118 0.00232 0.509 0.611
7 s3 meal.cal -0.00659 0.0114 -0.577 0.564
This is also less elegant than desired, but it should work for any number of knots:
output %>%
  mutate(is_spline = grepl("^rcs\\(.*?, \\d\\)", term),
         n_term = str_count(term, "'") + 1,
         pre = ifelse(is_spline, paste0('s', n_term, ' '), ""),
         term = paste0(pre, gsub("(^rcs\\(.*?, \\d\\))|(\\'+$)", "", term))) %>%
  select(-is_spline, -n_term, -pre)
#> # A tibble: 7 x 5
#> term estimate std.error statistic p.value
#> <chr> <dbl> <dbl> <dbl> <dbl>
#> 1 s1 age -0.00306 0.0219 -0.140 0.889
#> 2 s2 age 0.0154 0.0261 0.592 0.554
#> 3 sex -0.525 0.192 -2.74 0.00620
#> 4 ph.ecog 0.421 0.131 3.22 0.00128
#> 5 s1 meal.cal -0.000416 0.00104 -0.400 0.689
#> 6 s2 meal.cal 0.00118 0.00232 0.509 0.611
#> 7 s3 meal.cal -0.00659 0.0114 -0.577 0.564
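Since you want something that generalizes, one option is to wrap that mutate() in a small helper and reuse it on output2 (or any other tidy() result containing rcs terms); a sketch:
library(dplyr)
library(stringr)

clean_rcs_terms <- function(tbl) {
  tbl %>%
    mutate(is_spline = grepl("^rcs\\(.*?, \\d\\)", term),
           # build the s1, s2, ... prefix from the number of trailing quote marks
           n_term = str_count(term, "'") + 1,
           pre = ifelse(is_spline, paste0("s", n_term, " "), ""),
           term = paste0(pre, gsub("(^rcs\\(.*?, \\d\\))|(\\'+$)", "", term))) %>%
    select(-is_spline, -n_term, -pre)
}

clean_rcs_terms(output)   # the 3- and 4-knot model
clean_rcs_terms(output2)  # the 4- and 6-knot model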

Combine predicted value of probit model into original dataframe

I have data like this:
library(dplyr)

df <- tibble::tibble(
  id = rep(c(1:50), each = 5),
  y = runif(250, min = 0, max = 1),
  x1 = rnorm(250, mean = 0, sd = 1),
  x2 = rnorm(250, mean = 0, sd = 1),
  x3 = rnorm(250, mean = 0, sd = 1),
  x4 = rnorm(250, mean = 0, sd = 1),
  x5 = rnorm(250, mean = 0, sd = 1)
) %>%
  group_by(id) %>%
  mutate(year = rep(c(2001:2005)))
I would like to estimate the probit model for every year to get (1) coefficient estimates, (2) predicted values of y, and (3) the number of observations used to estimate the model:
probit_model <- function(df) {
  glm(y ~ x1 + x2 + x3 + x4 + x5,
      family = binomial(link = "probit"),
      data = df)
}
Do you know how we can get the coefficient estimates and predicted values for every year, and then combine them with the original data (that is, df)? I know how to do this with an OLS model (by using a map function to estimate multiple models), but I do not know how to do it with probit regression.
Thank you so much.
I think you need to do something like this; I used this post as a reference.
library(dplyr)

df <- tibble::tibble(
  id = rep(c(1:50), each = 5),
  y = runif(250, min = 0, max = 1),
  x1 = rnorm(250, mean = 0, sd = 1),
  x2 = rnorm(250, mean = 0, sd = 1),
  x3 = rnorm(250, mean = 0, sd = 1),
  x4 = rnorm(250, mean = 0, sd = 1),
  x5 = rnorm(250, mean = 0, sd = 1)
) %>%
  group_by(id) %>%
  mutate(year = rep(c(2001:2005)))

fitted_models <- df %>%
  group_by(year) %>%
  do(model = glm(y ~ x1 + x2 + x3 + x4 + x5,
                 family = binomial(link = "probit"),
                 data = .))
#fitted_models$year
#fitted_models$model[1]
fitted_models %>% summarise(broom::tidy(model))
## A tibble: 30 x 5
#term estimate std.error statistic p.value
#<chr> <dbl> <dbl> <dbl> <dbl>
# 1 (Intercept) -0.160 0.187 -0.856 0.392
#2 x1 0.0860 0.230 0.375 0.708
#3 x2 0.0657 0.187 0.351 0.725
#4 x3 0.0472 0.160 0.296 0.767
#5 x4 0.216 0.191 1.13 0.257
#6 x5 -0.159 0.263 -0.604 0.546
#7 (Intercept) -0.0792 0.182 -0.434 0.664
#8 x1 0.0314 0.170 0.185 0.853
#9 x2 -0.0320 0.194 -0.164 0.869
#10 x3 0.167 0.218 0.763 0.445
## ... with 20 more rows
fitted_models %>% summarise(broom::glance(model))
## A tibble: 5 x 8
#null.deviance df.null logLik AIC BIC deviance df.residual nobs
#<dbl> <int> <dbl> <dbl> <dbl> <dbl> <int> <int>
# 1 21.7 49 -32.5 77.0 88.5 19.7 44 50
#2 16.4 49 -33.4 78.8 90.3 15.7 44 50
#3 15.5 49 -34.5 81.1 92.5 15.2 44 50
#4 16.6 49 -32.4 76.7 88.2 15.0 44 50
#5 19.6 49 -33.3 78.6 90.0 19.1 44 50
fitted_models %>% summarise(broom::augment(model, type.predict = "response"))
## A tibble: 250 x 12
#y x1 x2 x3 x4 x5 .fitted .resid .std.resid .hat .sigma .cooksd
#<dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 0.819 0.0246 0.0176 0.280 0.192 0.840 0.407 0.846 0.875 0.0665 0.664 0.00894
#2 0.0418 1.41 0.297 1.15 -1.41 0.347 0.372 -0.792 -0.853 0.137 0.665 0.0144
#3 0.119 -0.265 -0.158 -1.37 -2.48 -0.504 0.237 -0.300 -0.327 0.156 0.676 0.00284
#4 0.0282 -0.836 -0.442 -1.63 0.506 0.910 0.355 -0.808 -0.858 0.114 0.665 0.0112
#5 0.893 -0.481 -0.384 -0.974 0.897 -0.662 0.510 0.819 0.850 0.0703 0.665 0.00792
#6 0.865 0.417 -0.0233 0.841 -0.268 -0.140 0.451 0.865 0.883 0.0395 0.664 0.00494
#7 0.809 1.30 -0.469 1.01 -0.0913 -0.106 0.486 0.669 0.702 0.0921 0.669 0.00778
#8 0.0220 0.119 -0.580 -0.533 -1.09 0.0142 0.326 -0.780 -0.801 0.0522 0.666 0.00406
#9 0.722 0.194 -1.50 -0.395 1.65 -0.868 0.592 0.271 0.297 0.167 0.676 0.00281
#10 0.131 1.24 0.600 1.14 -1.17 0.370 0.392 -0.579 -0.618 0.122 0.671 0.00756
## ... with 240 more rows
A similar answer to #cdcarrion's, from the same post, but using map (a slightly newer approach than do()):
fit the models
library(tidyverse)  # for nest(), map(), unnest()
library(broom)

models <- (df
  %>% group_by(year)
  %>% nest()
  %>% mutate(model = map(data, glm,
                         formula = y ~ x1 + x2 + x3 + x4 + x5,
                         family = binomial(link = "probit")))
)
get coefficients
coefs <- (models
  %>% mutate(cc = map(model, tidy))
  %>% select(year, cc)
  %>% unnest(cols = cc)
)
get predictions
preds <- (models
  %>% mutate(aug = map(model, augment, type.predict = "response"))
  %>% select(year, aug)
  %>% unnest(cols = aug)
  %>% select(year:x5, .fitted)
)
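To combine the predictions (and the number of observations) with the original data, as asked, one way should be to keep the fitted values inside the nested frame and unnest again; a sketch, assuming the column names from the question:
library(tidyverse)

df_pred <- df %>%
  ungroup() %>%
  nest(data = -year) %>%
  mutate(model = map(data, ~ glm(y ~ x1 + x2 + x3 + x4 + x5,
                                 family = binomial(link = "probit"),
                                 data = .x)),
         nobs  = map_int(model, nobs),          # observations used per model
         data  = map2(data, model,
                      ~ mutate(.x, pred = fitted(.y)))) %>%  # fitted probabilities
  select(year, nobs, data) %>%
  unnest(data)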

Performing a linear model in R of a single response with a single predictor from a large dataframe and repeat for each column

It might not be very clear from the title but what I wish to do is:
I have a dataframe df with, say, 200 columns and the first 80 columns are response variables (y1, y2, y3, ...) and the rest of 120 are predictors (x1, x2, x3, ...).
I wish to compute a linear model for each pair: lm(yi ~ xi, data = df).
Many problems and solutions I have looked through online have either a fixed response with many predictors or the other way around, using lapply() and its related functions.
Could anyone who is familiar with this point me to the right approach?
Use the tidyverse:
library(tidyverse)
library(broom)

df <- mtcars
y <- names(df)[1:3]
x <- names(df)[4:7]

result <- expand_grid(x, y) %>%
  rowwise() %>%
  mutate(frm = list(reformulate(x, y)),
         model = list(lm(frm, data = df)))

result$model <- purrr::set_names(result$model, nm = paste0(result$y, " ~ ", result$x))
result$model[1:2]
#> $`mpg ~ hp`
#>
#> Call:
#> lm(formula = frm, data = df)
#>
#> Coefficients:
#> (Intercept) hp
#> 30.09886 -0.06823
#>
#>
#> $`cyl ~ hp`
#>
#> Call:
#> lm(formula = frm, data = df)
#>
#> Coefficients:
#> (Intercept) hp
#> 3.00680 0.02168
map_df(result$model, tidy)
#> # A tibble: 24 x 5
#> term estimate std.error statistic p.value
#> <chr> <dbl> <dbl> <dbl> <dbl>
#> 1 (Intercept) 30.1 1.63 18.4 6.64e-18
#> 2 hp -0.0682 0.0101 -6.74 1.79e- 7
#> 3 (Intercept) 3.01 0.425 7.07 7.41e- 8
#> 4 hp 0.0217 0.00264 8.23 3.48e- 9
#> 5 (Intercept) 21.0 32.6 0.644 5.25e- 1
#> 6 hp 1.43 0.202 7.08 7.14e- 8
#> 7 (Intercept) -7.52 5.48 -1.37 1.80e- 1
#> 8 drat 7.68 1.51 5.10 1.78e- 5
#> 9 (Intercept) 14.6 1.58 9.22 2.93e-10
#> 10 drat -2.34 0.436 -5.37 8.24e- 6
#> # ... with 14 more rows
map_df(result$model, glance)
#> # A tibble: 12 x 12
#> r.squared adj.r.squared sigma statistic p.value df logLik AIC BIC
#> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 0.602 0.589 3.86 45.5 1.79e- 7 1 -87.6 181. 186.
#> 2 0.693 0.683 1.01 67.7 3.48e- 9 1 -44.6 95.1 99.5
#> 3 0.626 0.613 77.1 50.1 7.14e- 8 1 -183. 373. 377.
#> 4 0.464 0.446 4.49 26.0 1.78e- 5 1 -92.4 191. 195.
#> 5 0.490 0.473 1.30 28.8 8.24e- 6 1 -52.7 111. 116.
#> 6 0.504 0.488 88.7 30.5 5.28e- 6 1 -188. 382. 386.
#> 7 0.753 0.745 3.05 91.4 1.29e-10 1 -80.0 166. 170.
#> 8 0.612 0.599 1.13 47.4 1.22e- 7 1 -48.3 103. 107.
#> 9 0.789 0.781 57.9 112. 1.22e-11 1 -174. 355. 359.
#> 10 0.175 0.148 5.56 6.38 1.71e- 2 1 -99.3 205. 209.
#> 11 0.350 0.328 1.46 16.1 3.66e- 4 1 -56.6 119. 124.
#> 12 0.188 0.161 114. 6.95 1.31e- 2 1 -196. 398. 402.
#> # ... with 3 more variables: deviance <dbl>, df.residual <int>, nobs <int>
Created on 2020-12-11 by the reprex package (v0.3.0)
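Since result$model is a named list, passing .id to map_df() should keep track of which of the 12 models each row belongs to:
# the "model" column holds names like "mpg ~ hp"
map_df(result$model, tidy, .id = "model")
map_df(result$model, glance, .id = "model")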

How to extract confidence intervals from multiple regression models?

I am extracting the regression results for two different groups as shown in the example below. In the temp data.frame I get the estimate, std.error, statistic and p-value. However, I don't get the confidence intervals. Is there a simple way to extract them as well?
df <- tibble(
  a = rnorm(1000),
  b = rnorm(1000),
  c = rnorm(1000),
  d = rnorm(1000),
  group = rbinom(n = 1000, size = 1, prob = 0.5)
)
df$group <- as.factor(df$group)
temp <- df %>%
  group_by(group) %>%
  do(model1 = tidy(lm(a ~ b + c + d, data = .))) %>%
  gather(model_name, model, -group) %>%
  unnest()
You are calling tidy() on an lm object. If you check its help page, there is an option to include the confidence interval, conf.int = TRUE:
temp <- df %>%
  group_by(group) %>%
  do(model1 = tidy(lm(a ~ b + c + d, data = .), conf.int = TRUE)) %>%
  gather(model_name, model, -group) %>%
  unnest()
# A tibble: 8 x 9
group model_name term estimate std.error statistic p.value conf.low conf.high
<fct> <chr> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 0 model1 (Int… 0.0616 0.0423 1.46 0.146 -0.0215 0.145
2 0 model1 b 0.00178 0.0421 0.0424 0.966 -0.0808 0.0844
3 0 model1 c -0.00339 0.0431 -0.0787 0.937 -0.0881 0.0813
4 0 model1 d -0.0537 0.0445 -1.21 0.228 -0.141 0.0337
5 1 model1 (Int… -0.0185 0.0454 -0.408 0.683 -0.108 0.0707
6 1 model1 b 0.00128 0.0435 0.0295 0.976 -0.0842 0.0868
7 1 model1 c -0.0972 0.0430 -2.26 0.0244 -0.182 -0.0126
8 1 model1 d 0.0734 0.0457 1.60 0.109 -0.0165 0.163
If your version of dplyr is 1.0.0 or higher, you can use:
df %>%
  group_by(group) %>%
  summarise(tidy(lm(a ~ b + c + d), conf.int = TRUE), .groups = "drop")
#> # A tibble: 8 x 8
#> group term estimate std.error statistic p.value conf.low conf.high
#> <fct> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 0 (Intercept) 0.0734 0.0468 1.57 0.117 -0.0185 0.165
#> 2 0 b -0.101 0.0461 -2.19 0.0292 -0.191 -0.0102
#> 3 0 c 0.0337 0.0464 0.726 0.468 -0.0575 0.125
#> 4 0 d -0.101 0.0454 -2.23 0.0265 -0.190 -0.0118
#> 5 1 (Intercept) -0.0559 0.0468 -1.20 0.232 -0.148 0.0360
#> 6 1 b -0.0701 0.0474 -1.48 0.140 -0.163 0.0230
#> 7 1 c 0.0319 0.0477 0.668 0.504 -0.0619 0.126
#> 8 1 d -0.0728 0.0466 -1.56 0.119 -0.164 0.0188
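Note that in dplyr 1.1.0 and later, returning more than one row per group from summarise() is deprecated; reframe() is the intended replacement and should give the same result:
df %>%
  group_by(group) %>%
  reframe(tidy(lm(a ~ b + c + d), conf.int = TRUE))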

Can I overlook a missing variable in a summing part of a function?

This is a shortened version of my real df. I have a function (called calc) which creates a new variable called total; for simplicity, it adds up three variables: a, b and c. When I pass the function a dataframe that does not contain one of those variables (say c), so it only has a and b, the function falls over. Is there a function or a simple way to sum the variables regardless of whether one is missing?
calc <- function(x) {x %>% mutate(total = a + b + c)}
data.2 has two columns, a and b, with many rows of values, but when it is run through the function, c cannot be found and the total is not calculated.
new.df <- calc(data.2)
Many thanks.
If you want to perform a rowwise sum or mean, both have an na.rm argument which you can use to ignore NA values.
library(dplyr)

calc <- function(x) {
  x %>% mutate(total = rowSums(select(., a:c), na.rm = TRUE))
}
In the general case, if you cannot find a function that gives you an out-of-the-box solution, you can replace NA values with 0 and then perform the operation you want.
calc <- function(x) {
  x %>%
    mutate(across(a:c, ~ tidyr::replace_na(.x, 0)),
           total = a + b + c)
}
You can use rowwise() and c_across() with any_of() (or any other tidyselect function) from dplyr (>= 1.0.0).
library(dplyr)

df <- data.frame(a = rnorm(10), b = rnorm(10))
dfc <- data.frame(a = rnorm(10), b = rnorm(10), c = rnorm(10))

calc <- function(x) {
  x %>%
    rowwise() %>%
    mutate(total = sum(c_across(any_of(c("a", "b", "c"))))) %>%
    ungroup()
}
calc(df)
#> # A tibble: 10 x 3
#> a b total
#> <dbl> <dbl> <dbl>
#> 1 -0.884 0.851 -0.0339
#> 2 -1.56 -0.464 -2.02
#> 3 -0.884 0.815 -0.0689
#> 4 -1.46 -0.259 -1.71
#> 5 0.211 -0.528 -0.317
#> 6 1.85 0.190 2.04
#> 7 -1.31 -0.921 -2.23
#> 8 0.450 0.394 0.845
#> 9 -1.14 0.428 -0.714
#> 10 -1.11 0.417 -0.698
calc(dfc)
#> # A tibble: 10 x 4
#> a b c total
#> <dbl> <dbl> <dbl> <dbl>
#> 1 -0.0868 0.632 1.81 2.36
#> 2 0.568 -0.523 0.240 0.286
#> 3 -0.0325 0.377 -0.437 -0.0921
#> 4 0.660 0.456 1.28 2.39
#> 5 -0.123 1.75 -1.03 0.599
#> 6 0.641 1.39 0.902 2.93
#> 7 0.266 0.520 0.904 1.69
#> 8 -1.53 0.319 0.439 -0.776
#> 9 0.942 0.468 -1.69 -0.277
#> 10 0.254 -0.600 -0.196 -0.542
If you want to generalize beyond those three variables, you can use any tidyselect approach.
df <- data.frame(a = rnorm(10), b = rnorm(10))
dfc <- data.frame(a = rnorm(10), b = rnorm(10), c = rnorm(10))

calc <- function(x) {
  x %>%
    rowwise() %>%
    mutate(total = sum(c_across(everything()))) %>%
    ungroup()
}
calc(df)
#> # A tibble: 10 x 3
#> a b total
#> <dbl> <dbl> <dbl>
#> 1 0.775 1.17 1.95
#> 2 -1.05 1.21 0.155
#> 3 2.07 -0.264 1.81
#> 4 1.11 0.793 1.90
#> 5 -0.700 -0.216 -0.916
#> 6 -1.04 -1.03 -2.07
#> 7 -0.525 1.60 1.07
#> 8 0.354 0.828 1.18
#> 9 0.126 0.110 0.236
#> 10 -0.0954 -0.603 -0.698
calc(dfc)
#> # A tibble: 10 x 4
#> a b c total
#> <dbl> <dbl> <dbl> <dbl>
#> 1 -0.616 0.767 0.0462 0.196
#> 2 -0.370 -0.538 -0.186 -1.09
#> 3 0.337 1.11 -0.700 0.751
#> 4 -0.993 -0.531 -0.984 -2.51
#> 5 0.0538 1.50 -0.0808 1.47
#> 6 -0.907 -1.54 -0.734 -3.18
#> 7 -1.65 -0.242 1.43 -0.455
#> 8 -0.166 0.447 -0.281 -0.000524
#> 9 0.0637 -0.0185 0.754 0.800
#> 10 1.81 -1.09 -2.15 -1.42
Created on 2020-09-10 by the reprex package (v0.3.0)
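If rowwise() turns out to be slow on a large dataframe, the same idea can be vectorised: select only the columns that actually exist with any_of() and pass them to rowSums(). A sketch:
library(dplyr)

calc <- function(x) {
  x %>%
    # any_of() silently drops names that are not present, so a missing c is fine
    mutate(total = rowSums(select(., any_of(c("a", "b", "c"))), na.rm = TRUE))
}

calc(df)   # a and b only
calc(dfc)  # a, b and c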
