add_column applied to a matrix in R

I have a 2-by-2 tibble and use add_column to add a 2-by-2 matrix. I expect a resulting 2-by-4 tibble, but I end up with a 2-by-3 tibble.
Here is some sample code:
library(dplyr)
library(tibble)
A <- tibble(x = c(-1, 1), y = c(-2, 2))
A <- A %>% add_column(z = matrix(rnorm(4), 2, 2))
and dim(A) returns 2 3.
So my question is: how can I get the resulting tibble to be 2-by-4?
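A hedged aside for context, beyond the original question: tibbles deliberately support matrix columns, so add_column() stores the whole 2-by-2 matrix as a single column z, which is why dim(A) reports 2 3. A minimal sketch illustrating this:
library(dplyr)
library(tibble)
A <- tibble(x = c(-1, 1), y = c(-2, 2)) %>%
  add_column(z = matrix(rnorm(4), 2, 2))
dim(A)    # 2 3 -- the matrix counts as a single column of the tibble
dim(A$z)  # 2 2 -- the z column is itself still a 2-by-2 matrix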

Maybe you can try
> A %>% add_column(as_tibble(matrix(rnorm(4), 2, 2)))
# A tibble: 2 x 4
x y V1 V2
<dbl> <dbl> <dbl> <dbl>
1 -1 -2 -0.647 -0.982
2 1 2 -0.264 -1.25
If you want to add columns with names starting with z, you can use
A %>% add_column(setNames(as_tibble(matrix(rnorm(4), 2, 2)), paste0("z", 1:2)))
or (thanks to akrun's comment)
A %>% add_column(as_tibble(matrix(rnorm(4), 2, 2, dimnames = list(NULL, c('z1', 'z2')))))

We can flatten the matrix column 'z' into regular columns with a do.call/data.frame call:
library(dplyr)
library(tibble)
A %>%
  add_column(z = matrix(rnorm(4), 2, 2)) %>%
  do.call(data.frame, .) %>%
  as_tibble
Output
# A tibble: 2 x 4
# x y z.1 z.2
# <dbl> <dbl> <dbl> <dbl>
#1 -1 -2 -1.41 -1.93
#2 1 2 1.80 1.01
Or another option is to split the matrix column into a list with asplit() and use unnest_wider():
library(tidyr)
A %>%
  add_column(z = matrix(rnorm(4), 2, 2)) %>%
  mutate(z = asplit(z, 2)) %>%
  unnest_wider(c(z))

Related

Apply the same function with multiple columns as inputs to multiple columns in R with tidyverse

As an example, I have the following data frame:
df <- data.frame(a1=1,a2=2,a3=3,b1=1,b2=2,b3=3)
I have a function:
fn <- function(x,y,z) x^y+(z-x)^(y-x)
I want the following:
df <- df %>% mutate(a=fn(a1,a2,a3),b=fn(b1,b2,b3))
The problem is, I have tons of triplets in my dataset, so it is not ideal to write them out one by one.
Here are base R options using:
split.default + lapply + do.call
cbind(
  df,
  lapply(
    split.default(df, gsub("\\d+", "", names(df))),
    function(x) do.call(fn, unname(x))
  )
)
reshape + lapply + do.call
cbind(
  df,
  lapply(
    subset(
      reshape(
        setNames(df, gsub("(\\d+)$", "\\.\\1", names(df))),
        direction = "long",
        varying = 1:length(df)
      ),
      select = -c(time, id)
    ),
    function(x) do.call(fn, as.list(x))
  )
)
Output
a1 a2 a3 b1 b2 b3 a b
1 1 2 3 1 2 3 3 3
I would convert df to long format, then use lag to create the three argument columns, then apply fn() to them:
library(tidyverse)
df_long <- df %>%
  pivot_longer(everything(),
               names_to = c(".value", "set"),
               names_pattern = "(.)(.)")
df_longer <- df_long %>%
  pivot_longer(-c(set),
               names_to = "key",
               values_to = "val") %>%
  arrange(key)
df_longer
#> # A tibble: 6 x 3
#> set key val
#> <chr> <chr> <dbl>
#> 1 1 a 1
#> 2 2 a 2
#> 3 3 a 3
#> 4 1 b 1
#> 5 2 b 2
#> 6 3 b 3
Then lag, apply fn(), and keep only the rows with non-NA val_fn:
df_longer <- df_longer %>%
  group_by(key) %>%
  mutate(val_lag1 = lag(val, n = 1),
         val_lag2 = lag(val, n = 2)) %>%
  mutate(val_fn = fn(val_lag2, val_lag1, val)) %>%
  filter(!is.na(val_fn))
df_longer
#> # A tibble: 2 x 6
#> # Groups: key [2]
#> set key val val_lag1 val_lag2 val_fn
#> <chr> <chr> <dbl> <dbl> <dbl> <dbl>
#> 1 3 a 3 2 1 3
#> 2 3 b 3 2 1 3
Created on 2020-12-03 by the reprex package (v0.3.0)
I think it would be easier/shorter to combine the columns into their separate groups and apply the function to each group.
library(dplyr)
library(tidyr)
df %>%
  pivot_longer(cols = everything(),
               names_to = '.value',
               names_pattern = '([a-z]+)') %>%
  summarise(across(.fns = ~ do.call(fn, as.list(.)))) -> result
result
# a b
# <dbl> <dbl>
#1 3 3
You can bind the result to your original dataset if needed.
bind_cols(df, result)
# a1 a2 a3 b1 b2 b3 a b
#1 1 2 3 1 2 3 3 3
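One more hedged sketch (base R only, assuming every triplet is a letter prefix followed by digits, as in the sample data): loop over the prefixes and splice each triplet into fn() with do.call():
prefixes <- unique(sub("\\d+$", "", names(df)))
for (p in prefixes) {
  cols <- grep(paste0("^", p, "\\d+$"), names(df), value = TRUE)
  # pass the triplet positionally to fn()
  df[[p]] <- do.call(fn, unname(as.list(df[cols])))
}
df
#  a1 a2 a3 b1 b2 b3 a b
#1  1  2  3  1  2  3 3 3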

How to use vector of column names as input into dplyr::group_by()?

I want to create a function based on dplyr that performs certain operations on subsets of data. The subsets are defined by values of one or more key columns in the dataset. When only one column is used to identify subsets, my code works fine:
library(dplyr)
set.seed(1)
df <- tibble(
  g1 = c(1, 1, 2, 2, 2),
  g2 = c(1, 2, 1, 2, 1),
  a = sample(5)
)
group_key <- "g1"
aggregate <- function(df, by) {
  df %>% group_by(!!sym(by)) %>% summarize(a = mean(a))
}
aggregate(df, by = group_key)
This works as expected and returns something like this:
# A tibble: 2 x 2
g1 a
<dbl> <dbl>
1 1 1.5
2 2 4
Unfortunately everything breaks down if I change group_key:
group_key <- c("g1", "g2")
aggregate(df, by = group_key)
I get an error, "Only strings can be converted to symbols", which I think comes from rlang::sym(). Replacing it with syms() does not work, since I get a list of names, on which group_by() chokes.
Any suggestions would be appreciated!
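A quick hedged illustration of the failure mode (assuming rlang is attached; the exact error wording varies across rlang versions):
library(rlang)
sym(c("g1", "g2"))
#> Error: Only strings can be converted to symbols
syms(c("g1", "g2"))
#> a list of two symbols -- injecting it with `!!` passes one (invalid)
#> list argument to group_by(), hence the `!!!` splice in the answer below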
You need to use the unquote-splice operator !!!:
aggregate <- function(df, by) {
  df %>% group_by(!!!syms(by)) %>% summarize(a = mean(a))
}
group_key <- c("g1", "g2")
aggregate(df, by = group_key)
# A tibble: 4 x 3
# Groups: g1 [2]
# g1 g2 a
# <dbl> <dbl> <dbl>
#1 1 1 1
#2 1 2 4
#3 2 1 2.5
#4 2 2 5
Alternatively, you can use dplyr::group_by_at:
agg <- function(df, by) {
  require(dplyr)
  df %>% group_by_at(vars(one_of(by))) %>% summarize(a = mean(a))
}
group_key <- "g1"
group_keys <- c("g1","g2")
agg(df, by = group_key)
#> # A tibble: 2 x 2
#> g1 a
#> <dbl> <dbl>
#> 1 1 2.5
#> 2 2 3.33
agg(df, by = group_keys)
#> # A tibble: 4 x 3
#> # Groups: g1 [2]
#> g1 g2 a
#> <dbl> <dbl> <dbl>
#> 1 1 1 1
#> 2 1 2 4
#> 3 2 1 2.5
#> 4 2 2 5
Update with dplyr 1.0.0
The new across() allows tidyselect functions like all_of, which replaces the quote-unquote procedure of NSE. The code looks a bit simpler with that:
aggregate <- function(df, by) {
  df %>%
    group_by(across(all_of(by))) %>%
    summarize(a = mean(a))
}
df %>% aggregate(group_key)
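A further hedged aside (assuming dplyr >= 1.1.0, newer than what this answer covers): the per-operation .by argument also accepts a tidyselect specification, which removes the explicit group_by() step entirely:
aggregate <- function(df, by) {
  # .by takes tidyselect, so all_of(by) works here as well
  summarize(df, a = mean(a), .by = all_of(by))
}
df %>% aggregate(group_key)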

Multiple paired t-tests on multiple variables simultaneously using dplyr/tidyverse

Assume a data structure like this:
ID testA_wave1 testA_wave2 testA_wave3 testB_wave1 testB_wave2 testB_wave3
1 1 3 2 3 6 5 3
2 2 4 4 4 3 6 6
3 3 10 2 1 4 4 4
4 4 5 3 12 2 7 4
5 5 5 3 9 2 4 2
6 6 10 0 2 6 6 5
7 7 6 8 4 6 8 3
8 8 1 5 4 5 6 0
9 9 3 2 7 8 4 4
10 10 4 9 5 11 8 8
What I want to achieve is to calculate a paired t-test for every test separately (in this case meaning testA and testB, but in real-life I have much more tests). I want to do it that way that I compare the first wave of a given test with every other subsequent wave of the same test (meaning testA_wave1 vs testA_wave2 and testA_wave1 vs testA_wave3 in the case of testA).
This way, I was able to achieve it:
library(tidyverse)
df %>%
  gather(variable, value, -ID) %>%
  mutate(wave_ID = paste0("wave", parse_number(variable)),
         variable = ifelse(grepl("testA", variable), "testA",
                           ifelse(grepl("testB", variable), "testB", NA_character_))) %>%
  group_by(wave_ID, variable) %>%
  summarise(value = list(value)) %>%
  spread(wave_ID, value) %>%
  group_by(variable) %>%
  mutate(p_value_w1w2 = t.test(unlist(wave1), unlist(wave2), paired = TRUE)$p.value,
         p_value_w1w3 = t.test(unlist(wave1), unlist(wave3), paired = TRUE)$p.value) %>%
  select(variable, matches("(p_value)"))
variable p_value_w1w2 p_value_w1w3
<chr> <dbl> <dbl>
1 testA 0.664 0.921
2 testB 0.146 0.418
However, I would like to see different/more elegant solutions that give similar results. I'm looking mostly for dplyr/tidyverse solutions, but if there is a completely different way to achieve it, I'm not against it.
Sample data:
set.seed(123)
df <- data.frame(ID = 1:20,
                 testA_wave1 = round(rnorm(20, 5, 3), 0),
                 testA_wave2 = round(rnorm(20, 5, 3), 0),
                 testA_wave3 = round(rnorm(20, 5, 3), 0),
                 testB_wave1 = round(rnorm(20, 5, 3), 0),
                 testB_wave2 = round(rnorm(20, 5, 3), 0),
                 testB_wave3 = round(rnorm(20, 5, 3), 0))
Since dplyr 0.8.0 we can use group_split to split a dataframe into a list of dataframes.
We gather the dataframe into long format and then separate the column names (key) into two columns, test and wave. We then use group_split to split the dataframe into a list based on the test column. For every dataframe in the list, we spread it back into wide format, calculate the t.test values, and row-bind the results into one dataframe using map_dfr.
library(tidyverse)
df %>%
  gather(key, value, -ID) %>%
  separate(key, c("test", "wave")) %>%
  group_split(test) %>% # previously we had to do split(.$test) here
  map_dfr(. %>%
            spread(wave, value) %>%
            summarise(test = first(test),
                      p_value_w1w2 = t.test(wave1, wave2, paired = TRUE)$p.value,
                      p_value_w1w3 = t.test(wave1, wave3, paired = TRUE)$p.value))
# A tibble: 2 x 3
# test p_value_w1w2 p_value_w1w3
# <chr> <dbl> <dbl>
#1 testA 0.664 0.921
#2 testB 0.146 0.418
We manually performed the t-tests above, as there were only two values to calculate. If there are more wave... columns, this could become cumbersome. In such cases we could do:
df %>%
  gather(key, value, -ID) %>%
  separate(key, c("test", "wave")) %>%
  group_split(test) %>%
  map_dfr(function(data)
    data %>%
      spread(wave, value) %>%
      summarise_at(vars(setdiff(unique(data$wave), "wave1")),
                   function(x) t.test(.$wave1, x, paired = TRUE)$p.value) %>%
      mutate(test = first(data$test)))
# wave2 wave3 test
# <dbl> <dbl> <chr>
#1 0.664 0.921 testA
#2 0.146 0.418 testB
Here it will perform the t-test of every "wave.." column against the "wave1" column.
Since you are also open to other solutions, here is an attempt with purely base R solution
sapply(split.default(df[-1], sub("_.*", "", names(df[-1]))), function(x)
  c(p_value_w1w2 = t.test(x[[1]], x[[2]], paired = TRUE)$p.value,
    p_value_w1w3 = t.test(x[[1]], x[[3]], paired = TRUE)$p.value))
# testA testB
#p_value_w1w2 0.6642769 0.1456059
#p_value_w1w3 0.9209554 0.4184603
We split the columns based on test* and create a list of dataframes and apply t.test on different combinations of columns for each dataframe.
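If a data frame is preferred over the matrix that sapply() returns, a small hedged addition (res is a hypothetical name for the result of the sapply() call above): transpose and convert.
res <- sapply(split.default(df[-1], sub("_.*", "", names(df[-1]))), function(x)
  c(p_value_w1w2 = t.test(x[[1]], x[[2]], paired = TRUE)$p.value,
    p_value_w1w3 = t.test(x[[1]], x[[3]], paired = TRUE)$p.value))
as.data.frame(t(res)) # one row per test, one column per comparison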
Update 03/16/2022
The tidyverse has evolved and so should this solution.
First I make a simplifying assumption: if we designed the experiment, then we know what the groups are and how many waves we followed them through. If we don't know, we can extract this information from the column names; see the appendix below.
library("broom")
library("tidyverse")
tests <- c("A", "B")
waves <- 3
comparisons <-
  list(
    test = tests,
    first = 1,
    later = seq(2, waves)
  ) %>%
  cross_df()
comparisons
#> # A tibble: 4 × 3
#> test first later
#> <chr> <dbl> <int>
#> 1 A 1 2
#> 2 B 1 2
#> 3 A 1 3
#> 4 B 1 3
Transform the data from wide format to long format.
data <- df %>%
  pivot_longer(
    -ID,
    names_to = "test_wave"
  ) %>%
  extract(
    test_wave, c("test", "wave"),
    regex = "test(.+)_wave(.+)",
    convert = TRUE
  )
Then pair the comparisons we want to make with the data we collected. I've added lots of rename statements to make the code more readable, but they are not strictly necessary.
comparisons %>%
  inner_join(
    data,
    by = c("test", "first" = "wave")
  ) %>%
  rename(
    value.first = value
  ) %>%
  inner_join(
    data,
    by = c("test", "later" = "wave", "ID")
  ) %>%
  rename(
    value.later = value
  ) %>%
  group_by(
    test, first, later
  ) %>%
  group_modify(
    ~ tidy(t.test(.x$value.first, .x$value.later, paired = TRUE))
  ) %>%
  ungroup() %>%
  pivot_wider(
    id_cols = test,
    names_from = later,
    names_glue = "wave1_vs_wave{later}",
    values_from = p.value
  )
#> # A tibble: 2 × 3
#> test wave1_vs_wave2 wave1_vs_wave3
#> <chr> <dbl> <dbl>
#> 1 A 0.664 0.921
#> 2 B 0.146 0.418
Appendix: Extract test names and number of waves from column names.
design <- df %>%
  select(starts_with("test")) %>%
  colnames() %>%
  str_match("test(.+)_wave(.+)")
tests <- unique(design[, 2])
waves <- max(as.integer(design[, 3]))
Created on 2022-03-16 by the reprex package (v2.0.1)
Old solution
Here is one way to do it, using purrr quite a bit.
library("tidyverse")
set.seed(123)
df <- tibble(
  ID = 1:20,
  testA_wave1 = round(rnorm(20, 5, 3), 0),
  testA_wave2 = round(rnorm(20, 5, 3), 0),
  testA_wave3 = round(rnorm(20, 5, 3), 0),
  testB_wave1 = round(rnorm(20, 5, 3), 0),
  testB_wave2 = round(rnorm(20, 5, 3), 0),
  testB_wave3 = round(rnorm(20, 5, 3), 0)
)
pvalues <- df %>%
  # From wide tibble to long tibble
  gather(test, value, -ID) %>%
  separate(test, c("test", "wave")) %>%
  # Not strictly necessary; will order the waves alphabetically instead
  mutate(wave = parse_number(wave)) %>%
  inner_join(., ., by = c("ID", "test")) %>%
  # If there are two waves w1 and w2,
  # we end up with pairs (w1, w1), (w1, w2), (w2, w1) and (w2, w2),
  # so filter to keep the pairing (w1, w2) only
  filter(wave.x == 1, wave.x < wave.y) %>%
  nest(ID, value.x, value.y) %>%
  mutate(pvalue = data %>%
           # Perform the test
           map(~ t.test(.$value.x, .$value.y, paired = TRUE)) %>%
           map(broom::tidy) %>%
           # Also not strictly necessary; you might want to keep all
           # information about the test: estimate, statistic, etc.
           map_dbl(pluck, "p.value"))
pvalues
#> # A tibble: 4 x 5
#> test wave.x wave.y data pvalue
#> <chr> <dbl> <dbl> <list> <dbl>
#> 1 testA 1 2 <tibble [20 x 3]> 0.664
#> 2 testA 1 3 <tibble [20 x 3]> 0.921
#> 3 testB 1 2 <tibble [20 x 3]> 0.146
#> 4 testB 1 3 <tibble [20 x 3]> 0.418
pvalues %>%
  # Drop the data in order to pivot the table
  select(-data) %>%
  unite("waves", wave.x, wave.y, sep = ":") %>%
  spread(waves, pvalue)
#> # A tibble: 2 x 3
#> test `1:2` `1:3`
#> <chr> <dbl> <dbl>
#> 1 testA 0.664 0.921
#> 2 testB 0.146 0.418
Created on 2019-03-08 by the reprex package (v0.2.1)
To throw in a data.table solution:
library(stringr)
library(data.table)
library(magrittr) ## for the pipe operator
dt_sol <- function(df) {
  ## create patterns for the melt operation:
  ## all columns from the same wave should go in one column
  grps <- str_extract(names(df)[-1], "[0-9]+$") %>%
    unique() %>%
    paste0("wave", ., "$")
  grp_names <- sub("\\$", "", grps)
  ## melt the data table: all test*_wave_i data go into column wave_i
  df.m <- melt(df,
               measure = patterns(grps),
               value.name = grp_names,
               variable.name = "test")
  ## define the names for the new columns; we want to extract estimate and p.value
  new_cols <- c(outer(c("p.value", "estimate"),
                      grp_names[-1],
                      paste, sep = "_"))
  ## use lapply on .SD, which equals all wave_i columns but the first one;
  ## return estimate and p.value
  df.m[,
       setNames(unlist(lapply(.SD,
                              function(col) {
                                t.test(wave1, col, paired = TRUE)[c("p.value", "estimate")]
                              }), recursive = FALSE), new_cols),
       test, ## group by each test
       .SDcols = grp_names[-1]]
}
dt <- copy(df)
setDT(dt)
dt_sol(dt)
# test p.value_wave2 estimate_wave2 p.value_wave3 estimate_wave3
# 1: 1 0.6642769 0.40 0.9209554 -0.1
# 2: 2 0.1456059 -1.45 0.4184603 0.7
(Note: melt() with patterns() keeps only a group index, so the test column shows factor levels 1 and 2 rather than testA/testB.)
Benchmark
Comparing the data.table solution to the tidyverse solution, we get a roughly 3-fold speed increase with the data.table solution:
dp_sol <- function(df) {
  df %>%
    gather(test, value, -ID) %>%
    separate(test, c("test", "wave")) %>%
    inner_join(., ., by = c("ID", "test")) %>%
    filter(wave.x == 1, wave.x < wave.y) %>%
    nest(ID, value.x, value.y) %>%
    mutate(pvalue = data %>%
             map(~ t.test(.$value.x, .$value.y, paired = TRUE)) %>%
             map(broom::tidy) %>%
             map_dbl(pluck, "p.value"))
}
library(microbenchmark)
microbenchmark(dplyr = dp_sol(df),
               data.table = dt_sol(dt))
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# dplyr 6.119273 6.897456 7.639569 7.348364 7.996607 14.938182 100 b
# data.table 1.902547 2.307395 2.790910 2.758789 3.133091 4.923153 100 a
With a slightly bigger input:
make_df <- function(nr_tests = 2,
                    nr_waves = 3,
                    n_per_wave = 20) {
  mat <- cbind(seq(1, n_per_wave),
               matrix(round(rnorm(nr_tests * nr_waves * n_per_wave), 0),
                      nrow = n_per_wave))
  c_names <- c(outer(1:nr_waves, 1:nr_tests,
                     function(w, t) glue::glue("test{t}_wave{w}")))
  colnames(mat) <- c("ID", c_names)
  as.data.frame(mat)
}
df2 <- make_df(100, 100, 10)
dt2 <- copy(df2)
setDT(dt2)
microbenchmark(dplyr = dp_sol(df2),
               data.table = dt_sol(dt2))
# Unit: seconds
# expr min lq mean median uq max neval cld
# dplyr 3.469837 3.669819 3.877548 3.821475 3.984518 5.268596 100 b
# data.table 1.018939 1.126244 1.193548 1.173175 1.252855 1.743075 100 a
Using all combinations without replacement (note that the tTest() helper below runs unpaired two-sample tests, since paired = TRUE is not set, which is why its p-values differ from the paired results above):
Just for the testA group:
comb <- arrangements::combinations(names(df)[grep("testA", names(df))], k = 2, n = 3, replace = F)
tTest <- function(x, data = df) {
  ttest <- t.test(x = data[x[1]], y = data[x[2]])
  return(data.frame(var1 = x[1],
                    var2 = x[2],
                    t = ttest[["statistic"]][["t"]],
                    pvalue = ttest[["p.value"]]))
}
result <- apply(comb, 1, tTest, data = df)
Result:
dplyr::bind_rows(result)
var1 var2 t pvalue
1 testA_wave1 testA_wave2 0.5009236 0.6193176
2 testA_wave1 testA_wave3 -0.6426433 0.5243146
3 testA_wave2 testA_wave3 -1.1564854 0.2547069
For all groups:
comb <- arrangements::combinations(x = names(df)[-1], k = 2,n = 6, replace = F )
result <- apply(comb, 1, tTest, data = df)
Result:
dplyr::bind_rows(result)
var1 var2 t pvalue
1 testA_wave1 testA_wave2 0.5009236 0.6193176
2 testA_wave1 testA_wave3 -0.6426433 0.5243146
3 testA_wave1 testB_wave1 0.4199215 0.6769510
4 testA_wave1 testB_wave2 -0.3447992 0.7321465
5 testA_wave1 testB_wave3 0.0000000 1.0000000
6 testA_wave2 testA_wave3 -1.1564854 0.2547069
7 testA_wave2 testB_wave1 -0.1070172 0.9153442
8 testA_wave2 testB_wave2 -0.8516264 0.3997630
9 testA_wave2 testB_wave3 -0.5640491 0.5762010
10 testA_wave3 testB_wave1 1.1068781 0.2754186
11 testA_wave3 testB_wave2 0.2966237 0.7683692
12 testA_wave3 testB_wave3 0.7211103 0.4755291
13 testB_wave1 testB_wave2 -0.7874100 0.4360152
14 testB_wave1 testB_wave3 -0.4791735 0.6346043
15 testB_wave2 testB_wave3 0.3865414 0.7013933
To throw another, somewhat more concise, data.table solution into the mix, in which we melt the data into long format:
setDT(df)
x = melt(df[, -1])[, tname := sub('_.+', '', variable)][, wave := sub('.+_', '', variable)]
x[wave != 'wave1',
  .(p.value = t.test(x[tname == test & wave == 'wave1', value], value, paired = TRUE)$p.value),
  by = .(test = tname, wave)]
# test wave p.value
# 1: testA wave2 0.6642769
# 2: testA wave3 0.9209554
# 3: testB wave2 0.1456059
# 4: testB wave3 0.4184603

Filter based on several columns with similar naming structure

I would like to filter my tibble on several columns with similar names. Specifically, I'd like to compare x with x_new, y with y_new, and so on, without spelling out the explicit names, instead using the structure in the column names.
I tried to use filter_at, but it is not working, as I don't know how to evaluate the formula in the last line properly.
my_df %>%
  filter_at(vars(contains("_new")), any_vars(funs({
    x <- .
    x_name <- quo_name(quo(x))
    x_new_name <- str_replace(x_name, "_new", "")
    paste(x_name, "!=", x_new_name)
  })))
Data
my_df <- tibble(x = 1:5,
                x_new = c(1:4, 1),
                y = letters[1:5],
                y_new = c(letters[1:3], "a", "e"))
# A tibble: 5 x 4
# x x_new y y_new
# <int> <dbl> <chr> <chr>
# 1 1 1. a a
# 2 2 2. b b
# 3 3 3. c c
# 4 4 4. d a
# 5 5 1. e e
Expected output
# A tibble: 2 x 4
# x x_new y y_new
# <int> <dbl> <chr> <chr>
# 1 4 4. d a
# 2 5 1. e e
We could do this with map. Create a vector of unique names by removing the suffix part of the column names ('nm1'). Loop through 'nm1', select the columns that match each name, reduce them to a single logical vector by checking whether the rows are unequal, then reduce the list of logical vectors to a single logical vector and extract the rows based on it.
library(tidyverse)
nm1 <- unique(sub("_.*", "", names(my_df)))
map(nm1, ~ my_df %>%
      select_at(vars(matches(.x))) %>%
      reduce(`!=`)) %>%
  reduce(`|`) %>%
  magrittr::extract(my_df, ., )
# x x_new y y_new
# <int> <dbl> <chr> <chr>
#1 4 4 d a
#2 5 1 e e
Another option is to create an expression and then evaluate it:
library(rlang)
nm1 <- names(my_df) %>%
  split(sub("_.*", "", .)) %>%
  map(~ paste(.x, collapse = " != ") %>%
        paste0("(", ., ")")) %>%
  reduce(paste, sep = "|")
my_df %>%
  filter(!!parse_expr(nm1))
# A tibble: 2 x 4
# x x_new y y_new
# <int> <dbl> <chr> <chr>
#1 4 4 d a
#2 5 1 e e

Passing parameters into function that uses dplyr

I have the following function to describe a variable
library(dplyr)
describe = function(.data, variable) {
  args <- as.list(match.call())
  evalue = eval(args$variable, .data)
  summarise(.data,
            'n' = length(evalue),
            'mean' = mean(evalue),
            'sd' = sd(evalue))
}
I want to use dplyr for describing the variable.
set.seed(1)
df = data.frame(
  'g' = sample(1:3, 100, replace = T),
  'x1' = rnorm(100),
  'x2' = rnorm(100)
)
df %>% describe(x1)
# n mean sd
# 1 100 -0.01757949 0.9400179
The problem is that when I try to compute the same descriptive statistics per group using group_by, the describe function is not applied within each group:
df %>% group_by(g) %>% describe(x1)
# # A tibble: 3 x 4
# g n mean sd
# <int> <int> <dbl> <dbl>
# 1 1 100 -0.01757949 0.9400179
# 2 2 100 -0.01757949 0.9400179
# 3 3 100 -0.01757949 0.9400179
How would you change the function to obtain what is desired, using a small number of modifications?
You need tidyeval:
describe = function(.data, variable) {
  evalue = enquo(variable)
  summarise(.data,
            'n' = length(!!evalue),
            'mean' = mean(!!evalue),
            'sd' = sd(!!evalue))
}
df %>% group_by(g) %>% describe(x1)
# A tibble: 3 x 4
g n mean sd
<int> <int> <dbl> <dbl>
1 1 27 -0.23852862 1.0597510
2 2 38 0.11327236 0.8470885
3 3 35 0.01079926 0.9351509
The dplyr vignette 'Programming with dplyr' has a thorough description of using enquo and !!
Edit:
In response to Axeman's comment, I'm not 100% sure why group_by plus the original describe does not work here. However, using debugonce with the function in its original form,
debugonce(describe)
df %>% group_by(g) %>% describe(x1)
one can see that evalue is not grouped and is just a numeric vector of length 100: eval(args$variable, .data) evaluates the column against the whole data frame before summarise() ever sees it, so the grouping cannot take effect.
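As a hedged follow-up (assuming rlang >= 0.4.0, which postdates this answer): the embrace operator {{ }} now wraps the enquo()/!! pair in a single step:
describe <- function(.data, variable) {
  summarise(.data,
            n = length({{ variable }}),
            mean = mean({{ variable }}),
            sd = sd({{ variable }}))
}
df %>% group_by(g) %>% describe(x1)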
Base NSE appears to work, too: substitute() captures the bare column name, and eval() then resolves it inside summarise()'s data mask, so grouping is respected:
describe <- function(data, var) {
  var_q <- substitute(var)
  data %>%
    summarise(n = n(),
              mean = mean(eval(var_q)),
              sd = sd(eval(var_q)))
}
df %>% describe(x1)
n mean sd
1 100 -0.1266289 1.006795
df %>% group_by(g) %>% describe(x1)
# A tibble: 3 x 4
g n mean sd
<int> <int> <dbl> <dbl>
1 1 33 -0.1379206 1.107412
2 2 29 -0.4869704 0.748735
3 3 38 0.1581745 1.020831
