I've been trying to do a little testing of the most efficient ways to replace NAs in data frames.
I started with a comparison of NA-to-0 replacement solutions on a 1 million row, 12 column dataset.
Throwing all the pipe-capable ones into microbenchmark, I got the following results.
Question 1: Is there a way to test the subset left-assignment statements (e.g. df1[is.na(df1)] <- 0) inside the benchmark function?
library(dplyr)
library(tidyr)
library(microbenchmark)
set.seed(24)
df1 <- as.data.frame(matrix(sample(c(NA, 1:5), 1e6 *12, replace=TRUE),
dimnames = list(NULL, paste0("var", 1:12)), ncol=12))
op <- microbenchmark(
mut_all_ifelse = df1 %>% mutate_all(funs(ifelse(is.na(.), 0, .))),
mut_at_ifelse = df1 %>% mutate_at(funs(ifelse(is.na(.), 0, .)), .cols = c(1:12)),
# df1[is.na(df1)] <- 0 would sit here, but I can't make it work inside this function
replace = df1 %>% replace(., is.na(.), 0),
mut_all_replace = df1 %>% mutate_all(funs(replace(., is.na(.), 0))),
mut_at_replace = df1 %>% mutate_at(funs(replace(., is.na(.), 0)), .cols = c(1:12)),
replace_na = df1 %>% replace_na(list(var1 = 0, var2 = 0, var3 = 0, var4 = 0, var5 = 0, var6 = 0, var7 = 0, var8 = 0, var9 = 0, var10 = 0, var11 = 0, var12 = 0)),
times = 1000L
)
print(op) #standard data frame of the output
Unit: milliseconds
expr min lq mean median uq max neval
mut_all_ifelse 769.87848 844.5565 871.2476 856.0941 895.4545 1274.5610 1000
mut_at_ifelse 713.48399 847.0322 875.9433 861.3224 899.7102 1006.6767 1000
replace 258.85697 311.9708 334.2291 317.3889 360.6112 455.7596 1000
mut_all_replace 96.81479 164.1745 160.6151 167.5426 170.5497 219.5013 1000
mut_at_replace 96.23975 166.0804 161.9302 169.3984 172.7442 219.0359 1000
replace_na 103.04600 161.2746 156.7804 165.1649 168.3683 210.9531 1000
boxplot(op) #boxplot of output
library(ggplot2) #nice log plot of the output
qplot(y=time, data=op, colour=expr) + scale_y_log10()
To test the subset-assignment approach, I had originally run these tests.
set.seed(24)
> Book1 <- as.data.frame(matrix(sample(c(NA, 1:5), 1e8 *12, replace=TRUE),
+ dimnames = list(NULL, paste0("var", 1:12)), ncol=12))
> system.time({
+ Book1 %>% mutate_all(funs(ifelse(is.na(.), 0, .))) })
user system elapsed
52.79 24.66 77.45
>
> system.time({
+ Book1 %>% mutate_at(funs(ifelse(is.na(.), 0, .)), .cols = c(1:12)) })
user system elapsed
52.74 25.16 77.91
>
> system.time({
+ Book1[is.na(Book1)] <- 0 })
user system elapsed
16.65 7.86 24.51
>
> system.time({
+ Book1 %>% replace_na(list(var1 = 0, var2 = 0, var3 = 0, var4 = 0, var5 = 0, var6 = 0, var7 = 0, var8 = 0, var9 = 0,var10 = 0, var11 = 0, var12 = 0)) })
user system elapsed
3.54 2.13 5.68
>
> system.time({
+ Book1 %>% mutate_at(funs(replace(., is.na(.), 0)), .cols = c(1:12)) })
user system elapsed
3.37 2.26 5.63
>
> system.time({
+ Book1 %>% mutate_all(funs(replace(., is.na(.), 0))) })
user system elapsed
3.33 2.26 5.58
>
> system.time({
+ Book1 %>% replace(., is.na(.), 0) })
user system elapsed
3.42 1.09 4.51
In these tests the base replace() comes in first.
In the benchmarking trials, replace() falls farther back in the ranks, while the tidyr replace_na() wins (by a nose).
Running the singular tests repeatedly and on different shapes and sizes of data frames always finds the base replace() in the lead.
Question 2: How could its benchmark performance be the only result that falls so far out of line with the simple test results?
More perplexingly:
Question 3: How can all the mutate_all/_at(replace()) variants work faster than the simple replace()?
Many folks report this: http://datascience.la/dplyr-and-a-very-basic-benchmark/ (and the links in that article), but I still haven't found an explanation beyond the fact that hashing and C++ are used.
with special thanks already to Tyler Rinker: https://www.r-bloggers.com/microbenchmarking-with-r/
and akrun: https://stackoverflow.com/a/41530071/5088194
You can include a complex/multi-statement expression in microbenchmark by wrapping it in {}, which basically converts it to a single expression:
microbenchmark(expr1 = { df1[is.na(df1)] = 0 },
exp2 = { tmp = 1:10; tmp[3] = 0L; tmp2 = tmp + 12L; tmp2 ^ 2 },
times = 10)
#Unit: microseconds
# expr min lq mean median uq max neval cld
# expr1 124953.716 137244.114 158576.030 142405.685 156744.076 284779.353 10 b
# exp2 2.784 3.132 17.748 23.142 24.012 38.976 10 a
Worth noting the side-effects of this:
tmp
#[1] 1 2 0 4 5 6 7 8 9 10
in contrast to, say, something like:
rm(tmp)
microbenchmark(expr1 = { df1[is.na(df1)] = 0 },
exp2 = local({ tmp = 1:10; tmp[3] = 0L; tmp2 = tmp + 12L; tmp2 ^ 2 }),
times = 10)
#Unit: microseconds
# expr min lq mean median uq max neval cld
# expr1 127250.18 132935.149 165296.3030 154509.553 169917.705 314820.306 10 b
# exp2 10.44 12.181 42.5956 54.636 57.072 97.789 10 a
tmp
#Error: object 'tmp' not found
Bearing in mind the side effects a benchmark can have, we see that the first expression, which removes the NA values, leaves a fairly light job for the alternatives that follow it:
# re-assign because we changed it before
set.seed(24)
df1 = as.data.frame(matrix(sample(c(NA, 1:5), 1e6 * 12, TRUE),
dimnames = list(NULL, paste0("var", 1:12)), ncol = 12))
unique(sapply(df1, typeof))
#[1] "integer"
any(sapply(df1, anyNA))
#[1] TRUE
system.time({ df1[is.na(df1)] <- 0 })
# user system elapsed
# 0.39 0.14 0.53
The previous benchmark leaves us with:
unique(sapply(df1, typeof))
#[1] "double"
any(sapply(df1, anyNA))
#[1] FALSE
And replacing NAs when there are none is (or at least should be) essentially a no-op on the input.
Aside from that, note that in all your alternatives you sub-assign a "double" (typeof(0)) into "integer" column vectors (sapply(df1, typeof)). While I don't think there is any case among the above alternatives where df1 gets modified in place (since, after a "data.frame" is created, R keeps information that forces its column vectors to be copied on modification), there is still a minor but avoidable overhead in coercing to "double" and storing as "double". Before replacing elements of an "integer" vector, R will allocate and copy (for an "integer" replacement value) or allocate and coerce (for a "double" replacement value). Also, after the first coercion (caused by a benchmark side effect, as noted above), R operates on "double"s, which involve slower manipulation than "integer"s. I can't find a straightforward R way to investigate that difference, but in a nutshell (at the risk of not being totally accurate) we can simulate these operations with:
# simulate R's copying of int to int
# allocate a new int and copy
int2int = inline::cfunction(sig = c(x = "integer"), body = '
SEXP ans = PROTECT(allocVector(INTSXP, LENGTH(x)));
memcpy(INTEGER(ans), INTEGER(x), LENGTH(x) * sizeof(int));
UNPROTECT(1);
return(ans);
')
# R's coercing of int to double
# 'coerceVector', internally, allocates a double and coerces to populate it
int2dbl = inline::cfunction(sig = c(x = "integer"), body = '
SEXP ans = PROTECT(coerceVector(x, REALSXP));
UNPROTECT(1);
return(ans);
')
# simulate R's copying from double to double
dbl2dbl = inline::cfunction(sig = c(x = "double"), body = '
SEXP ans = PROTECT(allocVector(REALSXP, LENGTH(x)));
memcpy(REAL(ans), REAL(x), LENGTH(x) * sizeof(double));
UNPROTECT(1);
return(ans);
')
And on a benchmark:
x.int = 1:1e7; x.dbl = as.numeric(x.int)
microbenchmark(int2int(x.int), int2dbl(x.int), dbl2dbl(x.dbl), times = 50)
#Unit: milliseconds
# expr min lq mean median uq max neval cld
# int2int(x.int) 16.42710 16.91048 21.93023 17.42709 19.38547 54.36562 50 a
# int2dbl(x.int) 35.94064 36.61367 47.15685 37.40329 63.61169 78.70038 50 b
# dbl2dbl(x.dbl) 33.51193 34.18427 45.30098 35.33685 63.45788 75.46987 50 b
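A rough pure-R cross-check of the same effect (my own sketch, reusing x.int from above; it measures a bit more than the bare copy, but the ordering matches the C simulations):
microbenchmark(
  int_stays_int = { y <- x.int; y[1L] <- 0L },  # forces a copy of the integer vector, no type change
  int_to_double = { y <- x.int; y[1L] <- 0 },   # forces a copy plus coercion to double
  times = 50)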
To wrap up that whole note: replacing 0 with 0L should save some time...
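The type change is easy to see on a bare vector (a minimal base-R illustration):
v <- c(1L, NA, 3L); v[is.na(v)] <- 0
typeof(v)
#[1] "double"    # the whole vector was coerced
w <- c(1L, NA, 3L); w[is.na(w)] <- 0L
typeof(w)
#[1] "integer"   # no coercion needed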
Finally, to replicate the benchmark in a fairer way, we could use:
library(dplyr)
library(tidyr)
library(microbenchmark)
set.seed(24)
df1 = as.data.frame(matrix(sample(c(NA, 1:5), 1e6 * 12, TRUE),
dimnames = list(NULL, paste0("var", 1:12)), ncol = 12))
Wrap in functions:
stopifnot(ncol(df1) == 12) #some of the alternatives are hardcoded to 12 columns
mut_all_ifelse = function(x, val) x %>% mutate_all(funs(ifelse(is.na(.), val, .)))
mut_at_ifelse = function(x, val) x %>% mutate_at(funs(ifelse(is.na(.), val, .)), .cols = c(1:12))
baseAssign = function(x, val) { x[is.na(x)] <- val; x }
baseFor = function(x, val) { for(j in 1:ncol(x)) x[[j]][is.na(x[[j]])] = val; x }
base_replace = function(x, val) x %>% replace(., is.na(.), val)
mut_all_replace = function(x, val) x %>% mutate_all(funs(replace(., is.na(.), val)))
mut_at_replace = function(x, val) x %>% mutate_at(funs(replace(., is.na(.), val)), .cols = c(1:12))
myreplace_na = function(x, val) x %>% replace_na(list(var1 = val, var2 = val, var3 = val, var4 = val, var5 = val, var6 = val, var7 = val, var8 = val, var9 = val, var10 = val, var11 = val, var12 = val))
Test for equality of results before the benchmarks:
identical(mut_all_ifelse(df1, 0), mut_at_ifelse(df1, 0))
#[1] TRUE
identical(mut_at_ifelse(df1, 0), baseAssign(df1, 0))
#[1] TRUE
identical(baseAssign(df1, 0), baseFor(df1, 0))
#[1] TRUE
identical(baseFor(df1, 0), base_replace(df1, 0))
#[1] TRUE
identical(base_replace(df1, 0), mut_all_replace(df1, 0))
#[1] TRUE
identical(mut_all_replace(df1, 0), mut_at_replace(df1, 0))
#[1] TRUE
identical(mut_at_replace(df1, 0), myreplace_na(df1, 0))
#[1] TRUE
Test with coercing to "double":
benchnum = microbenchmark(mut_all_ifelse(df1, 0),
mut_at_ifelse(df1, 0),
baseAssign(df1, 0),
baseFor(df1, 0),
base_replace(df1, 0),
mut_all_replace(df1, 0),
mut_at_replace(df1, 0),
myreplace_na(df1, 0),
times = 10)
benchnum
#Unit: milliseconds
# expr min lq mean median uq max neval cld
# mut_all_ifelse(df1, 0) 1368.5091 1441.9939 1497.5236 1509.2233 1550.1416 1629.6959 10 c
# mut_at_ifelse(df1, 0) 1366.1674 1389.2256 1458.1723 1464.5962 1503.4337 1553.7110 10 c
# baseAssign(df1, 0) 532.4975 548.9444 586.8198 564.3940 655.8083 667.8634 10 b
# baseFor(df1, 0) 169.6048 175.9395 206.7038 189.5428 197.6472 308.6965 10 a
# base_replace(df1, 0) 518.7733 547.8381 597.8842 601.1544 643.4970 666.6872 10 b
# mut_all_replace(df1, 0) 169.1970 183.5514 227.1978 194.0903 291.6625 346.4649 10 a
# mut_at_replace(df1, 0) 176.7904 186.4471 227.3599 202.9000 303.4643 309.2279 10 a
# myreplace_na(df1, 0) 172.4926 177.8518 199.1469 186.3645 192.1728 297.0419 10 a
Test without coercing to "double":
benchint = microbenchmark(mut_all_ifelse(df1, 0L),
mut_at_ifelse(df1, 0L),
baseAssign(df1, 0L),
baseFor(df1, 0L),
base_replace(df1, 0L),
mut_all_replace(df1, 0L),
mut_at_replace(df1, 0L),
myreplace_na(df1, 0L),
times = 10)
benchint
#Unit: milliseconds
# expr min lq mean median uq max neval cld
# mut_all_ifelse(df1, 0L) 1291.17494 1313.1910 1377.9265 1353.2812 1417.4389 1554.6110 10 c
# mut_at_ifelse(df1, 0L) 1295.34053 1315.0308 1372.0728 1353.0445 1431.3687 1478.8613 10 c
# baseAssign(df1, 0L) 451.13038 461.9731 477.3161 471.0833 484.9318 528.4976 10 b
# baseFor(df1, 0L) 98.15092 102.4996 115.7392 107.9778 136.2227 139.7473 10 a
# base_replace(df1, 0L) 428.54747 451.3924 471.5011 470.0568 497.7088 516.1852 10 b
# mut_all_replace(df1, 0L) 101.66505 102.2316 137.8128 130.5731 161.2096 243.7495 10 a
# mut_at_replace(df1, 0L) 103.79796 107.2533 119.1180 112.1164 127.7959 166.9113 10 a
# myreplace_na(df1, 0L) 100.03431 101.6999 120.4402 121.5248 137.1710 141.3913 10 a
And a simple way to visualize:
boxplot(benchnum, ylim = range(min(summary(benchint)$min, summary(benchnum)$min),
max(summary(benchint)$max, summary(benchnum)$max)))
boxplot(benchint, add = TRUE, border = "red", axes = FALSE)
legend("topright", c("coerce", "not coerce"), fill = c("black", "red"))
Note that df1 is unchanged after all this (str(df1)).
Here's a reprex.
library(data.table)
# create a dummy data.table
dt <- data.table(
ts = seq.POSIXt(Sys.time(),by = 60,length.out = 1000),
regn = rpois(1000,100),
VR = rnorm(1000,50,sd = 5),
rv = rnorm(1000,500,sd = 20),
topic = sample(c("A","B","C","D","E"),size = 1000,replace = T),
hrblock = xts::align.time(seq.POSIXt(Sys.time(),by = 60,length.out = 1000),n = 60)
)
# run some groupings
# The groupings are exactly what I need in my production code except
# that here they are working on random data. But still the code
# demonstrates the same reduction in speed as I get in the actual
# code.
microbenchmark::microbenchmark(
dt[,.(.N,t1=first(ts),t2= last(ts),
r1 = fifelse(regn %in% c(100,101,102), first(VR),NA_real_),
r2 = fifelse(regn %in% c(100,101,102), last(VR),NA_real_),
watts = fifelse(regn==101,mean(VR),NA_real_),
l1 = first(rv),l2=last(rv)),
.(hrblock,topic,regn)]
)
#> Unit: milliseconds
#> expr
#> dt[, .(.N, t1 = first(ts), t2 = last(ts), r1 = fifelse(regn %in% c(100, 101, 102), first(VR), NA_real_), r2 = fifelse(regn %in% c(100, 101, 102), last(VR), NA_real_), watts = fifelse(regn == 101, mean(VR), NA_real_), l1 = first(rv), l2 = last(rv)), .(hrblock, topic, regn)]
#> min lq mean median uq max neval
#> 51.30181 54.83056 57.41794 56.55636 57.99337 90.92381 100
Created on 2022-12-19 by the reprex package (v2.0.1)
So a 1000-row data.table is taking close to 56 milliseconds, which looks quite slow. In real life, I run summaries on hundreds of thousands or a million rows, and the user interface becomes very sluggish.
Am I making any fundamental mistake in the grouping?
I tried to use setkey before execution, and it did not speed up the code.
I am expecting a 5- to 10-fold improvement in the response time. Any help will be highly appreciated.
If you have a large share of groups that contain only one row, I strongly suggest splitting the task: a grouped operation for the groups with N > 1 and a simple vectorized operation for the groups with N == 1. This way you can make use of vectorization and avoid unnecessary function calls:
microbenchmark::microbenchmark(
at_once = dt[,.(.N,t1=first(ts),t2= last(ts),
r1 = fifelse(regn %in% c(100,101,102), first(VR),NA_real_),
r2 = fifelse(regn %in% c(100,101,102), last(VR),NA_real_),
watts = fifelse(regn==101,mean(VR),NA_real_),
l1 = first(rv),l2=last(rv)),
.(hrblock,topic,regn)],
splitted = {
dt[, N := .N, by = .(hrblock,topic,regn)]
dt1 <- dt[N > 1,
.(N,
t1=first(ts),t2= last(ts),
r1 = fifelse(regn %in% c(100,101,102), first(VR),NA_real_),
r2 = fifelse(regn %in% c(100,101,102), last(VR),NA_real_),
watts = fifelse(regn==101,mean(VR),NA_real_),
l1 = first(rv),l2=last(rv)),
.(hrblock,topic,regn)]
dt2 <- dt[N == 1,
.(hrblock,topic,regn,
N,
t1 = ts,
t2 = ts,
r1 = fifelse(regn %in% c(100,101,102), VR, NA_real_),
r2 = fifelse(regn %in% c(100,101,102), VR, NA_real_),
watts = fifelse(regn == 101, VR ,NA_real_),
l1 = rv,
l2 = rv)]
rbind(dt1, dt2)
}
)
#>
#> Unit: milliseconds
#> expr min lq mean median uq max neval
#> at_once 33.666042 34.058334 37.66860 34.898542 39.61844 136.997209 100
#> splitted 2.610042 2.667168 3.02075 2.972376 3.05921 8.958875 100
all.equal(splitted, at_once)
#> [1] TRUE
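A side note in the spirit of the side-effect discussion earlier: dt[, N := .N, by = ...] adds the helper column N to dt by reference, so the splitted branch modifies dt itself. If that column is not wanted afterwards, the usual data.table idiom removes it:
dt[, N := NULL]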
I have an algorithm that, at each iteration, calculates means for certain groups (the groups do not change, only their values do).
The table of values:
d1 <- data.frame(x = sample(LETTERS, N, replace = TRUE),
y1=rnorm(N))
head(d1)
# x y1
# 1 H -0.7852538
# 2 G -0.6739159
# 3 V -1.7783771
# 4 L -0.2849846
# 5 I -0.1760284
# 6 V -0.2785826
I can calculate the means (in several ways: dplyr, data.table and tapply). I have another data.frame consisting of two columns with the group names.
d2 <- data.frame('group.high' = sample(LETTERS, N * 2, replace = TRUE),
'group.low' = sample(LETTERS, N * 2, replace = TRUE))
head(d2)
# group.high group.low
# 1 U L
# 2 K J
# 3 C Q
# 4 Q A
# 5 Q U
# 6 K W
I want to add two columns, mean.high and mean.low, containing the mean value of each group based on d1.
So far I have tried two options, one with dplyr and one with data.table, and I had to join twice in each of them. They are both similar in speed.
microbenchmark(
dplyr = {
means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
### Solution 1
dplyr.d2 <- left_join(d2,data.frame('group.high' = names(means),
'mean.high' = means, stringsAsFactors = FALSE) ) %>%
left_join(., data.frame('group.low' = names(means),
'mean.low' = means, stringsAsFactors = FALSE))},
data.table = {
### Solution 2
d1 <- as.data.table(d1)
d2 <- as.data.table(d2)
means <- d1[ ,.(means = mean(y1)), by = x]
new.d2 <- data.table::merge.data.table(x = d2, y = means, by.x = 'group.high', by.y = 'x')
data.table.d2 <- data.table::merge.data.table(x = new.d2, y = means, by.x = 'group.low', by.y = 'x')
}
)
Unit: milliseconds
expr min lq mean median uq max neval cld
dplyr 34.0837 36.88650 53.22239 42.9227 47.50660 231.5066 100 a
data.table 40.2071 47.70735 87.46804 51.2517 59.05385 258.4999 100 b
Is there a better way? How can I speed the calculation?
As mentioned in the comments, there is an iterative process of updating the values. Here is an example.
N <- 10000
iterFuncDplyr <- function(d1, d2) {
  means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)  # group means used by the joins below
dplyr.d2 <- left_join(d2,data.frame('group.high' = names(means),
'mean.high' = means, stringsAsFactors = FALSE) ) %>%
left_join(., data.frame('group.low' = names(means),
'mean.low' = means, stringsAsFactors = FALSE))
return(var(d1$y1))
}
iterFuncData <- function(d1, d2) {
means <- d1[ ,.(means = mean(y1)), by = x]
new.d2 <- data.table:::merge.data.table(x = d2, y = means, by.x = 'group.high', by.y = 'x')
data.table.d2 <- data.table:::merge.data.table(x = new.d2, y = means, by.x = 'group.low', by.y = 'x')
return(var(d1$y1))
}
d1 <- data.frame(x = sample(LETTERS, N, replace = TRUE),
y1=rnorm(N))
d2 <- data.frame('group.high' = sample(LETTERS, N * 2, replace = TRUE),
'group.low' = sample(LETTERS, N * 2, replace = TRUE))
library(data.table)
library(dplyr)
microbenchmark::microbenchmark(dplyr = {
temp.val <- 0
for (i in 1:10) {
d1$y1 <- temp.val + rnorm(N)
temp.val <- iterFuncDplyr(d1, d2)
}},
data.table = {
d1 <- as.data.table(d1)
d2 <- as.data.table(d2)
temp.val <- 0
for (i in 1:10) {
d1$y1 <- temp.val + rnorm(N)
temp.val <- iterFuncData(d1, d2)
}
}
)
Unit: milliseconds
expr min lq mean median uq max neval
dplyr 46.22904 50.67959 52.78275 51.96358 53.34825 108.2874 100
data.table 63.81111 67.13257 70.85537 69.85712 72.72446 127.4228 100
You could subset the named vector means to create new columns and match your output:
means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
d2$mean.high <- means[d2$group.high]
d2$mean.low <- means[d2$group.low]
identical(as.matrix(d2), as.matrix(d3)) #factor vs character, used d3 w/ benchmark
[1] TRUE
Unit: microseconds
expr min lq mean median uq max neval
dplyr 4868.2 5316.25 5787.123 5524.15 5892.70 12187.3 100
data.table 8254.4 9606.60 10438.424 10118.35 10771.75 20966.5 100
subset 481.2 529.40 651.194 550.35 582.55 7849.9 100
Benchmark code:
d3 <- d2
microbenchmark::microbenchmark( # N = 10000
dplyr = {
means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
### Solution 1
dplyr.d2 <- left_join(d2,data.frame('group.high' = names(means),
'mean.high' = means, stringsAsFactors = FALSE) ) %>%
left_join(., data.frame('group.low' = names(means),
'mean.low' = means, stringsAsFactors = FALSE))},
data.table = {
### Solution 2
d1 <- as.data.table(d1)
d2 <- as.data.table(d2)
means <- d1[ ,.(means = mean(y1)), by = x]
new.d2 <- data.table::merge.data.table(x = d2, y = means, by.x = 'group.high', by.y = 'x')
data.table.d2 <- data.table::merge.data.table(x = new.d2, y = means, by.x = 'group.low', by.y = 'x')
},
subset = {
means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
d3$mean.high <- means[d2$group.high]
d3$mean.low <- means[d2$group.low]
}
)
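One caution worth adding (this variant is mine, not part of the benchmark above): subsetting a named vector with a factor uses the factor's integer codes, not its character values. It happens to line up here because the names of means and the factor levels are both the sorted group letters, but it would silently misalign if some group occurred in d2 and not in d1. Indexing by character avoids the issue:
# hypothetical safer variant, indexing by character instead of factor codes
d3$mean.high <- means[as.character(d2$group.high)]
d3$mean.low <- means[as.character(d2$group.low)]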
Here is an answer very similar to Andrew's but relying on data.table instead of tapply() (which seems faster for very big N).
library(data.table)
# Create a named vector "means"
means <- setDT(d1)[, mean(y1), by = x][, setNames(V1, x)]
setDT(d2)[, c("mean.high.means", "mean.low.means") :=
.(means[as.character(group.high)], means[as.character(group.low)])]
Output:
group.high group.low mean.high.means mean.low.means
1: Z W 0.017032792 0.0091625547
2: A A 0.013796137 0.0137961371
3: V S -0.011570159 0.0004560325
4: D X 0.005475629 0.0200984250
5: U H -0.008249901 0.0054537833
---
199996: H K 0.005453783 0.0079905631
199997: A T 0.013796137 -0.0068537963
199998: W U 0.009162555 -0.0082499015
199999: T V -0.006853796 -0.0115701585
200000: G J 0.014829259 0.0206598470
Reproducible data:
N = 1e5
set.seed(1)
d1 <- data.frame(
x = sample(LETTERS, N, replace = TRUE),
y1 = rnorm(N)
)
d2 <- data.frame(
group.high = sample(LETTERS, N * 2, replace = TRUE),
group.low = sample(LETTERS, N * 2, replace = TRUE)
)
I'm trying to produce several aggregate statistics, and some of them need to be produced on a subset of each group. The data.table is quite large, 10 million rows, but using by without column subsetting is blazing fast (less than a second). Adding just one additional column that needs to be calculated on a subset of each group increases the running time by a factor of 12.
Is there a faster way to do this? Below is my full code.
library(data.table)
library(microbenchmark)
N = 10^7
DT = data.table(id1 = sample(1:400, size = N, replace = TRUE),
id2 = sample(1:100, size = N, replace = TRUE),
id3 = sample(1:50, size = N, replace = TRUE),
filter_var = sample(1:10, size = N, replace = TRUE),
x1 = sample(1:1000, size = N, replace = TRUE),
x2 = sample(1:1000, size = N, replace = TRUE),
x3 = sample(1:1000, size = N, replace = TRUE),
x4 = sample(1:1000, size = N, replace = TRUE),
x5 = sample(1:1000, size = N, replace = TRUE) )
setkey(DT, id1,id2,id3)
microbenchmark(
DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5)
) , by = c('id1','id2','id3')] , unit = 's', times = 10L)
min lq mean median uq max neval
0.942013 0.9566891 1.004134 0.9884895 1.031334 1.165144 10
microbenchmark( DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5),
sum_x1_F1 = sum(x1[filter_var < 5]) #this line slows everything down
) , by = c('id1','id2','id3')] , unit = 's', times = 10L)
min lq mean median uq max neval
12.24046 12.4123 12.83447 12.72026 13.49059 13.61248 10
GForce makes grouped operations run faster and works on expressions of the form list(x = funx(X), y = funy(Y), ...), where X and Y are column names and funx and funy belong to the set of optimized functions.
For a full description of what works, see ?GForce.
To test if an expression works, read the messages from DT[, expr, by=, verbose=TRUE].
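For example (a quick sketch; the exact wording of the message depends on the data.table version), a simple grouped sum on the OP's data should report that GForce was applied:
DT[, .(sum_x1 = sum(x1)), by = id1, verbose = TRUE]
# look for a line mentioning GForce in the console output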
In the OP's case, we have sum_x1_F1 = sum(x1[filter_var < 5]), which is not covered by GForce even though a plain sum(v) is. In this special case, we can create a helper column v = x1 * (filter_var < 5) and sum that:
DT[, v := x1*(filter_var < 5)]
system.time( DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5),
sum_x1_F1 = sum(v)
) , by = c('id1','id2','id3')])
# user system elapsed
# 0.63 0.19 0.81
For comparison, timing the OP's code on my computer:
system.time( DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5),
sum_x1_F1 = sum(x1[filter_var < 5]) #this line slows everything down
) , by = c('id1','id2','id3')])
# user system elapsed
# 9.00 0.02 9.06
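One caveat of my own, not from the original answer: the product trick assumes x1 and filter_var contain no NAs (true for the sampled data here); with NAs the subset form and the product form can disagree:
x <- c(1, NA, 3); f <- c(1, 9, 2)
sum(x[f < 5])      # 4: the NA sits in a row the filter drops
sum(x * (f < 5))   # NA: 0 * NA is NA, so the product keeps it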
I have some code, using tidyverse functions, that generates a matrix of distances between all points in a data frame. However, it is prohibitively slow. Does anyone know of a faster way to do the same thing?
Example data and working code:
library(tidyverse)
locs <- data.frame(ID = 1:4000, x = runif (4000, 0, 1), y = runif (4000, 0, 1))
df1 <- locs %>%
mutate(k = 1)
df2 <- df1 %>%
full_join(df1, by = "k") %>%
mutate(length = sqrt((x.x - x.y)^2 + (y.x - y.y)^2)) %>%
select(ID.x, ID.y, length)
dists <- matrix(data = df2$length, nrow = nrow(df1), ncol = nrow(df1))
You could use the base R function dist:
locs <- data.frame(ID = 1:10, x = runif (10, 0, 1), y = runif (10, 0, 1))
dist(locs[,2:3], upper = T, diag = T)
Output:
1 2 3 4 5 6 7 8 9 10
1 0.00000000 1.10309601 0.98790825 0.54490600 0.42478532 1.06323764 0.31094245 0.52593635 0.44695830 0.85010761
2 1.10309601 0.00000000 0.29292865 0.93412638 0.74551902 0.17160290 0.83557056 0.62393711 0.74218236 0.57669081
3 0.98790825 0.29292865 0.00000000 0.69626767 0.72278486 0.13085561 0.78064096 0.46359296 0.73098652 0.72732431
4 0.54490600 0.93412638 0.69626767 0.00000000 0.65426980 0.81617143 0.59851262 0.36551106 0.68253093 1.00018238
5 0.42478532 0.74551902 0.72278486 0.65426980 0.00000000 0.75537605 0.11384534 0.36844164 0.02911855 0.42844270
6 1.06323764 0.17160290 0.13085561 0.81617143 0.75537605 0.00000000 0.82826619 0.55014297 0.75867851 0.68258388
7 0.31094245 0.83557056 0.78064096 0.59851262 0.11384534 0.82826619 0.00000000 0.37224997 0.13688270 0.54088523
8 0.52593635 0.62393711 0.46359296 0.36551106 0.36844164 0.55014297 0.37224997 0.00000000 0.39086196 0.64185453
9 0.44695830 0.74218236 0.73098652 0.68253093 0.02911855 0.75867851 0.13688270 0.39086196 0.00000000 0.40400339
10 0.85010761 0.57669081 0.72732431 1.00018238 0.42844270 0.68258388 0.54088523 0.64185453 0.40400339 0.00000000
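Note that dist() returns a compact "dist" object rather than a plain matrix; if you need the same square matrix the original code produced, wrap it in as.matrix() (base R):
dists <- as.matrix(dist(locs[, 2:3], upper = TRUE, diag = TRUE))
dim(dists)  # nrow(locs) x nrow(locs)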
Benchmark with 1000 records:
library(dplyr)
library(microbenchmark)
locs <- data.frame(ID = 1:1000, x = runif (1000, 0, 1), y = runif (1000, 0, 1))
f1 <- function()
{
df1 <- locs %>%
mutate(k = 1)
df2 <- df1 %>%
full_join(df1, by = "k") %>%
mutate(length = sqrt((x.x - x.y)^2 + (y.x - y.y)^2)) %>%
select(ID.x, ID.y, length)
dists <- matrix(data = df2$length, nrow = nrow(df1), ncol = nrow(df1))
}
f2 <- function(){dist(locs[,2:3],upper = T,diag=T)}
microbenchmark(f1())
microbenchmark(f2())
Results:
Unit: milliseconds
expr min lq mean median uq max neval
f1() 81.74188 245.8014 276.4318 259.7682 294.01 567.9409 100
and
Unit: milliseconds
expr min lq mean median uq max neval
f2() 6.956302 7.330661 8.675304 8.11507 8.981121 18.77783 100
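Part of the gap is algorithmic rather than just implementation (my observation, not from the original answer): dist() computes each pair once, while the full-join approach computes all n^2 combinations, including the diagonal and both orderings of every pair:
n <- nrow(locs)
length(dist(locs[, 2:3]))  # n * (n - 1) / 2 unique pairwise distances
n^2                        # cells computed by the full-join approach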
Hope this helps!
I have two vectors, r and s. I want to compute the outer difference of these two vectors, with negative results clipped to zero, like the following:
r = rnorm(100000)
s = c(0.02, 0.04, 0.3, 0.43, 0.5, 0.7, 0.8, 0.9)
res = t(pmax(outer(r, s, "-"), 0))
system.time({
res = t(pmax(outer(r, s, "-"), 0))
})
## user system elapsed
## 0.05 0.00 0.05
or
system.time({
x = pmax(r - rep(s, each = length(r)), 0)
res = matrix(x, nrow = length(s), byrow = TRUE)
})
## user system elapsed
## 0.05 0.00 0.05
How can I get the result x faster in R?
I get slightly faster performance by running the outer function separately and then setting the negative entries to zero by subsetting, like this...
res1 <- t( outer( r , s , "-" ) )
res1[ res1 < 0 ] <- 0
But if you want even more speed, try using Rcpp. It's easy enough; just run the following code snippet:
if( ! require( Rcpp ) ) install.packages( "Rcpp" )
Rcpp::cppFunction( '
NumericMatrix gtzero(NumericVector r , NumericVector s){
int cols = r.size();
int rows = s.size();
NumericMatrix out(rows, cols);
for( int i = 0; i < cols; i++){
NumericMatrix::Column ncol = out( _, i );
ncol = ifelse( r[i] - s > 0 , r[i] - s , 0 );
}
return out;
}
')
Then use the function like this:
gtzero( r , s )
This turns out to be about 6 times faster than using outer and pmax, and 3 times faster than outer followed by [ subsetting:
require( microbenchmark )
bm <- microbenchmark( eval( rose.baseR ) , eval( simon.baseR ) , eval( simon.Rcpp ) )
print( bm , "relative" , order = "median" , digits = 2 )
#Unit: relative
# expr min lq median uq max neval
# eval(simon.Rcpp) 1 1.0 1.0 1.0 1.0 100
# eval(simon.baseR) 3 3.1 3.2 3.2 1.5 100
# eval(rose.baseR) 3 3.4 6.0 5.9 1.8 100
And gives the exact same result:
identical( res0 , res2 )
#[1] TRUE
The following expressions were evaluated:
set.seed(123)
r = rnorm(100000)
s = c(0.02, 0.04, 0.3, 0.43, 0.5, 0.7, 0.8, 0.9)
rose.baseR <- quote({
res0 <- t(pmax(outer(r, s, "-"), 0))
})
simon.baseR <- quote({
res1 <- outer( r , s , "-" )
res1[ res1 < 0 ] <- 0
})
simon.Rcpp <- quote({
res2 <- gtzero(r,s)
})
Following #thelatemail's comment:
fun1 <- function(r,s) t(pmax(outer(r, s, "-"), 0))
fun2 <- function(r,s) {
x = pmax(r - rep(s, each = length(r)), 0)
matrix(x, nrow = length(s), byrow = TRUE)
}
fun3 <- function(r,s) {
dr <- length(r)
ds <- length(s)
R <- rep(s, rep.int(length(r), length(s)))
S <- rep(r, times = ceiling(length(s)/length(r)))
res <- pmax(S - R, 0)
dim(res) <- c(dr, ds)
t(res)
}
library(microbenchmark)
microbenchmark(res1 <- fun1(r,s),
res2 <- fun2(r,s),
res3 <- fun3(r,s),
times=20)
# Unit: milliseconds
# expr min lq median uq max neval
# res1 <- fun1(r, s) 43.28387 46.68182 66.03417 78.78109 83.75569 20
# res2 <- fun2(r, s) 50.52941 54.36576 56.77067 60.87218 91.14043 20
# res3 <- fun3(r, s) 34.18374 35.37835 37.97405 40.10642 70.78626 20
identical(res1, res3)
#[1] TRUE