Adaptive moving average - top performance in R

I am looking for performance gains in rolling/sliding window functions in R. This is quite a common task that applies to any ordered set of observations. I would like to share some of my findings; maybe somebody will be able to provide feedback to make it even faster.
An important note: I focus on the case align="right" and an adaptive rolling window, so width is a vector (of the same length as our observation vector). If width is a scalar there are already very well developed functions in the zoo and TTR packages which would be very hard to beat (4 years later: it was easier than I expected), as some of them even use Fortran (but user-defined FUNs can still be faster using wapply, mentioned below).
The RcppRoll package is worth mentioning due to its great performance, but so far it has no function which answers this question. It would be great if someone could extend it to do so.
Consider the following data:
x = c(120,105,118,140,142,141,135,152,154,138,125,132,131,120)
plot(x, type="l")
We want to apply a rolling function over the x vector with a variable rolling window width.
set.seed(1)
width = sample(2:4,length(x),TRUE)
In this particular case the rolling window width adapts over values sampled from c(2,3,4).
We will apply the mean function; expected results:
r = f(x, width, FUN = mean)
print(r)
## [1] NA NA 114.3333 120.7500 141.0000 135.2500 139.5000
## [8] 142.6667 147.0000 146.0000 131.5000 128.5000 131.5000 127.6667
plot(x, type="l")
lines(r, col="red")
Any indicator can be employed to produce the width argument, giving different variants of adaptive moving averages, or any other function.
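For illustration only, here is one hypothetical way such a width vector could be derived from the data itself (the volatility proxy and the 2:4 range are my assumptions, not part of the question):
# hypothetical: widen the window when the series is calm, narrow it when it moves a lot
vol <- c(NA, abs(diff(x)))
vol[is.na(vol)] <- median(vol, na.rm = TRUE)
width <- 4L - findInterval(vol, quantile(vol, c(1/3, 2/3)))  # widths in 2:4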
I am looking for top performance.

December 2018 update
An efficient implementation of adaptive rolling functions was recently added to data.table - more info in the ?froll manual. Additionally, an efficient alternative solution using base R has been identified (fastama below). Unfortunately Kevin Ushey's answer does not address the question, thus it is not included in the benchmark.
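For example, a minimal sketch of the new interface on the toy data from above (data.table >= 1.12.0 assumed; the exact widths depend on your R version's RNG):
library(data.table)
x = c(120,105,118,140,142,141,135,152,154,138,125,132,131,120)
set.seed(1)
width = sample(2:4, length(x), TRUE)
r = frollmean(x, n = width, adaptive = TRUE)  # right-aligned, NA where i < width[i]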
The scale of the benchmark has been increased, as it is pointless to compare microseconds.
set.seed(108)
x = rnorm(1e6)
width = rep(seq(from = 100, to = 500, by = 5), length.out=length(x))
microbenchmark(
zoo=rollapplyr(x, width = width, FUN=mean, fill=NA),
mapply=base_mapply(x, width=width, FUN=mean, na.rm=T),
wmapply=wmapply(x, width=width, FUN=mean, na.rm=T),
ama=ama(x, width, na.rm=T),
fastama=fastama(x, width),
frollmean=frollmean(x, width, na.rm=T, adaptive=TRUE),
frollmean_exact=frollmean(x, width, na.rm=T, adaptive=TRUE, algo="exact"),
times=1L
)
#Unit: milliseconds
# expr min lq mean median uq max neval
# zoo 32371.938248 32371.938248 32371.938248 32371.938248 32371.938248 32371.938248 1
# mapply 13351.726032 13351.726032 13351.726032 13351.726032 13351.726032 13351.726032 1
# wmapply 15114.774972 15114.774972 15114.774972 15114.774972 15114.774972 15114.774972 1
# ama 9780.239091 9780.239091 9780.239091 9780.239091 9780.239091 9780.239091 1
# fastama 351.618042 351.618042 351.618042 351.618042 351.618042 351.618042 1
# frollmean 7.708054 7.708054 7.708054 7.708054 7.708054 7.708054 1
# frollmean_exact 194.115012 194.115012 194.115012 194.115012 194.115012 194.115012 1
ama = function(x, n, na.rm=FALSE, fill=NA, nf.rm=FALSE) {
  # more or less the same as previous forloopply
  stopifnot((nx <- length(x)) == length(n))
  if (nf.rm) x[!is.finite(x)] = NA_real_
  ans = rep(NA_real_, nx)
  for (i in seq_along(x)) {
    ans[i] = if (i >= n[i])
      mean(x[(i-n[i]+1):i], na.rm=na.rm)
    else as.double(fill)
  }
  ans
}
fastama = function(x, n, na.rm, fill=NA) {
  if (!missing(na.rm)) stop("fast adaptive moving average implemented in R does not handle NAs; input containing NAs would give an incorrect answer, so do not even try to compare to it")
  # fast implementation of adaptive moving average in R; incorrect answer in case of NAs
  stopifnot((nx <- length(x)) == length(n))
  cs = cumsum(x)
  ans = rep(NA_real_, nx)
  for (i in seq_along(cs)) {
    ans[i] = if (i == n[i])
      cs[i]/n[i]
    else if (i > n[i])
      (cs[i]-cs[i-n[i]])/n[i]
    else as.double(fill)
  }
  ans
}
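A quick sanity check (my addition) that ama() and fastama() agree when the input has no NAs; agreement is up to floating-point error from the cumulative sums:
set.seed(108)
x_chk = rnorm(1e4)
n_chk = sample(2:10, length(x_chk), TRUE)
all.equal(ama(x_chk, n_chk), fastama(x_chk, n_chk))  # should be TRUE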
Old answer:
I chose 4 available solutions that don't require C++ and are quite easy to find or google.
# 1. rollapply
library(zoo)
?rollapplyr
# 2. mapply
base_mapply <- function(x, width, FUN, ...){
  FUN <- match.fun(FUN)
  f <- function(i, width, data){
    if(i < width) return(NA_real_)
    return(FUN(data[(i-(width-1)):i], ...))
  }
  mapply(FUN = f,
         seq_along(x), width,
         MoreArgs = list(data = x))
}
# 3. wmapply - modified version of wapply found: https://rmazing.wordpress.com/2013/04/23/wapply-a-faster-but-less-functional-rollapply-for-vector-setups/
wmapply <- function(x, width, FUN = NULL, ...){
  FUN <- match.fun(FUN)
  SEQ1 <- 1:length(x)
  SEQ1[SEQ1 < width] <- NA_integer_
  SEQ2 <- lapply(SEQ1, function(i) if(!is.na(i)) (i - (width[i]-1)):i)
  OUT <- lapply(SEQ2, function(i) if(!is.null(i)) FUN(x[i], ...) else NA_real_)
  return(base:::simplify2array(OUT, higher = TRUE))
}
# 4. forloopply - simple loop solution
forloopply <- function(x, width, FUN = NULL, ...){
  FUN <- match.fun(FUN)
  OUT <- numeric()
  for(i in 1:length(x)) {
    if(i < width[i]) next
    OUT[i] <- FUN(x[(i-(width[i]-1)):i], ...)
  }
  return(OUT)
}
Below are the timings for the prod function (mean might already be optimized inside rollapplyr). All results are equal.
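For completeness, a sketch (my addition) of how that equality can be checked against a direct sapply reference on the toy data from the question; each comparison should return TRUE:
x_chk = c(120,105,118,140,142,141,135,152,154,138,125,132,131,120)
set.seed(1)
w_chk = sample(2:4, length(x_chk), TRUE)
ref = sapply(seq_along(x_chk), function(i)
  if (i < w_chk[i]) NA_real_ else mean(x_chk[(i - w_chk[i] + 1):i]))
all.equal(ref, rollapplyr(x_chk, width = w_chk, FUN = mean, fill = NA))
all.equal(ref, base_mapply(x_chk, w_chk, FUN = mean))
all.equal(ref, wmapply(x_chk, w_chk, FUN = mean))
all.equal(ref, forloopply(x_chk, w_chk, FUN = mean))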
library(microbenchmark)
# 1a. length(x) = 1000, window = 5-20
x <- runif(1000,0.5,1.5)
width <- rep(seq(from = 5, to = 20, by = 5), length(x)/4)
microbenchmark(
rollapplyr(data = x, width = width, FUN = prod, fill = NA),
base_mapply(x = x, width = width, FUN = prod, na.rm=T),
wmapply(x = x, width = width, FUN = prod, na.rm=T),
forloopply(x = x, width = width, FUN = prod, na.rm=T),
times=100L
)
Unit: milliseconds
expr min lq median uq max neval
rollapplyr(data = x, width = width, FUN = prod, fill = NA) 59.690217 60.694364 61.979876 68.55698 153.60445 100
base_mapply(x = x, width = width, FUN = prod, na.rm = T) 14.372537 14.694266 14.953234 16.00777 99.82199 100
wmapply(x = x, width = width, FUN = prod, na.rm = T) 9.384938 9.755893 9.872079 10.09932 84.82886 100
forloopply(x = x, width = width, FUN = prod, na.rm = T) 14.730428 15.062188 15.305059 15.76560 342.44173 100
# 1b. length(x) = 1000, window = 50-200
x <- runif(1000,0.5,1.5)
width <- rep(seq(from = 50, to = 200, by = 50), length(x)/4)
microbenchmark(
rollapplyr(data = x, width = width, FUN = prod, fill = NA),
base_mapply(x = x, width = width, FUN = prod, na.rm=T),
wmapply(x = x, width = width, FUN = prod, na.rm=T),
forloopply(x = x, width = width, FUN = prod, na.rm=T),
times=100L
)
Unit: milliseconds
expr min lq median uq max neval
rollapplyr(data = x, width = width, FUN = prod, fill = NA) 71.99894 74.19434 75.44112 86.44893 281.6237 100
base_mapply(x = x, width = width, FUN = prod, na.rm = T) 15.67158 16.10320 16.39249 17.20346 103.6211 100
wmapply(x = x, width = width, FUN = prod, na.rm = T) 10.88882 11.54721 11.75229 12.19790 106.1170 100
forloopply(x = x, width = width, FUN = prod, na.rm = T) 15.70704 16.06983 16.40393 17.14210 108.5005 100
# 2a. length(x) = 10000, window = 5-20
x <- runif(10000,0.5,1.5)
width <- rep(seq(from = 5, to = 20, by = 5), length(x)/4)
microbenchmark(
rollapplyr(data = x, width = width, FUN = prod, fill = NA),
base_mapply(x = x, width = width, FUN = prod, na.rm=T),
wmapply(x = x, width = width, FUN = prod, na.rm=T),
forloopply(x = x, width = width, FUN = prod, na.rm=T),
times=100L
)
Unit: milliseconds
expr min lq median uq max neval
rollapplyr(data = x, width = width, FUN = prod, fill = NA) 753.87882 781.8789 809.7680 872.8405 1116.7021 100
base_mapply(x = x, width = width, FUN = prod, na.rm = T) 148.54919 159.9986 231.5387 239.9183 339.7270 100
wmapply(x = x, width = width, FUN = prod, na.rm = T) 98.42682 105.2641 117.4923 183.4472 245.4577 100
forloopply(x = x, width = width, FUN = prod, na.rm = T) 533.95641 602.0652 646.7420 672.7483 922.3317 100
# 2b. length(x) = 10000, window = 50-200
x <- runif(10000,0.5,1.5)
width <- rep(seq(from = 50, to = 200, by = 50), length(x)/4)
microbenchmark(
rollapplyr(data = x, width = width, FUN = prod, fill = NA),
base_mapply(x = x, width = width, FUN = prod, na.rm=T),
wmapply(x = x, width = width, FUN = prod, na.rm=T),
forloopply(x = x, width = width, FUN = prod, na.rm=T),
times=100L
)
Unit: milliseconds
expr min lq median uq max neval
rollapplyr(data = x, width = width, FUN = prod, fill = NA) 912.5829 946.2971 1024.7245 1071.5599 1431.5289 100
base_mapply(x = x, width = width, FUN = prod, na.rm = T) 171.3189 180.6014 260.8817 269.5672 344.4500 100
wmapply(x = x, width = width, FUN = prod, na.rm = T) 123.1964 131.1663 204.6064 221.1004 484.3636 100
forloopply(x = x, width = width, FUN = prod, na.rm = T) 561.2993 696.5583 800.9197 959.6298 1273.5350 100

For reference, you should definitely check out RcppRoll if you have only a single window length to 'roll' over:
library(RcppRoll) ## install.packages("RcppRoll")
library(microbenchmark)
x <- runif(1E5)
all.equal( rollapplyr(x, 10, FUN=prod), roll_prod(x, 10) )
microbenchmark( times=5,
rollapplyr(x, 10, FUN=prod),
roll_prod(x, 10)
)
gives me
> library(RcppRoll)
> library(microbenchmark)
> x <- runif(1E5)
> all.equal( rollapplyr(x, 10, FUN=prod), roll_prod(x, 10) )
[1] TRUE
> microbenchmark( times=5,
+ zoo=rollapplyr(x, 10, FUN=prod),
+ RcppRoll=roll_prod(x, 10)
+ )
Unit: milliseconds
expr min lq median uq max neval
zoo 924.894069 968.467299 997.134932 1029.10883 1079.613569 5
RcppRoll 1.509155 1.553062 1.760739 1.90061 1.944999 5
It's a bit faster ;) and the package is flexible enough for users to define and use their own rolling functions (with C++). I may extend the package in the future to allow multiple window widths, but I am sure it will be tricky to get right.
If you want to define the prod yourself, you can do so -- RcppRoll allows you to define your own C++ functions to pass through and generate a 'rolling' function if you'd like. rollit gives a somewhat nicer interface, while rollit_raw just lets you write a C++ function yourself, somewhat like you might do with Rcpp::cppFunction. The philosophy being, you should only have to express the computation you wish to perform on a particular window, and RcppRoll can take care of iterating over windows of some size.
library(RcppRoll)
library(microbenchmark)
x <- runif(1E5)
my_rolling_prod <- rollit(combine="*")
my_rolling_prod2 <- rollit_raw("
double output = 1;
for (int i=0; i < n; ++i) {
output *= X(i);
}
return output;
")
all.equal( roll_prod(x, 10), my_rolling_prod(x, 10) )
all.equal( roll_prod(x, 10), my_rolling_prod2(x, 10) )
microbenchmark( times=5,
rollapplyr(x, 10, FUN=prod),
roll_prod(x, 10),
my_rolling_prod(x, 10),
my_rolling_prod2(x, 10)
)
gives me
> library(RcppRoll)
> library(microbenchmark)
> # 1a. length(x) = 1000, window = 5-20
> x <- runif(1E5)
> my_rolling_prod <- rollit(combine="*")
C++ source file written to /var/folders/m7/_xnnz_b53kjgggkb1drc1f8c0000gn/T//RtmpcFMJEV/file80263aa7cca2.cpp .
Compiling...
Done!
> my_rolling_prod2 <- rollit_raw("
+ double output = 1;
+ for (int i=0; i < n; ++i) {
+ output *= X(i);
+ }
+ return output;
+ ")
C++ source file written to /var/folders/m7/_xnnz_b53kjgggkb1drc1f8c0000gn/T//RtmpcFMJEV/file802673777da2.cpp .
Compiling...
Done!
> all.equal( roll_prod(x, 10), my_rolling_prod(x, 10) )
[1] TRUE
> all.equal( roll_prod(x, 10), my_rolling_prod2(x, 10) )
[1] TRUE
> microbenchmark( times=5,
+ rollapplyr(x, 10, FUN=prod),
+ roll_prod(x, 10),
+ my_rolling_prod(x, 10),
+ my_rolling_prod2(x, 10)
+ )
Unit: microseconds
expr min lq median uq max neval
rollapplyr(x, 10, FUN = prod) 979710.368 1115931.323 1117375.922 1120085.250 1149117.854 5
roll_prod(x, 10) 1504.377 1635.749 1638.943 1815.344 2053.997 5
my_rolling_prod(x, 10) 1507.687 1572.046 1648.031 2103.355 7192.493 5
my_rolling_prod2(x, 10) 774.381 786.750 884.951 1052.508 1434.660 5
So really, as long as you are capable of expressing the computation you wish to perform in a particular window through either the rollit interface or with a C++ function passed through rollit_raw (whose interface is a bit rigid, but still functional), you are in good shape.

Somehow people have missed the ultra fast runmed() in base R (stats package). It's not adaptive, as far as I understand the original question, but for a rolling median, it's FAST! Comparing here to roll_median() from RcppRoll.
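The input for the benchmark below is not shown in the original post; a setup along these lines is assumed:
library(RcppRoll)
library(microbenchmark)
set.seed(42)
x <- runif(1000)  # any numeric vector works; its length drives the timings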
> microbenchmark(
+ runmed(x = x, k = 3),
+ roll_median(x, 3),
+ times=1000L
+ )
Unit: microseconds
expr min lq mean median uq max neval
runmed(x = x, k = 3) 41.053 44.854 47.60973 46.755 49.795 117.838 1000
roll_median(x, 3) 101.872 105.293 108.72840 107.574 111.375 178.657 1000

Related

Sample from a 0:Vector[i]

In R:
I have a vector:
y = sample(0:200, 1e4, replace = TRUE)
I want to create a variable x such that:
x = sample(0:y[i], 1e4, replace = TRUE)
where y[i] are the values y[1], y[2], ..., y[10000] created by the sample call above. For example, if y[1] = 50 then I would like the first entry of x to be sample(0:50), etc. However I am not sure how to do this. I have tried for loops but have gotten nowhere.
Any help is much appreciated!
How about
x <- as.integer(runif(1e4)*sample(201, 1e4, TRUE))
Benchmarking:
f1 <- function() sapply(sample(0:200, 1e4, replace = TRUE), function(i) sample(0:i, size = 1))
f2 <- function() as.integer(runif(1e4)*sample(201, 1e4, TRUE))
microbenchmark::microbenchmark(f1 = f1(),
f2 = f2())
#> Unit: microseconds
#> expr min lq mean median uq max neval
#> f1 38877.3 47070.50 49294.770 48625.00 50175.35 97045.0 100
#> f2 508.2 522.05 555.602 531.45 549.45 2080.8 100
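As a quick sanity check (my addition, not from the original answer), the two approaches should agree in distribution even though they will not match element-wise:
set.seed(1)
a <- f1()
b <- f2()
summary(a)
summary(b)
# both means should be close to 50 (E[x] = E[y]/2 = 50)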
This should work:
y = sample(0:200, 1e4, replace = TRUE)
x = sapply(y, \(i) sample(0:i, size = 1))
Or the equivalent using a for loop:
x = numeric(length(y))
for(i in seq_along(y)) {
x[i] = sample(0:y[i], size = 1)
}
If efficiency matters, this might be a bit faster on very long input:
x = floor(runif(length(y), min = 0, max = y + 1))

Fastest way to map multiple character columns to numerical values

I have an algorithm that at each iteration calculates means for certain groups (the groups do not change, only their values).
The table of the values:
d1 <- data.frame(x = sample(LETTERS, N, replace = TRUE),
y1=rnorm(N))
head(d1)
# x y1
# 1 H -0.7852538
# 2 G -0.6739159
# 3 V -1.7783771
# 4 L -0.2849846
# 5 I -0.1760284
# 6 V -0.2785826
I can calculate the means (in several ways: dplyr, data.table and tapply). I have another data.frame consisting of two columns with the group names.
d2 <- data.frame('group.high' = sample(LETTERS, N * 2, replace = TRUE),
'group.low' = sample(LETTERS, N * 2, replace = TRUE))
head(d2)
# group.high group.low
# 1 U L
# 2 K J
# 3 C Q
# 4 Q A
# 5 Q U
# 6 K W
I want to add two columns, mean.high and mean.low, with the mean value of each group based on d1.
So far I have tried two options, with dplyr and data.table. I had to use left_join twice in both of them. They are similar in speed.
microbenchmark(
  dplyr = {
    means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
    ### Solution 1
    dplyr.d2 <- left_join(d2, data.frame('group.high' = names(means),
                                         'mean.high' = means, stringsAsFactors = FALSE)) %>%
      left_join(., data.frame('group.low' = names(means),
                              'mean.low' = means, stringsAsFactors = FALSE))
  },
  data.table = {
    ### Solution 2
    d1 <- as.data.table(d1)
    d2 <- as.data.table(d2)
    means <- d1[, .(means = mean(y1)), by = x]
    new.d2 <- data.table::merge.data.table(x = d2, y = means, by.x = 'group.high', by.y = 'x')
    data.table.d2 <- data.table::merge.data.table(x = new.d2, y = means, by.x = 'group.low', by.y = 'x')
  }
)
Unit: milliseconds
expr min lq mean median uq max neval cld
dplyr 34.0837 36.88650 53.22239 42.9227 47.50660 231.5066 100 a
data.table 40.2071 47.70735 87.46804 51.2517 59.05385 258.4999 100 b
Is there a better way? How can I speed up the calculation?
As mentioned in the comments, there is an iterative process of updating the values. Here is an example.
N <- 10000
iterFuncDplyr <- function(d1, d2) {
  dplyr.d2 <- left_join(d2, data.frame('group.high' = names(means),
                                       'mean.high' = means, stringsAsFactors = FALSE)) %>%
    left_join(., data.frame('group.low' = names(means),
                            'mean.low' = means, stringsAsFactors = FALSE))
  return(var(d1$y1))
}
iterFuncData <- function(d1, d2) {
  means <- d1[, .(means = mean(y1)), by = x]
  new.d2 <- data.table:::merge.data.table(x = d2, y = means, by.x = 'group.high', by.y = 'x')
  data.table.d2 <- data.table:::merge.data.table(x = new.d2, y = means, by.x = 'group.low', by.y = 'x')
  return(var(d1$y1))
}
d1 <- data.frame(x = sample(LETTERS, N, replace = TRUE),
y1=rnorm(N))
d2 <- data.frame('group.high' = sample(LETTERS, N * 2, replace = TRUE),
'group.low' = sample(LETTERS, N * 2, replace = TRUE))
library(data.table)
library(dplyr)
microbenchmark::microbenchmark(
  dplyr = {
    temp.val <- 0
    for (i in 1:10) {
      d1$y1 <- temp.val + rnorm(N)
      temp.val <- iterFuncDplyr(d1, d2)
    }
  },
  data.table = {
    d1 <- as.data.table(d1)
    d2 <- as.data.table(d2)
    temp.val <- 0
    for (i in 1:10) {
      d1$y1 <- temp.val + rnorm(N)
      temp.val <- iterFuncData(d1, d2)
    }
  }
)
Unit: milliseconds
expr min lq mean median uq max neval
dplyr 46.22904 50.67959 52.78275 51.96358 53.34825 108.2874 100
data.table 63.81111 67.13257 70.85537 69.85712 72.72446 127.4228 100
You could subset the named vector means to create new columns and match your output:
means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
d2$mean.high <- means[d2$group.high]
d2$mean.low <- means[d2$group.low]
identical(as.matrix(d2), as.matrix(d3)) #factor vs character, used d3 w/ benchmark
[1] TRUE
Unit: microseconds
expr min lq mean median uq max neval
dplyr 4868.2 5316.25 5787.123 5524.15 5892.70 12187.3 100
data.table 8254.4 9606.60 10438.424 10118.35 10771.75 20966.5 100
subset 481.2 529.40 651.194 550.35 582.55 7849.9 100
Benchmark code:
d3 <- d2
microbenchmark::microbenchmark( # N = 10000
  dplyr = {
    means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
    ### Solution 1
    dplyr.d2 <- left_join(d2, data.frame('group.high' = names(means),
                                         'mean.high' = means, stringsAsFactors = FALSE)) %>%
      left_join(., data.frame('group.low' = names(means),
                              'mean.low' = means, stringsAsFactors = FALSE))
  },
  data.table = {
    ### Solution 2
    d1 <- as.data.table(d1)
    d2 <- as.data.table(d2)
    means <- d1[, .(means = mean(y1)), by = x]
    new.d2 <- data.table::merge.data.table(x = d2, y = means, by.x = 'group.high', by.y = 'x')
    data.table.d2 <- data.table::merge.data.table(x = new.d2, y = means, by.x = 'group.low', by.y = 'x')
  },
  subset = {
    means <- tapply(d1$y1, INDEX = d1$x, FUN = mean)
    d3$mean.high <- means[d2$group.high]
    d3$mean.low <- means[d2$group.low]
  }
)
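The subsetting approach works because a named vector can be indexed directly by a vector of keys, which vectorises the whole lookup; a minimal illustration:
means <- c(A = 1.5, B = -0.2, C = 0.7)
means[c("B", "A", "A", "C")]
#>    B    A    A    C
#> -0.2  1.5  1.5  0.7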
Here is an answer very similar to Andrew's but relying on data.table instead of tapply() (which seems faster for very big N).
library(data.table)
# Create a named vector "means"
means <- setDT(d1)[, mean(y1), by = x][, setNames(V1, x)]
setDT(d2)[, c("mean.high.means", "mean.low.means") :=
.(means[as.character(group.high)], means[as.character(group.low)])]
Output:
group.high group.low mean.high.means mean.low.means
1: Z W 0.017032792 0.0091625547
2: A A 0.013796137 0.0137961371
3: V S -0.011570159 0.0004560325
4: D X 0.005475629 0.0200984250
5: U H -0.008249901 0.0054537833
---
199996: H K 0.005453783 0.0079905631
199997: A T 0.013796137 -0.0068537963
199998: W U 0.009162555 -0.0082499015
199999: T V -0.006853796 -0.0115701585
200000: G J 0.014829259 0.0206598470
Reproducible data:
N = 1e5
set.seed(1)
d1 <- data.frame(
x = sample(LETTERS, N, replace = TRUE),
y1 = rnorm(N)
)
d2 <- data.frame(
group.high = sample(LETTERS, N * 2, replace = TRUE),
group.low = sample(LETTERS, N * 2, replace = TRUE)
)

How can I subset rows or columns of a bigstatsr::FBM in Rcpp and store them in a vector?

I have a function that computes basic summary statistics from the rows (or columns) of a given matrix and I am now trying to also use this function with a bigstatsr::FBM (I am aware that using columns should be more efficient).
The reason I want to store the rows/columns in a vector is that I would like to compute quantiles with std::nth_element. If there is a different way to do that without the vector I would be equally happy.
This is the code I use for a regular matrix.
// [[Rcpp::plugins(cpp11)]]
// [[Rcpp::depends(RcppEigen)]]
#include <RcppEigen.h>
using namespace Rcpp;
// [[Rcpp::export]]
Eigen::MatrixXd summaryC(Eigen::MatrixXd x, int nrow) {
  Eigen::MatrixXd result(nrow, 5);
  int indices[6] = {-1, 0, 249, 500, 750, 999};
  for (int i = 0; i < nrow; i++) {
    Eigen::VectorXd v = x.row(i);
    for (int q = 0; q < 5; ++q) {
      std::nth_element(v.data() + indices[q] + 1,
                       v.data() + indices[q+1],
                       v.data() + v.size());
      result(i, q) = v[indices[q+1]];
    }
  }
  return result;
}
/*** R
x <- matrix(as.numeric(1:1000000), ncol = 1000)
summaryC(x = x, nrow = 1000)
***/
However, I struggle to do this with an FBM as I am not fully grasping the intricacies of how the FBM pointer works.
I tried the following without success:
// [[Rcpp::depends(BH, bigstatsr, RcppEigen)]]
// [[Rcpp::plugins(cpp11)]]
#include <bigstatsr/BMAcc.h>
#include <RcppEigen.h>
// [[Rcpp::export]]
Eigen::MatrixXd summaryCbig(Environment fbm, int nrow, Eigen::VectorXi ind_col) {
  Eigen::MatrixXd result(nrow, 5);
  XPtr<FBM> xpMat = fbm["address"];
  BMAcc<double> macc(xpMat);
  int indices[6] = {-1, 0, 249, 500, 750, 999};
  for (int i = 0; i < nrow; i++) {
    Eigen::VectorXd v = macc.row(i);  // this does not work
    Eigen::VectorXd v = macc(i, _);   // this does not work
    SubBMAcc<double> maccr(xpMat, i, ind_col - 1); // This did not work with Eigen::VectorXi, but works with const NumericVector&
    Eigen::VectorXd v = maccr;        // this does not work even for appropriate ind_col
    for (int q = 0; q < 5; ++q) {
      std::nth_element(v.data() + indices[q] + 1,
                       v.data() + indices[q+1],
                       v.data() + v.size());
      macc(i, q) = v[indices[q+1]];
    }
  }
}
/*** R
x <- matrix(as.numeric(1:1000000), ncol = 1000)
summaryCbig(x = x, nrow = 1000, ind_col = 1:1000)
***/
Any help would be greatly appreciated, thank you!
Update - the big_apply approach
I implemented the approach with two differently sized matrices, X1 and X2 (the benchmark shown is for X1; X2 was run analogously):
library(bigstatsr)
X1 <- FBM(1000, 1000, init = 1e6)
X2 <- FBM(10000, 10000, init = 9999)
microbenchmark::microbenchmark(
  big_apply(X1, a.FUN = function(X, ind) {
    matrixStats::rowQuantiles(X1[ind, ])
  }, a.combine = "rbind", ind = rows_along(X1), ncores = nb_cores(), block.size = 500),
  big_apply(X1, a.FUN = function(X, ind) {
    matrixStats::rowQuantiles(X1[ind, ])
  }, a.combine = "rbind", ind = rows_along(X1), ncores = 1, block.size = 500),
  times = 5
)
When using X1 and block.size = 500, the task is 5-10 times slower with 4 cores than with 1 on my PC (4 CPUs, running Windows, unfortunately).
Using the bigger matrix X2 and leaving block.size at its default takes 10 times longer with 4 cores than the non-parallelized version.
Result for X2:
min lq mean median uq max neval
16.149055 19.13568 19.369975 20.139363 20.474103 20.951676 5
1.297259 2.67385 2.584647 2.858035 2.867537 3.226552 5
Assuming you have
library(bigstatsr)
X <- FBM(1000, 1000, init = 1:1e6)
I would not reinvent the wheel and use:
big_apply(X, a.FUN = function(X, ind) {
  matrixStats::rowQuantiles(X[ind, ])
}, a.combine = "rbind", ind = rows_along(X), ncores = nb_cores(), block.size = 500)
Choose the block.size (number of rows) wisely.
Function big_apply() is very useful if you want to apply an R(cpp) function to blocks of the FBM.
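As a rough rule of thumb (my addition; the 100 MB target is an arbitrary assumption, not from the answer), block.size can be picked so that one processed block of doubles stays within a memory budget:
# hypothetical heuristic: keep each processed block around 100 MB of doubles
choose_block_size <- function(ncol, target_bytes = 1e8) {
  max(1L, as.integer(target_bytes / (8 * ncol)))
}
choose_block_size(ncol = 1000)  # ~12500 rows per block for a 1000-column FBM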
Edit: Of course, parallelism will be slower for small matrices, because of the overhead of parallelism (usually 1-3 seconds). See the results for X1 and X2:
library(bigstatsr)
X1 <- FBM(1000, 1000, init = 1e6)
microbenchmark::microbenchmark(
  PAR = big_apply(X1, a.FUN = function(X, ind) {
    matrixStats::rowQuantiles(X[ind, ])
  }, a.combine = "rbind", ind = rows_along(X1), ncores = nb_cores(), block.size = 500),
  SEQ = big_apply(X1, a.FUN = function(X, ind) {
    matrixStats::rowQuantiles(X[ind, ])
  }, a.combine = "rbind", ind = rows_along(X1), ncores = 1, block.size = 500),
  times = 5
)
Unit: milliseconds
expr min lq mean median uq max neval cld
PAR 1564.20591 1602.0465 1637.77552 1629.9803 1651.04509 1741.59974 5 b
SEQ 68.92936 69.1002 76.70196 72.9173 85.31751 87.24543 5 a
X2 <- FBM(10000, 10000, init = 9999)
microbenchmark::microbenchmark(
  PAR = big_apply(X2, a.FUN = function(X, ind) {
    matrixStats::rowQuantiles(X[ind, ])
  }, a.combine = "rbind", ind = rows_along(X2), ncores = nb_cores(), block.size = 500),
  SEQ = big_apply(X2, a.FUN = function(X, ind) {
    matrixStats::rowQuantiles(X[ind, ])
  }, a.combine = "rbind", ind = rows_along(X2), ncores = 1, block.size = 500),
  times = 5
)
Unit: seconds
expr min lq mean median uq max neval cld
PAR 4.757409 4.958869 5.071982 5.083381 5.218098 5.342153 5 a
SEQ 10.842828 10.846281 11.177460 11.360162 11.416967 11.421065 5 b
The bigger your matrix is, the more you will gain from parallelism.

Data.table - subsetting within groups during group by is slow

I'm trying to produce several aggregate statistics, and some of them need to be produced on a subset of each group. The data.table is quite large, 10 million rows, but using by without column subsetting is blazing fast (less than a second). Adding just one additional column which needs to be calculated on a subset of each group increases the running time by a factor of 12.
Is there a faster way to do this? Below is my full code.
library(data.table)
library(microbenchmark)
N = 10^7
DT = data.table(id1 = sample(1:400, size = N, replace = TRUE),
id2 = sample(1:100, size = N, replace = TRUE),
id3 = sample(1:50, size = N, replace = TRUE),
filter_var = sample(1:10, size = N, replace = TRUE),
x1 = sample(1:1000, size = N, replace = TRUE),
x2 = sample(1:1000, size = N, replace = TRUE),
x3 = sample(1:1000, size = N, replace = TRUE),
x4 = sample(1:1000, size = N, replace = TRUE),
x5 = sample(1:1000, size = N, replace = TRUE) )
setkey(DT, id1,id2,id3)
microbenchmark(
DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5)
) , by = c('id1','id2','id3')] , unit = 's', times = 10L)
min lq mean median uq max neval
0.942013 0.9566891 1.004134 0.9884895 1.031334 1.165144 10
microbenchmark( DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5),
sum_x1_F1 = sum(x1[filter_var < 5]) #this line slows everything down
) , by = c('id1','id2','id3')] , unit = 's', times = 10L)
min lq mean median uq max neval
12.24046 12.4123 12.83447 12.72026 13.49059 13.61248 10
GForce makes grouped operations run faster and works on expressions like list(x = funx(X), y = funy(Y), ...) where X and Y are column names and funx and funy belong to the set of optimized functions.
For a full description of what works, see ?GForce.
To test if an expression works, read the messages from DT[, expr, by=, verbose=TRUE].
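For example, a quick way to check whether GForce kicks in for a given j expression (a sketch; the exact wording of the message may vary across data.table versions):
DT[, .(sum_x1 = sum(x1)), by = .(id1, id2, id3), verbose = TRUE]
# look for a "GForce optimized j to ..." line in the printed messages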
In the OP's case, we have sum_x1_F1 = sum(x1[filter_var < 5]), which is not covered by GForce even though sum(v) is. In this special case, we can create a variable v = x1*(filter_var < 5) and sum that:
DT[, v := x1*(filter_var < 5)]
system.time( DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5),
sum_x1_F1 = sum(v)
) , by = c('id1','id2','id3')])
# user system elapsed
# 0.63 0.19 0.81
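If the helper column is only needed for this aggregation, it can be dropped again afterwards (a small usage note, not part of the original answer):
DT[, v := NULL]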
For comparison, timing the OP's code on my computer:
system.time( DT[, .(
sum_x1 = sum(x1),
sum_x2 = sum(x2),
sum_x3 = sum(x3),
sum_x4 = sum(x4),
sum_x5 = sum(x5),
avg_x1 = mean(x1),
avg_x2 = mean(x2),
avg_x3 = mean(x3),
avg_x4 = mean(x4),
avg_x5 = mean(x5),
sum_x1_F1 = sum(x1[filter_var < 5]) #this line slows everything down
) , by = c('id1','id2','id3')])
# user system elapsed
# 9.00 0.02 9.06

Parallelize a rolling window regression in R

I'm running a rolling regression very similar to the following code:
library(PerformanceAnalytics)
library(quantmod)
data(managers)
FL <- as.formula(Next(HAM1)~HAM1+HAM2+HAM3+HAM4)
MyRegression <- function(df, FL) {
  df <- as.data.frame(df)
  model <- lm(FL, data = df[1:30, ])
  predict(model, newdata = df[31, ])
}
system.time(Result <- rollapply(managers, 31, FUN="MyRegression",FL,
by.column = FALSE, align = "right", na.pad = TRUE))
I've got some extra processors, so I'm trying to find a way to parallelize the rolling window. If this was a non-rolling regression I could easily parallelize it using the apply family of functions...
The obvious one is to use lm.fit() instead of lm() so you don't incur all the overhead in processing the formula etc.
Update: So when I said obvious what I meant to say was blindingly obvious but deceptively difficult to implement!
After a bit of fiddling around, I came up with this
library(PerformanceAnalytics)
library(quantmod)
data(managers)
The first stage is to realise that the model matrix can be prebuilt, so we do that and convert it back to a Zoo object for use with rollapply():
mmat2 <- model.frame(Next(HAM1) ~ HAM1 + HAM2 + HAM3 + HAM4, data = managers,
na.action = na.pass)
mmat2 <- cbind.data.frame(mmat2[,1], Intercept = 1, mmat2[,-1])
mmatZ <- as.zoo(mmat2)
Now we need a function that will employ lm.fit() to do the heavy lifting without having to create design matrices at each iteration:
MyRegression2 <- function(Z) {
  ## store value we want to predict for
  pred <- Z[31, -1, drop = FALSE]
  ## get rid of any rows with NA in training data
  Z <- Z[1:30, ][!rowSums(is.na(Z[1:30, ])) > 0, ]
  ## Next() would lag and leave NA in row 30 for response
  ## but we precomputed model matrix, so drop last row still in Z
  Z <- Z[-nrow(Z), ]
  ## fit the model
  fit <- lm.fit(Z[, -1, drop = FALSE], Z[, 1])
  ## get things we need to predict, in case pivoting turned on in lm.fit
  p <- fit$rank
  p1 <- seq_len(p)
  piv <- fit$qr$pivot[p1]
  ## model coefficients
  beta <- fit$coefficients
  ## this gives the predicted value for row 31 of data passed in
  drop(pred[, piv, drop = FALSE] %*% beta[piv])
}
A comparison of timings:
> system.time(Result <- rollapply(managers, 31, FUN="MyRegression",FL,
+ by.column = FALSE, align = "right",
+ na.pad = TRUE))
user system elapsed
0.925 0.002 1.020
>
> system.time(Result2 <- rollapply(mmatZ, 31, FUN = MyRegression2,
+ by.column = FALSE, align = "right",
+ na.pad = TRUE))
user system elapsed
0.048 0.000 0.05
Which affords a pretty reasonable improvement over the original. And now check that the resulting objects are the same:
> all.equal(Result, Result2)
[1] TRUE
Enjoy!
New answer
I wrote a package, rollRegres, that does this much faster. It is ~ 58 times faster than Gavin Simpson's answer. Here is an example
# simulate data
set.seed(101)
n <- 10000
wdth <- 50
X <- matrix(rnorm(10 * n), n, 10)
y <- drop(X %*% runif(10)) + rnorm(n)
Z <- cbind(y, X)
# assign other function
lm_version <- function(Z, width = wdth) {
  pred <- Z[width + 1, -1, drop = FALSE]
  ## fit the model
  Z <- Z[-nrow(Z), ]
  fit <- lm.fit(Z[, -1, drop = FALSE], Z[, 1])
  ## get things we need to predict, in case pivoting turned on in lm.fit
  p <- fit$rank
  p1 <- seq_len(p)
  piv <- fit$qr$pivot[p1]
  ## model coefficients
  beta <- fit$coefficients
  ## this gives the predicted value for next obs
  drop(pred[, piv, drop = FALSE] %*% beta[piv])
}
# show that they yield the same
library(rollRegres) # the new package
library(zoo)
all.equal(
rollapply(Z, wdth + 1, FUN = lm_version,
by.column = FALSE, align = "right", fill = NA_real_),
roll_regres.fit(
x = X, y = y, width = wdth, do_compute = "1_step_forecasts"
)$one_step_forecasts)
#R [1] TRUE
# benchmark
library(compiler)
lm_version <- cmpfun(lm_version)
microbenchmark::microbenchmark(
newnew = roll_regres.fit(
x = X, y = y, width = wdth, do_compute = "1_step_forecasts"),
prev = rollapply(Z, wdth + 1, FUN = lm_version,
by.column = FALSE, align = "right", fill = NA_real_),
times = 10)
#R Unit: milliseconds
#R expr min lq mean median uq max neval
#R newnew 10.27279 10.48929 10.91631 11.04139 11.13877 11.87121 10
#R prev 555.45898 565.02067 582.60309 582.22285 602.73091 605.39481 10
Old answer
You can reduce the run time by updating a decomposition. This yields an O(p^2) cost at each iteration instead of O(np^2), where n is your window width and p the number of predictors. Below is code to compare the two. It would likely be much faster to do it in C++, but the LINPACK dchud and dchdd routines are not included with R, so you would have to write a package to do so. Further, I recall reading that you may be able to do the update faster with implementations other than the LINPACK dchud and dchdd.
library(SamplerCompare) # for LINPACK `chdd` and `chud`
roll_forcast <- function(X, y, width){
  n <- nrow(X)
  p <- ncol(X)
  out <- rep(NA_real_, n)
  is_first <- TRUE
  i <- width
  while(i < n){
    if(is_first){
      is_first <- FALSE
      qr. <- qr(X[1:width, ])
      R <- qr.R(qr.)
      # Use X^T for the rest
      X <- t(X)
      XtY <- drop(tcrossprod(y[1:width], X[, 1:width]))
    } else {
      x_new <- X[, i]
      x_old <- X[, i - width]
      # update R
      R <- .Fortran(
        "dchud", R, p, p, x_new, 0., 0L, 0L,
        0., 0., numeric(p), numeric(p),
        PACKAGE = "SamplerCompare")[[1]]
      # downdate R
      R <- .Fortran(
        "dchdd", R, p, p, x_old, 0., 0L, 0L,
        0., 0., numeric(p), numeric(p), integer(1),
        PACKAGE = "SamplerCompare")[[1]]
      # update XtY
      XtY <- XtY + y[i] * x_new - y[i - width] * x_old
    }
    coef. <- .Internal(backsolve(R, XtY, p, TRUE, TRUE))
    coef. <- .Internal(backsolve(R, coef., p, TRUE, FALSE))
    i <- i + 1
    out[i] <- X[, i] %*% coef.
  }
  out
}
# simulate data
set.seed(101)
n <- 10000
wdth = 50
X <- matrix(rnorm(10 * n), n, 10)
y <- drop(X %*% runif(10)) + rnorm(n)
Z <- cbind(y, X)
# assign other function
lm_version <- function(Z, width = wdth) {
  pred <- Z[width + 1, -1, drop = FALSE]
  ## fit the model
  Z <- Z[-nrow(Z), ]
  fit <- lm.fit(Z[, -1, drop = FALSE], Z[, 1])
  ## get things we need to predict, in case pivoting turned on in lm.fit
  p <- fit$rank
  p1 <- seq_len(p)
  piv <- fit$qr$pivot[p1]
  ## model coefficients
  beta <- fit$coefficients
  ## this gives the predicted value for row 31 of data passed in
  drop(pred[, piv, drop = FALSE] %*% beta[piv])
}
# show that they yield the same
library(zoo)
all.equal(
rollapply(Z, wdth + 1, FUN = lm_version,
by.column = FALSE, align = "right", fill = NA_real_),
roll_forcast(X, y, wdth))
#R> [1] TRUE
# benchmark
library(compiler)
roll_forcast <- cmpfun(roll_forcast)
lm_version <- cmpfun(lm_version)
microbenchmark::microbenchmark(
new = roll_forcast(X, y, wdth),
prev = rollapply(Z, wdth + 1, FUN = lm_version,
by.column = FALSE, align = "right", fill = NA_real_),
times = 10)
#R> Unit: milliseconds
#R> expr min lq mean median uq max neval cld
#R> new 113.7637 115.4498 129.6562 118.6540 122.4930 230.3414 10 a
#R> prev 639.6499 674.1677 682.1996 678.6195 686.8816 763.8034 10 b
