Say you have some participants and controls in a given experiment, each evaluated on three characteristics, something like this:
part_A <- c(3, 5, 4)
part_B <- c(12, 15, 18)
part_C <- c(50, 40, 45)
ctrl_1 <- c(4, 5, 5)
ctrl_2 <- c(1, 0, 4)
ctrl_3 <- c(13, 16, 17)
ctrl_4 <- c(28, 30, 35)
ctrl_5 <- c(51, 43, 44)
I want to find for each participant which control case is the closest match.
If I used the dist() function, I could get it, but it would also spend a lot of time calculating the distances between controls, which is useless to me (and in the real data, there are 1000 times more control cases than participant cases).
Is there a way to ask only for the distances between each of these elements and each of those elements? And something that works for very large data sets?
In the example above, the result I want is:
Participant Closest_Ctrl
1 part_A ctrl_1
2 part_B ctrl_3
3 part_C ctrl_5
Here is a solution that should be sufficiently fast for a not-too-big number of participants:
ctrl <- do.call(cbind, mget(ls(pattern = "ctrl_\\d+")))
dat <- mget(ls(pattern = "part_[[:upper:]]"))
res <- vapply(dat, function(x) colnames(ctrl)[which.min(sqrt(colSums((x - ctrl)^2)))],
              FUN.VALUE = character(1))
stack(res)
# values ind
#1 ctrl_1 part_A
#2 ctrl_3 part_B
#3 ctrl_5 part_C
If this is too slow, I would quickly code it in Rcpp.
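For reference, here is a minimal sketch of what such an Rcpp helper might look like (my addition, not part of the original answer; closest_ctrl is a hypothetical helper that returns, for each participant column, the 1-based index of the nearest control column):
# Hypothetical Rcpp sketch; assumes participants and controls are stored as
# columns of two numeric matrices with the same number of rows (characteristics).
Rcpp::cppFunction('
IntegerVector closest_ctrl(NumericMatrix parts, NumericMatrix ctrl) {
  int np = parts.ncol(), nc = ctrl.ncol(), k = parts.nrow();
  IntegerVector out(np);
  for (int i = 0; i < np; ++i) {
    double best = R_PosInf;
    int best_j = 0;
    for (int j = 0; j < nc; ++j) {
      double d = 0.0;
      for (int r = 0; r < k; ++r) {
        double diff = parts(r, i) - ctrl(r, j);
        d += diff * diff;
      }
      if (d < best) { best = d; best_j = j; }
    }
    out[i] = best_j + 1;  // 1-based index into the control columns
  }
  return out;
}')
parts_mat <- do.call(cbind, dat)
data.frame(Participant  = colnames(parts_mat),
           Closest_Ctrl = colnames(ctrl)[closest_ctrl(parts_mat, ctrl)])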
Convert input to data frames
parts <- do.call(data.frame, mget(ls(pattern = "part_[A-C]")))
ctrl <- do.call(data.frame, mget(ls(pattern = "ctrl_[1-5]")))
Generate output
# calculate distances
dists <- outer(parts, ctrl, Vectorize(function(x, y) sqrt(sum((x - y)^2))))
# generate output by calculating column with min value (max negative value)
data.frame(Participant = names(parts),
           Closest_Ctrl = names(ctrl)[max.col(-dists)])
# Participant Closest_Ctrl
# 1 part_A ctrl_1
# 2 part_B ctrl_3
# 3 part_C ctrl_5
Benchmark
parts <- do.call(data.frame, mget(ls(pattern = "part_[A-C]")))
ctrl <- do.call(data.frame, mget(ls(pattern = "ctrl_[1-5]")))
parts <- do.call(cbind, replicate(100, parts, simplify = F))
ctrl <- do.call(cbind, replicate(100, ctrl, simplify = F))
library(magrittr) # for %>%

f1 <- function() {
  dists <- outer(parts, ctrl, Vectorize(function(x, y) sqrt(sum((x - y)^2))))
  # generate output by picking the column with the minimum value (max negative value)
  data.frame(Participant = names(parts),
             Closest_Ctrl = names(ctrl)[max.col(-dists)])
}
f2 <- function() {
  res <- vapply(parts, function(x) colnames(ctrl)[which.min(sqrt(colSums((x - ctrl)^2)))],
                FUN.VALUE = character(1))
  stack(res)
}

r1 <- f1()
r2 <- f2()
all.equal(r1 %>% lapply(as.factor) %>% setNames(1:2),
          r2[2:1] %>% lapply(as.factor) %>% setNames(1:2))
# [1] TRUE
microbenchmark::microbenchmark(f1(), f2(), times = 5)
# Unit: milliseconds
# expr min lq mean median uq max neval
# f1() 305.7324 314.8356 435.3961 324.6116 461.4788 770.3221 5
# f2() 12359.6995 12831.7995 13567.8296 13616.5216 14244.0836 14787.0438 5
Benchmark 2
parts <- do.call(data.frame, mget(ls(pattern = "part_[A-C]")))
ctrl <- do.call(data.frame, mget(ls(pattern = "ctrl_[1-5]")))
parts <- do.call(cbind, replicate(10, parts, simplify = F))
ctrl <- do.call(cbind, replicate(10*1000, ctrl, simplify = F))
f1 <- function() {
  dists <- outer(parts, ctrl, Vectorize(function(x, y) sqrt(sum((x - y)^2))))
  # generate output by picking the column with the minimum value (max negative value)
  data.frame(Participant = names(parts),
             Closest_Ctrl = names(ctrl)[max.col(-dists)])
}
f2 <- function() {
  res <- vapply(parts, function(x) colnames(ctrl)[which.min(sqrt(colSums((x - ctrl)^2)))],
                FUN.VALUE = character(1))
  stack(res)
}

r1 <- f1()
r2 <- f2()
all.equal(r1 %>% lapply(as.factor) %>% setNames(1:2),
          r2[2:1] %>% lapply(as.factor) %>% setNames(1:2))
# [1] TRUE
microbenchmark::microbenchmark(f1(), f2(), times = 5)
# Unit: seconds
# expr min lq mean median uq max neval
# f1() 3.450176 4.211997 4.493805 4.339818 5.154191 5.312844 5
# f2() 119.120484 124.280423 132.637003 130.858727 131.148630 157.776749 5
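Not one of the benchmarked functions above, but for completeness: the whole distance matrix can also be obtained in one shot from the identity |a - b|^2 = |a|^2 + |b|^2 - 2*a.b, which avoids both outer()/Vectorize() and the per-participant loop (a hedged sketch, untested against the benchmarks above):
P <- as.matrix(parts)
C <- as.matrix(ctrl)
# squared distances: d2[i, j] = |P[, i]|^2 + |C[, j]|^2 - 2 * P[, i] . C[, j]
d2 <- outer(colSums(P^2), colSums(C^2), "+") - 2 * crossprod(P, C)
data.frame(Participant  = colnames(P),
           Closest_Ctrl = colnames(C)[max.col(-d2)])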
Related
I have a symmetric matrix Siginv and a list Z containing N matrices of size TxK. I need the matrix t(Z.m) %*% (Siginv %x% I) %*% Z.m, where Z.m is the row-bound stack of the N matrices and I is the TxT identity; because the (i, j) block of Siginv %x% I is Siginv[i, j] * I, this equals the sum over i and j of Siginv[i, j] * t(Z[[i]]) %*% Z[[j]], which is what the second and third approaches below exploit.
I have three approaches to compute what I need (see below). The first one literally takes the formula and is slow, as expected. Are there faster ways of doing what I do?
library(microbenchmark)
K <- 2
N <- 50
Siginv <- matrix(rnorm(N^2), ncol=N)
Siginv[lower.tri(Siginv)] <- t(Siginv)[lower.tri(Siginv)] # just some symmetric matrix
Tdim <- 400
Z <- replicate(N, matrix(rnorm(Tdim*K), ncol=K), simplify = F)
microbenchmark({
I <- diag(Tdim)
Z.m <- do.call(rbind, Z)
meat.mat.GLS.kp <- t(Z.m)%*%(Siginv%x%I)%*%Z.m
}, {
combs <- expand.grid(1:N, 1:N)
cprods.GLS <- mapply(function(i,j) Siginv[j,i]*t(Z[[i]])%*%Z[[j]], combs[,1], combs[,2], SIMPLIFY = F)
meat.mat.GLS <- Reduce("+", cprods.GLS)
}, {
combs <- expand.grid(1:N, 1:N)
cprods.GLS2 <- mapply(function(i,j) Siginv[j,i]*crossprod(Z[[i]],Z[[j]]), combs[,1], combs[,2], SIMPLIFY = F)
meat.mat.GLS2 <- Reduce("+", cprods.GLS2)
}, times=5)
all.equal(meat.mat.GLS.kp, meat.mat.GLS)  # TRUE
all.equal(meat.mat.GLS.kp, meat.mat.GLS2) # TRUE
Currently, the approaches compare as follows (rows in the same order as the three approaches above):
min lq mean median uq max neval cld
4499.66564 4911.42674 4874.35170 4958.81553 4977.55873 5024.29187 5 b
23.03861 23.09293 23.82407 23.29574 24.04696 25.64611 5 a
12.92261 13.08275 13.54088 13.15898 13.80212 14.73794 5 a
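One further idea worth testing (my addition, a hedged sketch rather than a benchmarked approach): keep the Kronecker product sparse via the Matrix package, so that Siginv %x% I is never materialised as a dense (N*T)x(N*T) matrix:
library(Matrix)
Z.m <- do.call(rbind, Z)
# Diagonal(Tdim) is a sparse identity, so the Kronecker product stays sparse
meat.mat.sparse <- as.matrix(crossprod(Z.m, (Siginv %x% Diagonal(Tdim)) %*% Z.m))
all.equal(meat.mat.sparse, meat.mat.GLS.kp)
Whether this beats the crossprod() loop will depend on N, Tdim and K, so it needs benchmarking on the real problem sizes.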
I have a list of 50000 string vectors, consisting of various combinations of 6000 unique strings.
Goal: I want to transform them into relative frequencies (table(x)/length(x)) and store them in a
sparse matrix. Low memory consumption is more important than speed; currently, memory is the bottleneck.
(Even though the source data is about ~50 MB and the data in the target format ~10 MB, the transformation seems to be inefficient.)
Generate sample data
dims <- c(50000, 6000)
nms <- paste0("A", 1:dims[2])
lengths <- sample(5:30, dims[1], replace = T)
data <- lapply(lengths, sample, x = nms, replace = T)
Possible attempts:
1) sapply() with simplify to sparse matrix?
library(Matrix)
sparseRow <- function(stringVec){
  relFreq <- c(table(factor(stringVec, levels = nms)) / length(stringVec))
  Matrix(relFreq, 1, dims[2], sparse = TRUE)
}
sparseRows <- sapply(data[1:5], sparseRow)
sparseMat <- do.call(rbind, sparseRows)
Problem: My bottleneck seems to be sparseRows, as the rows are not combined directly into a sparse matrix.
(If I run the code above on the full sample, I get Error: cannot allocate vector of size 194 Kb followed by
Error during wrapup: memory exhausted (limit reached?) - my hardware has 8 GB RAM.)
Obviously, building a list of rows and only then combining them consumes more memory than filling
the sparse matrix directly.
--> So using (s/l)apply is not memory-friendly in my case?
object.size(sparseRows)
object.size(sparseMat)
2) Dirty workaround(?)
It seems my goal boils down to creating an empty sparse matrix and filling it row-wise. Below is a dirty way to do it (which works
on my hardware).
indxs <- lapply(data, function(data) sapply(data, function(x) which(x == nms),
                                            USE.NAMES = FALSE))
relFreq <- lapply(indxs, function(idx) table(idx)/length(idx))
mm <- Matrix(0, nrow = dims[1], ncol = dims[2])
for(idx in 1:dims[1]){
  mm[idx, as.numeric(names(relFreq[[idx]]))] <- as.numeric(relFreq[[idx]])
}
#sapply(1:dims[1], function(idx) mm[idx,
# as.numeric(names(relFreq[[idx]]))] <<- as.numeric(relFreq[[idx]]))
I would like to ask if there is a more elegant/efficient way to achieve this with the lowest amount of RAM possible.
I would convert to data.table and then do the necessary calculations:
library(data.table)
library(Matrix)

ld <- lengths(data)
D <- data.table(val = unlist(data),
                id = rep(1:length(data), times = ld),
                Ntotal = rep(ld, times = ld))
D <- D[, .N, keyby = .(id, val, Ntotal)]
D[, freq := N/Ntotal]
ii <- data.table(val = nms, ind = seq_along(nms))
D <- ii[D, on = 'val']
sp <- with(D, sparseMatrix(i = id, j = ind, x = freq,
                           dims = c(max(id), length(nms))))
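For reference, the same triplet construction can also be written without data.table (my addition, a hedged sketch; it relies on sparseMatrix() summing duplicated (i, j) pairs when compressing, which is its documented default):
i <- rep(seq_along(data), lengths(data))
j <- match(unlist(data), nms)
sp2 <- sparseMatrix(i = i, j = j, x = 1 / lengths(data)[i],
                    dims = c(length(data), length(nms)))
all.equal(sp, sp2)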
Benchmarks for n = 100
data2 <- data[1:100]
Unit: milliseconds
expr min lq mean median uq max neval cld
OP 102.150200 106.235148 113.117848 109.98310 116.79734 142.859832 10 b
F. Privé 122.314496 123.804442 149.999595 126.76936 164.97166 233.034447 10 c
minem 5.617658 5.827209 6.307891 6.10946 6.15137 9.199257 10 a
user20650 11.012509 11.752350 13.580099 12.59034 14.31870 21.961725 10 a
Benchmarks on all data
Let's benchmark the 3 fastest functions, because the rest (the OP's, user20650_v1 and F.Privé's) would be too slow on all of the data.
user20650_v2 <- function(x) {
  dt2 = data.table(lst = rep(1:length(x), lengths(x)),
                   V1 = unlist(x))
  dt2[, V1 := factor(V1, levels = nms)]
  x3 = xtabs(~ lst + V1, data = dt2, sparse = TRUE)
  x3/rowSums(x3)
}
user20650_v3 <- function(x) {
  x3 = xtabs(~ rep(1:length(x), lengths(x)) + factor(unlist(x), levels = nms),
             sparse = TRUE)
  x3/rowSums(x3)
}
minem <- function(x) {
  ld <- lengths(x)
  D <- data.table(val = unlist(x), id = rep(1:length(x), times = ld),
                  Ntotal = rep(ld, times = ld))
  D <- D[, .N, keyby = .(id, val, Ntotal)]
  D[, freq := N/Ntotal]
  ii <- data.table(val = nms, ind = seq_along(nms))
  D <- ii[D, on = 'val']
  sparseMatrix(i = D$id, j = D$ind, x = D$freq,
               dims = c(max(D$id), length(nms)))
}
Compare the results of minem and user20650_v3:
x1 <- minem(data)
x2 <- user20650_v3(data)
all.equal(x1, x2)
# [1] "Component “Dimnames”: names for current but not for target"
# [2] "Component “Dimnames”: Component 1: target is NULL, current is character"
# [3] "Component “Dimnames”: Component 2: target is NULL, current is character"
# [4] "names for target but not for current"
x2 has additional names; remove them:
dimnames(x2) <- names(x2@x) <- NULL
all.equal(x1, x2)
# [1] TRUE # all equal
Timings:
x <- bench::mark(minem(data),
user20650_v2(data),
user20650_v3(data),
iterations = 5, check = F)
as.data.table(x)[, 1:10]
# expression min mean median max itr/sec mem_alloc n_gc n_itr total_time
# 1: minem(data) 324ms 345ms 352ms 371ms 2.896187 141MB 7 5 1.73s
# 2: user20650_v2(data) 604ms 648ms 624ms 759ms 1.544380 222MB 10 5 3.24s
# 3: user20650_v3(data) 587ms 607ms 605ms 633ms 1.646977 209MB 10 5 3.04s
Regarding memory:
OPdirty <- function(x) {
  indxs <- lapply(x, function(x) sapply(x, function(x) which(x == nms),
                                        USE.NAMES = FALSE))
  relFreq <- lapply(indxs, function(idx) table(idx)/length(idx))
  dims <- c(length(indxs), length(nms))
  mm <- Matrix(0, nrow = dims[1], ncol = dims[2])
  for (idx in 1:dims[1]) {
    mm[idx, as.numeric(names(relFreq[[idx]]))] <- as.numeric(relFreq[[idx]])
  }
  mm
}
xx <- data[1:1000]
all.equal(OPdirty(xx), minem(xx))
# true
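FPrive() is not defined in this excerpt; presumably it wraps F.Privé's loop answer shown further below, roughly like this (a hedged reconstruction, my addition):
FPrive <- function(x) {
  res <- Matrix(0, length(nms), length(x), sparse = TRUE)
  for (i in seq_along(x)) {
    ind.match <- match(x[[i]], nms)
    tab.match <- table(ind.match)
    res[as.integer(names(tab.match)), i] <- as.vector(tab.match) / length(x[[i]])
  }
  t(res)
}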
x <- bench::mark(minem(xx),
FPrive(xx),
OPdirty(xx),
iterations = 3, check = T)
as.data.table(x)[, 1:10]
expression min mean median max itr/sec mem_alloc n_gc n_itr total_time
1: minem(xx) 12.69ms 14.11ms 12.71ms 16.93ms 70.8788647 3.04MB 0 3 42.33ms
2: FPrive(xx) 1.46s 1.48s 1.47s 1.52s 0.6740317 214.95MB 4 3 4.45s
3: OPdirty(xx) 2.12s 2.14s 2.15s 2.16s 0.4666106 914.91MB 9 3 6.43s
See column mem_alloc...
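Since memory is the main constraint, it can also help to look at the size of the final object itself (a quick hedged check, my addition; numbers will vary):
print(object.size(minem(data)), units = "Mb")
# compare with a dense 50000 x 6000 double matrix: 50000 * 6000 * 8 bytes, roughly 2.2 Gb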
Use a loop to fill a pre-allocated sparse matrix column-wise (and then transpose it):
res <- Matrix(0, dims[2], length(data), sparse = TRUE)
for (i in seq_along(data)) {
  ind.match <- match(data[[i]], nms)
  tab.match <- table(ind.match)
  res[as.integer(names(tab.match)), i] <- as.vector(tab.match) / length(data[[i]])
}
# Verif
stopifnot(identical(t(res), sparseMat))
Benchmark:
data2 <- data[1:50]
microbenchmark::microbenchmark(
  OP = {
    sparseMat <- do.call(rbind, sapply(data2, sparseRow))
  },
  ME = {
    res <- Matrix(0, dims[2], length(data2), sparse = TRUE)
    for (i in seq_along(data2)) {
      ind.match <- match(data2[[i]], nms)
      tab.match <- table(ind.match)
      res[as.integer(names(tab.match)), i] <- as.vector(tab.match) / length(data2[[i]])
    }
    res2 <- t(res)
  }
)
stopifnot(identical(res2, sparseMat))
Unit: milliseconds
expr min lq mean median uq max neval cld
OP 56.28020 59.61689 63.24816 61.16986 62.80294 206.18689 100 b
ME 46.60318 48.27268 49.77190 49.50714 50.92287 55.23727 100 a
So, it's memory-efficient and not that slow.
I have the following data frame 'df'.
Each participant (here 10 participants) saw several stimuli (here 100) and made
a judgment about each (here a random number). For each stimulus, I know the true
answer (here a random number; a different number for each stimulus, but always
the same answer for all participants).
library(dplyr)

participant <- rep(1:10, each = 100)
stimuli <- rep(1:100, 10)
judgment <- rnorm(1000)
df1 <- data.frame(participant, stimuli, judgment)
df2 <- data.frame(stimuli = 1:100, criterion = rnorm(100))
df <- merge(df1, df2, by = 'stimuli') %>% arrange(participant, stimuli)
Here is what I am trying to do:
1) Take n randomly selected participants (here n is between 1 and 10).
2) Compute the mean of their judgments per stimulus.
3) Compute the correlation between this mean and the true answer.
I want to perform steps 1-3 for every n (that is, take 1 randomly selected participant and perform steps 1-3, then take 2 randomly selected participants and perform steps 1-3, ..., up to 10 randomly selected participants).
The result should be a data frame with 10 rows and 2 variables: N and the correlation. I want to work only with dplyr.
My solution is based on lapply. Here it is:
participants_id <- unique(df$participant)
MyFun <- function(Data) {
  HelpFun <- function(x, Data) {
    # x is the number of participants to sample.
    # It will be used in the lapply call below.
    participants_x <- sample(participants_id, x)
    filter(Data, participant %in% participants_x) %>%
      group_by(stimuli) %>%
      summarise(mean_x = mean(judgment),
                criterion = unique(criterion)) %>%
      summarise(cor = cor(.$mean_x, .$criterion))
  }
  N <- length(unique(Data$participant))
  lapply(1:N, HelpFun, Data) %>% bind_rows()
}
MyFun(df)
The problem is that this code is slow. Since every selection is random, I
perform all of this 10,000 times, and that is slow. On my machine (Windows 10, 16 GB) 1,000 simulations take 2 minutes and 10,000 simulations take 20 minutes. (I also tried loops, but it did not help, although for some reason it was a little bit faster.) There has to be a faster solution; after all, the computations are not that complicated.
Below I run only 100 simulations, so as not to tie up your computer.
system.time(replicate(100, MyFun(df), simplify = FALSE ) %>% bind_rows())
Any idea about making all of this faster?
Using data.table and for loops, we can get a solution that is about 10 times faster.
My function:
minem <- function(n) { # n - simulation count
  require(data.table)
  participants_id <- unique(df$participant)
  N <- length(unique(df$participant))
  dt <- as.data.table(df)
  setkey(dt, stimuli)
  L <- list()
  for (j in 1:n) {
    corss <- rep(0, N)
    for (i in 1:N) {
      participants_x <- sample(participants_id, i)
      xx <- dt[participant %in% participants_x,
               .(mean_x = mean(judgment),
                 criterion = first(criterion)),
               by = stimuli]
      corss[i] <- cor(xx$mean_x, xx$criterion)
    }
    L[[j]] <- corss
  }
  unlist(L)
}
head(minem(10))
# [1] 0.13642499 -0.02078109 -0.14418400 0.04966805 -0.09108837 -0.15403185
Your function:
Meir <- function(n) {
replicate(n, MyFun(df), simplify = FALSE) %>% bind_rows()
}
Benchmarks:
microbenchmark::microbenchmark(
Meir(10),
minem(10),
times = 10)
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# Meir(10) 1897.6909 1956.3427 1986.5768 1973.5594 2043.4337 2048.5809 10 b
# minem(10) 193.5403 196.0426 201.4132 202.1085 204.9108 215.9961 10 a
This is around 10 times faster.
system.time(minem(1000)) # ~19 sec
Update
If your data size and memory limit allow, then you can do it much faster with this approach:
minem2 <- function(n) {
  require(data.table)
  participants_id <- unique(df$participant)
  N <- length(unique(df$participant))
  dt <- as.data.table(df)
  setkey(dt, participant)
  L <- lapply(1:n, function(x)
    sapply(1:N, function(i)
      sample(participants_id, i)))
  L <- unlist(L, recursive = F)
  names(L) <- 1:length(L)
  g <- sapply(seq_along(L), function(x) rep(names(L[x]), length(L[[x]])))
  L <- data.table(participant = unlist(L), .id = as.integer(unlist(g)),
                  key = "participant")
  L <- dt[L, allow.cartesian = TRUE]
  xx <- L[, .(mean_x = mean(judgment), criterion = first(criterion)),
          keyby = .(.id, stimuli)]
  xx <- xx[, cor(mean_x, criterion), keyby = .id][[2]]
  xx
}
microbenchmark::microbenchmark(
Meir(100),
minem(100),
minem2(100),
times = 2, unit = "relative")
# Unit: relative
# expr min lq mean median uq max neval cld
# Meir(100) 316.34965 316.34965 257.30832 257.30832 216.85190 216.85190 2 c
# minem(100) 31.49818 31.49818 26.48945 26.48945 23.05735 23.05735 2 b
# minem2(100) 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 2 a
But you will need to test it yourself.
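If the design is complete (every participant judged every stimulus, as in the example), an even simpler idea is to reshape the judgments once into a participants x stimuli matrix, so each draw is just a colMeans() call. This is my addition, a hedged sketch rather than a benchmarked function, and minem3 is a hypothetical name:
# assumes df is sorted by participant and stimuli, and df2 by stimuli
jm <- matrix(df$judgment[order(df$participant, df$stimuli)],
             nrow = length(unique(df$participant)), byrow = TRUE)
crit <- df2$criterion  # criterion for stimuli 1:100, in order
minem3 <- function(n) {
  N <- nrow(jm)
  unlist(lapply(1:n, function(j)
    sapply(1:N, function(i) {
      rows <- sample(N, i)
      cor(colMeans(jm[rows, , drop = FALSE]), crit)
    })))
}
head(minem3(1))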
I would like to transpose a nested list. Assume the following nested list x is given:
a <- list(c("a","b","c","d"))
b <- list(c("d","c","b","a"))
c <- list(c("4","3","2","1"))
d <- list(c("1","2","3","4"))
x <- list(a,b,c,d)
The outcome should be a nested list where the first "column" of the original list x becomes the first nested list element, i.e. "a","d","4","1"; the second column becomes the second nested list element, i.e. "b","c","3","2"; and so on. In the end the structure is a kind of transpose of the original structure. How can this be done in R?
We could also do without lapply (using matrix):
relist(matrix(unlist(x), ncol = 4, byrow = T), skeleton = x)
Benchmarking
library(microbenchmark)
a <- list(c("a","b","c","d"))
b <- list(c("d","c","b","a"))
c <- list(c("4","3","2","1"))
d <- list(c("1","2","3","4"))
x <- list(a,b,c,d)
f_akrun <- function(x) {m1 <- do.call(rbind, lapply(x, function(y) do.call(rbind, y)));relist(m1, skeleton = x);}
f_m0h3n <- function(x) {relist(matrix(unlist(x), ncol = length(x[[1]][[1]]), byrow = T), skeleton = x)}
setequal(f_akrun(x), f_m0h3n(x))
# [1] TRUE
microbenchmark(f_akrun(x), f_m0h3n(x))
# Unit: microseconds
# expr min lq mean median uq max neval
# f_akrun(x) 135.591 137.301 144.3545 138.585 148.422 334.484 100
# f_m0h3n(x) 110.782 111.638 116.5477 112.493 117.412 212.153 100
We can try
m1 <- do.call(rbind, lapply(x, function(y) do.call(rbind, y)))
relist(m1, skeleton = x)
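Another option (my addition, a hedged sketch that assumes every inner vector has the same length, as in the example) is to build the transposed structure directly with vapply:
lapply(seq_along(x[[1]][[1]]), function(i)
  list(vapply(x, function(y) y[[1]][i], character(1))))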
I have the following vector:
x = c(1, 2, 3, 10, 20, 30)
At each index, 3 consecutive elements are summed, resulting in the following vector:
c(6, 15, 33, 60)
Thus, the first element is 1 + 2 + 3 = 6, the second element is 2 + 3 + 10 = 15, etc.
What you have is a vector, not an array. You can use the rollapply function from the zoo package to get what you need.
> x <- c(1, 2, 3, 10, 20, 30)
> library(zoo)
> rollapply(x, 3, sum)
[1] 6 15 33 60
Take a look at ?rollapply for further details on what rollapply does and how to use it.
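If you want the result padded to the original length, rollapply (and its convenience wrapper rollsum) take fill and align arguments, for example:
rollsum(x, 3)                                    # same as rollapply(x, 3, sum)
rollapply(x, 3, sum, fill = NA, align = "right")
# [1] NA NA  6 15 33 60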
I put together a package for handling these kinds of 'roll'ing functions that offers functionality similar to zoo's rollapply, but with Rcpp on the backend. Check out RcppRoll on CRAN.
library(microbenchmark)
library(zoo)
library(RcppRoll)
x <- rnorm(1E5)
all.equal( m1 <- rollapply(x, 3, sum), m2 <- roll_sum(x, 3) )
## from flodel
rsum.cumsum <- function(x, n = 3L) {
tail(cumsum(x) - cumsum(c(rep(0, n), head(x, -n))), -n + 1)
}
microbenchmark(
unit="ms",
times=10,
rollapply(x, 3, sum),
roll_sum(x, 3),
rsum.cumsum(x, 3)
)
gives me
Unit: milliseconds
expr min lq median uq max neval
rollapply(x, 3, sum) 1056.646058 1068.867550 1076.550463 1113.71012 1131.230825 10
roll_sum(x, 3) 0.405992 0.442928 0.457642 0.51770 0.574455 10
rsum.cumsum(x, 3) 2.610119 2.821823 6.469593 11.33624 53.798711 10
You might find it useful if speed is a concern.
If speed is a concern, you could use a convolution filter and chop off the ends:
rsum.filter <- function(x, n = 3L) filter(x, rep(1, n))[-c(1, length(x))]
Or even faster, write it as the difference between two cumulative sums:
rsum.cumsum <- function(x, n = 3L) tail(cumsum(x) - cumsum(c(rep(0, n), head(x, -n))), -n + 1)
Both use base functions only. Some benchmarks:
x <- sample(1:1000)
rsum.rollapply <- function(x, n = 3L) rollapply(x, n, sum)
rsum.sapply <- function(x, n = 3L) sapply(1:(length(x)-n+1),function(i){
sum(x[i:(i+n-1)])})
library(microbenchmark)
microbenchmark(
rsum.rollapply(x),
rsum.sapply(x),
rsum.filter(x),
rsum.cumsum(x)
)
# Unit: microseconds
# expr min lq median uq max neval
# rsum.rollapply(x) 12891.315 13267.103 14635.002 17081.5860 28059.998 100
# rsum.sapply(x) 4287.533 4433.180 4547.126 5148.0205 12967.866 100
# rsum.filter(x) 170.165 208.661 269.648 290.2465 427.250 100
# rsum.cumsum(x) 97.539 130.289 142.889 159.3055 449.237 100
Also I imagine all methods will be faster if x and all applied weights were integers instead of numerics.
Using just the base R you could do:
v <- c(1, 2, 3, 10, 20, 30)
grp <- 3
res <- sapply(1:(length(v)-grp+1),function(x){sum(v[x:(x+grp-1)])})
> res
[1] 6 15 33 60
Another way, faster than sapply (comparable to @flodel's rsum.cumsum), is the following:
res <- rowSums(outer(1:(length(v)-grp+1),1:grp,FUN=function(i,j){v[(j - 1) + i]}))
Here's flodel's benchmark updated:
x <- sample(1:1000)
rsum.rollapply <- function(x, n = 3L) rollapply(x, n, sum)
rsum.sapply <- function(x, n = 3L) sapply(1:(length(x)-n+1),function(i){sum(x[i:(i+n-1)])})
rsum.filter <- function(x, n = 3L) filter(x, rep(1, n))[-c(1, length(x))]
rsum.cumsum <- function(x, n = 3L) tail(cumsum(x) - cumsum(c(rep(0, n), head(x, -n))), -n + 1)
rsum.outer <- function(x, n = 3L) rowSums(outer(1:(length(x)-n+1),1:n,FUN=function(i,j){x[(j - 1) + i]}))
library(microbenchmark)
microbenchmark(
rsum.rollapply(x),
rsum.sapply(x),
rsum.filter(x),
rsum.cumsum(x),
rsum.outer(x)
)
# Unit: microseconds
# expr min lq median uq max neval
# rsum.rollapply(x) 9464.495 9929.4480 10223.2040 10752.7960 11808.779 100
# rsum.sapply(x) 3013.394 3251.1510 3466.9875 4031.6195 7029.333 100
# rsum.filter(x) 161.278 178.7185 229.7575 242.2375 359.676 100
# rsum.cumsum(x) 65.280 70.0800 88.1600 95.1995 181.758 100
# rsum.outer(x) 66.880 73.7600 82.8795 87.0400 131.519 100
If you need real speed, try
rsum.cumdiff <- function(x, n = 3L) (cs <- cumsum(x))[-(1:(n-1))] - c(0,cs[1:(length(x)-n)])
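A quick check that it reproduces the expected output from the question:
rsum.cumdiff(c(1, 2, 3, 10, 20, 30))
# [1]  6 15 33 60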
It's all in base R, and updating flodel's microbenchmark speaks for itself
x <- sample(1:1000)
rsum.rollapply <- function(x, n = 3L) rollapply(x, n, sum)
rsum.sapply <- function(x, n = 3L) sapply(1:(length(x)-n+1),function(i){sum(x[i:(i+n-1)])})
rsum.filter <- function(x, n = 3L) filter(x, rep(1, n))[-c(1, length(x))]
rsum.cumsum <- function(x, n = 3L) tail(cumsum(x) - cumsum(c(rep(0, n), head(x, -n))), -n + 1)
rsum.outer <- function(x, n = 3L) rowSums(outer(1:(length(x)-n+1),1:n,FUN=function(i,j){x[(j - 1) + i]}))
rsum.cumdiff <- function(x, n = 3L) (cs <- cumsum(x))[-(1:(n-1))] - c(0, cs[1:(length(x)-n)])
all.equal(rsum.rollapply(x), rsum.sapply(x))
# [1] TRUE
all.equal(rsum.sapply(x), rsum.filter(x))
# [1] TRUE
all.equal(rsum.filter(x), rsum.outer(x))
# [1] TRUE
all.equal(rsum.outer(x), rsum.cumsum(x))
# [1] TRUE
all.equal(rsum.cumsum(x), rsum.cumdiff(x))
# [1] TRUE
library(microbenchmark)
microbenchmark(
rsum.rollapply(x),
rsum.sapply(x),
rsum.filter(x),
rsum.cumsum(x),
rsum.outer(x),
rsum.cumdiff(x)
)
# Unit: microseconds
# expr min lq mean median uq max neval
# rsum.rollapply(x) 3369.211 4104.2415 4630.89799 4391.7560 4767.2710 12002.904 100
# rsum.sapply(x) 850.425 999.2730 1355.56383 1086.0610 1246.5450 6915.877 100
# rsum.filter(x) 48.970 67.1525 97.28568 96.2430 113.6975 248.728 100
# rsum.cumsum(x) 47.515 62.7885 89.12085 82.1825 106.6675 230.303 100
# rsum.outer(x) 69.819 85.3340 160.30133 92.6070 109.0920 5740.119 100
# rsum.cumdiff(x) 9.698 12.6070 70.01785 14.3040 17.4555 5346.423 100
## R version 3.5.1 "Feather Spray"
## zoo and microbenchmark compiled under R 3.5.3
Oddly enough, everything is faster the second time through microbenchmark (most likely because the functions were byte-compiled by the JIT on first use and the caches are warm):
microbenchmark(
rsum.rollapply(x),
rsum.sapply(x),
rsum.filter(x),
rsum.cumsum(x),
rsum.outer(x),
rsum.cumdiff(x)
)
# Unit: microseconds
# expr min lq mean median uq max neval
# rsum.rollapply(x) 3127.272 3477.5750 3869.38566 3593.4540 3858.9080 7836.603 100
# rsum.sapply(x) 844.122 914.4245 1059.89841 965.3335 1032.2425 5184.968 100
# rsum.filter(x) 47.031 60.8490 80.53420 74.1830 90.9100 260.365 100
# rsum.cumsum(x) 45.092 55.2740 69.90630 64.4855 81.4555 122.668 100
# rsum.outer(x) 68.850 76.6070 88.49533 82.1825 91.8800 166.304 100
# rsum.cumdiff(x) 9.213 11.1520 13.18387 12.1225 13.5770 49.456 100
The runner library may also be used:
x <- c(1, 2, 3, 10, 20, 30)
runner::sum_run(x, k=3, na_pad = T)
#> [1] NA NA 6 15 33 60
or the slider package is also useful:
x <- c(1, 2, 3, 10, 20, 30)
slider::slide_sum(x, before = 2, complete = T)
#> [1] NA NA 6 15 33 60
Created on 2021-06-14 by the reprex package (v2.0.0)
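data.table's frollsum() is another fast option (my addition, not shown in the answers above); with its default right alignment it pads with NA just like the runner and slider calls:
x <- c(1, 2, 3, 10, 20, 30)
data.table::frollsum(x, 3)
#> [1] NA NA  6 15 33 60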