I have a list of 50000 string vectors, consisting of various combinations of 6000 unique strings.
Goal: I want to transform them into "relative frequencies" (table(x)/length(x)) and store them in a
sparse matrix. Low memory consumption is more important than speed; currently memory is the bottleneck.
(Even though the source data is about 50 MB and the data in the target format about 10 MB, the transformation seems to be inefficient.)
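For a single vector the transformation is just the counts divided by the length; a minimal illustration (toy strings of mine, not the sample data below):
x <- c("A1", "A3", "A1", "A7")
table(x) / length(x)
# x
#   A1   A3   A7
# 0.50 0.25 0.25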
Generate sample data
dims <- c(50000, 6000)
nms <- paste0("A", 1:dims[2])
lengths <- sample(5:30, dims[1], replace = T)
data <- lapply(lengths, sample, x = nms, replace = T)
Possible attempts:
1) sapply() with simplify to sparse matrix?
library(Matrix)
sparseRow <- function(stringVec){
relFreq <- c(table(factor(stringVec, levels = nms)) / length(stringVec))
Matrix(relFreq, 1, dims[2], sparse = TRUE)
}
sparseRows <- sapply(data[1:5], sparseRow)
sparseMat <- do.call(rbind, sparseRows)
Problem: My bottleneck seems to be sparseRows, as the rows are not directly combined into a sparse matrix.
(If I run the code above on the full sample, I get Error: cannot allocate vector of size 194 Kb followed by
Error during wrapup: memory exhausted (limit reached?) - my hardware has 8 GB RAM.)
Obviously there is more memory consumption from creating a list of rows before combining them, instead of filling
the sparse matrix directly.
--> So using (s/l)apply is not memory friendly in my case?
object.size(sparseRows)
object.size(sparseMat)
2) Dirty workaround(?)
Essentially I want to create an empty sparse matrix and fill it row-wise. Below is a dirty way to do it (which works
on my hardware).
indxs <- lapply(data, function(data) sapply(data, function(x) which(x == nms),
USE.NAMES = FALSE))
relFreq <- lapply(indxs, function(idx) table(idx)/length(idx))
mm <- Matrix(0, nrow = dims[1], ncol = dims[2])
for(idx in 1:dims[1]){
mm[idx, as.numeric(names(relFreq[[idx]]))] <- as.numeric(relFreq[[idx]])
}
#sapply(1:dims[1], function(idx) mm[idx,
# as.numeric(names(relFreq[[idx]]))] <<- as.numeric(relFreq[[idx]]))
I would like to ask if there is a more elegant/efficient way to achieve this with the lowest possible amount of RAM.
I would convert to data.table and then do the necessary calculations:
library(data.table)
ld <- lengths(data)
D <- data.table(val = unlist(data),
id = rep(1:length(data), times = ld),
Ntotal = rep(ld, times = ld))
D <- D[, .N, keyby = .(id, val, Ntotal)]
D[, freq := N/Ntotal]
ii <- data.table(val = nms, ind = seq_along(nms))
D <- ii[D, on = 'val']
sp <- with(D, sparseMatrix(i = id, j = ind, x = freq,
dims = c(max(id), length(nms))))
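sparseMatrix() builds the result directly from (i, j, x) triplets, so no dense intermediate is created. The result has no dimnames; if column names are wanted, they can be attached afterwards (an optional line of mine, not in the original answer):
dimnames(sp) <- list(NULL, nms)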
Benchmarks for n = 100
data2 <- data[1:100]
Unit: milliseconds
expr min lq mean median uq max neval cld
OP 102.150200 106.235148 113.117848 109.98310 116.79734 142.859832 10 b
F. Privé 122.314496 123.804442 149.999595 126.76936 164.97166 233.034447 10 c
minem 5.617658 5.827209 6.307891 6.10946 6.15137 9.199257 10 a
user20650 11.012509 11.752350 13.580099 12.59034 14.31870 21.961725 10 a
Benchmarks on all data
Let's benchmark the 3 fastest functions, because the rest (OP's, user20650_v1, and F.Privé's) would be too slow on all of the data.
user20650_v2 <- function(x) {
dt2 = data.table(lst = rep(1:length(x), lengths(x)),
V1 = unlist(x))
dt2[, V1 := factor(V1, levels = nms)]
x3 = xtabs(~ lst + V1, data = dt2, sparse = TRUE)
x3/rowSums(x3)
}
user20650_v3 <- function(x) {
x3 = xtabs(~ rep(1:length(x), lengths(x)) + factor(unlist(x), levels = nms),
sparse = TRUE)
x3/rowSums(x3)
}
minem <- function(x) {
ld <- lengths(x)
D <- data.table(val = unlist(x), id = rep(1:length(x), times = ld),
Ntotal = rep(ld, times = ld))
D <- D[, .N, keyby = .(id, val, Ntotal)]
D[, freq := N/Ntotal]
ii <- data.table(val = nms, ind = seq_along(nms))
D <- ii[D, on = 'val']
sparseMatrix(i = D$id, j = D$ind, x = D$freq,
dims = c(max(D$id), length(nms)))
}
Compare the results of minem and user20650_v3:
x1 <- minem(data)
x2 <- user20650_v3(data)
all.equal(x1, x2)
# [1] "Component “Dimnames”: names for current but not for target"
# [2] "Component “Dimnames”: Component 1: target is NULL, current is character"
# [3] "Component “Dimnames”: Component 2: target is NULL, current is character"
# [4] "names for target but not for current"
x2 has additional names; remove them:
dimnames(x2) <- names(x2@x) <- NULL
all.equal(x1, x2)
# [1] TRUE # all equal
Timings:
x <- bench::mark(minem(data),
user20650_v2(data),
user20650_v3(data),
iterations = 5, check = F)
as.data.table(x)[, 1:10]
# expression min mean median max itr/sec mem_alloc n_gc n_itr total_time
# 1: minem(data) 324ms 345ms 352ms 371ms 2.896187 141MB 7 5 1.73s
# 2: user20650_v2(data) 604ms 648ms 624ms 759ms 1.544380 222MB 10 5 3.24s
# 3: user20650_v3(data) 587ms 607ms 605ms 633ms 1.646977 209MB 10 5 3.04s
Regarding memory:
OPdirty <- function(x) {
indxs <- lapply(x, function(x) sapply(x, function(x) which(x == nms),
USE.NAMES = FALSE))
relFreq <- lapply(indxs, function(idx) table(idx)/length(idx))
dims <- c(length(indxs), length(nms))
mm <- Matrix(0, nrow = dims[1], ncol = dims[2])
for (idx in 1:dims[1]) {
mm[idx, as.numeric(names(relFreq[[idx]]))] <- as.numeric(relFreq[[idx]])
}
mm
}
xx <- data[1:1000]
all.equal(OPdirty(xx), minem(xx))
# TRUE
x <- bench::mark(minem(xx),
FPrive(xx),
OPdirty(xx),
iterations = 3, check = T)
as.data.table(x)[, 1:10]
expression min mean median max itr/sec mem_alloc n_gc n_itr total_time
1: minem(xx) 12.69ms 14.11ms 12.71ms 16.93ms 70.8788647 3.04MB 0 3 42.33ms
2: FPrive(xx) 1.46s 1.48s 1.47s 1.52s 0.6740317 214.95MB 4 3 4.45s
3: OPdirty(xx) 2.12s 2.14s 2.15s 2.16s 0.4666106 914.91MB 9 3 6.43s
See column mem_alloc...
Use a loop to fill a pre-allocated sparse matrix column-wise (and then transpose it):
res <- Matrix(0, dims[2], length(data), sparse = TRUE)
for (i in seq_along(data)) {
ind.match <- match(data[[i]], nms)
tab.match <- table(ind.match)
res[as.integer(names(tab.match)), i] <- as.vector(tab.match) / length(data[[i]])
}
# Verif
stopifnot(identical(t(res), sparseMat))
Benchmark:
data2 <- data[1:50]
microbenchmark::microbenchmark(
OP = {
sparseMat <- do.call(rbind, sapply(data2, sparseRow))
},
ME = {
res <- Matrix(0, dims[2], length(data2), sparse = TRUE)
for (i in seq_along(data2)) {
ind.match <- match(data2[[i]], nms)
tab.match <- table(ind.match)
res[as.integer(names(tab.match)), i] <- as.vector(tab.match) / length(data2[[i]])
}
res2 <- t(res)
}
)
stopifnot(identical(res2, sparseMat))
Unit: milliseconds
expr min lq mean median uq max neval cld
OP 56.28020 59.61689 63.24816 61.16986 62.80294 206.18689 100 b
ME 46.60318 48.27268 49.77190 49.50714 50.92287 55.23727 100 a
So, it's memory-efficient and not that slow.
Related
Say you have some participants and controls in a given experiment that are evaluated on three characteristics, something like this:
part_A <- c(3, 5, 4)
part_B <- c(12, 15, 18)
part_C <- c(50, 40, 45)
ctrl_1 <- c(4, 5, 5)
ctrl_2 <- c(1, 0, 4)
ctrl_3 <- c(13, 16, 17)
ctrl_4 <- c(28, 30, 35)
ctrl_5 <- c(51, 43, 44)
I want to find for each participant which control case is the closest match.
If I used the dist() function, I could get it, but it would take a lot of time also calculating the distances between controls, which is useless to me (and in the real data, there are 1000 times more control cases than participant cases).
Is there a way to ask for the distances from each of these elements to each of those elements? And something that works for very large data sets?
In the example above, the result I want is:
Participant Closest_Ctrl
1 part_A ctrl_1
2 part_B ctrl_3
3 part_C ctrl_5
Here is a solution that should be sufficiently fast for a not-too-big number of participants:
ctrl <- do.call(cbind, mget(ls(pattern = "ctrl_\\d+")))
dat <- mget(ls(pattern = "part_[[:upper:]]"))
res <- vapply(dat, function(x) colnames(ctrl)[which.min(sqrt(colSums((x - ctrl)^2)))],
FUN.VALUE = character(1))
stack(res)
# values ind
#1 ctrl_1 part_A
#2 ctrl_3 part_B
#3 ctrl_5 part_C
If this is too slow I would quickly code it in Rcpp.
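As a middle ground before reaching for Rcpp, all squared Euclidean cross-distances can be computed with a single matrix product via the identity |a - b|^2 = |a|^2 + |b|^2 - 2*a.b. A sketch of mine (an alternative, not the answer's code), reusing the ctrl matrix and the dat list built above:
parts_mat <- do.call(cbind, dat)          # one column per participant
cross_dist_sq <- function(P, C) {
  # squared distances between the columns of P and the columns of C
  outer(colSums(P^2), colSums(C^2), "+") - 2 * crossprod(P, C)
}
d2 <- cross_dist_sq(parts_mat, ctrl)
data.frame(Participant  = colnames(parts_mat),
           Closest_Ctrl = colnames(ctrl)[apply(d2, 1, which.min)])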
Convert input to data frames
parts <- do.call(data.frame, mget(ls(pattern = "part_[A-C]")))
ctrl <- do.call(data.frame, mget(ls(pattern = "ctrl_[1-5]")))
Generate output
# calculate distances
dists <- outer(parts, ctrl, Vectorize(function(x, y) sqrt(sum((x - y)^2))))
# generate output by calculating column with min value (max negative value)
data.frame(Participant = names(parts),
Closest_Ctrl = names(ctrl)[max.col(-dists)])
# Participant Closest_Ctrl
# 1 part_A ctrl_1
# 2 part_B ctrl_3
# 3 part_C ctrl_5
Benchmark
parts <- do.call(data.frame, mget(ls(pattern = "part_[A-C]")))
ctrl <- do.call(data.frame, mget(ls(pattern = "ctrl_[1-5]")))
parts <- do.call(cbind, replicate(100, parts, simplify = F))
ctrl <- do.call(cbind, replicate(100, ctrl, simplify = F))
r1 <- f1()
r2 <- f2()
all.equal(r1 %>% lapply(as.factor) %>% setNames(1:2),
r2[2:1] %>% lapply(as.factor) %>% setNames(1:2))
# [1] TRUE
f1 <- function(x){
dists <- outer(parts, ctrl, Vectorize(function(x, y) sqrt(sum((x - y)^2))))
# generate output by calculating column with min value (max negative value)
data.frame(Participant = names(parts),
Closest_Ctrl = names(ctrl)[max.col(-dists)])
}
f2 <- function(x){
res <- vapply(parts, function(x) colnames(ctrl)[which.min(sqrt(colSums((x - ctrl)^2)))],
FUN.VALUE = character(1))
stack(res)
}
microbenchmark::microbenchmark(f1(), f2(), times = 5)
# Unit: milliseconds
# expr min lq mean median uq max neval
# f1() 305.7324 314.8356 435.3961 324.6116 461.4788 770.3221 5
# f2() 12359.6995 12831.7995 13567.8296 13616.5216 14244.0836 14787.0438 5
Benchmark 2
parts <- do.call(data.frame, mget(ls(pattern = "part_[A-C]")))
ctrl <- do.call(data.frame, mget(ls(pattern = "ctrl_[1-5]")))
parts <- do.call(cbind, replicate(10, parts, simplify = F))
ctrl <- do.call(cbind, replicate(10*1000, ctrl, simplify = F))
r1 <- f1()
r2 <- f2()
all.equal(r1 %>% lapply(as.factor) %>% setNames(1:2),
r2[2:1] %>% lapply(as.factor) %>% setNames(1:2))
# [1] TRUE
f1 <- function(x){
dists <- outer(parts, ctrl, Vectorize(function(x, y) sqrt(sum((x - y)^2))))
# generate output by calculating column with min value (max negative value)
data.frame(Participant = names(parts),
Closest_Ctrl = names(ctrl)[max.col(-dists)])
}
f2 <- function(x){
res <- vapply(parts, function(x) colnames(ctrl)[which.min(sqrt(colSums((x - ctrl)^2)))],
FUN.VALUE = character(1))
stack(res)
}
microbenchmark::microbenchmark(f1(), f2(), times = 5)
# Unit: seconds
# expr min lq mean median uq max neval
# f1() 3.450176 4.211997 4.493805 4.339818 5.154191 5.312844 5
# f2() 119.120484 124.280423 132.637003 130.858727 131.148630 157.776749 5
I have the following data frame 'df'.
Each participant (here 10 participants) saw several stimuli (here 100) and made
a judgment about each (here a random number). For each stimulus, I know the true
answer (here a random number; a different number for each stimulus but always
the same answer for all participants).
library(dplyr)
participant <- rep(1:10, each=100)
stimuli <- rep(1:100, 10)
judgment <- rnorm(1000)
df1 <- data.frame(participant, stimuli, judgment)
df2 <- data.frame(stimuli=1:100, criterion=rnorm(100))
df <- merge(df1, df2, by='stimuli') %>% arrange(participant, stimuli)
Here is what I am trying to do:
1) Taking n randomly selected participants (here n is between 1 and 10).
2) Computing the mean of their judgments per stimulus.
3) Computing the correlation between this mean and the true answer.
I want to perform steps 1-3 for all n (that is, I want to take 1 randomly selected participant and perform steps 1-3, then take 2 randomly selected participants and perform steps 1-3, ... up to 10 randomly selected participants).
The results should be a data frame with 10 rows and 2 variables: N and the correlation. I want to work only with dplyr.
My solution is based on lapply. Here it is:
participants_id = unique (df$participant)
MyFun = function(Data) {
HelpFun = function(x, Data) {
# x is the index for the number of participants.
# It will be used in the lapply call below
participants_x = sample(participants_id, x)
filter(Data, participant %in% participants_x) %>%
group_by(stimuli) %>%
summarise( mean_x = mean(judgment),
criterion = unique(criterion) ) %>%
summarise(cor = cor(.$mean_x, .$criterion))
}
N <- length(unique(Data$participant))
lapply(1:N, HelpFun, Data) %>% bind_rows()
}
MyFun(df)
The problem is that this code is slow. Since every selection is random, I
perform all of this 10,000 times, and that is slow. On my machine (Windows 10, 16 GB) 1,000 simulations take 2 minutes and 10,000 simulations take 20 minutes. (I also tried loops, but it did not help, although for some reason it was a little bit faster.) There has to be a faster solution. After all, the computations are not that complicated.
Below I run only 100 simulations, so as not to tie up your computer.
system.time(replicate(100, MyFun(df), simplify = FALSE ) %>% bind_rows())
Any idea about making all of this faster?
Using data.table and for loops we can get a roughly 10 times faster solution.
My function:
minem <- function(n) { # n - simulation count
require(data.table)
participants_id <- unique(df$participant)
N <- length(unique(df$participant))
dt <- as.data.table(df)
setkey(dt, stimuli)
L <- list()
for (j in 1:n) {
corss <- rep(0, N)
for (i in 1:N) {
participants_x <- sample(participants_id, i)
xx <- dt[participant %in% participants_x,
.(mean_x = mean(judgment),
criterion = first(criterion)),
by = stimuli]
corss[i] <- cor(xx$mean_x, xx$criterion)
}
L[[j]] <- corss
}
unlist(L)
}
head(minem(10))
# [1] 0.13642499 -0.02078109 -0.14418400 0.04966805 -0.09108837 -0.15403185
Your function:
Meir <- function(n) {
replicate(n, MyFun(df), simplify = FALSE) %>% bind_rows()
}
Benchmarks:
microbenchmark::microbenchmark(
Meir(10),
minem(10),
times = 10)
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# Meir(10) 1897.6909 1956.3427 1986.5768 1973.5594 2043.4337 2048.5809 10 b
# minem(10) 193.5403 196.0426 201.4132 202.1085 204.9108 215.9961 10 a
Around 10 times faster.
system.time(minem(1000)) # ~19 sec
Update
If your data size and memory limit allow, then you can do it much faster with this approach:
minem2 <- function(n) {
require(data.table)
participants_id <- unique(df$participant)
N <- length(unique(df$participant))
dt <- as.data.table(df)
setkey(dt, participant)
L <- lapply(1:n, function(x)
sapply(1:N, function(i)
sample(participants_id, i)))
L <- unlist(L, recursive = F)
names(L) <- 1:length(L)
g <- sapply(seq_along(L), function(x) rep(names(L[x]), length(L[[x]])))
L <- data.table(participant = unlist(L), .id = as.integer(unlist(g)),
key = "participant")
L <- dt[L, allow.cartesian = TRUE]
xx <- L[, .(mean_x = mean(judgment), criterion = first(criterion)),
keyby = .(.id, stimuli)]
xx <- xx[, cor(mean_x, criterion), keyby = .id][[2]]
xx
}
microbenchmark::microbenchmark(
Meir(100),
minem(100),
minem2(100),
times = 2, unit = "relative")
# Unit: relative
# expr min lq mean median uq max neval cld
# Meir(100) 316.34965 316.34965 257.30832 257.30832 216.85190 216.85190 2 c
# minem(100) 31.49818 31.49818 26.48945 26.48945 23.05735 23.05735 2 b
# minem2(100) 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 2 a
But you will need to test yourself.
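Before committing to a large n, it may be worth comparing the allocations of the two approaches, e.g. with the bench package (a small check of mine; assumes bench is installed, results not shown):
library(bench)
mark(minem(10), minem2(10), iterations = 2, check = FALSE)[, c("expression", "median", "mem_alloc")]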
I'm trying to make this function faster (ideally with RcppArmadillo or some other alternative). myfun takes a matrix, mat, that can get quite large but always has two columns. For each row of the matrix, myfun finds the other rows whose total absolute difference from it is exactly 1.
As an example, the first row of mat below is (3, 3). Therefore, myfun outputs a list whose first element contains rows 2 and 3, which are closest to row 1, but not row 5, which is +2 away.
library(microbenchmark)
dim(mat)
[1] 1000 2
head(mat)
x y
[1,] 3 3
[2,] 3 4
[3,] 3 2
[4,] 7 3
[5,] 4 4
[6,] 10 1
output
[[1]]
[1] 2 3
[[2]]
[1] 1
[[3]]
[1] 1
[[4]]
integer(0)
[[5]]
integer(0)
[[6]]
integer(0)
microbenchmark( myfun(mat), times = 100) #mat of 1000 rows
# Unit: milliseconds
# expr min lq mean median uq max neval
# myfun(mat) 89.30126 90.28618 95.50418 90.91281 91.50875 180.1505 100
microbenchmark( myfun(mat), times = 100) #mat of 10,000 rows
# Unit: seconds
# expr min lq mean median uq max neval
# myfun(layout.old) 5.912633 5.912633 5.912633 5.912633 5.912633 5.912633 1
This is what myfun looks like
myfun = function(x){
  doo <- function(j) {
    # replicate row j to the dimensions of x (row-wise)
    j.mat <- matrix(rep(j, length = length(x)), ncol = ncol(x), byrow = TRUE)
    # absolute element-wise differences to every row of x
    j.abs <- abs(j.mat - x)
    # keep the rows whose absolute differences sum to exactly 1
    return(which(rowSums(j.abs) == 1))
  }
  # apply doo to every row of x
  return(apply(x, 1, doo))
}
Below is a base R solution that is much faster than the OP's myfun.
myDistOne <- function(m) {
v1 <- m[,1L]; v2 <- m[,2L]
rs <- rowSums(m)
lapply(seq_along(rs), function(x) {
t1 <- which(abs(rs[x] - rs) == 1)
t2 <- t1[which(abs(v1[x] - v1[t1]) <= 1)]
t2[which(abs(v2[x] - v2[t2]) <= 1)]
})
}
Here are some benchmarks:
library(microbenchmark)
set.seed(9711)
m1 <- matrix(sample(50, 2000, replace = TRUE), ncol = 2) ## 1,000 rows
microbenchmark(myfun(m1), myDistOne(m1))
Unit: milliseconds
expr min lq mean median uq max neval cld
myfun(m1) 78.61637 78.61637 80.47931 80.47931 82.34225 82.34225 2 b
myDistOne(m1) 27.34810 27.34810 28.18758 28.18758 29.02707 29.02707 2 a
identical(myfun(m1), myDistOne(m1))
[1] TRUE
m2 <- matrix(sample(200, 20000, replace = TRUE), ncol = 2) ## 10,000 rows
microbenchmark(myfun(m2), myDistOne(m2))
Unit: seconds
expr min lq mean median uq max neval cld
myfun(m2) 5.219318 5.533835 5.758671 5.714263 5.914672 7.290701 100 b
myDistOne(m2) 1.230721 1.366208 1.433403 1.419413 1.473783 1.879530 100 a
identical(myfun(m2), myDistOne(m2))
[1] TRUE
Here is a very large example:
m3 <- matrix(sample(1000, 100000, replace = TRUE), ncol = 2) ## 50,000 rows
system.time(testJoe <- myDistOne(m3))
user system elapsed
26.963 10.988 37.973
system.time(testUser <- myfun(m3))
user system elapsed
148.444 33.297 182.639
identical(testJoe, testUser)
[1] TRUE
I'm sure there is a faster solution. Maybe sorting the rowSums upfront and working from there could yield an improvement (it could also get very messy).
Update
As I predicted, working from the sorted rowSums is much faster (and uglier!).
myDistOneFast <- function(m) {
v1 <- m[,1L]; v2 <- m[,2L]
origrs <- rowSums(m)
mySort <- order(origrs)
rs <- origrs[mySort]
myDiff <- c(0L, diff(rs))
brks <- which(myDiff > 0L)
lenB <- length(brks)
n <- nrow(m)
myL <- vector("list", length = n)
findRows <- function(v, s, r, u1, u2) {
lapply(v, function(x) {
sx <- s[x]
tv1 <- s[r]
tv2 <- tv1[which(abs(u1[sx] - u1[tv1]) <= 1)]
tv2[which(abs(u2[sx] - u2[tv2]) <= 1)]
})
}
t1 <- brks[1L]; t2 <- brks[2L]
## setting first index in myL
myL[mySort[1L:(t1-1L)]] <- findRows(1L:(t1-1L), mySort, t1:(t2-1L), v1, v2)
k <- t0 <- 1L
while (k < (lenB-1L)) {
t1 <- brks[k]; t2 <- brks[k+1L]; t3 <- brks[k+2L]
vec <- t1:(t2-1L)
if (myDiff[t1] == 1L) {
if (myDiff[t2] == 1L) {
myL[mySort[vec]] <- findRows(vec, mySort, c(t0:(t1-1L), t2:(t3-1L)), v1, v2)
} else {
myL[mySort[vec]] <- findRows(vec, mySort, t0:(t1-1L), v1, v2)
}
} else if (myDiff[t2] == 1L) {
myL[mySort[vec]] <- findRows(vec, mySort, t2:(t3-1L), v1, v2)
}
if (myDiff[t2] > 1L) {
if (myDiff[t3] > 1L) {
k <- k+2L; t0 <- t2
} else {
k <- k+1L; t0 <- t1
}
} else {k <- k+1L; t0 <- t1}
}
## setting second to last index in myL
if (k == lenB-1L) {
t1 <- brks[k]; t2 <- brks[k+1L]; t3 <- n+1L; vec <- t1:(t2-1L)
if (myDiff[t1] == 1L) {
if (myDiff[t2] == 1L) {
myL[mySort[vec]] <- findRows(vec, mySort, c(t0:(t1-1L), t2:(t3-1L)), v1, v2)
} else {
myL[mySort[vec]] <- findRows(vec, mySort, t0:(t1-1L), v1, v2)
}
} else if (myDiff[t2] == 1L) {
myL[mySort[vec]] <- findRows(vec, mySort, t2:(t3-1L), v1, v2)
}
k <- k+1L; t0 <- t1
}
t1 <- brks[k]; vec <- t1:n
if (myDiff[t1] == 1L) {
myL[mySort[vec]] <- findRows(vec, mySort, t0:(t1-1L), v1, v2)
}
myL
}
The results are not even close. myDistOneFast is over 100x faster than the OP's original myfun on very large matrices and also scales well. Below are some benchmarks:
microbenchmark(OP = myfun(m1), Joe = myDistOne(m1), JoeFast = myDistOneFast(m1))
Unit: milliseconds
expr min lq mean median uq max neval
OP 57.60683 59.51508 62.91059 60.63064 61.87141 109.39386 100
Joe 22.00127 23.11457 24.35363 23.87073 24.87484 58.98532 100
JoeFast 11.27834 11.99201 12.59896 12.43352 13.08253 15.35676 100
microbenchmark(OP = myfun(m2), Joe = myDistOne(m2), JoeFast = myDistOneFast(m2))
Unit: milliseconds
expr min lq mean median uq max neval
OP 4461.8201 4527.5780 4592.0409 4573.8673 4633.9278 4867.5244 100
Joe 1287.0222 1316.5586 1339.3653 1331.2534 1352.3134 1524.2521 100
JoeFast 128.4243 134.0409 138.7518 136.3929 141.3046 172.2499 100
system.time(testJoeFast <- myDistOneFast(m3))
user system elapsed
0.68 0.00 0.69 ### myfun took over 100s!!!
To test equality, we have to sort each vector of indices. We also can't use identical for comparison, as myL is initialized as an empty list, so some of its elements contain NULL values (these correspond to integer(0) in the results from myfun and myDistOne).
testJoeFast <- lapply(testJoeFast, sort)
all(sapply(1:50000, function(x) all(testJoe[[x]]==testJoeFast[[x]])))
[1] TRUE
unlist(testJoe[which(sapply(testJoeFast, is.null))])
integer(0)
Here is an example with 500,000 rows:
set.seed(42)
m4 <- matrix(sample(2000, 1000000, replace = TRUE), ncol = 2)
system.time(myDistOneFast(m4))
user system elapsed
10.84 0.06 10.94
Here is an overview of how the algorithm works:
1) Calculate the rowSums.
2) Order the rowSums (i.e. obtain the permutation of indices that sorts them).
3) Call diff on the sorted rowSums.
4) Mark each non-zero instance.
5) Determine which indices in the small candidate range satisfy the OP's request.
6) Use the ordering from step 2 to map back to the original indices.
This is much faster than comparing one rowSum to all of the rowSums every time.
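A tiny sketch of steps 1)-4) on the 6-row example from the question (values copied from head(mat) shown there):
m <- cbind(x = c(3, 3, 3, 7, 4, 10), y = c(3, 4, 2, 3, 4, 1))
rs   <- rowSums(m)           # step 1: 6 7 5 10 8 11
ord  <- order(rs)            # step 2: permutation that sorts the rowSums
srt  <- rs[ord]              # 5 6 7 8 10 11
d    <- c(0L, diff(srt))     # step 3: 0 1 1 1 2 1
brks <- which(d > 0L)        # step 4: starts of new rowSum values
# only rows in adjacent groups whose rowSums differ by exactly 1 are candidates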
I am looking for an efficient solution for the following problem:
b <- matrix(c(0,0,0,1,1,0), nrow = 2, byrow = T)
weight <- c(1,1)
times <- 5
abc <- do.call(rbind, replicate(times, b, simplify=FALSE))
weight <- rep.int(weight,times)
sum1 <- as.numeric(rep.int(NA,nrow(abc)))
##Rprof()
for(j in 1:nrow(abc)){
a <- abc[j,]
sum1[j] <- sum(weight[rowSums(t(a == t(abc)) + 0) == ncol(abc)])
}
##Rprof(NULL)
##summaryRprof()
Is there a faster way to do this? Rprof shows that rowSums(), t(), == and + are quite slow. If nrow(abc) is 20,000 it takes about 21 seconds.
Thanks for helping!
Edit: I have a matrix abc and a vector weight with length equal to nrow(abc). The first value of weight corresponds to the first row of matrix abc, and so on. Now, I would like to determine which rows of matrix abc are equal. Then, I want to remember the positions of those rows in order to sum up the corresponding weights at the same positions. I want to store the resulting sum for each row.
Here is a way that looks valid and fast:
ff <- function(mat, weights)
{
rs <- apply(mat, 1, paste, collapse = ";")
unlist(lapply(unique(rs),
function(x)
sum(weights[match(rs, x, 0) > 0])))[match(rs, unique(rs))]
}
ff(abc, weight)
# [1] 5 5 5 5 5 5 5 5 5 5
And comparing with your function:
ffOP <- function(mat, weights)
{
sum1 <- as.numeric(rep.int(NA,nrow(mat)))
for(j in 1:nrow(mat)) {
a <- mat[j,]
sum1[j] <- sum(weights[rowSums(t(a == t(mat)) + 0) == ncol(mat)])
}
sum1
}
ffOP(abc, weight)
# [1] 5 5 5 5 5 5 5 5 5 5
library(microbenchmark)
m = do.call(rbind, replicate(1e3, matrix(0:11, 3, 4), simplify = F))
set.seed(101); w = runif(1e3*3)
all.equal(ffOP(m, w), ff(m, w))
#[1] TRUE
microbenchmark(ffOP(m, w), ff(m, w), times = 10)
#Unit: milliseconds
# expr min lq median uq max neval
# ffOP(m, w) 969.83968 986.47941 996.68563 1015.53552 1051.23847 10
# ff(m, w) 20.42426 20.64002 21.36508 21.97182 22.59127 10
For the record, I also implemented your approach in C; here are the benchmarks:
#> microbenchmark(ffOP(m, w), ff(m, w), ffC(m, w), times = 10)
#Unit: milliseconds
# expr min lq median uq max neval
# ffOP(m, w) 957.66691 967.09429 991.35232 1000.53070 1016.74100 10
# ff(m, w) 20.60243 20.85578 21.70578 22.13434 23.04924 10
# ffC(m, w) 36.24618 36.40940 37.18927 37.39877 38.83358 10
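For reference, the same aggregation can also be written with base R's rowsum() over the pasted row key (an alternative sketch of mine, not part of the benchmarked code above):
ff_rowsum <- function(mat, weights) {
  key  <- apply(mat, 1, paste, collapse = ";")   # one string key per row
  sums <- rowsum(weights, key)                   # total weight per distinct row
  unname(sums[key, 1])                           # map sums back to every row
}
ff_rowsum(abc, weight)
# [1] 5 5 5 5 5 5 5 5 5 5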
I need an efficient way to find the indexes (not the logical vector) of the matches between two vectors. I can do this with:
which(c("a", "q", "f", "c", "z") %in% letters[1:10])
In the same way that it is better to find the position of the maximum with which.max:
which(c(1:8, 10, 9) %in% max(c(1:8, 10, 9)))
which.max(c(1:8, 10, 9))
I am wondering if I have the most efficient way of finding the position of matching terms in the 2 vectors.
EDIT:
Per the questions/comments below: I am operating on a list of vectors. The problem involves sentences that have been broken into bags of words, as seen below. The list may contain 10,000-20,000 or more character vectors. Based on each matched index I will then grab the 4 words before and the 4 words after it and calculate a score (a sketch of this windowing step follows the example below).
x <- list(c('I', 'like', 'chocolate', 'cake'), c('chocolate', 'cake', 'is', 'good'))
y <- rep(x, 5000)
lapply(y, function(x) {
which(x %in% c("chocolate", "good"))
})
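The windowing step mentioned in the edit could then look something like this (a hypothetical helper of mine, not part of the question's code):
context_window <- function(words, idx, k = 4) {
  # for each matched position, return the words up to k before and k after it
  lapply(idx, function(i) words[max(1, i - k):min(length(words), i + k)])
}
hits <- which(x[[1]] %in% c("chocolate", "good"))
context_window(x[[1]], hits)
# [[1]]
# [1] "I" "like" "chocolate" "cake"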
Here's a relatively faster way using data.table:
require(data.table)
vv <- vapply(y, length, 0L)
DT <- data.table(y = unlist(y), id = rep(seq_along(y), vv), pos = sequence(vv))
setkey(DT, y)
# OLD CODE which will not take care of no-match entries (commented)
# DT[J(c("chocolate", "good")), list(list(pos)), by=id]$V1
setkey(DT[J(c("chocolate", "good"))], id)[J(seq_along(vv)), list(list(pos))]$V1
The idea:
First we unlist your list into a column of DT named y. In addition, we create two other columns named id and pos: id tells the index in the list and pos tells the position within that id. Then, by creating a key column on id, we can do fast subsetting. With this subsetting we'll get the corresponding pos values for each id. Before we collect all pos for each id in a list and output the list column (V1), we take care of the entries with no match for our query by setting the key to id after the first subset and then subsetting on all possible values of id (as this results in NA for non-existing entries).
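For intuition, here is what DT looks like for the two example sentences before setting the key (a small illustration, assuming the list x from the question):
vv <- vapply(x, length, 0L)
data.table(y = unlist(x), id = rep(seq_along(x), vv), pos = sequence(vv))
#            y id pos
# 1:         I  1   1
# 2:      like  1   2
# 3: chocolate  1   3
# 4:      cake  1   4
# 5: chocolate  2   1
# 6:      cake  2   2
# 7:        is  2   3
# 8:      good  2   4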
Benchmarking with the lapply code on your post:
x <- list(c('I', 'like', 'chocolate', 'cake'), c('chocolate', 'cake', 'is', 'good'))
y <- rep(x, 5000)
require(data.table)
arun <- function() {
vv <- vapply(y, length, 0L)
DT <- data.table(y = unlist(y), id = rep(seq_along(y), vv), pos = sequence(vv))
setkey(DT, y)
setkey(DT[J(c("chocolate", "good"))], id)[J(seq_along(vv)), list(list(pos))]$V1
}
tyler <- function() {
lapply(y, function(x) {
which(x %in% c("chocolate", "good"))
})
}
require(microbenchmark)
microbenchmark(a1 <- arun(), a2 <- tyler(), times=50)
Unit: milliseconds
expr min lq median uq max neval
a1 <- arun() 30.71514 31.92836 33.19569 39.31539 88.56282 50
a2 <- tyler() 626.67841 669.71151 726.78236 785.86444 955.55803 50
> identical(a1, a2)
# [1] TRUE
The C++ answer was faster comparing single characters, but I think using a vector of strings introduced enough overhead that now it's slower:
char1 <- c("a", "q", "f", "c", "z")
char2 <- letters[1:10]
library(inline)
cpp_whichin_src <- '
Rcpp::CharacterVector xa(a);
Rcpp::CharacterVector xb(b);
int n_xa = xa.size();
int n_xb = xb.size();
NumericVector res(n_xa);
std::vector<std::string> sa = Rcpp::as< std::vector<std::string> >(xa);
std::vector<std::string> sb = Rcpp::as< std::vector<std::string> >(xb);
for(int i=0; i < n_xa; i++) {
for(int j=0; j<n_xb; j++) {
if( sa[i] == sb[j] ) res[i] = i+1;
}
}
return res;
'
cpp_whichin <- cxxfunction(signature(a="character",b="character"), cpp_whichin_src, plugin="Rcpp")
which.in_cpp <- function(char1, char2) {
idx <- cpp_whichin(char1,char2)
idx[idx!=0]
}
which.in_naive <- function(char1, char2) {
which(char1 %in% char2)
}
which.in_CW <- function(char1, char2) {
unlist(sapply(char2,function(x) which(x==char1)))
}
which.in_cpp(char1,char2)
which.in_naive(char1,char2)
which.in_CW(char1,char2)
Benchmarks
library(microbenchmark)
microbenchmark(
which.in_cpp(char1,char2),
which.in_naive(char1,char2),
which.in_CW(char1,char2)
)
set.seed(1)
cmb <- apply(combn(letters,2), 2, paste,collapse="")
char1 <- sample( cmb, 100 )
char2 <- sample( cmb, 100 )
Unit: microseconds
expr min lq median uq max
1 which.in_cpp(char1, char2) 114.890 120.023 126.6930 135.5630 537.011
2 which.in_CW(char1, char2) 697.505 725.826 766.4385 813.8615 8032.168
3 which.in_naive(char1, char2) 17.391 20.289 22.4545 25.4230 76.826
# Same as above, but with 3 letter combos and 1000 sampled
Unit: microseconds
expr min lq median uq max
1 which.in_cpp(char1, char2) 8505.830 8715.598 8863.3130 8997.478 9796.288
2 which.in_CW(char1, char2) 23430.493 27987.393 28871.2340 30032.450 31926.546
3 which.in_naive(char1, char2) 129.904 135.736 158.1905 180.260 3821.785