Optimizing speed of nearest search function in R [closed]

I'm trying to make this function faster (ideally with RcppArmadillo or some other alternative). myfun takes a matrix, mat, that can get quite large but always has two columns. For each row of the matrix, myfun finds the indices of all other rows that are exactly +1 or -1 away in total absolute value (i.e. at Manhattan distance 1).
As an example below, the first row of mat is 3,3. Therefore myfun will output a list whose first element contains rows 2 and 3 (each exactly 1 away from row 1), but not row 5, which is +2 away.
library(microbenchmark)
dim(mat)
[1] 1000 2
head(mat)
x y
[1,] 3 3
[2,] 3 4
[3,] 3 2
[4,] 7 3
[5,] 4 4
[6,] 10 1
output
[[1]]
[1] 2 3
[[2]]
[1] 1
[[3]]
[1] 1
[[4]]
integer(0)
[[5]]
integer(0)
[[6]]
integer(0)
microbenchmark( myfun(mat), times = 100) #mat of 1000 rows
# Unit: milliseconds
# expr min lq mean median uq max neval
# myfun(mat) 89.30126 90.28618 95.50418 90.91281 91.50875 180.1505 100
microbenchmark( myfun(mat), times = 100) #mat of 10,000 rows
# Unit: seconds
# expr min lq mean median uq max neval
# myfun(layout.old) 5.912633 5.912633 5.912633 5.912633 5.912633 5.912633 1
This is what myfun looks like:
myfun = function(x) {
  doo <- function(j) {
    j.mat <- matrix(rep(j, length = length(x)), ncol = ncol(x), byrow = TRUE)
    j.abs <- abs(j.mat - x)
    return(which(rowSums(j.abs) == 1))
  }
  return(apply(x, 1, doo))
}
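For a self-contained test of myfun, a stand-in for mat can be generated along these lines (the question only shows head(mat), so the random integer values below are an assumption):
set.seed(1)
## 1,000 rows, two integer columns, mimicking dim(mat) from the question
mat <- matrix(sample(50, 2000, replace = TRUE), ncol = 2,
              dimnames = list(NULL, c("x", "y")))
out <- myfun(mat)  # element i lists the rows at total absolute distance 1 from row i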

Below is a base R solution that is much faster than the OP's myfun. It relies on the observation that if two rows are exactly 1 apart in total absolute value, then exactly one coordinate differs (by +1 or -1), so their row sums must also differ by exactly 1; filtering on the row sums first cheaply eliminates most candidate rows before the column-wise checks.
myDistOne <- function(m) {
  v1 <- m[,1L]; v2 <- m[,2L]
  rs <- rowSums(m)
  lapply(seq_along(rs), function(x) {
    t1 <- which(abs(rs[x] - rs) == 1)
    t2 <- t1[which(abs(v1[x] - v1[t1]) <= 1)]
    t2[which(abs(v2[x] - v2[t2]) <= 1)]
  })
}
Here are some benchmarks:
library(microbenchmark)
set.seed(9711)
m1 <- matrix(sample(50, 2000, replace = TRUE), ncol = 2) ## 1,000 rows
microbenchmark(myfun(m1), myDistOne(m1))
Unit: milliseconds
expr min lq mean median uq max neval cld
myfun(m1) 78.61637 78.61637 80.47931 80.47931 82.34225 82.34225 2 b
myDistOne(m1) 27.34810 27.34810 28.18758 28.18758 29.02707 29.02707 2 a
identical(myfun(m1), myDistOne(m1))
[1] TRUE
m2 <- matrix(sample(200, 20000, replace = TRUE), ncol = 2) ## 10,000 rows
microbenchmark(myfun(m2), myDistOne(m2))
Unit: seconds
expr min lq mean median uq max neval cld
myfun(m2) 5.219318 5.533835 5.758671 5.714263 5.914672 7.290701 100 b
myDistOne(m2) 1.230721 1.366208 1.433403 1.419413 1.473783 1.879530 100 a
identical(myfun(m2), myDistOne(m2))
[1] TRUE
Here is a very large example:
m3 <- matrix(sample(1000, 100000, replace = TRUE), ncol = 2) ## 50,000 rows
system.time(testJoe <- myDistOne(m3))
user system elapsed
26.963 10.988 37.973
system.time(testUser <- myfun(m3))
user system elapsed
148.444 33.297 182.639
identical(testJoe, testUser)
[1] TRUE
I'm sure there is a faster solution. Sorting the rowSums up front and working from there might yield a further improvement (though it could also get very messy).
Update
As I predicted, working from the sorted rowSums is much faster (and uglier!):
myDistOneFast <- function(m) {
  v1 <- m[,1L]; v2 <- m[,2L]
  origrs <- rowSums(m)
  mySort <- order(origrs)
  rs <- origrs[mySort]
  myDiff <- c(0L, diff(rs))
  brks <- which(myDiff > 0L)
  lenB <- length(brks)
  n <- nrow(m)
  myL <- vector("list", length = n)

  findRows <- function(v, s, r, u1, u2) {
    lapply(v, function(x) {
      sx <- s[x]
      tv1 <- s[r]
      tv2 <- tv1[which(abs(u1[sx] - u1[tv1]) <= 1)]
      tv2[which(abs(u2[sx] - u2[tv2]) <= 1)]
    })
  }

  t1 <- brks[1L]; t2 <- brks[2L]
  ## setting first index in myL
  myL[mySort[1L:(t1-1L)]] <- findRows(1L:(t1-1L), mySort, t1:(t2-1L), v1, v2)
  k <- t0 <- 1L

  while (k < (lenB-1L)) {
    t1 <- brks[k]; t2 <- brks[k+1L]; t3 <- brks[k+2L]
    vec <- t1:(t2-1L)
    if (myDiff[t1] == 1L) {
      if (myDiff[t2] == 1L) {
        myL[mySort[vec]] <- findRows(vec, mySort, c(t0:(t1-1L), t2:(t3-1L)), v1, v2)
      } else {
        myL[mySort[vec]] <- findRows(vec, mySort, t0:(t1-1L), v1, v2)
      }
    } else if (myDiff[t2] == 1L) {
      myL[mySort[vec]] <- findRows(vec, mySort, t2:(t3-1L), v1, v2)
    }
    if (myDiff[t2] > 1L) {
      if (myDiff[t3] > 1L) {
        k <- k+2L; t0 <- t2
      } else {
        k <- k+1L; t0 <- t1
      }
    } else {k <- k+1L; t0 <- t1}
  }

  ## setting second to last index in myL
  if (k == lenB-1L) {
    t1 <- brks[k]; t2 <- brks[k+1L]; t3 <- n+1L; vec <- t1:(t2-1L)
    if (myDiff[t1] == 1L) {
      if (myDiff[t2] == 1L) {
        myL[mySort[vec]] <- findRows(vec, mySort, c(t0:(t1-1L), t2:(t3-1L)), v1, v2)
      } else {
        myL[mySort[vec]] <- findRows(vec, mySort, t0:(t1-1L), v1, v2)
      }
    } else if (myDiff[t2] == 1L) {
      myL[mySort[vec]] <- findRows(vec, mySort, t2:(t3-1L), v1, v2)
    }
    k <- k+1L; t0 <- t1
  }

  t1 <- brks[k]; vec <- t1:n
  if (myDiff[t1] == 1L) {
    myL[mySort[vec]] <- findRows(vec, mySort, t0:(t1-1L), v1, v2)
  }
  myL
}
The results are not even close. myDistOneFast is over 100x faster than the OP's original myfun on very large matrices and also scales well. Below are some benchmarks:
microbenchmark(OP = myfun(m1), Joe = myDistOne(m1), JoeFast = myDistOneFast(m1))
Unit: milliseconds
expr min lq mean median uq max neval
OP 57.60683 59.51508 62.91059 60.63064 61.87141 109.39386 100
Joe 22.00127 23.11457 24.35363 23.87073 24.87484 58.98532 100
JoeFast 11.27834 11.99201 12.59896 12.43352 13.08253 15.35676 100
microbenchmark(OP = myfun(m2), Joe = myDistOne(m2), JoeFast = myDistOneFast(m2))
Unit: milliseconds
expr min lq mean median uq max neval
OP 4461.8201 4527.5780 4592.0409 4573.8673 4633.9278 4867.5244 100
Joe 1287.0222 1316.5586 1339.3653 1331.2534 1352.3134 1524.2521 100
JoeFast 128.4243 134.0409 138.7518 136.3929 141.3046 172.2499 100
system.time(testJoeFast <- myDistOneFast(m3))
user system elapsed
0.68 0.00 0.69 ### myfun took over 100s!!!
To test equality, we have to sort each vector of indices. We also can't use identical for the comparison because myL is initialized as an empty list, so some of its elements are still NULL (these correspond to integer(0) in the results from myfun and myDistOne).
testJoeFast <- lapply(testJoeFast, sort)
all(sapply(1:50000, function(x) all(testJoe[[x]]==testJoeFast[[x]])))
[1] TRUE
unlist(testJoe[which(sapply(testJoeFast, is.null))])
integer(0)
Here is an example with 500,000 rows:
set.seed(42)
m4 <- matrix(sample(2000, 1000000, replace = TRUE), ncol = 2)
system.time(myDistOneFast(m4))
user system elapsed
10.84 0.06 10.94
Here is an overview of how the algorithm works:
1. Calculate the rowSums.
2. Order them (i.e. obtain the indices that sort the original vector).
3. Call diff on the sorted row sums.
4. Mark each non-zero difference (these are the breaks between groups of equal row sums).
5. Determine which indices within each small range of adjacent groups satisfy the OP's request.
6. Use the ordering from step 2 to map the results back to the original row indices.
This is much faster than comparing one rowSum against all of the rowSums every time.
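To make the bookkeeping concrete, here is a minimal sketch (my own illustration, not part of the answer) of steps 1-4 on the 6-row example matrix from the question:
mat    <- matrix(c(3,3, 3,4, 3,2, 7,3, 4,4, 10,1), ncol = 2, byrow = TRUE)
origrs <- rowSums(mat)                 # step 1: 6 7 5 10 8 11
mySort <- order(origrs)                # step 2: 3 1 2 5 4 6
rs     <- origrs[mySort]               # sorted row sums: 5 6 7 8 10 11
brks   <- which(c(0L, diff(rs)) > 0L)  # steps 3-4: positions where a new row-sum group starts
## Only rows in adjacent groups whose sums differ by exactly 1 need the full per-column check.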

Related

Random rearrangement of a vector in R [duplicate]

I want to permute a vector so that no element stays in the same place after permutation as it was in the original. Let's say I have a list of elements like this: AABBCCADEF
A valid shuffle would be: BBAADEFCCA
But these would be invalid: BAACFEDCAB or BCABFEDCAB
The closest answer I could find was this: python shuffle such that position will never repeat. But that's not quite what I want, because there are no repeated elements in that example.
I want a fast algorithm that generalizes that answer in the case of repetitions.
MWE:
library(microbenchmark)
set.seed(1)
x <- sample(letters, size=295, replace=T)
terrible_implementation <- function(x) {
  xnew <- sample(x)
  while(any(x == xnew)) {
    xnew <- sample(x)
  }
  return(xnew)
}
microbenchmark(terrible_implementation(x), times=10)
Unit: milliseconds
expr min lq mean median uq max neval
terrible_implementation(x) 479.5338 2346.002 4738.49 2993.29 4858.254 17005.05 10
Also, how do I determine if a sequence can be permuted in such a way?
EDIT: To make it perfectly clear what I want, the new vector should satisfy the following conditions:
1) all(table(newx) == table(x))
2) all(x != newx)
E.g.:
newx <- terrible_implementation(x)
all(table(newx) == table(x))
[1] TRUE
all(x != newx)
[1] TRUE
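One way to answer the feasibility part of the question (this mirrors the check used by foo() in the first answer below): a valid rearrangement can exist only if no single value occupies more than half of the positions.
## feasibility check: no value may fill more than half of the positions
max(table(x)) <= length(x) / 2
# [1] TRUE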
#DATA
set.seed(1)
x <- sample(letters, size=295, replace=T)
foo = function(S){
  if(max(table(S)) > length(S)/2){
    stop("NOT POSSIBLE")
  }
  U = unique(S)
  done_chrs = character(0)
  inds = integer(0)
  ans = character(0)
  while(!identical(sort(done_chrs), sort(U))){
    my_chrs = U[!U %in% done_chrs]
    next_chr = my_chrs[which.min(sapply(my_chrs, function(x) length(setdiff(which(!S %in% x), inds))))]
    x_inds = which(S %in% next_chr)
    candidates = setdiff(seq_along(S), union(x_inds, inds))
    if (length(candidates) == 1){
      new_inds = candidates
    }else{
      new_inds = sample(candidates, length(x_inds))
    }
    inds = c(inds, new_inds)
    ans[new_inds] = next_chr
    done_chrs = c(done_chrs, next_chr)
  }
  return(ans)
}
ans_foo = foo(x)
identical(sort(ans_foo), sort(x)) & !any(ans_foo == x)
#[1] TRUE
library(microbenchmark)
microbenchmark(foo(x))
#Unit: milliseconds
# expr min lq mean median uq max neval
# foo(x) 19.49833 22.32517 25.65675 24.85059 27.96838 48.61194 100
I think this satisfies all your conditions. The idea is to order the values by frequency, start with the most common element, and shift the whole vector by the number of times the most common element appears; this guarantees that no element lands back on a position holding its own value.
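A minimal base R sketch of that rotation idea (my own illustration; the function name and details are assumptions, the actual answer is the data.table version below):
rotate_permute <- function(x) {
  stopifnot(max(table(x)) <= length(x) / 2)         # otherwise no valid rearrangement exists
  freq_rank <- match(x, names(sort(table(x), decreasing = TRUE)))
  ord   <- order(freq_rank)                          # group positions by value, most frequent first
  s     <- x[ord]                                    # values laid out in frequency-ordered blocks
  k     <- max(table(x))                             # size of the largest block
  new_s <- c(tail(s, k), head(s, length(s) - k))     # cyclic shift by k positions
  out   <- x
  out[ord] <- new_s                                  # write the shifted values back to the original positions
  out
}
newx <- rotate_permute(x)
all(table(newx) == table(x)) && all(newx != x)
# [1] TRUE
Because every block is at most k values long and k is at most half the vector length, shifting by k can never map a value onto a position that originally held the same value.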
I've written it in data.table, as that helped me during debugging, without losing too much performance. It's a modest improvement performance-wise.
library(data.table)
library(magrittr)
library(microbenchmark)
permute_avoid_same_position <- function(y) {
  DT <- data.table(orig = y)
  DT[, orig_order := .I]
  count_by_letter <-
    DT[, .N, keyby = orig] %>%
    .[order(N)] %>%
    .[, stable_order := .I] %>%
    .[order(-stable_order)] %>%
    .[]
  out <- copy(DT)[count_by_letter, .(orig, orig_order, N), on = "orig"]
  # Dummy element
  out[, new := first(y)]
  origs <- out[["orig"]]
  nrow_out <- nrow(out)
  maxN <- count_by_letter[["N"]][1]
  out[seq_len(nrow_out) > maxN, new := head(origs, nrow_out - maxN)]
  out[seq_len(nrow_out) <= maxN, new := tail(origs, maxN)]
  DT[out, j = .(orig_order, orig, new), on = "orig_order"] %>%
    .[order(orig_order)] %>%
    .[["new"]]
}
set.seed(1)
x <- sample(letters, size=295, replace=T)
testthat::expect_true(all(table(permute_avoid_same_position(x)) == table(x)))
testthat::expect_true(all(x != permute_avoid_same_position(x)))
microbenchmark(permute_avoid_same_position(x), times = 5)
# Unit: milliseconds
# expr min lq mean median uq max
# permute_avoid_same_position(x) 5.650378 5.771753 5.875116 5.788618 5.938604 6.226228
x <- sample(1:1000, replace = TRUE, size = 1e6)
testthat::expect_true(all(table(permute_avoid_same_position(x)) == table(x)))
testthat::expect_true(all(x != permute_avoid_same_position(x)))
microbenchmark(permute_avoid_same_position(x), times = 5)
# Unit: milliseconds
# expr min lq mean median uq max
# permute_avoid_same_position(x) 239.7744 385.4686 401.521 438.2999 440.9746 503.0875
We could extract substrings by the boundary of the repeating elements, sample and replicate
library(stringr)
sapply(replicate(10, sample(str_extract_all(str1, "([[:alpha:]])\\1*")[[1]]),
simplify = FALSE), paste, collapse="")
#[1] "BBAAEFDCCA" "AAAFBBEDCC" "BBAAAEFCCD" "DFACCBBAAE" "AAFCCBBEAD"
#[6] "DAAAECCBBF" "AAFCCDBBEA" "CCEFADBBAA" "BBAAEADCCF" "AACCBBDFAE"
data
str1 <- "AABBCCADEF"

Iterate through the column and count the rows satisfying the condition in R

I'm trying to write a for loop to determine the number of schools whose room cost (column 34) is higher than their board cost (column 23).
numrows <- dim(schools)[1]
for(ii in 1:numrows){
  if(schools[ii, 34] > schools[ii, 23], na.rm = TRUE){
    nrow(numrows)
  }
}
I'm getting the following error
Error in if (schools[ii, 34] > schools[ii, 23]) { :
missing value where TRUE/FALSE needed
I did notice that some of the board costs are missing and I'd like to omit those from the comparisons. Also, I'm expecting just the number of rows that satisfy the condition.
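For reference, the vectorised approach developed in the answer below reduces to a single line for the columns in the question:
## number of schools whose room cost (column 34) exceeds board cost (column 23),
## skipping comparisons involving NA
sum(schools[, 34] > schools[, 23], na.rm = TRUE)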
To further demonstrate my point, here is a simple example based on a 10,000-row sample data.frame
set.seed(2018)
df <- data.frame(one = runif(10^4), two = runif(10^4))
Running a microbenchmark analysis
library(microbenchmark)
res <- microbenchmark(
  vectorised = sum(df[, 1] > df[, 2]),
  for_loop = {
    ss <- 0
    for (i in seq_len(nrow(df))) if (df[i, 1] > df[i, 2]) ss <- ss + 1
    ss
  })
res
# Unit: microseconds
# expr min lq mean median uq
# vectorised 59.681 65.13 78.33118 72.8305 77.9195
# for_loop 346250.957 359535.08 398508.54996 379421.2305 426452.4265
# max neval
# 152.172 100
# 608490.869 100
library(ggplot2)
autoplot(res)
Notice the difference of more than three orders of magnitude (a factor of roughly 5,000!) between the for loop and the vectorised operation. Neither surprising nor interesting.
The structure of the data leading to the error
Error in if (schools[ii, 34] > schools[ii, 23]) { :
missing value where TRUE/FALSE needed
occurs when one or both of the values in the comparison is NA, because the NA propagates through the comparison x > y, e.g.,
> test = 1 > NA
> test
[1] NA
and the flow control if (test) {} can't determine whether the test is TRUE (and so the code should be executed) or FALSE
> if (test) {}
Error in if (test) { : missing value where TRUE/FALSE needed
A simple vectorized solution isn't good enough
> set.seed(123)
> n = 10; x = sample(n); y = sample(n); y[5] = NA
> sum(x > y)
[1] NA
though the 'fix' is obvious and inexpensive
> sum(x > y, na.rm = TRUE)
[1] 3
The for loop also fails, but it is not possible (as in part of the original question) to simply add an na.rm = TRUE clause to the if statement
s = 0
for (i in seq_along(x)) {
    if (x[i] > y[i], na.rm = TRUE)
        s <- s + 1
}
s
because this is not syntactically valid
Error: unexpected ',' in:
"for (i in seq_along(x)) {
if (x[i] > y[i],"
so a more creative solution needs to be found, e.g., testing whether the value of the comparison is actually TRUE
s <- 0
for (i in seq_along(x)) {
    if (isTRUE(x[i] > y[i]))
        s <- s + 1
}
s
Of course it is not useful to compare the performance of the incorrect code; one needs to write the correct solutions first
f1 <- function(x, y)
    sum(x > y, na.rm = TRUE)

f2 <- function(x, y) {
    s <- 0
    for (i in seq_along(x))
        if (isTRUE(x[i] > y[i]))
            s <- s + 1
    s
}
f1() seems much more compact and readable compared to f2(), but we need to make sure the results are sensible
> x > y
[1] FALSE TRUE FALSE FALSE NA TRUE FALSE FALSE FALSE TRUE
> f1(x, y)
[1] 3
and the same
> identical(f1(x, y), f2(x, y))
[1] FALSE
Hey wait, what's going on? They look the same?
> f2(x, y)
[1] 3
Actually, the results are numerically equal, but f1() returns an integer value whereas f2() returns a numeric
> all.equal(f1(x, y), f2(x, y))
[1] TRUE
> class(f1(x, y))
[1] "integer"
> class(f2(x, y))
[1] "numeric"
and if we're comparing performance we really need the results to be identical -- no sense comparing apples and oranges. We can update f2() to return an integer by making sure the sum s is always an integer -- use a suffix L, e.g., 0L, to create an integer value
> class(0)
[1] "numeric"
> class(0L)
[1] "integer"
and make sure an integer 1L is added to s on each successful iteration
f2a <- function(x, y) {
    s <- 0L
    for (i in seq_along(x))
        if (isTRUE(x[i] > y[i]))
            s <- s + 1L
    s
}
We then have
> f2a(x, y)
[1] 3
> identical(f1(x, y), f2a(x, y))
[1] TRUE
and are now in a position to compare performance
> microbenchmark(f1(x, y), f2a(x, y))
Unit: microseconds
expr min lq mean median uq max neval
f1(x, y) 1.740 1.8965 2.05500 2.023 2.0975 6.741 100
f2a(x, y) 17.505 18.2300 18.67314 18.487 18.7440 34.193 100
Certainly f2a() is much slower, but for this size problem since the unit is 'microseconds' maybe this doesn't matter -- how do the solutions scale with problem size?
> set.seed(123)
> x = lapply(10^(3:7), sample)
> y = lapply(10^(3:7), sample)
> f = f1; microbenchmark(f(x[[1]], y[[1]]), f(x[[2]], y[[2]]), f(x[[3]], y[[3]]))
Unit: microseconds
expr min lq mean median uq max neval
f(x[[1]], y[[1]]) 9.655 9.976 10.63951 10.3250 11.1695 17.098 100
f(x[[2]], y[[2]]) 76.722 78.239 80.24091 78.9345 79.7495 125.589 100
f(x[[3]], y[[3]]) 764.034 895.075 914.83722 908.4700 922.9735 1106.027 100
> f = f2a; microbenchmark(f(x[[1]], y[[1]]), f(x[[2]], y[[2]]), f(x[[3]], y[[3]]))
Unit: milliseconds
expr min lq mean median uq
f(x[[1]], y[[1]]) 1.260307 1.296196 1.417762 1.338847 1.393495
f(x[[2]], y[[2]]) 12.686183 13.167982 14.067785 13.923531 14.666305
f(x[[3]], y[[3]]) 133.639508 138.845753 144.152542 143.349102 146.913338
max neval
3.345009 100
17.713220 100
165.990545 100
They both scale linearly (not surprising) but even for lengths of 100000 f2a() doesn't seem too bad -- about 1/6th of a second -- and might be a candidate in a situation where the vectorization obfuscated the code rather than clarified it. The cost of extracting individual elements from columns of a data.frame changes this calculus, but also points to the value of operating on atomic vectors rather than complicated data structures.
For what it's worth one can think of worse implementations, especially
f3 <- function(x, y) {
    s <- logical(0)
    for (i in seq_along(x))
        s <- c(s, isTRUE(x[i] > y[i]))
    sum(s)
}
which scales quadratically
> f = f3; microbenchmark(f(x[[1]], y[[1]]), f(x[[2]], y[[2]]), f(x[[3]], y[[3]]), times = 1)
Unit: milliseconds
expr min lq mean median
f(x[[1]], y[[1]]) 7.018899 7.018899 7.018899 7.018899
f(x[[2]], y[[2]]) 371.248504 371.248504 371.248504 371.248504
f(x[[3]], y[[3]]) 42528.280139 42528.280139 42528.280139 42528.280139
uq max neval
7.018899 7.018899 1
371.248504 371.248504 1
42528.280139 42528.280139 1
(because c(s, ...) copies all of s just to add one element to it); growing an object inside a loop like this is a pattern found very often in people's code. The usual fix is sketched below.
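For completeness, the usual fix for that growth pattern (a sketch, not part of the original answer) is to allocate the result vector once and fill it in place, which restores linear scaling:
f3a <- function(x, y) {
    s <- logical(length(x))          # allocate the full vector up front
    for (i in seq_along(x))
        s[i] <- isTRUE(x[i] > y[i])  # fill in place instead of growing with c()
    sum(s)
}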
A second common slowdown is use of complicated data structures (like the data.frame) rather than simple data structures (like atomic vectors), e.g., comparing
f4 <- function(df) {
    s <- 0L
    x <- df[[1]]
    y <- df[[2]]
    for (i in seq_len(nrow(df))) {
        if (isTRUE(x[i] > y[i]))
            s <- s + 1L
    }
    s
}

f5 <- function(df) {
    s <- 0L
    for (i in seq_len(nrow(df))) {
        if (isTRUE(df[i, 1] > df[i, 2]))
            s <- s + 1L
    }
    s
}
with
> df <- Map(data.frame, x, y)
> identical(f1(x[[1]], y[[1]]), f4(df[[1]]))
[1] TRUE
> identical(f1(x[[1]], y[[1]]), f5(df[[1]]))
[1] TRUE
> microbenchmark(f1(x[[1]], y[[1]]), f2a(x[[1]], y[[1]]), f4(df[[1]]), f5(df[[1]]), times = 10)
Unit: microseconds
expr min lq mean median uq
f1(x[[1]], y[[1]]) 10.042 10.324 13.3511 13.4425 14.690
f2a(x[[1]], y[[1]]) 1310.186 1316.869 1480.1526 1344.8795 1386.322
f4(df[[1]]) 1329.307 1336.869 1363.4238 1358.7080 1365.427
f5(df[[1]]) 37051.756 37106.026 38187.8278 37876.0940 38416.276
max neval
20.753 10
2676.030 10
1439.402 10
42292.588 10

in R, if matrix select first elements row-wise, if vector select first elements

Is there elegant R syntax to select, depending on the type of object, either the first n elements from a matrix row-wise, or the first n elements of a vector.
I can obviously do this with conditional statements, but I wonder if there is a simple solution. I also want to avoid calling t() on the whole matrix due to efficiency concerns.
M = matrix(1:12,3,4)
x = 1:12
slct = function(obj,n){
if(is.matrix(obj)) res = c(t(obj))[1:n]
if(is.vector(obj)) res = obj[1:n]
res
}
slct(M,5); slct(x,5)
So avoiding calling t() on the whole matrix is the key. I think the other solutions are more interesting and pedagogical, but the fastest one I see is the following.
Efficiency is probably just because these rely on C subroutines to do the same vectorization that the other answers suggest. If you need only some specific subset of the elements rather than 1:n, there are probably cases where it would be faster to modify the other methods.
I still wonder if there is some builtin that does this?
Here are my two solutions (thanks to some ideas from the other posts):
funOPmod2 = function(obj, n){
  if(is.matrix(obj)){
    nc = ncol(obj)
    nr = (n %/% nc) + 1
    subM = obj[1:nr, ]
    res = matrix(subM, ncol = nr, byrow = TRUE)[1:n]
  }
  if(is.vector(obj)) res = obj[1:n]
  res
}

funOPmod = function(obj, n){
  if(is.matrix(obj)){
    nc = ncol(obj)
    nr = (n %/% nc) + 1
    res = t(obj[1:nr, ])[1:n]
  }
  if(is.vector(obj)) res = obj[1:n]
  res
}

funOP = function(obj, n){
  if(is.matrix(obj)) res = c(t(obj))[1:n]
  if(is.vector(obj)) res = obj[1:n]
  res
}

funRyan <- function(x, n){
  if(is.vector(x)) i <- 1:n
  if(is.matrix(x))
    i <- cbind(ceiling(1:n/ncol(x)), rep_len(seq(ncol(x)), n))
  x[i]
}

funEmil <- function(obj, n) {
  myDim <- dim(obj)
  vec <- 1:n
  if (is.null(myDim))
    return(obj[vec])
  nr <- myDim[1]
  nc <- myDim[2]
  vec1 <- vec - 1L
  rem <- vec1 %% nc
  quot <- vec1 %/% nc
  obj[quot + (rem * nr + 1L)]
}
n <- 25000
set.seed(42)
MBig <- matrix(sample(10^7, 10^6, replace = TRUE), nrow = 10^4)
## Returns same results
all.equal(funOPmod2(MBig, n), funOP(MBig, n))
all.equal(funOPmod(MBig, n), funOP(MBig, n))
all.equal(funOP(MBig, n), funEmil(MBig, n))
all.equal(funRyan(MBig, n), funEmil(MBig, n))
library(microbenchmark)
microbenchmark(funOP(MBig, n), funOPmod(MBig, n), funOPmod2(MBig, n), funRyan(MBig, n), funEmil(MBig, n), unit = "relative")
Unit: relative
expr min lq mean median uq max neval
funOP(MBig, n) 13.788456 13.343185 15.776079 13.104634 15.064036 13.1959488 100
funOPmod(MBig, n) 1.052210 1.089507 1.071219 1.118461 1.025714 0.4533697 100
funOPmod2(MBig, n) 1.000000 1.000000 1.000000 1.000000 1.000000 1.0000000 100
funRyan(MBig, n) 2.689417 2.694442 2.464471 2.637720 2.351565 0.9274931 100
funEmil(MBig, n) 2.760368 2.681478 2.434167 2.591716 2.308087 0.8921837 100
What about this?
slct = function(obj, n){
  if(is.matrix(obj)) res = as.vector(matrix(obj, nrow(obj),
                                            byrow = TRUE))[1:n]
  if(is.vector(obj)) res = obj[1:n]
  res
}
> slct(M,5); slct(x,5)
[1] 1 5 9 2 6
[1] 1 2 3 4 5
Seems to be twice as fast according to benchmark:
Unit: microseconds
expr min lq mean median uq max neval cld
t() 7.654 8.420 9.077494 8.675 8.675 10440.259 1e+05 b
matrix 3.316 3.827 4.411272 4.082 4.083 9502.881 1e+05 a
Note: you should test with is.vector rather than is.numeric in the second line, since is.numeric(M) yields TRUE for a numeric matrix.
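To see why (a quick illustration, not part of the original answer):
M <- matrix(1:12, 3, 4)
is.numeric(M)  # TRUE  -- a numeric matrix still passes is.numeric()
is.vector(M)   # FALSE -- so is.vector() correctly picks out the vector branch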
You can take advantage of array-indexes in [.
# new function
slct2 <- function(x, n){
  if(is.vector(x)) i <- 1:n
  if(is.matrix(x))
    i <- cbind(ceiling(1:n/ncol(x)), rep_len(seq(ncol(x)), n))
  x[i]
}
# old function
slct = function(obj, n){
  if(is.matrix(obj)) res = c(t(obj))[1:n]
  if(is.vector(obj)) res = obj[1:n]
  res
}
Benchmark
m <- 1e4
mat <- matrix(runif(m^2), m)
n <- floor(m*2.3)
all.equal(slct(mat, n), slct2(mat, n))
# [1] TRUE
microbenchmark(slct(mat, n), slct2(mat, n), times = 10)
# Unit: milliseconds
# expr min lq mean median uq max neval
# slct(mat, n) 2471.438599 2606.071460 3466.046729 3137.255011 4420.69364 4985.20781 10
# slct2(mat, n) 2.358151 4.748712 6.627644 4.973533 11.05927 13.73906 10
Can't you just use head?...
head(c(t(M)),5)
[1] 1 4 7 10 2
head(c(t(x)),5)
[1] 1 2 3 4 5
Here is a base R solution:
funEmil <- function(obj, n) {
  myDim <- dim(obj)
  vec <- 1:n
  if (is.null(myDim))
    return(obj[vec])
  nr <- myDim[1]
  nc <- myDim[2]
  vec1 <- vec - 1L
  rem <- vec1 %% nc
  quot <- vec1 %/% nc
  obj[quot + (rem * nr + 1L)]
}
It relies on basic vectorized modular arithmetic (%%) and integer division (%/%) to translate row-wise positions into column-major linear indices; a small worked illustration follows, and the benchmarks after it show that it is also very fast.
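Here is the worked illustration of that index arithmetic (my own addition), using the 3 x 4 matrix M from the question:
M    <- matrix(1:12, 3, 4)
k    <- 5L; nr <- nrow(M); nc <- ncol(M)
row0 <- (k - 1L) %/% nc    # 0-based row of the k-th element read row-wise  -> 1
col0 <- (k - 1L) %%  nc    # 0-based column                                 -> 0
M[row0 + col0 * nr + 1L]   # column-major linear index, i.e. M[2, 1] = 2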
set.seed(42)
MBig <- matrix(sample(10^7, 10^6, replace = TRUE), nrow = 10^4)
funOP = function(obj, n){
  if(is.matrix(obj)) res = c(t(obj))[1:n]
  if(is.vector(obj)) res = obj[1:n]
  res
}

funRyan <- function(x, n){
  if(is.vector(x)) i <- 1:n
  if(is.matrix(x))
    i <- cbind(ceiling(1:n/ncol(x)), rep_len(seq(ncol(x)), n))
  x[i]
}
n <- 25000
## Returns same results
all.equal(funRyan(MBig, n), funEmil(MBig, n))
[1] TRUE
all.equal(funOP(MBig, n), funEmil(MBig, n))
[1] TRUE
library(microbenchmark)
microbenchmark(funOP(MBig, n), funRyan(MBig, n), funEmil(MBig, n), unit = "relative")
Unit: relative
expr min lq mean median uq max neval
funOP(MBig, n) 6.154284 5.915182 5.659250 5.880826 9.140565 1.0344393 100
funRyan(MBig, n) 1.015332 1.030278 1.028644 1.018446 1.032610 0.8330967 100
funEmil(MBig, n) 1.000000 1.000000 1.000000 1.000000 1.000000 1.0000000 100
Here are the benchmarks using the example by @Ryan and the OP's modified solution:
n <- 1e4
mat <- matrix(runif(n^2), n)
s <- floor(n*2.3)
microbenchmark(funOP(mat, s), funRyan(mat, s),
               funEmil(mat, s), funOPmod(mat, s), unit = "relative", times = 10)
Unit: relative
expr min lq mean median uq max neval
funOP(mat, s) 6189.449838 5558.293891 3871.425974 5139.192594 2443.203331 2222.778805 10
funRyan(mat, s) 2.633685 3.032467 2.155205 2.863710 1.445421 1.537473 10
funEmil(mat, s) 2.654739 2.714287 1.969482 2.642673 1.277088 1.326510 10
funOPmod(mat, s) 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 10
The OP's new modified solution is much faster and still gives correct results -- very impressive!
identical(funOPmod(mat, s), funRyan(mat, s))
[1] TRUE

Compare each row with other rows of matrix

I am looking for an efficient solution for the following problem:
b <- matrix(c(0,0,0,1,1,0), nrow = 2, byrow = T)
weight <- c(1,1)
times <- 5
abc <- do.call(rbind, replicate(times, b, simplify=FALSE))
weight <- rep.int(weight,times)
sum1 <- as.numeric(rep.int(NA,nrow(abc)))
##Rprof()
for(j in 1:nrow(abc)){
  a <- abc[j,]
  sum1[j] <- sum(weight[rowSums(t(a == t(abc)) + 0) == ncol(abc)])
}
##Rprof(NULL)
##summaryRprof()
Is there a faster way to do this? Rprof shows that rowSums(), t(), == and + are quite slow. With 20,000 rows it takes about 21 seconds.
Thanks for helping!
Edit: I have a matrix abc and a vector weight whose length equals nrow(abc); the first value of weight corresponds to the first row of abc, and so on. I would like to determine which rows of abc are equal, and for each row sum up the weights of all rows identical to it, storing that sum for the row.
Here is a way that looks valid and fast: each row is collapsed into a single string key (so identical rows share the same key), the weights are summed once per unique key, and the sums are then mapped back to the row positions.
ff <- function(mat, weights)
{
  rs <- apply(mat, 1, paste, collapse = ";")
  unlist(lapply(unique(rs),
                function(x)
                  sum(weights[match(rs, x, 0) > 0])))[match(rs, unique(rs))]
}
ff(abc, weight)
# [1] 5 5 5 5 5 5 5 5 5 5
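To see what those row keys look like on the question's data (my own illustration):
head(apply(abc, 1, paste, collapse = ";"))
# [1] "0;0;0" "1;1;0" "0;0;0" "1;1;0" "0;0;0" "1;1;0"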
And comparing with your function:
ffOP <- function(mat, weights)
{
  sum1 <- as.numeric(rep.int(NA, nrow(mat)))
  for(j in 1:nrow(mat)) {
    a <- mat[j,]
    sum1[j] <- sum(weights[rowSums(t(a == t(mat)) + 0) == ncol(mat)])
  }
  sum1
}
ffOP(abc, weight)
# [1] 5 5 5 5 5 5 5 5 5 5
library(microbenchmark)
m = do.call(rbind, replicate(1e3, matrix(0:11, 3, 4), simplify = F))
set.seed(101); w = runif(1e3*3)
all.equal(ffOP(m, w), ff(m, w))
#[1] TRUE
microbenchmark(ffOP(m, w), ff(m, w), times = 10)
#Unit: milliseconds
# expr min lq median uq max neval
# ffOP(m, w) 969.83968 986.47941 996.68563 1015.53552 1051.23847 10
# ff(m, w) 20.42426 20.64002 21.36508 21.97182 22.59127 10
For the record, I also implemented your approach in C; here are the benchmarks:
#> microbenchmark(ffOP(m, w), ff(m, w), ffC(m, w), times = 10)
#Unit: milliseconds
# expr min lq median uq max neval
# ffOP(m, w) 957.66691 967.09429 991.35232 1000.53070 1016.74100 10
# ff(m, w) 20.60243 20.85578 21.70578 22.13434 23.04924 10
# ffC(m, w) 36.24618 36.40940 37.18927 37.39877 38.83358 10

What is the right way to multiply data frame by vector?

I'm trying to multiply a data frame df by a vector v, so that the product is a data frame, where the i-th row is given by df[i,]*v. I can do this, for example, by
df <- data.frame(A=1:5, B=2:6); v <- c(0,2)
as.data.frame(t(t(df) * v))
A B
1 0 4
2 0 6
3 0 8
4 0 10
5 0 12
I am sure there has to be a more R-style approach (and a very simple one!), but nothing comes to mind. I even tried something like
apply(df, MARGIN=1, function(x) x*v)
but still, non-readable constructions like as.data.frame(t(.)) are required.
How can I find an efficient and elegant workaround here?
This works too:
data.frame(mapply(`*`,df,v))
In that solution, you are taking advantage of the fact that data.frame is a type of list, so you can iterate over both the elements of df and v at the same time with mapply.
Unfortunately, you are limited in what you can output from mapply: a simple list, or a matrix. If your data are huge, this would likely be more efficient:
data.frame(mapply(`*`,df,v,SIMPLIFY=FALSE))
because with SIMPLIFY = FALSE mapply returns a list, which is more efficient to convert to a data.frame than a matrix is.
If you're looking for speed and memory efficiency - data.table to the rescue:
library(data.table)
dt = data.table(df)
for (i in seq_along(dt))
dt[, (i) := dt[[i]] * v[i]]
eddi = function(dt) { for (i in seq_along(dt)) dt[, (i) := dt[[i]] * v[i]] }
arun = function(df) { df * matrix(v, ncol=ncol(df), nrow=nrow(df), byrow=TRUE) }
nograpes = function(df) { data.frame(mapply(`*`,df,v,SIMPLIFY=FALSE)) }
N = 1e6
dt = data.table(A = rnorm(N), B = rnorm(N))
v = c(0,2)
microbenchmark(eddi(copy(dt)), arun(copy(dt)), nograpes(copy(dt)), times = 10)
#Unit: milliseconds
# expr min lq mean median uq max neval
# eddi(copy(dt)) 23.01106 24.31192 26.47132 24.50675 28.87794 34.28403 10
# arun(copy(dt)) 337.79885 363.72081 450.93933 433.21176 516.56839 644.70103 10
# nograpes(copy(dt)) 19.44873 24.30791 36.53445 26.00760 38.09078 95.41124 10
As Arun points out in the comments, one can also use the set function from the data.table package to do this in-place modification on data.frames as well:
for (i in seq_along(df))
set(df, j = i, value = df[[i]] * v[i])
This of course also works for data.tables and could be significantly faster if the number of columns is large.
A language that lets you combine vectors with matrices has to make a decision at some point whether the matrices are row-major or column-major ordered. The reason:
> df * v
A B
1 0 4
2 4 0
3 0 8
4 8 0
5 0 12
is because R operates down the columns first. Doing the double-transpose trick subverts this. Sorry if this is just explaining what you know, but I don't know another way of doing it, except explicitly expanding v into a matrix of the same size (sketched below).
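That explicit expansion looks like this (it is the same idea benchmarked as arun() in another answer here):
## build a matrix the same shape as df whose every row is v, then multiply elementwise
df * matrix(v, nrow = nrow(df), ncol = ncol(df), byrow = TRUE)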
Or write a nice function that wraps the not very R-style code into something that is R-stylish.
What's wrong with
t(apply(df, 1, function(x)x*v))
?
library(purrr)
map2_dfc(df, v, `*`)
Benchmark
N = 1e6
dt = data.table(A = rnorm(N), B = rnorm(N))
v = c(0,2)
eddi = function(dt) { for (i in seq_along(dt)) dt[, (i) := dt[[i]] * v[i]]; dt }
arun = function(df) { df * matrix(v, ncol=ncol(df), nrow=nrow(df), byrow=TRUE) }
nograpes = function(df) { data.frame(mapply(`*`,df,v,SIMPLIFY=FALSE)) }
ryan = function(df) {map2_dfc(df, v, `*`) }
library(microbenchmark)
microbenchmark(
eddi(copy(dt))
, arun(copy(dt))
, nograpes(copy(dt))
, ryan(copy(dt))
, times = 100)
# Unit: milliseconds
# expr min lq mean median uq max neval
# eddi(copy(dt)) 8.367513 11.06719 24.26205 12.29132 19.35958 171.6212 100
# arun(copy(dt)) 94.031272 123.79999 186.42155 148.87042 251.56241 364.2193 100
# nograpes(copy(dt)) 7.910739 10.92815 27.68485 13.06058 21.39931 172.0798 100
# ryan(copy(dt)) 8.154395 11.02683 29.40024 13.73845 21.77236 181.0375 100
I think the fastest way (without testing data.table) is data.frame(t(t(df)*v)).
My tests:
testit <- function(nrow, ncol)
{
  df <- as.data.frame(matrix(rnorm(nrow*ncol), nrow = nrow, ncol = ncol))
  v  <- runif(ncol)
  r1 <- data.frame(t(t(df)*v))
  r2 <- data.frame(mapply(`*`, df, v, SIMPLIFY = FALSE))
  r3 <- df * rep(v, each = nrow(df))
  stopifnot(identical(r1, r2) && identical(r1, r3))
  microbenchmark(data.frame(t(t(df)*v)),
                 data.frame(mapply(`*`, df, v, SIMPLIFY = FALSE)),
                 df * rep(v, each = nrow(df)))
}
Result
> set.seed(1)
>
> testit(100,100)
Unit: milliseconds
expr min lq median uq max neval
data.frame(t(t(df) * v)) 2.297075 2.359541 2.455778 3.804836 33.05806 100
data.frame(mapply(`*`, df, v, SIMPLIFY = FALSE)) 9.977436 10.401576 10.658964 11.762009 15.09721 100
df * rep(v, each = nrow(df)) 14.309822 14.956705 16.092469 16.516609 45.13450 100
> testit(1000,10)
Unit: microseconds
expr min lq median uq max neval
data.frame(t(t(df) * v)) 754.844 805.062 844.431 1850.363 27955.79 100
data.frame(mapply(`*`, df, v, SIMPLIFY = FALSE)) 1457.895 1497.088 1567.604 2550.090 4732.03 100
df * rep(v, each = nrow(df)) 5383.288 5527.817 5875.143 6628.586 32392.81 100
> testit(10,1000)
Unit: milliseconds
expr min lq median uq max neval
data.frame(t(t(df) * v)) 17.07548 18.29418 19.91498 20.67944 57.62913 100
data.frame(mapply(`*`, df, v, SIMPLIFY = FALSE)) 99.90103 104.36028 108.28147 114.82012 150.05907 100
df * rep(v, each = nrow(df)) 112.21719 118.74359 122.51308 128.82863 164.57431 100
