Split a vector and summing values - r

I'm a R newbie. I've got a vector
vec <- c(105,29,41,70,77,0,56,49,63,0,105)
and i would like to sum values till "0" occurs and then create a vector with such values, such as:
vec2 <- c(322,168,105)
But i really don't know where to start! Any suggestion?

Starting with this vector...
> vec
[1] 105 29 41 70 77 0 56 49 63 0 105
We can compute a logical TRUE/FALSE vector of where the zeroes are:
> vec == 0
[1] FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE FALSE
When you add FALSE and TRUE, FALSE is zero and TRUE is one, so if we add that vector up every time we get to a TRUE the value increases. So using cumsum for the cumulative sum, we get:
> cumsum(vec==0)
[1] 0 0 0 0 0 1 1 1 1 2 2
Now that result defines the groups that we want to add up within, so let's split vec by that result:
> split(vec, cumsum(vec==0))
$`0`
[1] 105 29 41 70 77
$`1`
[1] 0 56 49 63
$`2`
[1] 0 105
So apart from the zeroes in the second and subsequent parts of the list, that's the numbers we want to add up. Because we are adding we can add the zeroes and it doesn't make any difference (but if you wanted the mean you would have to drop the zeroes). Now we use sapply to iterate over list elements and compute the sum:
> sapply(split(vec, cumsum(vec==0)),sum)
0 1 2
322 168 105
Job done. Ignore the 0 1 2 labels.

The aggregate function is useful for this kind of thing. You create a grouping variable with cumsum (similar to how @Spacedman explained), using the sum function as the aggregating operation. The [[2]] at the end just extracts what you want from what aggregate returns:
aggregate(vec, by = list(cumsum(vec == 0)), FUN = sum)[[2]]
[1] 322 168 105

Another option is by
as.numeric(by(vec, cumsum(vec == 0), sum))
#[1] 322 168 105
Benchmark
Benchmark comparison of the methods for a larger vector based on microbenchmark
# Create sample vector with N entries
set.seed(2018)
N <- 10000
vec <- sample(100, N, replace = T)
vec[sample(length(vec), 100)] <- 0
library(microbenchmark)
res <- microbenchmark(
vapply = {
I <- which(vec == 0)
vapply(1:(length(I)+1),
function(k) sum(vec[max(I[k-1],1):min(I[k], length(vec), na.rm = TRUE)]),
numeric(1))
},
by = {
as.numeric(by(vec, cumsum(vec == 0), sum))
},
aggregate = {
aggregate(vec, by = list(cumsum(vec == 0)), FUN = sum)[[2]]
},
split = {
sapply(split(vec, cumsum(vec == 0)), sum)
},
Reduce = {
ans <- numeric(0)
s <- n <- 0
Reduce(f = function (y,x) {
if(x == 0) {
ans <<- c(ans,s)
s <<- 0
}
n <<- n+1
s <<- x+s
if (n == length(vec))
ans <<- c(ans,s)
s
}, vec, init = 0, accumulate = TRUE)
ans
},
for_loop = {
I <- which(vec == 0)
n <- length(vec)
N <- length(I) + 1
res <- numeric(N)
for(k in seq_along(res)) {
if (k == 1) {
res[k] <- sum(vec[1:I[1]])
next
}
if (k == N) {
res[k] <- sum(vec[I[N-1]:n])
next
}
res[k] <- sum(vec[I[k-1]:I[k]])
}
res
}
)
res
# Unit: microseconds
# expr min lq mean median uq max
# vapply 435.658 487.4230 621.6155 511.3625 607.2005 6175.039
# by 3897.401 4187.2825 4721.3168 4436.5850 4936.2900 12365.351
# aggregate 4817.032 5392.0620 6002.2579 5831.2905 6310.3665 9782.524
# split 611.175 758.4485 895.2201 838.7665 957.0085 1516.556
# Reduce 21372.054 22169.9110 25363.8684 23022.6920 25503.6145 49255.714
# for_loop 15172.255 15846.5735 17252.6895 16445.7900 17572.7535 34401.827
library(ggplot2)
autoplot(res)

With vapply
Here is an option with vapply
I <- which(vec == 0)
vapply(1:(length(I)+1),
function(k) sum(vec[max(I[k-1],1):min(I[k], length(vec), na.rm = TRUE)]),
numeric(1))
# [1] 322 168 105
With Reduce
Here is a solution using Reduce
ans <- numeric(0)
s <- n <- 0
Reduce(f = function (y,x) {
if(x == 0) {
ans <<- c(ans,s)
s <<- 0
}
n <<- n+1
s <<- x+s
if(n == length(vec))
ans <<- c(ans,s)
s
}, vec, init = 0, accumulate = TRUE)
ans
# [1] 322 168 105
With A Loop
Or maybe an old fashioned loop
I <- which(vec == 0)
n <- length(vec)
N <- length(I) + 1
res <- numeric(N)
for(k in seq_along(res)) {
if (k == 1) {
res[k] <- sum(vec[1:I[1]])
next
}
if (k == N) {
res[k] <- sum(vec[I[N-1]:n])
next
}
res[k] <- sum(vec[I[k-1]:I[k]])
}
res
# [1] 322 168 105
Benchmarking
Data
Here is the data used for benchmarking
# c.f. #MauritsEvers
# Create sample vector with N entries
set.seed(2018)
N <- 10000
vec <- sample(100, N, replace = T)
vec[sample(length(vec), 100)] <- 0
Functions
Here are the functions for the second benchmarking figures:
# Sum the stretches of `vec` delimited by zeros. Reduce() is used purely
# as a traversal mechanism; the actual results are accumulated by side
# effect through `<<-` into `ans`.
reduce <- function(vec) {
ans <- numeric(0)
# s: running sum of the current stretch; n: number of elements visited.
s <- n <- 0
Reduce(f = function (y,x) {
if(x == 0) {
# A zero closes the current stretch: record its sum, restart at 0.
ans <<- c(ans,s)
s <<- 0
}
n <<- n+1
s <<- x+s
# After the final element, flush the still-open last stretch.
if(n == length(vec))
ans <<- c(ans,s)
s
}, vec, init = 0, accumulate = TRUE)
ans
}
# Sum the zero-delimited stretches of `vec` with vapply over segment
# numbers. Segment k spans from the (k-1)-th zero (or position 1) to the
# k-th zero (or the end of vec): I[0] is integer(0), which max(., 1)
# turns into 1, and I[k] past the last zero is NA, which
# min(..., na.rm = TRUE) replaces with length(vec).
Vapply <- function (vec) {
I <- which(vec == 0)
vapply(1:(length(I)+1),
function(k) sum(vec[max(I[k-1],1):min(I[k], length(vec), na.rm = TRUE)]),
numeric(1))
}
By <- function (vec) as.numeric(by(vec, cumsum(vec == 0), sum))
Split <- function (vec) sapply(split(vec, cumsum(vec==0)),sum)
Aggregate <- function (vec) aggregate(vec, by = list(cumsum(vec == 0)), FUN = sum)[[2]]
for_loop <- function(vec) {
  # Explicit-loop version: sum each stretch of vec delimited by its
  # zero positions. Segment 1 runs up to the first zero, the last
  # segment runs from the final zero to the end, interior segments run
  # from one zero to the next (each zero contributes 0 to the sum).
  zero_pos <- which(vec == 0)
  n <- length(vec)
  n_seg <- length(zero_pos) + 1
  sums <- numeric(n_seg)
  for (seg in seq_along(sums)) {
    sums[seg] <- if (seg == 1) {
      sum(vec[1:zero_pos[1]])
    } else if (seg == n_seg) {
      sum(vec[zero_pos[n_seg - 1]:n])
    } else {
      sum(vec[zero_pos[seg - 1]:zero_pos[seg]])
    }
  }
  sums
}
Rowsum <- function (vec) rowsum(vec, cumsum(vec == 0))
Benchmarking
Here are the two benchmarking processes combined:
# c.f. #MauritsEvers
resBoth <- microbenchmark::microbenchmark(
Vapply = {
I <- which(vec == 0)
vapply(1:(length(I)+1),
function(k) sum(vec[max(I[k-1],1):min(I[k], length(vec), na.rm = TRUE)]),
numeric(1))
},
Vapply(vec),
By = {
as.numeric(by(vec, cumsum(vec == 0), sum))
},
By(vec),
Aggregate = {
aggregate(vec, by = list(cumsum(vec == 0)), FUN = sum)[[2]]
},
Aggregate(vec),
Split = {
sapply(split(vec, cumsum(vec == 0)), sum)
},
Split(vec),
reduce = {
ans <- numeric(0)
s <- n <- 0
Reduce(f = function (y,x) {
if(x == 0) {
ans <<- c(ans,s)
s <<- 0
}
n <<- n+1
s <<- x+s
if (n == length(vec))
ans <<- c(ans,s)
s
}, vec, init = 0, accumulate = TRUE)
ans
},
reduce(vec),
for_loop = {
I <- which(vec == 0)
n <- length(vec)
N <- length(I) + 1
res <- numeric(N)
for(k in seq_along(res)) {
if (k == 1) {
res[k] <- sum(vec[1:I[1]])
next
}
if (k == N) {
res[k] <- sum(vec[I[N-1]:n])
next
}
res[k] <- sum(vec[I[k-1]:I[k]])
}
res
},
for_loop(vec),
Rowsum = {rowsum(vec, cumsum(vec == 0))},
Rowsum(vec),
times = 10^3
)
Results
Here are the benchmarking results
resBoth
# Unit: microseconds
# expr min lq mean median uq max neval cld
# Vapply 234.121 281.5280 358.0708 311.7955 343.5215 4775.018 1000 ab
# Vapply(vec) 234.850 278.6100 376.3956 306.3260 334.4050 14564.278 1000 ab
# By 1866.029 2108.7175 2468.1208 2209.0025 2370.5520 23316.045 1000 c
# By(vec) 1870.769 2120.5695 2473.1643 2217.3900 2390.6090 21039.762 1000 c
# Aggregate 2738.324 3015.6570 3298.0863 3117.9480 3313.2295 13328.404 1000 d
# Aggregate(vec) 2733.583 2998.1530 3295.6874 3109.1955 3349.1500 8277.694 1000 d
# Split 359.202 412.0800 478.0553 444.1710 492.3080 4622.220 1000 b
# Split(vec) 366.131 410.4395 475.2633 444.1715 490.3025 4601.799 1000 b
# reduce 10862.491 13062.3755 15353.2826 14465.0870 16559.3990 76305.463 1000 g
# reduce(vec) 10403.004 12448.9965 14658.4035 13825.9995 15893.3255 67337.080 1000 f
# for_loop 6687.724 7429.4670 8518.0470 7818.0250 9023.9955 27541.136 1000 e
# for_loop(vec) 123.624 145.8690 187.2201 157.5390 177.4140 9928.200 1000 a
# Rowsum 235.579 264.3880 305.7516 282.2570 322.7360 792.068 1000 ab
# Rowsum(vec) 239.590 264.9350 307.2508 284.8100 322.0060 1778.143 1000 ab

rowsum() is known to be quite fast. We can use cumsum(vec == 0) for the grouping.
c(rowsum(vec, cumsum(vec == 0)))
# [1] 322 168 105

Related

Find a sequence in a matrix as efficiently as possible

Few requirements.
Before posting your answer please!!
1) Make sure that your function does not give errors with other data, simulate several similar matrices. (turn off the seed)
2) Make sure your function is faster than mine
3) Make sure that your function works exactly the same as mine, simulate it on different matrices (turn off the seed)
for example
for(i in 1:500){
m <- matrix(sample(c(F,T),30,T),ncol = 3) ; colnames(m) <- paste0("x",1:ncol(m))
res <- c(my_fun(m),your_function(m))
print(res)
if(sum(res)==1) break
}
m
4) the function should work with a matrix with any number of rows and columns
==========================================================
The function looks for a true in the first column of the logical matrix, if a true is found, go to column 2 and a new row, and so on..
If the sequence is found return true if not false
set.seed(15)
m <- matrix(sample(c(F,T),30,T),ncol = 3) ; colnames(m) <- paste0("x",1:ncol(m))
m
x1 x2 x3
[1,] FALSE TRUE TRUE
[2,] FALSE FALSE FALSE
[3,] TRUE TRUE TRUE
[4,] TRUE TRUE TRUE
[5,] FALSE FALSE FALSE
[6,] TRUE TRUE FALSE
[7,] FALSE TRUE FALSE
[8,] FALSE FALSE FALSE
[9,] FALSE FALSE TRUE
[10,] FALSE FALSE TRUE
my slow example function
find_seq <- function(m) {
  # Scan the rows of logical matrix m top to bottom. Each time a TRUE
  # is found in the current target column, record the hit, print its
  # position, and advance to the next column. TRUE iff every column was
  # satisfied in order.
  target_col <- 1
  hits <- rep(FALSE, ncol(m))
  for (r in seq_len(nrow(m))) {
    if (m[r, target_col] == TRUE) {
      hits[target_col] <- TRUE
      print(c(row = r, col = target_col))
      target_col <- target_col + 1
    }
    if (target_col > ncol(m)) break
  }
  all(hits)
}
find_seq(m)
row col
3 1
row col
4 2
row col
9 3
[1] TRUE
how to make it as fast as possible?
UPD=========================
microbenchmark::microbenchmark(Jean_Claude_Arbaut_fun(m),
+ ThomasIsCoding_fun(m),
+ my_fun(m))
Unit: microseconds
expr min lq mean median uq max neval cld
Jean_Claude_Arbaut_fun(m) 2.850 3.421 4.36179 3.9915 4.5615 27.938 100 a
ThomasIsCoding_fun(m) 14.824 15.965 17.92030 16.5350 17.1050 101.489 100 b
my_fun(m) 23.946 24.517 25.59461 25.0880 25.6580 42.192 100 c
Update
If you are pursuing the speed, you can try the following base R solution
TIC_fun <- function(m) {
  # Greedy column-by-column scan: find a TRUE in column 1, then a TRUE
  # in column 2 on a strictly later row, and so on. TRUE iff every
  # column can be satisfied in order.
  n_rows <- nrow(m)
  n_cols <- ncol(m)
  row_from <- 1
  col <- 1
  repeat {
    if (row_from > n_rows) {
      return(FALSE)
    }
    hit_row <- 0L
    for (r in row_from:n_rows) {
      if (m[r, col]) {
        hit_row <- r
        break
      }
    }
    if (hit_row == 0L) {
      # No TRUE left in this column below the previous hit.
      return(FALSE)
    }
    row_from <- hit_row + 1
    col <- col + 1
    if (col > n_cols) {
      return(TRUE)
    }
  }
}
and you will see
Unit: microseconds
expr min lq mean median uq max neval
my_fun(m) 18.600 26.3010 41.46795 41.5510 44.3010 121.302 100
TIC_fun(m) 10.201 14.1515 409.89394 22.6505 24.4005 38906.601 100
Previous Answer
You can try the code below
lst <- with(as.data.frame(which(m, arr.ind = TRUE)), split(row, col))
# lst <- apply(m, 2, which)
setNames(
stack(
setNames(
Reduce(function(x, y) y[y > x][1],
lst,
init = -Inf,
accumulate = TRUE
)[-1],
names(lst)
)
),
c("row", "col")
)
which gives
row col
1 3 1
2 4 2
3 9 3
A more interesting implementation might be to use recursion (just for fun, but not recommended due to its inefficiency)
# Recursively locate, for each column k of the matrix, the first TRUE
# lying strictly below the row found for column k - 1; returns a
# data.frame of (row, col) pairs.
# NOTE(review): reads `m` from the enclosing environment, not as an
# argument. If some column has no TRUE below the previous hit, the for
# loop falls through and the function returns NULL invisibly — confirm
# callers handle that. Also assumes the previous hit is not in the last
# row, otherwise (row + 1):nrow(m) counts downwards.
f <- function(k) {
if (k == 1) {
return(data.frame(row = which(m[, k])[1], col = k))
}
s <- f(k - 1)
for (i in (tail(s, 1)$row + 1):nrow(m)) {
if (m[i, k]) {
return(rbind(s, data.frame(row = i, col = k)))
}
}
}
and which gives
> f(ncol(m))
row col
1 3 1
2 4 2
3 9 3
If I understand the problem correctly, a single loop through the rows is enough. Here is a way to do this with Rcpp. Here I only return the true/false answer, if you need the indices, it's also doable.
library(Rcpp)
cppFunction('
bool hasSequence(LogicalMatrix m) {
int nrow = m.nrow(), ncol = m.ncol();
if (nrow > 0 && ncol > 0) {
int j = 0;
for (int i = 0; i < nrow; i++) {
if (m(i, j)) {
if (++j >= ncol) {
return true;
}
}
}
}
return false;
}')
a <- matrix(c(F, F, T, T, F, T, F, F, F, F,
T, F, T, T, F, T, T, F, F, F,
T, F, T, T, F, F, F, F, T, T), ncol = 3)
a
hasSequence(a)
In order to get also the indices, the following function returns a list, with at least one element (named 'found', true or false) and if found = true, another element, named 'indices':
cppFunction('
List findSequence(LogicalMatrix m) {
int nrow = m.nrow(), ncol = m.ncol();
IntegerVector indices(ncol);
if (nrow > 0 && ncol > 0) {
int j = 0;
for (int i = 0; i < nrow; i++) {
if (m(i, j)) {
indices(j) = i + 1;
if (++j >= ncol) {
return List::create(Named("found") = true,
Named("indices") = indices);
}
}
}
}
return List::create(Named("found") = false);
}')
findSequence(a)
A few links to learn about Rcpp:
High performance functions with Rcpp, Hadley Wickham
Rcpp for everyone, Masaki E. Tsuda
Interfacing R with C/C++, Matteo Fasiolo
Rcpp Gallery - Articles and code examples for the Rcpp package
You have to know at least a bit of C language (preferably C++, but for a basic usage, you can think of Rcpp as C with some magic syntax for R data types). The first link explains the basics of Rcpp types (vectors, matrices and lists, how to allocate, use and return them). The other links are good complements.
If your example is representative, we assume that nrow(m) >> ncol(m). In that case, it would be more efficient to move the iteration from rows to columns:
ff <- function(m) {
  # Column-wise scan: for each column, search only the rows below the
  # previous hit using match(), which is fast when nrow(m) >> ncol(m).
  nr <- nrow(m)
  next_start <- 1
  for (col in seq_len(ncol(m))) {
    if (next_start > nr) {
      return(FALSE)
    }
    # Offset of the first TRUE within the remaining rows, or NA.
    offset <- match(TRUE, m[next_start:nr, col])
    if (is.na(offset)) {
      return(FALSE)
    }
    next_start <- next_start + offset
  }
  TRUE
}
A bit ugly (cause of the <<-), but it will get the job done..
tempval <- 0
lapply(split(m, col(m)), function(x) {
value <- which(x)[which(x) > tempval][1]
tempval <<- value
return(value)
})
# $`1`
# [1] 3
#
# $`2`
# [1] 4
#
# $`3`
# [1] 9
Here a function that focuses on case handling. It's faster than all, hope it's right :)
f <- \(m) {
  # TRUE iff column 1 has a TRUE, column 2 has a TRUE strictly below
  # it, and column 3 has a TRUE strictly below that. Hard-wired to
  # 3-column logical matrices (enforced below).
  stopifnot(dim(m)[2] == 3L)
  e <- nrow(m)
  # First row strictly below `start` holding TRUE in column `col`, or
  # NA_integer_ when there is none (or no room left below `start`).
  find_after <- function(col, start) {
    if (is.na(start) || start >= e) {
      # Fix: the original indexed (start + 1):e even when start == e,
      # which counted downwards and read out of bounds (runtime error
      # when the previous hit was in the last row).
      return(NA_integer_)
    }
    below <- m[(start + 1):e, col]
    if (any(below)) which.max(below) + start else NA_integer_
  }
  x1 <- if (any(xx1 <- m[, 1])) which.max(xx1) else NA_integer_
  x2 <- find_after(2, x1)
  x3 <- find_after(3, x2)
  # Sequence exists iff every column produced a hit.
  !anyNA(c(x1, x2, x3))
}
f(m)
# [1] TRUE
m2 <- m
m2[, 3] <- FALSE
f(m2)
# [1] FALSE
Data:
set.seed(15)
m <- matrix(sample(c(FALSE, TRUE), 30, TRUE), ncol=3)
With accumulate:
purrr::accumulate(apply(m, 2, which), .init = -Inf, ~ min(.y[.y > min(.x)]))[-1]
# or
purrr::accumulate(apply(m, 2, which), .init = -Inf, ~ .y[.y > .x][1])[-1]
# x1 x2 x3
# 3 4 9

Using for loop instead of matrix index in [r]

I have been meaning to rewrite the following code with for loop in R:
x <- sample(1:100, 10)
x[x %% 2 == 0]
#[1] 6 26 72 62 32 86 100
which extracts those elements in vector x that are even number only.
I have come up with the following result in the end, but I believe there are more simple ways of coding this.
x <- sample(1:100, 10)
output <- integer(0)
for(i in seq_along(x)) {
if(x[i] %% 2 == 0) {
output[i] <- x[i]
output <- output[!is.na(output)]
}
}
output
#[1] 6 26 72 62 32 86 100
I would be grateful if you could help me with this.
You can skip the NA removes when adding the new hit to the end of output using length.
output <- integer(0)
for(i in seq_along(x)) {
if(x[i] %% 2 == 0) {
output[length(output) + 1] <- x[i]
}
}
output
Where length(output) gives you the current length of output. By adding 1 you place the new element at the end of output.
Or, as @Roland (thanks!) commented, using c
output <- integer(0)
for(i in seq_along(x)) {
if(x[i] %% 2 == 0) {
output <- c(output, x[i])
}
}
output
c combines output and x[i] to a new vector.
Or preallocate output with x and mark the non hits with NA
output <- x
for(i in seq_along(x)) {
if(x[i] %% 2 != 0) {
output[i] <- NA
}
}
output <- output[!is.na(output)]
output
Benchmarks:
# Candidate implementations of "keep the even elements of x", stored
# unevaluated (alist) so microbenchmark can evaluate each by name.
fun <- alist(
  Vectorized = x[x %% 2 == 0],
  Question = {
    output <- integer(0)
    for (i in seq_along(x)) {
      if (x[i] %% 2 == 0) {
        output[i] <- x[i]
        output <- output[!is.na(output)]
      }
    }
    output
  },
  NaOutside = {
    output <- integer(0)
    for (i in seq_along(x)) {
      if (x[i] %% 2 == 0) {
        output[i] <- x[i]
      }
    }
    output <- output[!is.na(output)]
    output
  },
  Append = {
    output <- integer(0)
    for (i in seq_along(x)) {
      if (x[i] %% 2 == 0) {
        output[length(output) + 1] <- x[i]
      }
    }
    output
  },
  Append2 = {
    output <- integer(0)
    j <- 1
    for (i in seq_along(x)) {
      if (x[i] %% 2 == 0) {
        output[j] <- x[i]
        j <- j + 1
      }
    }
    # Fix: the original was missing this trailing `output` and the
    # closing brace, which made the whole alist() call a syntax error.
    output
  },
  C = {
    output <- integer(0)
    for (i in seq_along(x)) {
      if (x[i] %% 2 == 0) {
        output <- c(output, x[i])
      }
    }
    output
  },
  Preallocate = {
    output <- x
    for (i in seq_along(x)) {
      if (x[i] %% 2 != 0) {
        output[i] <- NA
      }
    }
    output <- output[!is.na(output)]
    output
  }
)
library(microbenchmark)
set.seed(42)
x <- sample(1:100, 10)
microbenchmark(list = fun, control=list(order="block"))
#Unit: nanoseconds
# expr min lq mean median uq max neval
# Vectorized 644 655 781.54 666.5 721 8559 100
# Question 4377648 4419010 4682029.95 4492628.5 4579952 7512162 100
# NaOutside 3443488 3558401 3751542.62 3597273.0 3723662 5146015 100
# Append 3932586 4070234 4287628.11 4129849.0 4209361 6036142 100
# Append2 3966245 4094766 4360989.39 4147847.5 4312868 5899000 100
# C 3464081 3566902 3806531.77 3618758.5 3743058 6528224 100
# Preallocate 3162424 3263220 3435591.92 3290938.0 3374547 4823017 100
set.seed(42)
x <- sample(1:1e5, 1e4)
microbenchmark(list = fun, control=list(order="block"))
#Unit: microseconds
# expr min lq mean median uq max neval
# Vectorized 226.224 276.271 277.0027 278.993 284.4515 345.527 100
# Question 125550.120 126392.848 129287.7202 126812.309 128426.3655 157958.571 100
# NaOutside 6911.053 7020.831 7497.4403 7109.891 8158.7580 8779.448 100
# Append 7843.988 7982.987 8582.6769 8129.988 9287.5760 10775.894 100
# Append2 7647.340 7783.334 8347.7824 7954.683 9007.4500 10325.973 100
# C 27976.747 29776.632 29997.5407 30024.121 30250.9590 51630.868 100
# Preallocate 6119.198 6232.228 6679.9407 6367.618 7290.1015 8331.277 100

Count Trailing and Leading NA for each vector

I have some vectors in the following format
v1 <- c(NA,NA,NA,10,10,10,10)
v2 <- c(NA,NA, 3, 3, 3,NA,NA)
v3 <- c( 5, 5, NA,NA,NA,NA,NA)
For each vector I want to calculate how many leading NAs and trailing NAs.
For v1, LeadNA = 3, TrailNA = 0
For v2, LeadNA = 2, TrailNA = 2
For v3, LeadNA = 0, TrailNA = 5
1) Cumsum - An option would be to create a logical vector with cumsum on the presence of non-NA elements and get the sum (base R - No packages used)
f1 <- function(vec, trail = FALSE) {
  # Count the leading NA values of vec (trailing ones when trail = TRUE,
  # by reversing first). Positions before the first non-NA are exactly
  # those where the running count of non-NA elements is still zero.
  v <- if (trail) rev(vec) else vec
  non_na_seen <- cumsum(!is.na(v))
  sum(non_na_seen == 0)
}
f1(v1)
#[1] 3
f1(v1, TRUE)
#[1] 0
sapply(mget(paste0("v", 1:3)), f1)
# v1 v2 v3
# 3 2 0
sapply(mget(paste0("v", 1:3)), f1, TRUE)
# v1 v2 v3
# 0 2 5
2 rle - Another base R option is rle (No packages are used)
with(rle(is.na(v2)), lengths[values & seq_along(values) %in% c(1, length(values))])
This turns out to be similar to @Bulat's solution
count_nas <- function(x) {
  # Leading/trailing NA counts as c(start_na, end_na). All-NA input is
  # ambiguous (the runs overlap), so it warns and returns NAs instead.
  missing_mask <- is.na(x)
  if (all(missing_mask)) {
    warning('all elements were NA')
    return(c(start_na = NA_integer_, end_na = NA_integer_))
  }
  # which.min() on a logical vector finds the first FALSE, i.e. the
  # first non-NA element; its index minus one is the leading run length.
  c(start_na = which.min(missing_mask) - 1,
    end_na = which.min(rev(missing_mask)) - 1)
}
count_nas(v1)
#start_na end_na
# 3 0
sapply(list(v1,v2,v3), count_nas)
# [,1] [,2] [,3]
#start_na 3 2 0
#end_na 0 2 5
As far as performance goes, this is the fastest method, with @akrun's methods being in the ballpark.
v_test3 <- sample(10000)
v_test3[c(1:3, 9998:10000)] <- NA_integer_
Unit: microseconds
expr min lq mean median uq max neval
akrun_cumsum 175.7 182.15 193.580 186.55 200.80 354.7 100
akrun_rle 168.6 199.25 210.635 209.25 221.00 289.3 100
g_grothen_zoo 1848.5 1904.45 2008.994 1941.40 2001.35 4799.6 100
g_grothen_reduce 12467.3 12888.10 14174.157 13445.15 15054.35 28241.6 100
www_rleid 5357.2 5439.40 5741.471 5517.15 5947.15 8470.4 100
bulat_and_cole 63.5 66.45 73.681 71.25 75.75 96.9 100
Code for reproducibility:
library(microbenchmark)
library(zoo)
library(data.table)
v_test3 <- sample(10000)
v_test3[c(1:3, 9998:10000)] <- NA_integer_
# Count leading and trailing NAs; returns c(start_na, end_na), or NAs
# with a warning when every element of x is NA.
count_nas <- function(x) {
nas <- is.na(x)
if (sum(nas) == length(x)) {
warning('all elements were NA')
return(c(start_na = NA_integer_, end_na = NA_integer_))
}
# which.min() locates the first FALSE, i.e. the first non-NA element;
# reversing x gives the same count from the other end.
c(start_na = which.min(nas) - 1,
end_na = which.min(rev(nas)) - 1)
}
# Count leading/trailing NAs by comparing length(x) with the length of
# x after zoo::na.locf() drops the leading (or, with fromLast = TRUE,
# trailing) NAs. All-NA input returns c(NA, NA).
countNA <- function(x) {
len <- function(fromLast = FALSE) length(na.locf(x, fromLast = fromLast))
if (all(is.na(x))) c(left = NA, right = NA)
else length(x) - c(left = len(), right = len(TRUE))
}
# Count leading NAs (trailing when trail = TRUE): positions before the
# first non-NA are exactly those where cumsum(!is.na(vec)) is still 0.
f1 <- function(vec, trail = FALSE) {
if(trail) {
vec <- rev(vec)
}
sum(!cumsum(!is.na(vec)))
}
# Leading/trailing NA counts via data.table::rleid(): inspect only the
# first and last runs of x and count the NAs they contain (a non-NA
# boundary run contributes 0). Returns an unnamed length-2 vector.
count_fun <- function(x){
y <- rleid(x)
z <- split(x, y)[c(1, length(unique(y)))]
ans <- sapply(z, function(x) sum(is.na(x)))
return(unname(ans))
}
# Count leading and trailing NAs without any packages. Reduce() with
# all() stays TRUE only while every element seen so far is NA, so the
# sum of the accumulated flags is the length of the leading NA run;
# reversing x gives the trailing run.
countNA2 <- function(x) {
  # Fix: the original passed `acc = TRUE`, relying on partial argument
  # matching for `accumulate` — spelled out here.
  f <- function(x) sum(Reduce(all, is.na(x), accumulate = TRUE))
  # All-NA input is ambiguous (both runs overlap): return NAs.
  if (all(is.na(x))) c(left = NA, right = NA)
  else c(left = f(x), right = f(rev(x)))
}
microbenchmark(
akrun_cumsum = {
f1(v_test3, TRUE)
f1(v_test3, FALSE)
}
,
akrun_rle = {
with(rle(is.na(v_test3)), lengths[values & seq_along(values) %in% c(1, length(values))])
}
,
g_grothen_zoo = {
countNA(v_test3)
}
,
g_grothen_reduce = {
countNA2(v_test3)
}
,
www_rleid = {
count_fun(v_test3)
}
,
bulat_and_cole = {
count_nas(v_test3)
}
)
Wrapper over which.max:
# Count the leading NAs of x.
leading.nas <- function(x) {
  if (length(x) == 0) {
    0L
  } else if (all(is.na(x))) {
    # Guard: which.max() below returns 1 on an all-FALSE vector, which
    # would wrongly report 0 leading NAs for all-NA input.
    length(x)
  } else {
    # Index of the first non-NA element, minus one. Fix: the original
    # used which.min(!is.na(x)), which finds the first NA instead and
    # therefore returned the count of leading *non*-NAs.
    which.max(!is.na(x)) - 1L
  }
}
A function returns two numbers. The first is the count of leading NA. The second is the count of trailing NA. It requires the rleid function from the data.table package.
library(data.table)
# Leading/trailing NA counts using data.table::rleid(): keep only the
# first and last runs of x and count their NAs (0 for a non-NA run).
count_fun <- function(x){
y <- rleid(x)
z <- split(x, y)[c(1, length(unique(y)))]
ans <- sapply(z, function(x) sum(is.na(x)))
return(unname(ans))
}
count_fun(v1)
# [1] 3 0
count_fun(v2)
# [1] 2 2
count_fun(v3)
# [1] 0 5
1) na.locf Remove the leading NAs using na.locf and determine the difference in length between the original and reduced vector. Do the same for the trailing NAs. It is not clear what should be returned if the input vector is empty or all NAs so we return NA for both left and right.
library(zoo)
# Count leading/trailing NAs as the length difference between x and
# zoo::na.locf(x), which drops leading (fromLast = TRUE: trailing) NAs.
# Returns c(left = NA, right = NA) for all-NA input.
countNA <- function(x) {
len <- function(fromLast = FALSE) length(na.locf(x, fromLast = fromLast))
if (all(is.na(x))) c(left = NA, right = NA)
else length(x) - c(left = len(), right = len(TRUE))
}
countNA(v1)
## left right
## 3 0
countNA(v2)
## left right
## 2 2
countNA(v3)
## left right
## 0 5
It would also be possible to use na.fill to perform this calculation.
2) Reduce A second approach is to use Reduce. It gives the same answer. No packages are used.
# Base-R leading/trailing NA counter: Reduce(all, is.na(x)) stays TRUE
# only while every element seen so far is NA, so summing the
# accumulated flags yields the leading run length.
countNA2 <- function(x) {
  # Fix: `acc = TRUE` relied on partial argument matching for
  # `accumulate` — spelled out here.
  f <- function(x) sum(Reduce(all, is.na(x), accumulate = TRUE))
  if (all(is.na(x))) c(left = NA, right = NA)
  else c(left = f(x), right = f(rev(x)))
}

calculate min and max use for loops in R

i want to make a program which calculates min and max of a vector using for loops , here is my code
d <- c(34,67,123,554,76)
m<-0
e<-0
for(i in d) { if(d>i) { m<-d[i]
print("max")
} else if (d>i) { e<-d[i]
print("min")
}}
and gives me
[1] "max"
...
what's wrong ?
You can use all() to test the vector d against a single element of it:
d <- c(34,67,123,554,76)
m<-0
e<-0
for(i in d){
if(all(d <= i)){
m <- i
print(paste0("max = ", m))
}else if(all(d >= i)) {
e <- i
print(paste0("min = ", e))
}}
[1] "min = 34"
[1] "max = 554"
The reason for error/warning, you need to compare m and e to i not to full vector d.
Something like this:
# example input
d <- c(34,67,123,554,76)
foo <- function(x) {
  # Single-pass min/max without min()/max(): seed both extremes with
  # the first element, then update them while walking the rest of x.
  lo <- hi <- x[1]
  for (val in x[-1]) {
    if (val < lo) lo <- val
    if (val > hi) hi <- val
  }
  # Named result, in the same order as the original: c(Min, Max).
  c(Min = lo, Max = hi)
}
foo(d)
# Min Max
# 34 554

Get indexes of a vector of numbers in another vector

Let's suppose we have the following vector:
v <- c(2,2,3,5,8,0,32,1,3,12,5,2,3,5,8,33,1)
Given a sequence of numbers, for instance c(2,3,5,8), I am trying to find what the position of this sequence of numbers is in the vector v. The result I expect is something like:
FALSE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE FALSE FALSE
I am trying to use which(v == c(2,3,5,8)), but it doesn't give me what I am looking for.
Using base R you could do the following:
v <- c(2,2,3,5,8,0,32,1,3,12,5,2,3,5,8,33,1)
x <- c(2,3,5,8)
idx <- which(v == x[1])
idx[sapply(idx, function(i) all(v[i:(i+(length(x)-1))] == x))]
# [1] 2 12
This tells you that the exact sequence appears twice, starting at positions 2 and 12 of your vector v.
It first checks the possible starting positions, i.e. where v equals the first value of x and then loops through these positions to check if the values after these positions also equal the other values of x.
Two other approaches using the shift function from data.table:
library(data.table)
# option 1
which(rowSums(mapply('==',
shift(v, type = 'lead', n = 0:(length(x) - 1)),
x)
) == length(x))
# option 2
which(Reduce("+", Map('==',
shift(v, type = 'lead', n = 0:(length(x) - 1)),
x)
) == length(x))
both give:
[1] 2 12
To get a full vector of the matching positions:
l <- length(x)
w <- which(Reduce("+", Map('==',
shift(v, type = 'lead', n = 0:(l - 1)),
x)
) == l)
rep(w, each = l) + 0:(l-1)
which gives:
[1] 2 3 4 5 12 13 14 15
The benchmark which was included earlier in this answer has been moved to a separate community wiki answer.
Used data:
v <- c(2,2,3,5,8,0,32,1,3,12,5,2,3,5,8,33,1)
x <- c(2,3,5,8)
You can use rollapply() from zoo
v <- c(2,2,3,5,8,0,32,1,3,12,5,2,3,5,8,33,1)
x <- c(2,3,5,8)
library("zoo")
searchX <- function(x, X) all(x==X)
rollapply(v, FUN=searchX, X=x, width=length(x))
The result TRUE shows you the beginning of the sequence.
The code could be simplified to rollapply(v, length(x), identical, x) (thanks to G. Grothendieck):
set.seed(2)
vl <- as.numeric(sample(1:10, 1e6, TRUE))
# vm <- vl[1:1e5]
# vs <- vl[1:1e4]
x <- c(2,3,5)
library("zoo")
searchX <- function(x, X) all(x==X)
i1 <- rollapply(vl, FUN=searchX, X=x, width=length(x))
i2 <- rollapply(vl, width=length(x), identical, y=x)
identical(i1, i2)
For using identical() both arguments must be of the same type (num and int are not the same).
If needed == coerces int to num; identical() does not any coercion.
I feel like looping should be efficient:
w = seq_along(v)
for (i in seq_along(x)) w = w[v[w+i-1L] == x[i]]
w
# [1] 2 12
This should be writable in C++ following @SymbolixAU's approach for extra speed.
A basic comparison:
# create functions for selected approaches
# Start positions of sequence x in v: build length(x) lead-shifted
# copies of v (data.table::shift), compare each against the matching
# element of x, and keep positions where all comparisons hold.
redjaap <- function(v,x)
which(Reduce("+", Map('==', shift(v, type = 'lead', n = 0:(length(x) - 1)), x)) == length(x))
loop <- function(v, x) {
  # Iteratively narrow the candidate start positions: after pass k,
  # position w survives iff v[w + k - 1] == x[k]. What remains are the
  # starts of full matches of x in v.
  candidates <- seq_along(v)
  for (k in seq_along(x)) {
    candidates <- candidates[v[candidates + k - 1L] == x[k]]
  }
  candidates
}
# check consistency
identical(redjaap(v,x), loop(v,x))
# [1] TRUE
# check speed
library(microbenchmark)
vv <- rep(v, 1e4)
microbenchmark(redjaap(vv,x), loop(vv,x), times = 100)
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# redjaap(vv, x) 5.883809 8.058230 17.225899 9.080246 9.907514 96.35226 100 b
# loop(vv, x) 3.629213 5.080816 9.475016 5.578508 6.495105 112.61242 100 a
# check consistency again
identical(redjaap(vv,x), loop(vv,x))
# [1] TRUE
Here are two Rcpp solutions. The first one returns the location of v that is the starting position of the sequence.
library(Rcpp)
v <- c(2,2,3,5,8,0,32,1,3,12,5,2,3,5,8,33,1)
x <- c(2,3,5,8)
cppFunction('NumericVector SeqInVec(NumericVector myVector, NumericVector mySequence) {
int vecSize = myVector.size();
int seqSize = mySequence.size();
NumericVector comparison(seqSize);
NumericVector res(vecSize);
for (int i = 0; i < vecSize; i++ ) {
for (int j = 0; j < seqSize; j++ ) {
comparison[j] = mySequence[j] == myVector[i + j];
}
if (sum(comparison) == seqSize) {
res[i] = 1;
}else{
res[i] = 0;
}
}
return res;
}')
SeqInVec(v, x)
#[1] 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0
This second one returns the index values (as per the other answers) of every matched entry in the sequence.
cppFunction('NumericVector SeqInVec(NumericVector myVector, NumericVector mySequence) {
int vecSize = myVector.size();
int seqSize = mySequence.size();
NumericVector comparison(seqSize);
NumericVector res(vecSize);
int foundCounter = 0;
for (int i = 0; i < vecSize; i++ ) {
for (int j = 0; j < seqSize; j++ ) {
comparison[j] = mySequence[j] == myVector[i + j];
}
if (sum(comparison) == seqSize) {
for (int j = 0; j < seqSize; j++ ) {
res[foundCounter] = i + j + 1;
foundCounter++;
}
}
}
IntegerVector idx = seq(0, (foundCounter-1));
return res[idx];
}')
SeqInVec(v, x)
# [1] 2 3 4 5 12 13 14 15
Optimising
As @MichaelChirico points out in their comment, further optimisations can be made. For example, if we know the first entry in the sequence doesn't match a value in the vector, we don't need to do the rest of the comparison
cppFunction('NumericVector SeqInVecOpt(NumericVector myVector, NumericVector mySequence) {
int vecSize = myVector.size();
int seqSize = mySequence.size();
NumericVector comparison(seqSize);
NumericVector res(vecSize);
int foundCounter = 0;
for (int i = 0; i < vecSize; i++ ) {
if (myVector[i] == mySequence[0]) {
for (int j = 0; j < seqSize; j++ ) {
comparison[j] = mySequence[j] == myVector[i + j];
}
if (sum(comparison) == seqSize) {
for (int j = 0; j < seqSize; j++ ) {
res[foundCounter] = i + j + 1;
foundCounter++;
}
}
}
}
IntegerVector idx = seq(0, (foundCounter-1));
return res[idx];
}')
The answer with benchmarks shows the performance of these approaches
A benchmark on the posted answers:
Load the needed packages:
library(data.table)
library(microbenchmark)
library(Rcpp)
library(zoo)
Creating vector with which the benchmarks will be run:
set.seed(2)
vl <- sample(1:10, 1e6, TRUE)
vm <- vl[1:1e5]
vs <- vl[1:1e4]
x <- c(2,3,5)
Testing whether all solution give the same outcome on the small vector vs:
> all.equal(jaap1(vs,x), jaap2(vs,x))
[1] TRUE
> all.equal(jaap1(vs,x), docendo(vs,x))
[1] TRUE
> all.equal(jaap1(vs,x), a5c1(vs,x))
[1] TRUE
> all.equal(jaap1(vs,x), jogo1(vs,x))
[1] TRUE
> all.equal(jaap1(vs,x), moody(vs,x))
[1] "Numeric: lengths (24, 873) differ"
> all.equal(jaap1(vs,x), cata1(vs,x))
[1] "Numeric: lengths (24, 0) differ"
> all.equal(jaap1(vs,x), u989(vs,x))
[1] TRUE
> all.equal(jaap1(vs,x), frank(vs,x))
[1] TRUE
> all.equal(jaap1(vs,x), symb(vs,x))
[1] TRUE
> all.equal(jaap1(vs, x), symbOpt(vs, x))
[1] TRUE
Further inspection of the cata1 and moody solutions learns that they don't give the desired output. They are therefore not included in the benchmarks.
The benchmark for the smallest vector vs:
mbs <- microbenchmark(jaap1(vs,x), jaap2(vs,x), docendo(vs,x), a5c1(vs,x),
jogo1(vs,x), u989(vs,x), frank(vs,x), symb(vs,x), symbOpt(vs, x),
times = 100)
gives:
print(mbs, order = "median")
Unit: microseconds
expr min lq mean median uq max neval
symbOpt(vs, x) 40.658 47.0565 78.47119 51.5220 56.2765 2170.708 100
symb(vs, x) 106.208 112.7885 151.76398 117.0655 123.7450 1976.360 100
frank(vs, x) 121.303 129.0515 203.13616 132.1115 137.9370 6193.837 100
jaap2(vs, x) 187.973 218.7805 322.98300 235.0535 255.2275 6287.548 100
jaap1(vs, x) 306.944 341.4055 452.32426 358.2600 387.7105 6376.805 100
a5c1(vs, x) 463.721 500.9465 628.13475 516.2845 553.2765 6179.304 100
docendo(vs, x) 1139.689 1244.0555 1399.88150 1313.6295 1363.3480 9516.529 100
u989(vs, x) 8048.969 8244.9570 8735.97523 8627.8335 8858.7075 18732.750 100
jogo1(vs, x) 40022.406 42208.4870 44927.58872 43733.8935 45008.0360 124496.190 100
The benchmark for the medium vector vm:
mbm <- microbenchmark(jaap1(vm,x), jaap2(vm,x), docendo(vm,x), a5c1(vm,x),
jogo1(vm,x), u989(vm,x), frank(vm,x), symb(vm,x), symbOpt(vm, x),
times = 100)
gives:
print(mbm, order = "median")
Unit: microseconds
expr min lq mean median uq max neval
symbOpt(vm, x) 357.452 405.0415 974.9058 763.0205 1067.803 7444.126 100
symb(vm, x) 1032.915 1117.7585 1923.4040 1422.1930 1753.044 17498.132 100
frank(vm, x) 1158.744 1470.8170 1829.8024 1826.1330 1935.641 6423.966 100
jaap2(vm, x) 1622.183 2872.7725 3798.6536 3147.7895 3680.954 14886.765 100
jaap1(vm, x) 3053.024 4729.6115 7325.3753 5607.8395 6682.814 87151.774 100
a5c1(vm, x) 5487.547 7458.2025 9612.5545 8137.1255 9420.684 88798.914 100
docendo(vm, x) 10780.920 11357.7440 13313.6269 12029.1720 13411.026 21984.294 100
u989(vm, x) 83518.898 84999.6890 88537.9931 87675.3260 90636.674 105681.313 100
jogo1(vm, x) 471753.735 512979.3840 537232.7003 534780.8050 556866.124 646810.092 100
The benchmark for the largest vector vl:
mbl <- microbenchmark(jaap1(vl,x), jaap2(vl,x), docendo(vl,x), a5c1(vl,x),
jogo1(vl,x), u989(vl,x), frank(vl,x), symb(vl,x), symbOpt(vl, x),
times = 100)
gives:
print(mbl, order = "median")
Unit: milliseconds
expr min lq mean median uq max neval
symbOpt(vl, x) 4.679646 5.768531 12.30079 6.67608 11.67082 118.3467 100
symb(vl, x) 11.356392 12.656124 21.27423 13.74856 18.66955 149.9840 100
frank(vl, x) 13.523963 14.929656 22.70959 17.53589 22.04182 132.6248 100
jaap2(vl, x) 18.754847 24.968511 37.89915 29.78309 36.47700 145.3471 100
jaap1(vl, x) 37.047549 52.500684 95.28392 72.89496 138.55008 234.8694 100
a5c1(vl, x) 54.563389 76.704769 116.89269 89.53974 167.19679 248.9265 100
docendo(vl, x) 109.824281 124.631557 156.60513 129.64958 145.47547 296.0214 100
u989(vl, x) 1380.886338 1413.878029 1454.50502 1436.18430 1479.18934 1632.3281 100
jogo1(vl, x) 4067.106897 4339.005951 4472.46318 4454.89297 4563.08310 5114.4626 100
The used functions of each solution:
# All matched positions (not just starts) of x in v: rowSums over the
# matrix of lead-shifted copies of v (data.table::shift) finds start
# positions, which rep(...) + 0:(l-1) expands to full match spans.
jaap1 <- function(v,x) {
l <- length(x);
w <- which(rowSums(mapply('==', shift(v, type = 'lead', n = 0:(length(x) - 1)), x) ) == length(x));
rep(w, each = l) + 0:(l-1)
}
# Same idea as jaap1, but combining the shifted comparisons with
# Reduce("+") instead of materialising a matrix for rowSums.
jaap2 <- function(v,x) {
l <- length(x);
w <- which(Reduce("+", Map('==', shift(v, type = 'lead', n = 0:(length(x) - 1)), x)) == length(x));
rep(w, each = l) + 0:(l-1)
}
# All matched positions of x in v: candidate starts are positions where
# x[1] matches; each candidate is confirmed by a full window comparison
# and then expanded to cover its whole span.
docendo <- function(v, x) {
  l <- length(x)
  idx <- which(v == x[1])
  # Fix: vapply (not sapply) so zero candidates yields logical(0)
  # instead of list() (which made the subsetting below error), and
  # isTRUE() rejects windows truncated at the end of v, whose NA
  # comparisons previously propagated NA into the result.
  ok <- vapply(idx, function(i) isTRUE(all(v[i:(i + l - 1L)] == x)), logical(1))
  w <- idx[ok]
  # Expand every start to the full index span of its match.
  rep(w, each = l) + 0:(l - 1)
}
a5c1 <- function(v, x) {
  len <- length(x)
  # embed(v, len)[, len:1] lists every length-len window of v in
  # forward order, one window per row; transpose so windows become
  # columns and x recycles down each column for the comparison.
  windows <- t(embed(v, len)[, len:1])
  starts <- which(colSums(windows == x) == len)
  # Expand the matching starts to the full span of each match.
  rep(starts, each = len) + 0:(len - 1)
}
# All matched positions via zoo::rollapply: test every length(x) window
# of v for elementwise equality with x, then expand matching starts.
jogo1 <- function(v,x) {
l <- length(x);
searchX <- function(x, X) all(x==X);
w <- which(rollapply(v, FUN=searchX, X=x, width=l));
rep(w, each = l) + 0:(l-1)
}
# Recode v to positions within x (non-members become length(x) + 1) and
# look for unit steps in the recoded sequence.
# NOTE(review): per the all.equal checks above, this does NOT reproduce
# the reference output (lengths differ); kept verbatim for the record.
moody <- function(v,x) {
l <- length(x);
v2 <- as.numeric(factor(c(v,NA),levels = x));
v2[is.na(v2)] <- l+1;
which(diff(v2) == 1)
}
# Compare every window of v against x using identical().
# NOTE(review): per the all.equal checks above, this does NOT reproduce
# the reference output — identical() is type-sensitive where == would
# coerce, and seq(length(v) - l) appears to stop one window short of the
# last possible start; kept verbatim for the record.
cata1 <- function(v,x) {
l <- length(x);
w <- which(sapply(lapply(seq(length(v)-l)-1, function(i) v[seq(x)+i]), identical, x));
rep(w, each = l) + 0:(l-1)
}
# Regex approach: flatten v into a "-"-separated string, locate the
# pattern built from x with gregexpr (word boundaries avoid partial
# digit matches), then convert character offsets back to element
# positions by counting "-"-separated tokens in the leading substrings.
u989 <- function(v,x) {
l <- length(x);
s <- paste(v, collapse = '-');
p <- paste0('\\b', paste(x, collapse = '-'), '\\b');
i <- c(1, unlist(gregexpr(p, s)));
m <- substring(s, head(i,-1), tail(i,-1));
ln <- lengths(strsplit(m, '-'));
# Token counts accumulate into element positions of the match starts.
w <- cumsum(c(ln[1], ln[-1]-1));
rep(w, each = l) + 0:(l-1)
}
frank <- function(v, x) {
  len <- length(x)
  # Successively narrow candidate start positions: after pass j only
  # positions whose j-th element equals x[j] survive.
  cand <- seq_along(v)
  for (j in seq_along(x)) {
    cand <- cand[v[cand + j - 1L] == x[j]]
  }
  # Expand each surviving start to cover its whole match.
  rep(cand, each = len) + 0:(len - 1)
}
# Rcpp: return the 1-based indices of every element of every occurrence of
# mySequence inside myVector.
#
# Fixes vs. the original: (1) the scan stops seqSize elements before the end
# so myVector[i + j] never reads out of bounds; (2) results are collected in
# a growable std::vector, so overlapping matches cannot overflow a
# fixed-size buffer; (3) when nothing matches, an empty vector is returned
# instead of indexing with the malformed seq(0, -1).
cppFunction('NumericVector SeqInVec(NumericVector myVector, NumericVector mySequence) {
  int vecSize = myVector.size();
  int seqSize = mySequence.size();
  std::vector<double> res;
  for (int i = 0; i + seqSize <= vecSize; i++) {
    bool match = true;
    for (int j = 0; j < seqSize; j++) {
      if (mySequence[j] != myVector[i + j]) { match = false; break; }
    }
    if (match) {
      for (int j = 0; j < seqSize; j++) {
        res.push_back(i + j + 1);  // convert to 1-based R index
      }
    }
  }
  return wrap(res);  // empty numeric vector when there is no match
}')
# Plain R wrapper so the benchmark calls it like the other solutions.
symb <- function(v,x) {SeqInVec(v, x)}
# Rcpp: optimized variant of SeqInVec with a cheap first-element pre-check
# before comparing the full window.
#
# Same fixes as SeqInVec: bounds-safe scan range, growable result buffer,
# and an empty return value when no match is found.
cppFunction('NumericVector SeqInVecOpt(NumericVector myVector, NumericVector mySequence) {
  int vecSize = myVector.size();
  int seqSize = mySequence.size();
  std::vector<double> res;
  for (int i = 0; i + seqSize <= vecSize; i++) {
    if (myVector[i] != mySequence[0]) continue;
    bool match = true;
    for (int j = 1; j < seqSize; j++) {
      if (mySequence[j] != myVector[i + j]) { match = false; break; }
    }
    if (match) {
      for (int j = 0; j < seqSize; j++) {
        res.push_back(i + j + 1);  // 1-based R indices
      }
    }
  }
  return wrap(res);
}')
# Plain R wrapper so the benchmark calls it like the other solutions.
symbOpt <- function(v,x) {SeqInVecOpt(v,x)}
Since this is a community-wiki answer, I'll add my own benchmark of some of the answers.
library(data.table)
library(microbenchmark)
set.seed(2); v <- sample(1:100, 5e7, TRUE); x <- c(2,3,5)
jaap1 <- function(v, x) {
  # Start positions only: positions where all lead-shifted copies of v
  # simultaneously equal the corresponding element of x.
  len <- length(x)
  shifted <- shift(v, type = "lead", n = 0:(len - 1))
  which(rowSums(mapply("==", shifted, x)) == len)
}
jaap2 <- function(v, x) {
  # Start positions only; Reduce("+") over Map instead of rowSums/mapply.
  len <- length(x)
  shifted <- shift(v, type = "lead", n = 0:(len - 1))
  which(Reduce("+", Map("==", shifted, x)) == len)
}
dd1 <- function(v, x) {
  # Start positions of exact occurrences of x in v.
  #
  # Fix: guard the window bound so candidates near the end of v cannot
  # produce NA (the original returned NA start positions for trailing
  # partial matches); vapply replaces sapply for a type-stable result.
  idx <- which(v == x[1])
  idx[vapply(idx,
             function(i) i + length(x) - 1L <= length(v) &&
               all(v[i:(i + length(x) - 1L)] == x),
             logical(1))]
}
dd2 <- function(v, x) {
  # Like dd1 but with loop-invariant lengths hoisted out of the closure
  # (this is the point of the dd1/dd2 benchmark comparison).
  #
  # Fix: same bound guard as dd1 so trailing partial matches cannot
  # produce NA start positions.
  idx <- which(v == x[1L])
  xl <- length(x) - 1L
  n <- length(v)
  idx[vapply(idx,
             function(i) i + xl <= n && all(v[i:(i + xl)] == x),
             logical(1))]
}
frank <- function(v, x) {
  # Iteratively narrow candidate start positions; after pass i only starts
  # whose first i elements match x survive.
  #
  # Fix: wrap the comparison in which() so NA comparisons from windows that
  # run past the end of v are dropped instead of becoming NA indices.
  w <- seq_along(v)
  for (i in seq_along(x)) {
    w <- w[which(v[w + i - 1L] == x[i])]
  }
  w
}
# Sanity checks: every implementation must agree with dd1 before timing.
all.equal(jaap1(v, x), dd1(v, x))
all.equal(jaap2(v, x), dd1(v, x))
all.equal(dd2(v, x), dd1(v, x))
all.equal(frank(v, x), dd1(v, x))
# Relative timings over 25 runs; the fastest method is the 1.0 baseline.
bm <- microbenchmark(jaap1(v, x), jaap2(v, x), dd1(v, x), dd2(v, x), frank(v, x),
unit = "relative", times = 25)
plot(bm)
bm
Unit: relative
expr min lq mean median uq max neval
jaap1(v, x) 4.487360 4.591961 4.724153 4.870226 4.660023 3.9361093 25
jaap2(v, x) 2.026052 2.159902 2.116204 2.282644 2.138106 2.1133068 25
dd1(v, x) 1.078059 1.151530 1.119067 1.257337 1.201762 0.8646835 25
dd2(v, x) 1.000000 1.000000 1.000000 1.000000 1.000000 1.0000000 25
frank(v, x) 1.400735 1.376405 1.442887 1.427433 1.611672 1.3440097 25
Bottom line: without knowing the real data, all these benchmarks don't tell the whole story.
Here's a solution that leverages binary search on secondary indices in data.table. (Great vignette here)
This method has quite a bit of overhead so it's not particularly competitive on the 1e4 length vector in the benchmark, but it hangs near the top of the pack as the size increases.
# data.table solution: materialize lead-shifted columns Seq_0..Seq_(l-1) so
# each row of DT holds one full window of v, then binary-join on all window
# columns (secondary indices) to find the rows equal to x.
matt <- function(v,x){
  l <- length(x)
  SL <- seq_len(l - 1)
  DT <- data.table(Seq_0 = v)
  # set() accepts the target column name directly; the original wrapped it
  # in eval(), which is a no-op on a character scalar.
  for (i in SL) {
    set(DT, j = paste0("Seq_", i),
        value = shift(DT[["Seq_0"]], n = i, type = "lead"))
  }
  w <- DT[as.list(x), on = paste0("Seq_", c(0L, SL)), which = TRUE]
  rep(w, each = l) + 0:(l - 1)
}
Benchmarking
library(data.table)
library(microbenchmark)
library(Rcpp)
library(zoo)
# Reproducible inputs: one large vector plus prefixes of it, and the pattern.
set.seed(2)
vl <- sample(1:10, 1e6, TRUE)
vm <- vl[1:1e5]
vs <- vl[1:1e4]
x <- c(2,3,5)
Vector Length 1e4
Unit: microseconds
expr min lq mean median uq max neval
symb(vs, x) 138.342 143.048 161.6681 153.1545 159.269 259.999 10
frank(vs, x) 176.634 184.129 198.8060 193.2850 200.701 257.050 10
jaap2(vs, x) 282.231 299.025 342.5323 316.5185 337.760 524.212 10
jaap1(vs, x) 490.013 528.123 568.6168 538.7595 547.268 731.340 10
a5c1(vs, x) 706.450 742.270 751.3092 756.2075 758.859 793.446 10
dd2(vs, x) 1319.098 1348.082 2061.5579 1363.2265 1497.960 7913.383 10
docendo(vs, x) 1427.768 1459.484 1536.6439 1546.2135 1595.858 1696.070 10
dd1(vs, x) 1377.502 1406.272 2217.2382 1552.5030 1706.131 8084.474 10
matt(vs, x) 1928.418 2041.597 2390.6227 2087.6335 2430.470 4762.909 10
u989(vs, x) 8720.330 8821.987 8935.7188 8882.0190 9106.705 9163.967 10
jogo1(vs, x) 47123.615 47536.700 49158.2600 48449.2390 50957.035 52496.981 10
Vector Length 1e5
Unit: milliseconds
expr min lq mean median uq max neval
symb(vm, x) 1.319921 1.378801 1.464972 1.423782 1.577006 1.682156 10
frank(vm, x) 1.671155 1.739507 1.806548 1.760738 1.844893 2.097404 10
jaap2(vm, x) 2.298449 2.380281 2.683813 2.432373 2.566581 4.310258 10
matt(vm, x) 3.195048 3.495247 3.577080 3.607060 3.687222 3.844508 10
jaap1(vm, x) 4.079117 4.179975 4.776989 4.496603 5.206452 6.295954 10
a5c1(vm, x) 6.488621 6.617709 7.366226 6.720107 6.877529 12.500510 10
dd2(vm, x) 12.595699 12.812876 14.990739 14.058098 16.758380 20.743506 10
docendo(vm, x) 13.635357 13.999721 15.296075 14.729947 16.151790 18.541582 10
dd1(vm, x) 13.474589 14.177410 15.676348 15.446635 17.150199 19.085379 10
u989(vm, x) 94.844298 95.026733 96.309658 95.134400 97.460869 100.536654 10
jogo1(vm, x) 575.230741 581.654544 621.824297 616.474265 628.267155 723.010738 10
Vector Length 1e6
Unit: milliseconds
expr min lq mean median uq max neval
symb(vl, x) 13.34294 13.55564 14.01556 13.61847 14.78210 15.26076 10
frank(vl, x) 17.35628 17.45602 18.62781 17.56914 17.88896 25.38812 10
matt(vl, x) 20.79867 21.07157 22.41467 21.23878 22.56063 27.12909 10
jaap2(vl, x) 22.81464 22.92414 22.96956 22.99085 23.02558 23.10124 10
jaap1(vl, x) 40.00971 40.46594 43.01407 41.03370 42.81724 55.90530 10
a5c1(vl, x) 65.39460 65.97406 69.27288 66.28000 66.72847 83.77490 10
dd2(vl, x) 127.47617 132.99154 161.85129 134.63168 157.40028 342.37526 10
dd1(vl, x) 140.06140 145.45085 154.88780 154.23280 161.90710 171.60294 10
docendo(vl, x) 147.07644 151.58861 162.20522 162.49216 165.49513 183.64135 10
u989(vl, x) 2022.64476 2041.55442 2055.86929 2054.92627 2066.26187 2088.71411 10
jogo1(vl, x) 5563.31171 5632.17506 5863.56265 5872.61793 6016.62838 6244.63205 10
Here is a string-based approach in base R:
# Serialize the vector as a dash-separated string.
str <- paste(v, collapse = '-')
# "2-2-3-5-8-0-32-1-3-12-5-2-3-5-8-33-1"
# Word-boundary anchors (\b) prevent matching inside multi-digit numbers.
pattern <- paste0('\\b', paste(x, collapse = '-'), '\\b')
# "\\b2-3-5-8\\b"
inds <- unlist(gregexpr(pattern, str)) # (1)
# 3 25
# Count dash-separated tokens up to each character offset to recover indices.
sapply(inds, function(i) lengths(strsplit(substr(str, 1, i),'-'))) # (2)
# [1] 2 12
\b is a word-boundary anchor: it ensures the pattern matches whole numbers only, so e.g. "2-3-5-8" cannot match inside "12-3-5-8".
(1) Finds the positions at which pattern is seen within str.
(2) Getting back the respective indices within the original vector v.
As for running-time efficiency, here is a much faster solution than my first solution:
# Faster variant: split the string at the match offsets once, then count
# tokens per chunk and cumulate instead of re-splitting a growing prefix.
str <- paste(v, collapse = '-')
pattern <- paste0('\\b', paste(x, collapse = '-'), '\\b')
# Prepend 1 so the first chunk spans from the start of the string.
inds <- c(1, unlist(gregexpr(pattern, str)))
m <- substring(str, head(inds,-1), tail(inds,-1))
# Tokens per chunk; chunks after the first share one boundary token.
ln <- lengths(strsplit(m, '-'))
cumsum(c(ln[1], ln[-1]-1))
Note: This doesn't always give the desired output
We can convert v to factors and keep only consecutive values in our transformed vector:
# Recode v as positions within x; values not in x become NA (the trailing
# NA pads the diff() below to full length).
v2 <- as.numeric(factor(c(v,NA),levels = x)) # [1] 1 1 2 3 4 NA NA NA ...
# Replace NA with a sentinel one past the last position of x.
v2[is.na(v2)] <- length(x)+1 # [1] 1 1 2 3 4 5 5 5 ...
# TRUE wherever consecutive codes rise by exactly one step.
output <- diff(v2) ==1
# [1] FALSE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE FALSE FALSE
Data
# Example inputs: search for the sub-sequence x inside v.
v <- c(2,2,3,5,8,0,32,1,3,12,5,2,3,5,8,33,1)
x <- c(2,3,5,8)
I have modified talat's solution because I found it didn't work in all scenarios.
Firstly, the step idx[sapply(idx, function(i) all(v[i:(i+(length(x)-1))] == x))] produces NAs whenever v[i:(i+(length(x)-1))] == x contains NAs but no FALSE (which happens when a candidate window runs past the end of v). Secondly, to match the desired outcome I have used the matched indices to build the final logical vector.
seq_detect <- function(v, x) {
  # Return a logical vector the length of v, TRUE at every position that is
  # part of an exact occurrence of the sub-sequence x.
  #
  # Fixes vs. the original: (1) the match window was marked with a
  # hard-coded `+ 3`, which is only correct when length(x) == 4 — use the
  # pattern length; (2) na.rm in the early-exit test so an NA in v cannot
  # make if() fail; (3) bound-check candidate windows up front instead of
  # filtering NA afterwards; (4) seq_along(1:length(...)) simplified away.
  n <- length(v)
  l <- length(x)
  out <- vector(length = n)  # logical template, all FALSE
  # Early return when the first pattern element never occurs.
  if (!any(v == x[1], na.rm = TRUE)) {
    return(out)
  }
  # Candidate starts, kept only when the full window fits and matches x.
  idx <- which(v == x[1])
  starts <- idx[vapply(idx,
                       function(i) i + l - 1L <= n &&
                         all(v[i:(i + l - 1L)] == x),
                       logical(1))]
  # Flip every matched window to TRUE.
  for (s in starts) {
    out[s:(s + l - 1L)] <- TRUE
  }
  out
}
#This works on both of the following pairs of vectors, whereas talat's
#solution gives an incorrect (NA-containing) result for v1 and x1 due to indexing issues.
v <- c(2, 2, 3, 5, 8, 0, 32, 1, 3, 12, 5, 2, 3, 5, 8, 33, 1)
x <- c(2, 3, 5, 8)
[1] FALSE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE FALSE FALSE
v1 <- c(1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1)
x1 <- c(1, 2, 2, 1)
[1] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE

Resources