Run length sequence by time and ID in R

This problem does not seem to have been asked here before.
I want to find the number of subjects that score 1 for 6 consecutive hours.
The subjects have not been scored every hour, so if an hour is missing the hours are not consecutive, and the output for that 6-hour period should be NA.
The reason for assigning NA is that we do not know how the subject scored in the missing hour. This approach can be used to count consecutive hits, but to count them only when the subject actually participated.
My dataframe looks like this:
ID<-c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2)
hour<-c(1,2,3,7,8,9,10,11,12,17,18,19,1,2,3,4,5,6,8,9,15)
A<-c(0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1)
df<-data.frame(ID,hour,A)
I have tried to use the rle function (I am sure it's possible), but I cannot get it to condition on both hour and ID.
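For example, rle on A alone returns runs that ignore both the ID boundaries and the gaps in hour (the final run of 9 ones belongs to subject 2 but spans the missing hours 7 and 10-14):
rle(df$A)
# Run Length Encoding
#   lengths: int [1:6] 1 1 1 6 3 9
#   values : num [1:6] 0 1 0 1 0 1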
The output would be like this:
ID<-c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2)
hour<-c(1,2,3,7,8,9,10,11,12,17,18,19,1,2,3,4,5,6,8,9,15)
A<-c(0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1)
six<-c(NA,NA,NA,0,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,1,NA,NA,NA)
df<-data.frame(ID,hour,A,six)
Thank you in advance.
I believe the original data set I gave was too small to make the solutions generalizable.
I tried the posted code with the data set below and found that it gives a wrong result.
ID<-c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4)
hour<-c(1,2,3,7,8,9,10,12,13,17,18,19,1,2,3,4,5,6,8,9,15,1:23,27,28,29,30,31)
A<-c(0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,rep(1,28))
df<-data.frame(ID,hour,A)
For the new dataset the output should be:
ID<-c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4)
hour<-c(1,2,3,7,8,9,10,12,13,17,18,19,1,2,3,4,5,6,8,9,15,1:23,27,28,29,30,31)
A<-c(0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,rep(1,28))
six<-c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA)
df<-data.frame(ID,hour,A,six)

Here is a tidyverse approach, updated for the new data set:
library(tidyverse)
df %>%
  group_by(ID) %>%
  expand(hour = seq(min(hour), max(hour))) %>%
  left_join(df, by = c("ID", "hour")) %>%
  mutate(rle = rep(rle(A)$lengths, times = rle(A)$lengths)) %>%
  group_by(ID, rle) %>%
  mutate(sum = cumsum(A),
         six = ifelse(rle >= 6 & A == 1, 0, NA),
         six = ifelse(sum == 6, 1, ifelse(sum > 6, NA, six))) %>%
  filter(!is.na(A)) %>%
  ungroup() %>%
  select(ID, hour, A, six) %>%
  as.data.frame() -> df_out2
Check against the requested output:
ID<-c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4)
hour<-c(1,2,3,7,8,9,10,12,13,17,18,19,1,2,3,4,5,6,8,9,15,1:23,27,28,29,30,31)
A<-c(0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,rep(1,28))
six<-c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA)
df<-data.frame(ID,hour,A,six)
all.equal(df, df_out2)
#output
TRUE
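As an aside, tidyr::complete is an equivalent way to fill in the missing hours before the rest of the pipeline; a minimal sketch of just that grid-filling step:
# equivalent grid-filling step via tidyr::complete (sketch)
df %>%
  group_by(ID) %>%
  complete(hour = seq(min(hour), max(hour))) %>%
  ungroup()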
The old answer (for the original data set):
df %>%
  mutate(rle = rep(rle(A)$lengths, times = rle(A)$lengths)) %>%
  group_by(ID, rle) %>%
  mutate(sum = cumsum(A),
         six = ifelse(rle >= 6 & A == 1, 0, NA),
         six = ifelse(sum == 6, 1, ifelse(sum > 6, NA, six))) %>%
  ungroup() %>%
  select(ID, hour, A, six) %>%
  as.data.frame() -> df_out2
Let's check that the result matches the requested output:
ID <- c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2)
hour <- c(1,2,3,7,8,9,10,11,12,17,18,19,1,2,3,4,5,6,8,9,15)
A <- c(0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1)
six <- c(NA,NA,NA,0,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,1,NA,NA,NA)
df1 <- data.frame(ID, hour, A, six)
df1 is the requested output:
all.equal(df1, df_out2)
#output
TRUE
Some benchmarking:
library(microbenchmark)
library(data.table)
akrun <- function(df){
  setDT(df)[, grp := rleid(A)][, Anew := A * ((hour - shift(hour, fill = hour[1])) == 1), grp
    ][, sixnew := if (sum(A) >= 5) rep(c(0, 1), c(.N - 1, 1)) else NA_real_, .(rleid(Anew), grp)]
  i1 <- df[, .I[which(is.na(sixnew) & shift(sixnew == 0, type = 'lead'))], grp]$V1
  df[i1, sixnew := 0][, c("Anew", "grp") := NULL][]
}
missuse <- function(df){
  df %>%
    mutate(rle = rep(rle(A)$lengths, times = rle(A)$lengths)) %>%
    group_by(ID, rle) %>%
    mutate(sum = cumsum(A),
           six = ifelse(rle >= 6 & A == 1, 0, NA),
           six = ifelse(sum == 6, 1, ifelse(sum > 6, NA, six))) %>%
    ungroup() %>%
    select(ID, hour, A, six)
}
Mike <- function(df){
  ave(df$A,
      cumsum(!(df$hour == shift(df$hour, fill = 0) + 1)),
      FUN = function(x) {
        if (all(x == 1) & length(x) >= 6) return(c(rep(0, length(x) - 1), 1))
        else return(rep(NA, length(x)))
      })
}
microbenchmark(Mike(df),
               akrun(df),
               missuse(df))
#output
Unit: microseconds
expr min lq mean median uq max neval
Mike(df) 491.291 575.7115 704.2213 597.7155 629.0295 9578.684 100
akrun(df) 6568.313 6725.5175 7867.4059 6843.5790 7279.2240 69790.755 100
missuse(df) 11042.822 11321.0505 12434.8671 11512.3200 12616.3485 43170.935 100
way to go Mike H.!

To get the groupings, you can compare the current hour to the lagged hour to see if they are "consecutive", i.e. exactly 1 apart, and then take the cumsum of that. Once you have the groupings, a simple ave gets the output you want.
library(data.table)
df$six <- ave(df$A,
              cumsum(!(df$hour == shift(df$hour, fill = 0) + 1)),
              FUN = function(x) {
                if (all(x == 1) & length(x) >= 6) return(c(rep(0, length(x) - 1), 1))
                else return(rep(NA, length(x)))
              })
df
# ID hour A six
#1 1 1 0 NA
#2 1 2 1 NA
#3 1 3 0 NA
#4 1 7 1 0
#5 1 8 1 0
#6 1 9 1 0
#7 1 10 1 0
#8 1 11 1 0
#9 1 12 1 1
#10 1 17 0 NA
#11 1 18 0 NA
#12 1 19 0 NA
#13 2 1 1 0
#14 2 2 1 0
#15 2 3 1 0
#16 2 4 1 0
#17 2 5 1 0
#18 2 6 1 1
#19 2 8 1 NA
#20 2 9 1 NA
#21 2 15 1 NA
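Note that the cumsum grouping relies on hour restarting non-consecutively whenever ID changes; if two adjacent IDs happened to continue the same hour sequence, their runs would merge. A defensive variant (my tweak, not part of the original answer) adds ID to the grouping:
# defensive variant: also split the groups on ID (hypothetical tweak)
grp <- cumsum(!(df$hour == shift(df$hour, fill = 0) + 1))
df$six <- ave(df$A, df$ID, grp,
              FUN = function(x) {
                if (all(x == 1) & length(x) >= 6) return(c(rep(0, length(x) - 1), 1))
                else return(rep(NA, length(x)))
              })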
If you only want to select a patient at most once, this will select the last time period:
df$six_adj <- ave(df$six, df$ID, df$six, FUN = function(x) {
  if (all(x == 1)) return(c(rep(0, length(x) - 1), 1))
  else return(x)
})

We can use rleid from data.table. Create a grouping column with the run-length id of 'A' ('grp'). Take the difference of 'hour' from the previous value of 'hour', check whether it is equal to 1, and multiply by 'A' to create 'Anew'. Grouped by the run-length id of 'Anew' and by 'grp', if the sum of 'A' is at least the required value, fill the run with 0s and a final 1, or else return NA. In the last phase, assign the spillover NAs to 0 by creating an index ('i1').
library(data.table)
setDT(df)[, grp := rleid(A)][, Anew := A * ((hour - shift(hour, fill = hour[1])) == 1), grp
  ][, sixnew := if (sum(A) >= 5) rep(c(0, 1), c(.N - 1, 1)) else NA_real_, .(rleid(Anew), grp)]
i1 <- df[, .I[which(is.na(sixnew) & shift(sixnew == 0, type = 'lead'))], grp]$V1
df[i1, sixnew := 0][, c("Anew", "grp") := NULL][]
# ID hour A six sixnew
# 1: 1 1 0 NA NA
# 2: 1 2 1 NA NA
# 3: 1 3 0 NA NA
# 4: 1 7 1 0 0
# 5: 1 8 1 0 0
# 6: 1 9 1 0 0
# 7: 1 10 1 0 0
# 8: 1 11 1 0 0
# 9: 1 12 1 1 1
#10: 1 17 0 NA NA
#11: 1 18 0 NA NA
#12: 1 19 0 NA NA
#13: 2 1 1 0 0
#14: 2 2 1 0 0
#15: 2 3 1 0 0
#16: 2 4 1 0 0
#17: 2 5 1 0 0
#18: 2 6 1 1 1
#19: 2 8 1 NA NA
#20: 2 9 1 NA NA
#21: 2 15 1 NA NA
Or a slightly more compact option would be:
i1 <- setDT(df)[, if (sum(A) >= 5) .I[.N], rleid(c(TRUE, diff(hour) == 1), A)]$V1
df[i1, sixnew := 1][unlist(Map(`:`, i1 - 5, i1 - 1)), sixnew := 0]
df
# ID hour A six sixnew
# 1: 1 1 0 NA NA
# 2: 1 2 1 NA NA
# 3: 1 3 0 NA NA
# 4: 1 7 1 0 0
# 5: 1 8 1 0 0
# 6: 1 9 1 0 0
# 7: 1 10 1 0 0
# 8: 1 11 1 0 0
# 9: 1 12 1 1 1
#10: 1 17 0 NA NA
#11: 1 18 0 NA NA
#12: 1 19 0 NA NA
#13: 2 1 1 0 0
#14: 2 2 1 0 0
#15: 2 3 1 0 0
#16: 2 4 1 0 0
#17: 2 5 1 0 0
#18: 2 6 1 1 1
#19: 2 8 1 NA NA
#20: 2 9 1 NA NA
#21: 2 15 1 NA NA
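The grouping expression rleid(c(TRUE, diff(hour) == 1), A) does the heavy lifting here: rleid accepts several vectors and starts a new run id whenever any of them changes. A small illustration with made-up values:
# made-up values: the gap between hours 3 and 7 breaks the run
hour <- c(1, 2, 3, 7, 8, 9)
A <- c(1, 1, 1, 1, 1, 1)
rleid(c(TRUE, diff(hour) == 1), A)
# [1] 1 1 1 2 3 3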
Benchmarks
Created a slightly bigger data set and tested the solutions.
Data:
ID<-c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2)
hour<-c(1,2,3,7,8,9,10,11,12,17,18,19,1,2,3,4,5,6,8,9,15)
A<-c(0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1)
ID <- rep(1:5000, rep(c(12, 9), 2500))
A <- rep(A, 2500)
hour <- rep(hour, 2500)
dftest <- data.frame(ID, hour, A)
Functions:
akrun <- function(df){
  df1 <- copy(df)
  setDT(df1)[, grp := rleid(A)][, Anew := A * ((hour - shift(hour, fill = hour[1])) == 1), grp
    ][, sixnew := if (sum(A) >= 5) rep(c(0, 1), c(.N - 1, 1)) else NA_real_, .(rleid(Anew), grp)]
  i1 <- df1[, .I[which(is.na(sixnew) & shift(sixnew == 0, type = 'lead'))], grp]$V1
  df1[i1, sixnew := 0][, c("Anew", "grp") := NULL][]
}
akrun2 <- function(df) {
  df1 <- copy(df)
  i1 <- setDT(df1)[, if (sum(A) >= 5) .I[.N], rleid(c(TRUE, diff(hour) == 1), A)]$V1
  df1[i1, sixnew := 1][unlist(Map(`:`, i1 - 5, i1 - 1)), sixnew := 0]
}
missuse <- function(df){
  df %>%
    mutate(rle = rep(rle(A)$lengths, times = rle(A)$lengths)) %>%
    group_by(ID, rle) %>%
    mutate(sum = cumsum(A),
           six = ifelse(rle >= 6 & A == 1, 0, NA),
           six = ifelse(sum == 6, 1, ifelse(sum > 6, NA, six))) %>%
    ungroup() %>%
    select(ID, hour, A, six)
}
Mike <- function(df){
  ave(df$A,
      cumsum(!(df$hour == shift(df$hour, fill = 0) + 1)),
      FUN = function(x) {
        if (all(x == 1) & length(x) >= 6) return(c(rep(0, length(x) - 1), 1))
        else return(rep(NA, length(x)))
      })
}
Benchmark:
microbenchmark(Mike(dftest),
               akrun(dftest),
               akrun2(dftest),
               missuse(dftest),
               times = 10L, unit = 'relative')
Output:
#Unit: relative
# expr min lq mean median uq max neval cld
# Mike(dftest) 1.682794 1.754494 1.673811 1.68806 1.632765 1.640221 10 a
# akrun(dftest) 13.159245 12.950117 12.176965 12.33716 11.856271 11.095228 10 b
# akrun2(dftest) 1.000000 1.000000 1.000000 1.00000 1.000000 1.000000 10 a
# missuse(dftest) 37.899905 36.773837 34.726845 34.87672 33.155939 30.665840 10 c

Related

Countdown dates in R

Suppose I have the following dataset:
id1 <- c(1,1,1,1,2,2,2,2,1,1,1,1)
dates <- c("a","a","a","a","b","b","b","b","c","c","c","c")
x <- c(NA,0,NA,NA,NA,NA,0,NA,NA,NA,NA,0)
df <- data.frame(id1,dates,x)
My objective is to have a new column that counts the sequence of observations around the 0 for every combination of id1 and dates. This would yield the following outcome:
desired_result <- c(-1,0,1,2,-2,-1,0,1,-3,-2,-1,0)
Any help is appreciated.
library(dplyr)
df %>%
  group_by(id1, dates) %>%
  mutate(x = row_number() - which(x == 0))
id1 dates x
1 1 a -1
2 1 a 0
3 1 a 1
4 1 a 2
5 2 b -2
6 2 b -1
7 2 b 0
8 2 b 1
9 1 c -3
10 1 c -2
11 1 c -1
12 1 c 0
With dplyr 1.1.0:
df %>%
  mutate(x = row_number() - which(x == 0), .by = c(id1, dates))
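For reference, a base R equivalent can be sketched with ave (assuming exactly one 0 per id1/dates group; x2 is just an illustrative column name):
# base R sketch: position of each row relative to the single 0 in its group
df$x2 <- ave(df$x, df$id1, df$dates,
             FUN = function(v) seq_along(v) - which(v == 0))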

Replacing all values with 1 after a condition

My current data looks like this:
df<-data.frame(id=c(1:5),t1=c(NA,1,0,0,0),t2=c(0,1,0,1,0),
t3=c(NA,0,0,0,1),t4=c(NA,NA,NA,0,0))
The way I'm trying to restructure this is: for each id, if there's a 1 in that row, all the 0s in the subsequent columns should change to 1 (but leaving each NA as an NA).
So for id #1 nothing would change, since there's no 1 in that row, but for id #2, after the 1 in column t2, any 0s afterwards would be replaced by 1.
i.e., this is what I'm trying to get at the end:
final<-data.frame(id=c(1:5),t1=c(0,1,0,0,0),t2=c(0,1,0,1,0),
t3=c(NA,1,0,1,1),t4=c(NA,NA,NA,1,1))
I've been trying different ways but nothing seems to work... I'd really appreciate any help!!!
In base R, we can apply cummax by row after changing each NA to a low value, and then set those values back to NA:
df[-1] <- t(apply(replace(df[-1], is.na(df[-1]), -999), 1, cummax)) *
  NA^(is.na(df[-1]))
df
# id t1 t2 t3 t4
#1 1 NA 0 NA NA
#2 2 1 1 1 NA
#3 3 0 0 0 NA
#4 4 0 1 1 1
#5 5 0 0 1 1
Or use rowCummaxs from matrixStats:
library(matrixStats)
df[-1] <- rowCummaxs(as.matrix(replace(df[-1], is.na(df[-1]), -999))) *
  NA^(is.na(df[-1]))
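The NA^(is.na(df[-1])) factor is what restores the missing values: in R, NA^0 evaluates to 1 (so non-missing cells are unchanged by the multiplication) while NA^1 is NA. A quick check:
NA^c(0, 1)
# [1]  1 NA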
With tidyverse you can try:
library(tidyverse)
df %>%
  pivot_longer(cols = starts_with("t"), names_to = "Time", values_to = "Value") %>%
  group_by(id) %>%
  mutate(Cummax = cummax(Value)) %>%
  mutate(Value = replace(Value, Value == 0 & Cummax == 1, 1)) %>%
  pivot_wider(id_cols = id, names_from = "Time", values_from = "Value")
Output
# A tibble: 5 x 5
# Groups: id [5]
id t1 t2 t3 t4
<int> <dbl> <dbl> <dbl> <dbl>
1 1 NA 0 NA NA
2 2 1 1 1 NA
3 3 0 0 0 NA
4 4 0 1 1 1
5 5 0 0 1 1
Another approach in base R, using apply row-wise, is to find the column where the first 1 occurs and replace all the 0 values after it with 1.
df[-1] <- t(apply(df[-1], 1, function(x) {
  a_id <- which(x == 1)[1]   # NA when the row has no 1
  if (!is.na(a_id))
    replace(x, x == 0 & seq_along(x) > a_id, 1)
  else x
}))
df
# id t1 t2 t3 t4
#1 1 NA 0 NA NA
#2 2 1 1 1 NA
#3 3 0 0 0 NA
#4 4 0 1 1 1
#5 5 0 0 1 1

dplyr Rolling Conditional Counts

I have a data frame as follows:
df <- data.frame(
Item=c("A","A","A","A","A","B","B","B","B","B"),
Date=c("2018-1-1","2018-2-1","2018-3-1","2018-4-1","2018-5-1","2018-1-1","2018-2-1",
"2018-3-1","2018-4-1","2018-5-1"),
Value=rnorm(10))
I want to mutate a new column, grouped by Item, counting the number of values greater than 0 within a window of 3 (or any other integer I specify).
I am familiar with tidyverse, therefore, a dplyr solution would be most welcome.
Think zoo:: package if you want to roll anything.
df$new <- zoo::rollsum(df$Value > 0, 3, fill = NA)
# Item Date Value new
#1 A 2018-1-1 0.5852699 NA
#2 A 2018-2-1 -0.7383377 1
#3 A 2018-3-1 -0.3157693 1
#4 A 2018-4-1 1.2475237 1
#5 A 2018-5-1 -1.5479757 1
#6 B 2018-1-1 -0.6913331 0
#7 B 2018-2-1 -0.2423809 0
#8 B 2018-3-1 -1.6363024 0
#9 B 2018-4-1 -0.3256263 1
#10 B 2018-5-1 0.3563144 NA
You also have a choice of window position; have a closer look at the argument align = c("center", "left", "right").
So as a dplyr chain:
df %>%
  group_by(Item) %>%
  dplyr::mutate(new = zoo::rollsum(Value > 0, 3, fill = NA))
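For instance, a trailing window (counting over the current and two previous rows) would look like this sketch, with new_right as an illustrative column name:
# trailing window: sum of (Value > 0) over the current and two previous rows
df %>%
  group_by(Item) %>%
  dplyr::mutate(new_right = zoo::rollsum(Value > 0, 3, fill = NA, align = "right"))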
You could use the RcppRoll package.
require(RcppRoll)
df$new <- RcppRoll::roll_sum(df$Value > 0, 3, fill = NA)
Using the tidyverse:
df %>%
  group_by(Item) %>%
  dplyr::mutate(new = RcppRoll::roll_sum(Value > 0, 3, fill = NA))
Speed-wise this is faster than the zoo package:
n <- 10000
df <- data.frame(
Item = sample(LETTERS, n, replace = TRUE),
Value = rnorm(n))
df_grouped <- df %>%
group_by(Item)
microbenchmark::microbenchmark(
RcppRoll = df_grouped <- df_grouped %>% dplyr::mutate(new_RcppRoll = RcppRoll::roll_sum(Value > 0, 3, fill = NA)),
zoo = df_grouped <- df_grouped %>% dplyr::mutate(new_zoo = zoo::rollsum( Value > 0, 3, fill = NA ))
)
Results in:
Unit: milliseconds
expr min lq mean median uq max neval
RcppRoll 2.509003 2.741993 2.929227 2.83913 2.983726 5.832962 100
zoo 11.172920 11.785113 13.288970 12.43320 13.607826 25.879754 100
And
all.equal(df_grouped$new_RcppRoll, df_grouped$new_zoo)
TRUE
Changed the rnorm example for clarity, using sample(-5:5) instead:
   Item  Date       Value
   <fct> <date>     <int>
 1 A     2018-01-01     3
 2 B     2018-01-01     2
 3 B     2018-02-01    -5
 4 A     2018-02-01    -3
 5 A     2018-03-01     4
 6 B     2018-03-01    -2
 7 A     2018-04-01     5
 8 B     2018-04-01     0
 9 A     2018-05-01     1
10 B     2018-05-01    -4
df <- df %>%
  mutate(greater_than = (Value > 0) * Value) %>%
  group_by(Item) %>%
  arrange(Date) %>%
  mutate(greater_than = zoo::rollapplyr(greater_than, 3, sum, partial = TRUE))
df %>% arrange(Item) %>% head(10)
Should look like this:
1 A 2018-01-01 3 3
2 A 2018-02-01 -3 3
3 A 2018-03-01 4 7
4 A 2018-04-01 5 9
5 A 2018-05-01 1 10
6 B 2018-01-01 2 2
7 B 2018-02-01 -5 2
8 B 2018-03-01 -2 2
9 B 2018-04-01 0 0
10 B 2018-05-01 -4 0

Replace column value in a data frame based on other columns

I have the following data frame ordered by name and time.
set.seed(100)
df <- data.frame('name' = c(rep('x', 6), rep('y', 4)),
'time' = c(rep(1, 2), rep(2, 3), 3, 1, 2, 3, 4),
'score' = c(0, sample(1:10, 3), 0, sample(1:10, 2), 0, sample(1:10, 2))
)
> df
name time score
1 x 1 0
2 x 1 4
3 x 2 3
4 x 2 5
5 x 2 0
6 x 3 1
7 y 1 5
8 y 2 0
9 y 3 5
10 y 4 8
In df$score there are zeros followed by an unknown number of actual values, e.g. df[1:4,], and sometimes the rows between two zeros span two different values of df$name, e.g. df[6:7,].
I want to change df$time where df$score != 0. Specifically, I want to assign the time value of the closest row above with df$score == 0, provided df$name matches.
The following code gives the desired output, but my data has millions of rows, so this solution is very inefficient.
score_0 <- append(which(df$score == 0), dim(df)[1] + 1)
for (i in 1:(length(score_0) - 1)) {
  df$time[score_0[i]:(score_0[i + 1] - 1)] <-
    ifelse(df$name[score_0[i]:(score_0[i + 1] - 1)] == df$name[score_0[i]],
           df$time[score_0[i]],
           df$time[score_0[i]:(score_0[i + 1] - 1)])
}
> df
name time score
1 x 1 0
2 x 1 4
3 x 1 3
4 x 1 5
5 x 2 0
6 x 2 1
7 y 1 5
8 y 2 0
9 y 2 5
10 y 2 8
Here score_0 gives the indices where df$score == 0. We see that df$time[2:4] are now all equal to 1, and that in df$time[6:7] only the first one changed, because the second has df$name == 'y' while the closest row above with df$score == 0 has df$name == 'x'. The last two rows have also changed correctly.
You can do it like this:
library(dplyr)
df %>%
  group_by(name) %>%
  mutate(ID = cumsum(score == 0)) %>%
  group_by(name, ID) %>%
  mutate(time = head(time, 1)) %>%
  ungroup() %>%
  select(name, time, score) %>%
  as.data.frame()
# name time score
# 1 x 1 0
# 2 x 1 8
# 3 x 1 10
# 4 x 1 6
# 5 x 2 0
# 6 x 2 5
# 7 y 1 4
# 8 y 2 0
# 9 y 2 5
# 10 y 2 9
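If dplyr is not an option, the same idea (group by name plus a running count of zeros, then take the first time of each group) can be sketched in base R:
# base R sketch of the same grouping idea
grp <- ave(as.integer(df$score == 0), df$name, FUN = cumsum)
df$time <- ave(df$time, df$name, grp, FUN = function(t) t[1])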
Solution using dplyr and data.table:
library(data.table)
library(dplyr)
df %>%
  mutate(chck = score == 0,
         chck_rl = ifelse(score == 0, lead(rleid(chck)), rleid(chck))) %>%
  group_by(name, chck_rl) %>%
  mutate(time = first(time)) %>%
  ungroup() %>%
  select(-chck_rl, -chck)
Output:
# A tibble: 10 x 3
name time score
<chr> <dbl> <int>
1 x 1 0
2 x 1 2
3 x 1 9
4 x 1 7
5 x 2 0
6 x 2 1
7 y 1 8
8 y 2 0
9 y 2 2
10 y 2 3
Solution only using data.table:
library(data.table)
setDT(df)[, chck_rl := ifelse(score == 0, shift(rleid(score == 0), type = "lead"),
                              rleid(score == 0))
          ][, time := first(time), by = .(name, chck_rl)][, chck_rl := NULL]
Output:
name time score
1: x 1 0
2: x 1 2
3: x 1 9
4: x 1 7
5: x 2 0
6: x 2 1
7: y 1 8
8: y 2 0
9: y 2 2
10: y 2 3

Calculate the number of occurrences of a specific event in the past AND future with groupings

This question is a modification of a problem I posted here, where I had occurrences of a specific type on different days, but this time they are assigned to multiple users, for example:
df = data.frame(user_id = c(rep(1:2, each=5)),
cancelled_order = c(rep(c(0,1,1,0,0), 2)),
order_date = as.Date(c('2015-01-28', '2015-01-31', '2015-02-08', '2015-02-23', '2015-03-23',
'2015-01-25', '2015-01-28', '2015-02-06', '2015-02-21', '2015-03-26')))
user_id cancelled_order order_date
1 0 2015-01-28
1 1 2015-01-31
1 1 2015-02-08
1 0 2015-02-23
1 0 2015-03-23
2 0 2015-01-25
2 1 2015-01-28
2 1 2015-02-06
2 0 2015-02-21
2 0 2015-03-26
I'd like to calculate:
1) the number of cancelled orders that each customer is going to have in the next x days (e.g. 7, 14), excluding the current one, and
2) the number of cancelled orders that each customer had in the past x days (e.g. 7, 14), excluding the current one.
The desired output would look like this:
user_id cancelled_order order_date plus14 minus14
1 0 2015-01-28 2 0
1 1 2015-01-31 1 0
1 1 2015-02-08 0 1
1 0 2015-02-23 0 0
1 0 2015-03-23 0 0
2 0 2015-01-25 2 0
2 1 2015-01-28 1 0
2 1 2015-02-06 0 1
2 0 2015-02-21 0 0
2 0 2015-03-26 0 0
A solution that is perfectly fit for this purpose was presented by @joel.wilson using data.table:
library(data.table)
vec <- c(14, 30) # specify desired ranges
setDT(df)[, paste0("x", vec) :=
            lapply(vec, function(i) sum(df$cancelled_order[between(df$order_date,
                                                                   order_date,
                                                                   order_date + i, # this part can be changed to reflect past date ranges
                                                                   incbounds = FALSE)])),
          by = order_date]
However, it does not take grouping by user_id into account. When I tried to modify it by adding this grouping as by = c("user_id", "order_date") or by = list(user_id, order_date), it did not work. It seems like something very basic; any hints on how to get around this detail?
Also, keep in mind that I'm after a solution that works, even if it is not based on the above code or data.table at all!
Thanks!
Here's one way:
library(data.table)
orderDT = with(df, data.table(id = user_id, completed = !cancelled_order, d = order_date))
vec = list(minus = 14L, plus = 14L)
orderDT[, c("dplus", "dminus") := .(
  orderDT[!(completed)][orderDT[, .(id, d_plus = d + vec$plus, d_tom = d + 1L)],
                        on = .(id, d <= d_plus, d >= d_tom), .N, by = .EACHI]$N
  ,
  orderDT[!(completed)][orderDT[, .(id, d_minus = d - vec$minus, d_yest = d - 1L)],
                        on = .(id, d >= d_minus, d <= d_yest), .N, by = .EACHI]$N
)]
id completed d dplus dminus
1: 1 TRUE 2015-01-28 2 0
2: 1 FALSE 2015-01-31 1 0
3: 1 FALSE 2015-02-08 0 1
4: 1 TRUE 2015-02-23 0 0
5: 1 TRUE 2015-03-23 0 0
6: 2 TRUE 2015-01-25 2 0
7: 2 FALSE 2015-01-28 1 0
8: 2 FALSE 2015-02-06 0 1
9: 2 TRUE 2015-02-21 0 0
10: 2 TRUE 2015-03-26 0 0
(I found OP's column names cumbersome and so shortened them.)
How it works
Each of the columns can be run on its own, like
orderDT[!(completed)][orderDT[, .(id, d_plus = d + vec$plus, d_tom = d + 1L)], on=.(id, d <= d_plus, d >= d_tom), .N, by=.EACHI]$N
And this can be broken down into steps by simplifying:
orderDT[!(completed)][
  orderDT[, .(id, d_plus = d + vec$plus, d_tom = d + 1L)],
  on = .(id, d <= d_plus, d >= d_tom),
  .N,
  by = .EACHI]$N
# original version
orderDT[!(completed)][
  orderDT[, .(id, d_plus = d + vec$plus, d_tom = d + 1L)],
  on = .(id, d <= d_plus, d >= d_tom),
  .N,
  by = .EACHI]
# don't extract the N column of counts
orderDT[!(completed)][
  orderDT[, .(id, d_plus = d + vec$plus, d_tom = d + 1L)],
  on = .(id, d <= d_plus, d >= d_tom)]
# don't create the N column of counts
orderDT[!(completed)]
# don't do the join
orderDT[, .(id, d_plus = d + vec$plus, d_tom = d + 1L)]
# see the second table used in the join
This uses a "non-equi" join, taking inequalities to define the date ranges. For more details, see the documentation page found by typing ?data.table.
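To see the mechanics in isolation, here is a stripped-down toy version (made-up data, not from the answer) counting, for each id's date range, how many event dates fall inside it:
# toy non-equi join: count events inside each id's date range
events <- data.table(id = c(1L, 1L, 2L),
                     d = as.Date(c("2015-01-31", "2015-02-08", "2015-01-28")))
ranges <- data.table(id = c(1L, 2L),
                     lo = as.Date(c("2015-01-29", "2015-01-26")),
                     hi = as.Date(c("2015-02-10", "2015-02-10")))
events[ranges, on = .(id, d >= lo, d <= hi), .N, by = .EACHI]$N
# [1] 2 1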
I might have made this solution a bit complex:
library(dplyr)
library(tidyr)
vec <- c(7, 14)
reslist <- lapply(vec, function(x){
  df %>%
    merge(df %>% rename(cancelled_order2 = cancelled_order, order_date2 = order_date)) %>%
    filter(abs(order_date - order_date2) <= x) %>%
    group_by(user_id, order_date) %>%
    arrange(order_date2) %>%
    mutate(cumcancel = cumsum(cancelled_order2)) %>%
    mutate(before = cumcancel - cancelled_order2,
           after = max(cumcancel) - cumcancel) %>%
    filter(order_date == order_date2) %>%
    select(user_id, cancelled_order, order_date, before, after) %>%
    mutate(within = x)
})
do.call(rbind, reslist) %>%
  gather(key, value, -user_id, -cancelled_order, -order_date, -within) %>%
  mutate(col = paste0(key, "_", within)) %>%
  select(-within, -key) %>%
  spread(col, value) %>%
  arrange(user_id, order_date)
PS:
I did spot a mistake in your output example (user_id 1, order_date 2015-02-23: minus14 should be 0, since there are 15 days between 02/08 and 02/23).
I recommend the runner package. Its runner function executes any R function within a sliding window.
To obtain the sum over the current 7-day and 14-day windows while excluding the current element, one can use sum(x[-length(x)]) for each window.
library(runner)
df %>%
  group_by(user_id) %>%
  mutate(
    minus_7 = runner(cancelled_order, k = 7, idx = order_date,
                     f = function(x) sum(x[-length(x)])),
    minus_14 = runner(cancelled_order, k = 14, idx = order_date,
                      f = function(x) sum(x[-length(x)])))
# A tibble: 10 x 5
# Groups:   user_id [2]
   user_id cancelled_order order_date minus_7 minus_14
     <int>           <dbl> <date>       <dbl>    <dbl>
 1       1               0 2015-01-28       0        0
 2       1               1 2015-01-31       0        0
 3       1               1 2015-02-08       0        1
 4       1               0 2015-02-23       0        0
 5       1               0 2015-03-23       0        0
 6       2               0 2015-01-25       0        0
 7       2               1 2015-01-28       0        0
 8       2               1 2015-02-06       0        1
 9       2               0 2015-02-21       0        0
10       2               0 2015-03-26       0        0
For future elements it's a bit tricky: it's still a 7-day window, but lagged by -6 days (i:(i+6) covers 7 days). Also, in this case the first element of each window is the current one, so it is excluded with sum(x[-1]).
df %>%
  group_by(user_id) %>%
  mutate(
    plus_7 = runner(cancelled_order, k = 7, lag = -6, idx = order_date,
                    f = function(x) sum(x[-1])),
    plus_14 = runner(cancelled_order, k = 14, lag = -13, idx = order_date,
                     f = function(x) sum(x[-1]))
  )
# A tibble: 10 x 5
# Groups: user_id [2]
user_id cancelled_order order_date plus_7 plus_14
<int> <dbl> <date> <dbl> <dbl>
1 1 0 2015-01-28 1 2
2 1 1 2015-01-31 0 1
3 1 1 2015-02-08 0 0
4 1 0 2015-02-23 0 0
5 1 0 2015-03-23 0 0
6 2 0 2015-01-25 1 2
7 2 1 2015-01-28 0 1
8 2 1 2015-02-06 0 0
9 2 0 2015-02-21 0 0
10 2 0 2015-03-26 0 0
More information can be found in the package and function documentation.
