I have two concurrent time series A and B, both containing events defined by start and end times - here is a sample:
A.df <- structure(list(A.eventid = 1:53,
A.start = structure(c(1563219814.52, 1563219852.37, 1563220313.16, 1563220472.66, 1563220704.35, 1563220879.51, 1563221108.24, 1563221158.33, 1563221387.43, 1563221400.7, 1563221602.34, 1563221828.33, 1563222165.52, 1563222314.2, 1563222557.28, 1563222669.44, 1563222905.52, 1563223091.62, 1563223237.19, 1563223273.64, 1563223580.14, 1563223908.66, 1563224093.27, 1563224497.41, 1563224554.64, 1563224705.57, 1563225011.55, 1563225192.59, 1563225305.14, 1563225414.38, 1563225432.21, 1563225898.61, 1563226034.51, 1563226110.18, 1563226206.49, 1563226528.13, 1563226570.18, 1563226788.53, 1563227026.21, 1563227502.2, 1563227709.3, 1563227832.51, 1563228127.44, 1563228188.4, 1563228293.59, 1563228558.39, 1563228680.32, 1563228819.44, 1563229208.51, 1563229282.14, 1563229528.52, 1563229959.21, 1563230268.65), class = c("POSIXct", "POSIXt")),
A.end = structure(c(1563219846.43, 1563220304.39, 1563220470.68, 1563220702.37, 1563220877.5, 1563221102.18, 1563221151.47, 1563221379.63, 1563221389.22, 1563221600.32, 1563221819.27, 1563222157.29, 1563222312.23, 1563222555.25, 1563222667.42, 1563222894.56, 1563223079.44, 1563223230.39, 1563223273.24, 1563223578.14, 1563223900.48, 1563224089.24, 1563224493.45, 1563224550.37, 1563224699.47, 1563225005.13, 1563225188.17, 1563225293.21, 1563225412.17, 1563225417.46, 1563225894.44, 1563226025.2, 1563226108.13, 1563226204.37, 1563226517.59, 1563226562.41, 1563226780.59, 1563227022.28, 1563227493.57, 1563227705.52, 1563227830.38, 1563228125.49, 1563228184.21, 1563228286.39, 1563228546.47, 1563228677.67, 1563228816.5, 1563229198.68, 1563229273.54, 1563229526.53, 1563229952.57, 1563230257.16, 1563230742.25), class = c("POSIXct", "POSIXt"))),
row.names = 1:53, class = "data.frame")
B.df <- structure(list(B.eventid = 1:52,
B.start = structure(c(1563221811.888, 1563222153.835, 1563222156.013, 1563222220.14, 1563222289.692, 1563222305.607, 1563222611.565, 1563222631.139, 1563222636.867, 1563222763.565, 1563222774.301, 1563222848.507, 1563222849.957, 1563222853.513, 1563223225.656, 1563223302.539, 1563223326.153, 1563223328.934, 1563223590.144, 1563223592.904, 1563224035.038, 1563224692.704, 1563226451.642, 1563226454.731, 1563226819.701, 1563226824.685, 1563227278.677, 1563227770.247, 1563227773.907, 1563227800.529, 1563227804.663, 1563227809.749, 1563227813.237, 1563227819.043, 1563227829.781, 1563227973.727, 1563229396.472, 1563229454.515, 1563229473.079, 1563229488.669, 1563229521.413, 1563229542.954, 1563229553.595, 1563229565.988, 1563229569.095, 1563229618.857, 1563229791.585, 1563229936.355, 1563230339.141, 1563230734.677, 1563231667.173, 1563231978.567), class = c("POSIXct", "POSIXt")),
B.end = structure(c(1563221815.058, 1563222154.295, 1563222158.633, 1563222222.07, 1563222289.872, 1563222308.617, 1563222614.265, 1563222633.509, 1563222640.367, 1563222769.045, 1563222774.801, 1563222848.677, 1563222850.237, 1563222856.103, 1563223226.166, 1563223305.339, 1563223328.763, 1563223333.234, 1563223591.454, 1563223593.084, 1563224043.618, 1563224695.234, 1563226454.622, 1563226456.771, 1563226822.551, 1563226827.225, 1563227282.067, 1563227771.787, 1563227774.477, 1563227802.199, 1563227806.653, 1563227811.569, 1563227817.897, 1563227823.643, 1563227830.351, 1563227978.177, 1563229401.282, 1563229457.905, 1563229478.359, 1563229492.439, 1563229527.723, 1563229545.694, 1563229558.975, 1563229568.658, 1563229571.255, 1563229621.117, 1563229792.055, 1563229952.055, 1563230344.351, 1563230739.647, 1563231672.983, 1563231979.987), class = c("POSIXct", "POSIXt"))),
row.names = 1:52, class = "data.frame")
Events in series A are longer, while events in B are shorter.
I've drawn a schematic to help explain:
For each A event during which ≥ 4 B events occur, I'd like to compare (also shown on the schematic):
X = the mean interval between B events occurring during the A event
with
Y = the interval between the last B event occurring during the A event, and the first B event occurring after the A event
My issues are with the calculation of X and Y.
To calculate X, I tried using foverlaps to group B events by the A events in which they occur. But, this excludes B events occurring within gaps between A events.
Also, my attempts to calculate the mean intervals between grouped B events using mutate and lag failed, as I couldn't restrict lag to working only within the groups (i.e. it calculated intervals between groups as well).
Finally, I'm not sure how to efficiently identify the start/end of the Y interval to calculate its duration.
I was thinking my R/coding was improving, but this has me floundering a bit - any help would be very much appreciated!
Assuming your B events are in chronological order, do not overlap each other, and each falls within at most one A event...
Explanation and intermediate output are commented in the code below.
I could not verify the output, since you provided no desired/expected output in your question. The results look plausible to me at first glance.
library(data.table)
setDT(A.df); setDT(B.df)
#get time to next B
B.df[, time.to.next.B := shift(B.start, type = "lead") - B.end ][]
#get the A-event that each B-event falls into
B.df[ A.df,
A.eventid := i.A.eventid,
on = .(B.start >= A.start, B.end <= A.end )][]
# B.eventid B.start B.end time.to.next.B A.eventid
# 1: 1 2019-07-15 22:16:51 2019-07-15 22:16:55 338.777 secs 11
# 2: 2 2019-07-15 22:22:33 2019-07-15 22:22:34 1.718 secs 12
# 3: 3 2019-07-15 22:22:36 2019-07-15 22:22:38 61.507 secs NA
# 4: 4 2019-07-15 22:23:40 2019-07-15 22:23:42 67.622 secs 13
# 5: 5 2019-07-15 22:24:49 2019-07-15 22:24:49 15.735 secs 13
# 6: 6 2019-07-15 22:25:05 2019-07-15 22:25:08 302.948 secs 13
# ...
#summarise by A.eventid, get number of B-events, and B.eventid of last B-event
#only keep A.eventid's with 4 or more B-events
ans <- B.df[ !is.na( A.eventid),
.( B.events = .N,
last.B.eventid = max( B.eventid ),
next.B.eventid = max( B.eventid ) + 1,
mean.B.interval.within.A = mean( time.to.next.B[ B.eventid != max( B.eventid ) ] ) ),
by = .(A.eventid) ][ B.events >= 4, ]
# A.eventid B.events last.B.eventid next.B.eventid mean.B.interval.within.A
# 1: 16 5 14 15 20.879500 secs
# 2: 41 8 35 36 6.097714 secs
# 3: 50 4 40 41 26.239000 secs
# 4: 51 7 48 49 62.953500 secs
#now find the needed interval endpoints using update joins
ans[ B.df, start_time := i.B.end, on = .(last.B.eventid = B.eventid)]
ans[ B.df, end_time := i.B.start, on = .(next.B.eventid = B.eventid)]
# A.eventid B.events last.B.eventid next.B.eventid mean.B.interval.within.A start_time end_time
# 1: 16 5 14 15 20.879500 secs 2019-07-15 22:34:16 2019-07-15 22:40:25
# 2: 41 8 35 36 6.097714 secs 2019-07-15 23:57:10 2019-07-15 23:59:33
# 3: 50 4 40 41 26.239000 secs 2019-07-16 00:24:52 2019-07-16 00:25:21
# 4: 51 7 48 49 62.953500 secs 2019-07-16 00:32:32 2019-07-16 00:38:59
X <- ans$mean.B.interval.within.A
# Time differences in secs
# [1] 20.879500 6.097714 26.239000 62.953500
Y <- ans$end_time - ans$start_time
# Time differences in secs
# [1] 369.553 143.376 28.974 387.086
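As an optional wrap-up (not part of the original answer, just a sketch using the columns created above), X and Y can be placed side by side:
ans[, .(A.eventid, B.events,
        X = as.numeric(mean.B.interval.within.A, units = "secs"),
        Y = as.numeric(end_time - start_time, units = "secs"))]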
I tried to come up with a possible solution, minus the averaging step, which should be straightforward (a rough sketch of it appears after the code below). First I renamed the columns, which makes it easier to combine the data sets:
library(dplyr)
library(stringr)
A.df = A.df %>%
rename_all(funs(str_replace(., "A.", ""))) %>%
mutate(type="A")
B.df = B.df %>%
rename_all(funs(str_replace(., "B.", ""))) %>%
mutate(type="B")
Then the overall data, sorted by time, is:
data = bind_rows(A.df, B.df) %>%
arrange(start)
Now I add a column holding the start time of the most recent A event. Forward-filling this value gives, for each event, the start of the A event that precedes it.
data = data %>%
mutate(last.A.start=ifelse(type=='A', start, NA)) %>%
tidyr::fill(last.A.start)
Finally, the A events can be removed. As long as last.A.start is the same, the B events belong to the same A event. Based on this information, X and Y can be calculated.
data = data %>%
filter(type == "B") %>%
mutate(
duration=end-start, # Not needed.
delta=start - lag(end),
sameA=(last.A.start == lag(last.A.start)),
x=ifelse(sameA, delta, NA),
y=ifelse(sameA, NA, delta)
)
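A rough sketch of the remaining averaging step might look like this (my own addition, assuming the columns created above; note that y is attached to the first B event after each A group, so it has to be shifted back one group to align):
per_A <- data %>%
  group_by(last.A.start) %>%
  summarise(n_B = n(),
            X   = mean(x, na.rm = TRUE),  # mean interval between B events within one A group
            Y   = first(y)) %>%           # y sits on the first B event of the *next* A group...
  mutate(Y = lead(Y)) %>%                 # ...so shift it back one group to align with this A group
  filter(n_B >= 4)                        # keep only A events with at least 4 B events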
Does this help?
Best, M
Related
Does anyone have a solution to perform separate operations on groups of consecutive values that are a subset of a time series and are identified by a reoccurring, identical flag with R?
In the example data set created by the code below, this would refer for example to calculating the mean of “value” separately for each group where “flag” == 1 on consecutive days.
A typical case in science would be a data set recorded by an instrument that repeatedly executes a calibration procedure and flags the corresponding data with the same flag, but the user needs to evaluate each calibration separately with the same procedure.
Thanks for your suggestions. Jens
library(lubridate)
df <- data.frame(
date = seq(ymd("2018-01-01"), ymd("2018-06-29"), by = "days"),
flag = rep( c(rep(1,10), rep(0, 20)), 6),
value = seq(1,180,1)
)
The data.table function rleid is great for assigning group IDs to runs of consecutive values. I continue to use data.table, but you could do everything except the rleid part just as well in dplyr or base R.
My answer comes down to using data.table::rleid and then picking your favorite way to take the mean by group (R-FAQ link).
library(data.table)
setDT(df)
df[, r_id := rleid(flag)]
df[flag == 1, list(
min_date = min(date),
max_date = max(date),
mean_value = mean(value)
), by = r_id]
# r_id min_date max_date mean_value
# 1: 1 2018-01-01 2018-01-10 5.5
# 2: 3 2018-01-31 2018-02-09 35.5
# 3: 5 2018-03-02 2018-03-11 65.5
# 4: 7 2018-04-01 2018-04-10 95.5
# 5: 9 2018-05-01 2018-05-10 125.5
# 6: 11 2018-05-31 2018-06-09 155.5
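The same grouping can be expressed in dplyr while still borrowing data.table::rleid, roughly like this (a sketch, not verified against the output above):
library(dplyr)
df %>%
  mutate(r_id = data.table::rleid(flag)) %>%
  filter(flag == 1) %>%
  group_by(r_id) %>%
  summarise(min_date   = min(date),
            max_date   = max(date),
            mean_value = mean(value))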
I'm calculating price differences between trades that occur within a specific time difference of each other (say 60 seconds). I need this to be done for several assets and several trades. However, I could not figure out a way to do this without an eternal for loop.
Let's create some random prices:
library(birk)
library(tictoc)
library(dplyr)
initial.date <- as.POSIXct('2018-10-27 10:00:00',tz='GMT')
last.date <- as.POSIXct('2018-10-28 17:00:00',tz='GMT')
PriorityDateTime=seq.POSIXt(from=initial.date,to = last.date,by = '30 sec')
TradePrice=seq(from=1, to=length(PriorityDateTime),by = 1)
ndf<- data.frame(PriorityDateTime,TradePrice)
ndf$InstrumentSymbol <- rep_len(x = c('asset1','asset2'),length.out = length(ndf$PriorityDateTime))
ndf$id <- seq(1:length(x = ndf$InstrumentSymbol))
For each trade (in the TradePrice column) I need to find the closest trade that falls within the 60-second interval. My main function is the following:
calc.spread <- function(df,c=60){
n<-length(df$PriorityDateTime)
difft <- dspread <- spread <- rep(0,n)
TimeF <- as.POSIXct(NA)
for (k in 1:n){
diffs <- as.POSIXct(df$PriorityDateTime) - as.POSIXct(df$PriorityDateTime[k])
idx <- which.closest(diffs,x=c)
TimeF[k]<- as.POSIXct(df$PriorityDateTime[idx])
difft[k] <- difftime(time1 = TimeF[k],time2 = df$PriorityDateTime[k], units = 'sec')
dspread[k] <- abs(df$TradePrice[k] - df$TradePrice[idx])
spread[k] <- 2*abs(log(df$TradePrice[k]) - log(df$TradePrice[idx]))
}
df <- data.frame(spread,dspread,difft,TimeF,PriorityDateTime=df$PriorityDateTime,id=df$id)
}
The function which.closest is just a wrapper for which.min(abs(vec - x)). As I have a data frame with multiple assets, I run:
c=60
spreads <- ndf %>% group_by(InstrumentSymbol) %>% do(calc.spread(.,c=c))
The problem is that I need to run this for 3-million-row data frames. I have searched the forum but couldn't find a way to run this code faster. ddply is a little slower than dplyr.
Is there any suggestion?
Being quite unsatisfied with my own previous answer, I asked here for help, and it turns out there is at least one way in data.table which is clearly faster. I also made a dplyr-related question here.
library(data.table)
library(lubridate)  # for ymd_hms
library(dplyr)      # for %>%
s <- Sys.time()
initial.date <- as.POSIXct('2018-10-27 10:00:00',tz='GMT')
last.date <- as.POSIXct('2018-12-28 17:00:00',tz='GMT')
PriorityDateTime=seq.POSIXt(from=initial.date,to = last.date,by = '30 sec');length(PriorityDateTime)
TradePrice=seq(from=1, to=length(PriorityDateTime),by = 1)
ndf<- data.frame(PriorityDateTime,TradePrice)
ndf$InstrumentSymbol <- rep_len(x = c('asset1','asset2'),length.out = length(ndf$PriorityDateTime))
ndf$id <- seq(1:length(x = ndf$InstrumentSymbol))
ndf$datetime <- ymd_hms(ndf$PriorityDateTime)
res <- ndf %>% data.table()
res2 <- setDT(res)
res2 <- res2[, `:=` (min_60 = datetime - 60, plus_60 = datetime + 60, idx = .I)][
res2, on = .(InstrumentSymbol = InstrumentSymbol, datetime >= min_60, datetime <= plus_60), allow.cartesian = TRUE][
idx != i.idx, .SD[which.min(abs(i.TradePrice - TradePrice))], by = id][
, .(id, minpricewithin60 = i.TradePrice, index.minpricewithin60 = i.idx)][
res, on = .(id)][, `:=` (min_60 = NULL, plus_60 = NULL, idx = NULL)]
res2[]
e <- Sys.time()
> e-s
Time difference of 1.23701 mins
You can then apply your calc.spread function directly to the minpricewithin60 column.
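For instance, a hedged sketch of that last step, computed column-wise on the joined result (column names as produced above, not taken from the original answer):
res2[, `:=`(dspread = abs(TradePrice - minpricewithin60),
            spread  = 2 * abs(log(TradePrice) - log(minpricewithin60)))][]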
You might have made a mistake: as described you want the minimum price difference among trades within 60 seconds, but as written you are instead looking for the trade that took place as close as possible to 60 seconds away, in the past or the future:
idx <- which.closest(diffs,x=c)
With this, a trade that took place 1 second ago would be discarded in favour of one that happened closer to 60 seconds away, and I don't think that is what you want. You probably want the lowest price difference among all trades within 60 seconds, which can be done by:
res$idx[i] <<- which.min(pricediff)[1]
See the code below:
library(lubridate)
library(dplyr)
ndf$datetime <- ymd_hms(ndf$PriorityDateTime)
res <- ndf %>% data.frame(stringsAsFactors = F)
res$dspread <- res$idx <- res$spread <- NA
sapply(1:nrow(res),function(i){
within60 <- abs(difftime(ndf$datetime[i], ndf$datetime, units = "secs")) <= 60  # name the units argument; passed positionally it would be taken as tz
samesymbol <- res$InstrumentSymbol[i]==res$InstrumentSymbol
isdifferenttrade <- 1:nrow(res)!=i
pricediff <- ifelse(within60&samesymbol&isdifferenttrade,abs(res$TradePrice[i]-res$TradePrice), Inf)
res$dspread[i] <<- min(pricediff)
res$idx[i] <<- which.min(pricediff)[1] #in case several elements have same price
res$spread[i] <<- 2*abs(log(res$TradePrice[i])-log(res$TradePrice[res$idx[i]]))
} )
head(res)
What I used was sapply, which is similar to (and can even be slower than) a for loop. If this is any faster on your real data, it is because I did the operations in a way that needs fewer steps.
Let me know; otherwise you can try the same in a for loop, or we would have to try data.table, which I am less familiar with. These approaches are generally time-consuming, of course, because you need to evaluate conditions against every row of data.
     PriorityDateTime TradePrice InstrumentSymbol id            datetime    spread idx dspread
1 2018-10-27 10:00:00          1           asset1  1 2018-10-27 10:00:00 2.1972246   3       2
2 2018-10-27 10:00:30          2           asset2  2 2018-10-27 10:00:30 1.3862944   4       2
3 2018-10-27 10:01:00          3           asset1  3 2018-10-27 10:01:00 2.1972246   1       2
4 2018-10-27 10:01:30          4           asset2  4 2018-10-27 10:01:30 1.3862944   2       2
5 2018-10-27 10:02:00          5           asset1  5 2018-10-27 10:02:00 1.0216512   3       2
6 2018-10-27 10:02:30          6           asset2  6 2018-10-27 10:02:30 0.8109302   4       2
I have a large file of time-series data, which looks as follows. The dataset covers years, in increments of 15 minutes. A small subset looks like:
uniqueid time
a 2014-04-30 23:30:00
a 2014-04-30 23:45:00
a 2014-05-01 00:00:00
a 2014-05-01 00:15:00
a 2014-05-12 13:45:00
a 2014-05-12 14:00:00
b 2014-05-12 13:45:00
b 2014-05-12 14:00:00
b 2014-05-12 14:30:00
To reproduce above:
time<-c("2014-04-30 23:30:00","2014-04-30 23:45:00","2014-05-01 00:00:00","2014-05-01 00:15:00",
"2014-05-12 13:45:00","2014-05-12 14:00:00","2014-05-12 13:45:00","2014-05-12 14:00:00",
"2014-05-12 14:30:00")
uniqueid<-c("a","a","a","a","a","a","b","b","b")
mydf<-data.frame(uniqueid,time)
My goal is to count the number of rows per unique id per consecutive timeflow. A consecutive timeflow is a run in which a unique id is stamped every 15 minutes (for example, id a is stamped from 2014-04-30 23:30 until 2014-05-01 00:15, hence 4 rows). When this flow of 15-minute steps is disrupted (after 2014-05-01 00:15 there is no stamp at 00:30), the next timestamp starts a new consecutive timeflow, and the row count starts again until that flow is disrupted in turn. Time is POSIX.
As you can see in above example; a consecutive timeflow may cover different days, different months, or different years. I have many unique ids (and as said, a very large file), so I'm looking for a way that my computer can handle (loops probably wouldn't work).
I am looking for output something like:
uniqueid flow number_rows
a 1 4
a 2 2
b 3 2
b 4 1
I have looked into some time packages (such as lubridate), but given my limited R knowledge, I don't even know where to begin.
I hope all is clear - if not, I'd be happy to try to clarify it further. Thank you very much in advance!
Another way to do this with data.table also using a time difference would be to make use of the data.table internal values for group number and number of rows in each group:
library(data.table)
res<-setDT(mydf)[, list(number_rows=.N,flow=.GRP),
by=.(uniqueid,cumsum(as.numeric(difftime(time,shift(time,1L,type="lag",fill=0))) - 15))][,cumsum:=NULL]
print(res)
uniqueid number_rows flow
1: a 4 1
2: a 2 2
3: b 2 3
4: b 1 4
Also, since the reproducible code you posted didn't match the subset you showed, I have included the data I used below:
Data
time<-as.POSIXct(c("2014-04-30 23:30:00","2014-04-30 23:45:00","2014-05-01 00:00:00","2014-05-01 00:15:00",
"2014-05-12 13:45:00","2014-05-12 14:00:00","2014-05-12 13:45:00","2014-05-12 14:00:00",
"2014-05-12 14:30:00"))
uniqueid<-c("a","a","a","a","a","a","b","b","b")
mydf<-data.frame(uniqueid,time)
You can group by uniqueid and the cumulative sum of an indicator for whether the time difference between rows is not equal to 15 minutes; that gives the flow id, and a row count per group then gives you what you need.
The logic: whenever the time difference within a uniqueid is not 15, a new flow should start, so we label that row TRUE; combined with cumsum, this produces a new flow id that carries over the following consecutive rows:
library(dplyr)
mydf$time <- as.POSIXct(mydf$time, format = "%Y-%m-%d %H:%M:%S")
# convert the time column to POSIXct class so that we can apply the diff function correctly
mydf %>% group_by(uniqueid, flow = 1 + cumsum(c(F, diff(time) != 15))) %>%
summarize(num_rows = n())
# Source: local data frame [4 x 3]
# Groups: uniqueid [?]
#
# uniqueid flow num_rows
# <fctr> <dbl> <int>
# 1 a 1 4
# 2 a 2 2
# 3 b 3 2
# 4 b 4 1
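For intuition, here is the flow-id construction on a made-up toy gap pattern (numbers are hypothetical, just to show the mechanics):
gaps <- c(15, 15, 240, 15, 15)        # minutes between consecutive rows of one uniqueid
1 + cumsum(c(FALSE, gaps != 15))
# [1] 1 1 1 2 2 2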
Base R is pretty fast. Using crude benchmarking, I found it finished in half the time of DT, and I got tired of waiting for dplyr.
# estimated size of data, years x days x hours x 15mins x uniqueids
5*365*24*4*1000 # = approx 180M
# make data with posixct and characters of 180M rows, mydf is approx 2.5GB in memory
time<-rep(as.POSIXct(c("2014-04-30 23:30:00","2014-04-30 23:45:00","2014-05-01 00:00:00","2014-05-01 00:15:00",
"2014-05-12 13:45:00","2014-05-12 14:00:00","2014-05-12 13:45:00","2014-05-12 14:00:00",
"2014-05-12 14:30:00")),times = 20000000)
uniqueid<-rep(as.character(c("a","a","a","a","a","a","b","b","b")),times = 20000000)
mydf<-data.frame(uniqueid,time = time)
rm(time,uniqueid);gc()
Base R:
# assumes that uniqueid's are in groups and in order, and there won't be a followed by b that have the 15 minute "flow"
starttime <- Sys.time()
# find failed flows
mydf$diff <- c(0,diff(mydf$time))
mydf$flowstop <- mydf$diff != 15
# give each flow an id
mydf$flowid <- cumsum(mydf$flowstop)
# clean up vars
mydf$time <- mydf$diff <- mydf$flowstop <- NULL
# find flow length
mydfrle <- rle(mydf$flowid)
# get uniqueid/flowid pairs (unique() is too slow)
mydf <- mydf[!duplicated(mydf$flowid), ]
# append rle and remove separate var
mydf$number_rows <- mydfrle$lengths
rm(mydfrle)
print(Sys.time()-starttime)
# Time difference of 30.39437 secs
data.table:
library(data.table)
starttime <- Sys.time()
res<-setDT(mydf)[, list(number_rows=.N,flow=.GRP),
by=.(uniqueid,cumsum(as.numeric(difftime(time,shift(time,1L,type="lag",fill=0))) - 15))][,cumsum:=NULL]
print(Sys.time()-starttime)
# Time difference of 57.08156 secs
dplyr:
library(dplyr)
# convert the time column to POSIXct class so that we can apply the diff function correctly
starttime <- Sys.time()
mydf %>% group_by(uniqueid, flow = 1 + cumsum(c(F, diff(time) != 15))) %>%
summarize(num_rows = n())
print(Sys.time()-starttime)
# too long, did not finish after a few minutes
I think the assumption of uniqueid's and times being in order is huge, and the other solutions might be able to take advantage of that better. order() is easy enough to do.
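For completeness, the ordering step mentioned above is just (assuming the same column names as in the question):
mydf <- mydf[order(mydf$uniqueid, mydf$time), ]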
I'm not sure about the memory impact, or how this behaves on data sets that aren't so simple. It should be easy enough to break the data into chunks and process them if memory is an issue. It certainly takes more code in base R.
Having both the "id" and "time" columns ordered, we can build a single grouping by creating a logical vector that marks the rows where either "id" changes or the "time" gap is > 15 minutes.
With:
id = as.character(mydf$uniqueid)
tm = mydf$time
find where "id":
id_gr = c(TRUE, id[-1] != id[-length(id)])
and "time":
tm_gr = c(0, difftime(tm[-1], tm[-length(tm)], unit = "mins")) > 15
change and combine them in:
gr = id_gr | tm_gr
which shows wherever either "id" changed or "time" > 15.
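On the sample data this works out to (a worked check derived from the rows above, assuming time is already POSIXct):
gr
# [1]  TRUE FALSE FALSE FALSE  TRUE FALSE  TRUE FALSE  TRUE
cumsum(gr)
# [1] 1 1 1 1 2 2 3 3 4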
And to get the result:
tab = tabulate(cumsum(gr)) ## basically, the only operation per group -- 'n by group'
data.frame(id = id[gr], flow = seq_along(tab), n = tab)
# id flow n
#1 a 1 4
#2 a 2 2
#3 b 3 2
#4 b 4 1
On a larger scale:
set.seed(1821); nid = 1e4
dat = replicate(nid, as.POSIXct("2016-07-07 12:00:00 EEST") +
cumsum(sample(c(1, 5, 10, 15, 20, 30, 45, 60, 90, 120, 150, 200, 250, 300), sample(5e2:1e3, 1), TRUE)*60),
simplify = FALSE)
names(dat) = make.unique(rep_len(letters, nid))
dat = data.frame(id = rep(names(dat), lengths(dat)), time = do.call(c, dat))
system.time({
id = as.character(dat$id); tm = dat$time
id_gr = c(TRUE, id[-1] != id[-length(id)])
tm_gr = c(0, difftime(tm[-1], tm[-length(tm)], unit = "mins")) > 15
gr = id_gr | tm_gr
tab = tabulate(cumsum(gr))
ans1 = data.frame(id = id[gr], flow = seq_along(tab), n = tab)
})
# user system elapsed
# 1.44 0.19 1.66
For comparison, included MikeyMike's answer:
library(data.table)
dat2 = copy(dat)
system.time({
ans2 = setDT(dat2)[, list(flow = .GRP, n = .N),
by = .(id, cumsum(as.numeric(difftime(time,
shift(time, 1L, type = "lag", fill = 0),
unit = "mins")) > 15))][, cumsum := NULL]
})
# user system elapsed
# 3.95 0.22 4.26
identical(as.data.table(ans1), ans2)
#[1] TRUE
I have a dataframe of time stamps which specify a categorical status. The status is valid until the next time stamp, at which time the category might change.
I'd like to be able to determine percentage of time spent in each category over regular time periods, like monthly, quarterly, or annually.
This seems like a common enough problem, but I've been unable to find an elegant solution or library to solve it.
For example, with the following sample dataframe:
date status
2016-02-20 09:11:00 a
2016-03-06 02:38:00 c
2016-03-10 15:20:00 b
2016-03-10 21:20:00 a
2016-03-11 11:51:00 b
2016-03-12 01:19:00 c
2016-03-22 14:39:00 c
2016-03-23 11:37:00 b
2016-03-25 17:38:00 c
2016-03-26 01:24:00 c
2016-03-26 12:40:00 a
2016-04-12 10:28:00 c
... I might want to report, for the weeks 3/1-3/7, 3/8-3/14, and 3/15-3/21, the percentage of time spent in each of the 'a', 'b', and 'c' statuses.
I started brute force coding a solution to this (it's ugly...), when I decided maybe I should ask here whether there's a more elegant way to do it.
======== Edited to add an inelegant brute-force solution below ========
library(plyr)  # for ddply, used inside the function
time_analysis <- function(df, starttime, endtime) {
# - assumes sorted by date
startindex <- sum(df$date <= starttime) # find the index of the entry which contains the start time
endindex <- sum(df$date <= endtime) + 1 # find the index of the entry which contains the end time
if ( (startindex == 0) || (endindex > nrow(df) ) ) {
print("Date outside of available data")
return(NULL)
}
df2 <- df[ startindex:endindex, ] # subset the dataframe to include the range, but still need to trim ends
df2$date[1] <- starttime # trim to the start time
df2$date[nrow(df2)] <- endtime # trim back the end time
df2$status[nrow(df2)] <- df2$status[nrow(df2)-1] # status hasn't changed yet, so still the previous status
duration <- diff(df2$date) # vector of the time within each segment, 1 fewer elements than the dataframe
units(duration) <- 'days'
duration <- as.numeric(duration) # need to convert to numeric, or else can't divide by total duration
df2 <- df2[ -nrow(df2), ] # remove the last row, to make length same as the duration vector
df2$duration <- duration # add the duration column
total <- sum(df2$duration) # to allow calculations within the ddply
return(ddply(df2[, c('status','duration')], 'status', function(x) { # calculate by each status category
return( c(
date = starttime,
totaldays = round(sum(x$duration), 2),
fraction = round(sum(x$duration) / total, 3)) )
} ))
}
And below is a sample use that splits the reporting into roughly 2-week chunks. I hate the manual date coding and the use of a loop in R, but am too inexperienced to know a better way.
times <- c("2016-03-01","2016-03-15","2016-04-01","2016-04-15","2016-05-01","2016-05-15")
result <- data.frame()
for (i in 1:(length(times) - 1)) {
result <- rbind( result, time_analysis(d, times[i], times[i+1]) )
}
print(result, row.names = FALSE)
Yielding (other than some errors for dates out of range):
status date totaldays fraction
a 2016-03-01 5.71 0.409
b 2016-03-01 0.81 0.058
c 2016-03-01 7.43 0.532
a 2016-03-15 5.47 0.322
b 2016-03-15 2.25 0.132
c 2016-03-15 9.28 0.546
=====
And after posting, found a much nicer way to generate the times:
times <- as.character( seq( as.Date("2016-03-01"), as.Date("2016-05-15"), by = '2 weeks' ) )
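A hedged way to drop the explicit loop as well (same time_analysis function and d data frame assumed):
times  <- as.character(seq(as.Date("2016-03-01"), as.Date("2016-05-15"), by = "2 weeks"))
result <- do.call(rbind, lapply(seq_len(length(times) - 1),
                                function(i) time_analysis(d, times[i], times[i + 1])))
print(result, row.names = FALSE)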
Here's an approach that combines the cut.POSIXt() S3 method with a nested data.table aggregation.
## define data
library(data.table);
dt <- data.table(date=as.POSIXct(c('2016-02-20 09:11:00','2016-03-06 02:38:00','2016-03-10 15:20:00','2016-03-10 21:20:00','2016-03-11 11:51:00','2016-03-12 01:19:00','2016-03-22 14:39:00','2016-03-23 11:37:00','2016-03-25 17:38:00','2016-03-26 01:24:00','2016-03-26 12:40:00','2016-04-12 10:28:00')),status=c('a','c','b','a','b','c','c','b','c','c','a','c'));
## solution
dt[,{ n1 <- .N; .SD[,.(pct=.N/n1*100),.(status)]; },.(month=cut(date,'month'))];
## month status pct
## 1: 2016-02-01 a 100
## 2: 2016-03-01 c 50
## 3: 2016-03-01 b 30
## 4: 2016-03-01 a 20
## 5: 2016-04-01 c 100
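Since the question mentions weekly reporting, the same pattern works with a different cut() width; a hedged variant (weeks start on Monday by default, and as with the monthly version this weights by the number of time stamps rather than by elapsed time):
dt[,{ n1 <- .N; .SD[,.(pct=.N/n1*100),.(status)]; },.(week=cut(date,'week'))];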
I don't often have to work with dates in R, but I imagine this is fairly easy. I have daily data, as below, for several years, and I want to get the sum of the values for each 8-day period. What is the best approach?
Any help you can provide will be greatly appreciated!
str(temp)
'data.frame':648 obs. of 2 variables:
$ Date : Factor w/ 648 levels "2001-03-24","2001-03-25",..: 1 2 3 4 5 6 7 8 9 10 ...
$ conv2: num -3.93 -6.44 -5.48 -6.09 -7.46 ...
head(temp)
Date amount
24/03/2001 -3.927020472
25/03/2001 -6.4427004
26/03/2001 -5.477592528
27/03/2001 -6.09462162
28/03/2001 -7.45666902
29/03/2001 -6.731540928
30/03/2001 -6.855206184
31/03/2001 -6.807210228
1/04/2001 -5.40278802
I tried to use the aggregate function, but for some reason it doesn't work and aggregates the wrong way:
z <- aggregate(amount ~ Date, timeSequence(from =as.Date("2001-03-24"),to =as.Date("2001-03-29"), by="day"),data=temp,FUN=sum)
I prefer the package xts for such manipulations.
I read your data as a zoo object; note the flexibility of the format option.
library(xts)
ts.dat <- read.zoo(text ='Date amount
24/03/2001 -3.927020472
25/03/2001 -6.4427004
26/03/2001 -5.477592528
27/03/2001 -6.09462162
28/03/2001 -7.45666902
29/03/2001 -6.731540928
30/03/2001 -6.855206184
31/03/2001 -6.807210228
1/04/2001 -5.40278802',header=TRUE,format = '%d/%m/%Y')
Then I extract the indices that mark each 8-day period:
ep <- endpoints(ts.dat,'days',k=8)
Finally, I apply my function to the time series between those endpoints:
period.apply(x=ts.dat,ep,FUN=sum )
2001-03-29 2001-04-01
-36.13014 -19.06520
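For intuition, judging from the two sums above (the first covers the first six observations, the second the last three), the endpoints vector here works out to:
ep
# [1] 0 6 9   # 0 is the conventional leading endpoint; rows 6 and 9 close the two 8-day blocks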
Use cut() in your aggregate() command.
Some sample data:
set.seed(1)
mydf <- data.frame(
DATE = seq(as.Date("2000/1/1"), by="day", length.out = 365),
VALS = runif(365, -5, 5))
Now, the aggregation. See ?cut.Date for details. You can specify the number of days you want in each group using cut:
output <- aggregate(VALS ~ cut(DATE, "8 days"), mydf, sum)
list(head(output), tail(output))
# [[1]]
# cut(DATE, "8 days") VALS
# 1 2000-01-01 8.242384
# 2 2000-01-09 -5.879011
# 3 2000-01-17 7.910816
# 4 2000-01-25 -6.592012
# 5 2000-02-02 2.127678
# 6 2000-02-10 6.236126
#
# [[2]]
# cut(DATE, "8 days") VALS
# 41 2000-11-16 17.8199285
# 42 2000-11-24 -0.3772209
# 43 2000-12-02 2.4406024
# 44 2000-12-10 -7.6894484
# 45 2000-12-18 7.5528077
# 46 2000-12-26 -3.5631950
rollapply. The zoo package has a rolling apply function which can also do non-rolling aggregations. First convert the temp data frame into zoo using read.zoo like this:
library(zoo)
zz <- read.zoo(temp)
and then it's just:
rollapply(zz, 8, sum, by = 8)
Drop the by = 8 if you want a rolling total instead.
(Note that the two versions of temp in your question are not the same. They have different column headings and the Date columns are in different formats. I have assumed the str(temp) output version here. For the head(temp) version one would have to add a format = "%d/%m/%Y" argument to read.zoo.)
aggregate. Here is a solution that does not use any external packages. It uses aggregate based on the original data frame.
ix <- 8 * ((1:nrow(temp) - 1) %/% 8 + 1)
aggregate(temp[2], list(period = temp[ix, 1]), sum)
Note that ix looks like this:
> ix
[1] 8 8 8 8 8 8 8 8 16
so it groups the indices of the first 8 rows, the second 8 and so on.
Those are NOT Date-classed variables. (No self-respecting program would display a date like that, not to mention that they are labeled as factors.) [I later noticed these were not the same objects.] Furthermore, the timeSequence function (at least the one in the timeDate package) does not return a Date-class vector either. So your expectation that there would be a "right way" for two disparate non-Date objects to be aligned in a sensible manner is ill-conceived. The irony is that just using the temp$Date column would have worked, since:
> z <- aggregate(amount ~ Date, data=temp , FUN=sum)
> z
Date amount
1 1/04/2001 -5.402788
2 24/03/2001 -3.927020
3 25/03/2001 -6.442700
4 26/03/2001 -5.477593
5 27/03/2001 -6.094622
6 28/03/2001 -7.456669
7 29/03/2001 -6.731541
8 30/03/2001 -6.855206
9 31/03/2001 -6.807210
But to get it in 8-day intervals, use cut.Date:
> z <- aggregate(temp$amount ,
list(Dts = cut(as.Date(temp$Date, format="%d/%m/%Y"),
breaks="8 day")), FUN=sum)
> z
Dts x
1 2001-03-24 -49.792561
2 2001-04-01 -5.402788
A cleaner approach, extending G. Grothendieck's approach above. Note: it does not take into account whether the dates are continuous or discontinuous; the sum is calculated over a fixed width.
code
library(zoo)   # for rollapply
interval <- 8               # your desired date interval: 2 days, 3 days, or whatever
enddate  <- interval - 1    # this sets the end-date offset within each interval
z <- aggregate(. ~ V1, data = df, sum)  # aggregate sum of all duplicate dates
z$V1 <- as.Date(z$V1)
nrows <- nrow(z)            # computed after z exists
data.frame(Start.date = z[seq(1, nrows, interval), 1],
           End.date   = z[seq(1, nrows, interval) + enddate, 1],
           Total.sum  = rollapply(z$V2, interval, sum, by = interval, partial = TRUE))
output
Start.date End.date Total.sum
1 2000-01-01 2000-01-08 9.1395926
2 2000-01-09 2000-01-16 15.0343960
3 2000-01-17 2000-01-24 4.0974712
4 2000-01-25 2000-02-01 4.1102645
5 2000-02-02 2000-02-09 -11.5816277
data
df <- data.frame(
V1 = seq(as.Date("2000/1/1"), by="day", length.out = 365),
V2 = runif(365, -5, 5))