Complex conditional groupby in R

Here is the problem I am trying to solve: I want to transform table 1 into table 2.
Table 1:
df
# icustay_id starttime endtime vaso_rate vaso_amount
# 1 1 2019-09-10 13:20:00 2019-09-11 13:20:00 3 293.0896
# 2 1 2019-09-11 13:30:00 2019-09-12 01:20:00 9 602.9983
# 3 1 2019-09-14 16:40:00 2019-09-15 16:40:00 4 208.9360
# 4 2 2019-09-10 12:40:00 2019-09-13 13:20:00 2 864.1494
# 5 3 2019-09-10 01:20:00 2019-09-11 13:20:00 9 405.2939
Table 2:
df
# icustay_id starttime endtime vaso_rate vaso_amount
# 1 1 2019-09-10 13:20:00 2019-09-12 01:20:00 3 293.0896
# 2 1 2019-09-14 16:40:00 2019-09-15 16:40:00 4 208.9360
# 3 2 2019-09-10 12:40:00 2019-09-13 13:20:00 2 864.1494
# 4 3 2019-09-10 01:20:00 2019-09-11 13:20:00 9 405.2939
As you can see, I am trying to build a function that will:
For every unique patient (unique icustay_id), group rows by icustay_id ONLY if the medication was stopped for less than an hour between doses.
When rows merge:
Some columns retain the same value (i.e. the patient identifiers)
Some columns must be modified:
Keep the earlier starttime
Keep the later endtime
Average the vaso_rate
Sum the vaso_amount
To do so, I decided to add another identifier column that takes the value 1 when the condition is met; once all rows are flagged, I can group by icustay_id and that new column.
However, my code as written does not assign the appropriate ID with respect to the condition.
Here is the sample df creation code:
set.seed(1)
df <- data.frame(
  icustay_id = c(1, 1, 1, 2, 3),
  starttime = as.POSIXct(c("2019-09-10 13:20", "2019-09-11 13:30", "2019-09-14 16:40",
                           "2019-09-10 12:40", "2019-09-10 01:20")),
  endtime = as.POSIXct(c("2019-09-11 13:20", "2019-09-11 01:20", "2019-09-15 16:40",
                         "2019-09-13 13:20", "2019-09-11 13:20")),
  vaso_rate = sample(1:10, 5, replace = TRUE),
  vaso_amount = runif(5, 0, 1000)
)
Here is the function code that I have right now:
merge_pressor_doses <- function(df){
  df %>% arrange(icustay_id, starttime)
  for (i in unique(df$icustay_id)) {
    for (j in which(df$icustay_id == i)) {
      start <- df$starttime[as.numeric(j) + 1]
      end <- df$endtime[as.numeric(j)]
      stopduration <- as.numeric(difftime(start, end, units = 'mins'))
      bool <- stopduration < 60
      df <- df %>% mutate(
        group = case_when(
          bool = TRUE ~ 1,
          bool = FALSE ~ 0)
      )
    }
  }
  return(df)
}
This should result in:
df
# icustay_id starttime endtime vaso_rate vaso_amount group
# 1 1 2019-09-10 13:20:00 2019-09-11 13:20:00 3 293.0896 1
# 2 1 2019-09-11 13:30:00 2019-09-12 01:20:00 9 602.9983 1
# 3 1 2019-09-14 16:40:00 2019-09-15 16:40:00 4 208.9360 0
# 4 2 2019-09-10 12:40:00 2019-09-13 13:20:00 2 864.1494 1
# 5 3 2019-09-10 01:20:00 2019-09-11 13:20:00 9 405.2939 1
But in my case the 3rd row is assigned a value of 1...
If I can manage to make this portion of the code work, I can proceed with the second portion to achieve my objective.
That eventual second portion of the code would be:
group_by(group, icustay_id) %>%
  summarise(
    starttime = min(starttime),
    endtime = max(endtime),
    vaso_rate = mean(vaso_rate),
    sum_vaso_amount = sum(vaso_amount))
Thank you in advance!!

I'd create a new column pause which says how much time has passed since the last medication ended. Then, using this column, we assign group ids to the medications with cumsum(pause >= 1): start with 0, and whenever pause is >= 1 hour, it's a new group.
library(dplyr)
library(tidyr)
df <- df %>%
  group_by(icustay_id) %>%
  mutate(pause = difftime(starttime, lag(endtime), units = "hours")) %>%
  replace_na(list(pause = 0)) %>%
  mutate(vaso_id = cumsum(pause >= 1))
# A tibble: 5 x 7
# Groups: icustay_id [3]
# icustay_id starttime endtime vaso_rate vaso_amount pause vaso_id
# <dbl> <dttm> <dttm> <int> <dbl> <drtn> <int>
# 1 1 2019-09-10 13:20:00 2019-09-11 13:20:00 9 898. 0.0000000 hours 0
# 2 1 2019-09-11 13:30:00 2019-09-11 01:20:00 4 945. 0.1666667 hours 0
# 3 1 2019-09-14 16:40:00 2019-09-15 16:40:00 7 661. 87.3333333 hours 1
# 4 2 2019-09-10 12:40:00 2019-09-13 13:20:00 1 629. 0.0000000 hours 0
# 5 3 2019-09-10 01:20:00 2019-09-11 13:20:00 2 61.8 0.0000000 hours 0
Then we can use the code you provided.
df %>%
  group_by(icustay_id, vaso_id) %>%
  summarise(
    starttime = min(starttime),
    endtime = max(endtime),
    vaso_rate = mean(vaso_rate),
    sum_vaso_amount = sum(vaso_amount)
  )
# A tibble: 4 x 6
# Groups: icustay_id [3]
# icustay_id vaso_id starttime endtime vaso_rate sum_vaso_amount
# <dbl> <int> <dttm> <dttm> <dbl> <dbl>
# 1 1 0 2019-09-10 13:20:00 2019-09-11 13:20:00 6.5 1843.
# 2 1 1 2019-09-14 16:40:00 2019-09-15 16:40:00 7 661.
# 3 2 0 2019-09-10 12:40:00 2019-09-13 13:20:00 1 629.
# 4 3 0 2019-09-10 01:20:00 2019-09-11 13:20:00 2 61.8
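If this is needed repeatedly, both steps can be chained into one function. A minimal sketch reusing the answer's logic (the .groups = "drop" argument assumes dplyr >= 1.0):
library(dplyr)
library(tidyr)
merge_pressor_doses <- function(df) {
  df %>%
    arrange(icustay_id, starttime) %>%
    group_by(icustay_id) %>%
    # time stopped since the previous dose ended; NA for the first dose
    mutate(pause = difftime(starttime, lag(endtime), units = "hours")) %>%
    replace_na(list(pause = 0)) %>%
    # a new dose group starts whenever the pause reaches one hour
    mutate(vaso_id = cumsum(pause >= 1)) %>%
    group_by(icustay_id, vaso_id) %>%
    summarise(
      starttime = min(starttime),
      endtime = max(endtime),
      vaso_rate = mean(vaso_rate),
      sum_vaso_amount = sum(vaso_amount),
      .groups = "drop"
    )
}
merge_pressor_doses(df)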

Related

Imputing date based on next(or previous) available date grouped by another column

I have a dataframe that looks like this:
CYCLE date_cycle Randomization_Date COUPLEID
1 0 2016-02-16 10892
2 1 2016-08-17 2016-02-19 10894
3 1 2016-08-14 2016-02-26 10899
4 1 2016-02-26 10900
5 2 2016-03--- 2016-02-26 10900
6 3 2016-07-19 2016-02-26 10900
7 4 2016-11-15 2016-02-26 10900
8 1 2016-02-27 10901
9 2 2016-02--- 2016-02-27 10901
10 1 2016-03-27 2016-03-03 10902
11 2 2016-04-21 2016-03-03 10902
12 1 2016-03-03 10903
13 2 2016-03--- 2016-03-03 10903
14 0 2016-03-03 10904
15 1 2016-03-03 10905
16 2 2016-03-03 10905
17 3 2016-03-03 10905
18 4 2016-04-14 2016-03-03 10905
19 5 2016-05--- 2016-03-03 10905
20 6 2016-06--- 2016-03-03 10905
The goal is to fill in the missing day for a given ID by taking an earlier or later known date and adding or subtracting 28 days.
The date_cycle variable was originally in the dataframe as a character type.
I have tried to code it as follows:
mutate(rowwise(df),
       newdate = case_when(str_count(date1, pattern = "\\W") > 2 ~
                             lag(as.Date.character(date1, "%Y-%m-%d"), 1) + days(28)))
But I need to incorporate it by ID and by CYCLE.
An example of my data could be made like this:
data.frame(stringsAsFactors = FALSE,
           CYCLE = c(0,1,1,1,2,3,4,1,2,1,2,1,2,0,1,2,3,4,5,6),
           date_cycle = c(NA,"2016-08-17","2016-08-14",NA,"2016-03---","2016-07-19",
                          "2016-11-15",NA,"2016-02---","2016-03-27","2016-04-21",NA,
                          "2016-03---",NA,NA,NA,NA,"2016-04-14","2016-05---","2016-06---"),
           Randomization_Date = c("2016-02-16","2016-02-19","2016-02-26","2016-02-26",
                                  "2016-02-26","2016-02-26","2016-02-26","2016-02-27",
                                  "2016-02-27","2016-03-03","2016-03-03","2016-03-03",
                                  "2016-03-03","2016-03-03","2016-03-03","2016-03-03",
                                  "2016-03-03","2016-03-03","2016-03-03","2016-03-03"),
           COUPLEID = c(10892,10894,10899,10900,10900,10900,10900,10901,10901,
                        10902,10902,10903,10903,10904,10905,10905,10905,10905,
                        10905,10905)
)
The output I am after would look like:
COUPLEID CYCLE date_cycle new_date_cycle
a 1 2014-03-27 2014-03-27
a 1 2014-04--- 2014-04-24
b 1 2014-03-24 2014-03-24
b 2 2014-04--- 2014-04-21
b 3 2014-05--- 2014-05-19
c 1 2014-04--- 2014-04-02
c 2 2014-04-30 2014-04-30
I have also started to write a long conditional, but I wanted to ask here and see if anyone knew of a more straightforward way to do it, instead of explicitly writing out all of the possible conditions.
mutate(rowwise(df),
       newdate = case_when(
         grp == 1 & str_count(date1, pattern = "\\W") > 2 & !is.na(lead(date1,1)) ~ lead(date1,1) - days(28),
         grp == 2 & str_count(date1, pattern = "\\W") > 2 & !is.na(lead(date1,1)) ~ lead(date1,1) - days(28),
         grp == 3 & str_count(date1, pattern = "\\W") > 2 & ...)))
A function to fill dates forwards and backwards:
filldates <- function(dates) {
  m = which(!is.na(dates))
  if (length(m) > 0 & length(m) != length(dates)) {
    # fill backwards from the first known date
    if (m[1] > 1) for (i in seq(m[1], 1, -1)) if (is.na(dates[i])) dates[i] = dates[i+1] - 28
    # then fill forwards from the previous known date
    if (sum(is.na(dates)) > 0) for (i in seq_along(dates)) if (is.na(dates[i])) dates[i] = dates[i-1] + 28
  }
  return(dates)
}
Usage:
data %>%
  arrange(ID, grp) %>%
  group_by(ID) %>%
  mutate(date2 = filldates(as.Date(date1, "%Y-%m-%d")))
Output:
ID grp date1 date2
<chr> <dbl> <chr> <date>
1 a 1 2014-03-27 2014-03-27
2 a 2 2014-04--- 2014-04-24
3 b 1 2014-03-24 2014-03-24
4 b 2 2014-04--- 2014-04-21
5 b 3 2014-05--- 2014-05-19
6 c 1 2014-03--- 2014-04-02
7 c 2 2014-04-30 2014-04-30
An option using purrr::accumulate().
library(tidyverse)
library(lubridate)  # for ymd(), days(), as_date()

center <- df %>%
  group_by(ID) %>%
  mutate(helpDate = ymd(str_replace(date1, '---', '-01')),
         refDate = max(ymd(date1), na.rm = TRUE))

backward <- center %>%
  filter(refDate == max(helpDate)) %>%
  mutate(date2 = accumulate(refDate, ~ . - days(28), .dir = 'backward'))

forward <- center %>%
  filter(refDate == min(helpDate)) %>%
  mutate(date2 = accumulate(refDate, ~ . + days(28)))

bind_rows(forward, backward) %>%
  ungroup() %>%
  mutate(date2 = as_date(date2)) %>%
  select(-c('helpDate', 'refDate'))
# # A tibble: 7 x 4
# ID grp date1 date2
# <chr> <int> <chr> <date>
# 1 a 1 2014-03-27 2014-03-27
# 2 a 2 2014-04--- 2014-04-24
# 3 b 1 2014-03-24 2014-03-24
# 4 b 2 2014-04--- 2014-04-21
# 5 b 3 2014-05--- 2014-05-19
# 6 c 1 2014-03--- 2014-04-02
# 7 c 2 2014-04-30 2014-04-30
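For completeness, a compact vectorized sketch of the same ±28-day idea (an addition, not from either answer above). It assumes cycles within a group are consecutive and anchors every missing date on the first observed date in the group, using the same generic column names ID, grp, date1:
library(dplyr)
fill28 <- function(dates) {
  i <- seq_along(dates)
  k <- which(!is.na(dates))[1]              # first known date in the group
  if (is.na(k)) return(dates)               # no anchor: leave everything NA
  # shift by whole 28-day steps relative to the anchor position
  dates[is.na(dates)] <- dates[k] + 28 * (i[is.na(dates)] - k)
  dates
}
data %>%
  arrange(ID, grp) %>%
  group_by(ID) %>%
  mutate(date2 = fill28(as.Date(date1, "%Y-%m-%d")))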

How to generate a unique ID for each group based on relative date interval in R using dplyr?

I have a cohort of data with multiple person visits and want to group visits with a common ID based on person # and the time of the visit. The condition is: if an admission starts within 24 hours of the previous discharge, then I want those visits to have the same ID.
Sample of what data looks like:
dat <- data.frame(
  Person_ID = c(1,1,1,2,3,3,3,4,4),
  Admit_Date_Time = as.POSIXct(c("2017-02-07 15:26:00","2017-04-21 10:20:00",
                                 "2017-04-22 12:12:00","2017-10-16 01:31:00",
                                 "2017-01-24 02:41:00","2017-01-24 05:31:00",
                                 "2017-01-28 04:26:00","2017-12-01 01:31:00",
                                 "2017-12-01 01:31:00"), format = "%Y-%m-%d %H:%M"),
  Discharge_Date_Time = as.POSIXct(c("2017-03-01 11:42:00","2017-04-22 05:56:00",
                                     "2017-04-26 21:01:00","2017-10-18 20:11:00",
                                     "2017-01-27 22:15:00","2017-01-26 15:35:00",
                                     "2017-01-28 09:25:00","2017-12-05 18:33:00",
                                     "2017-12-04 16:41:00"), format = "%Y-%m-%d %H:%M"),
  Visit_ID = c(1:9))
This is what I tried to start:
dat1 <- dat %>%
  arrange(Person_ID, Admit_Date_Time) %>%
  group_by(Person_ID) %>%
  mutate(Previous_Visit_Interval = difftime(lag(Discharge_Date_Time, 1),
                                            Admit_Date_Time, units = "hours")) %>%
  mutate(start = c(1, Previous_Visit_Interval[-1] < hours(-24)),
         run = cumsum(start))
dat1$ID = as.numeric(as.factor(paste0(dat1$Person_ID, dat1$run)))
Which is almost right, except it does not give the correct ID for visit 7 (person #3): there are three visits, the second visit is entirely within the first, and the third starts within 24 hours of the first but not the second.
There's probably a way to shorten this, but here's an approach using tidyr::gather and spread. By gathering into long format, we can track the cumulative admissions inside each visit. A new visit is recorded whenever there's a new Person_ID or that Person_ID completed a visit (cumulative admissions went to zero) at least 24 hours prior.
library(dplyr)
library(tidyr)
library(lubridate)  # for ddays()

dat1 <- dat %>%
  # Gather into long format with event type in one column, timestamp in another
  gather(event, time, Admit_Date_Time:Discharge_Date_Time) %>%
  # I want discharges to have an effect up to 24 hours later. Sort using that.
  mutate(time_adj = if_else(event == "Discharge_Date_Time",
                            time + ddays(1),
                            time)) %>%
  arrange(Person_ID, time_adj) %>%
  # For each Person_ID, track cumulative admissions. 0 means a visit has completed.
  # (b/c we sorted by time_adj, these reflect the 24hr period after discharges.)
  group_by(Person_ID) %>%
  mutate(admissions = if_else(event == "Admit_Date_Time", 1, -1)) %>%
  mutate(admissions_count = cumsum(admissions)) %>%
  ungroup() %>%
  # Record a new Hosp_ID when either (a) a new Person, or (b) preceded by a
  # completed visit (ie admissions_count was zero).
  mutate(Hosp_ID_chg = 1 *
           (Person_ID != lag(Person_ID, default = 1) |  # (a)
              lag(admissions_count, default = 1) == 0), # (b)
         Hosp_ID = cumsum(Hosp_ID_chg)) %>%
  # Spread back into original format
  select(-time_adj, -admissions, -admissions_count, -Hosp_ID_chg) %>%
  spread(event, time)
Results
> dat1
# A tibble: 9 x 5
Person_ID Visit_ID Hosp_ID Admit_Date_Time Discharge_Date_Time
<dbl> <int> <dbl> <dttm> <dttm>
1 1 1 1 2017-02-07 15:26:00 2017-03-01 11:42:00
2 1 2 2 2017-04-21 10:20:00 2017-04-22 05:56:00
3 1 3 2 2017-04-22 12:12:00 2017-04-26 21:01:00
4 2 4 3 2017-10-16 01:31:00 2017-10-18 20:11:00
5 3 5 4 2017-01-24 02:41:00 2017-01-27 22:15:00
6 3 6 4 2017-01-24 05:31:00 2017-01-26 15:35:00
7 3 7 4 2017-01-28 04:26:00 2017-01-28 09:25:00
8 4 8 5 2017-12-01 01:31:00 2017-12-05 18:33:00
9 4 9 5 2017-12-01 01:31:00 2017-12-04 16:41:00
Here's a data.table approach using an overlap-join
library( data.table )
library( lubridate )
setDT( dat )
setorder( dat, Person_ID, Admit_Date_Time )
#create a 1-day extension after each discharge
dt2 <- dat[, discharge_24h := Discharge_Date_Time %m+% days(1)][]
#now create id
setkey( dat, Admit_Date_Time, discharge_24h )
#create data-table with overlap-join, create groups based on overlapping ranges
dt2 <- setorder(
foverlaps( dat,
dat,
mult = "first",
type = "any",
nomatch = 0L
),
Visit_ID )[, list( Visit_ID = i.Visit_ID,
Hosp_ID = .GRP ),
by = .( Visit_ID )][, Visit_ID := NULL]
#reorder the result
setorder( dt2[ dat, on = "Visit_ID" ][, discharge_24h := NULL], Visit_ID )[]
# Visit_ID Hosp_ID Person_ID Admit_Date_Time Discharge_Date_Time
# 1: 1 1 1 2017-02-07 15:26:00 2017-03-01 11:42:00
# 2: 2 2 1 2017-04-21 10:20:00 2017-04-22 05:56:00
# 3: 3 2 1 2017-04-22 12:12:00 2017-04-26 21:01:00
# 4: 4 3 2 2017-10-16 01:31:00 2017-10-18 20:11:00
# 5: 5 4 3 2017-01-24 02:41:00 2017-01-27 22:15:00
# 6: 6 4 3 2017-01-24 05:31:00 2017-01-26 15:35:00
# 7: 7 4 3 2017-01-28 04:26:00 2017-01-28 09:25:00
# 8: 8 5 4 2017-12-01 01:31:00 2017-12-05 18:33:00
# 9: 9 5 4 2017-12-01 01:31:00 2017-12-04 16:41:00
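For reference, a more compact dplyr sketch of the same grouping idea (an addition, not from either answer above): compare each admission with the running maximum of all previous discharges, so visits that are fully nested inside earlier ones are handled too.
library(dplyr)
dat %>%
  arrange(Person_ID, Admit_Date_Time) %>%
  group_by(Person_ID) %>%
  mutate(
    # latest discharge seen before this admission (as seconds since epoch)
    prev_max_discharge = lag(cummax(as.numeric(Discharge_Date_Time))),
    # new visit if first row for the person or the gap exceeds 24 hours
    new_visit = is.na(prev_max_discharge) |
      as.numeric(Admit_Date_Time) - prev_max_discharge > 24 * 3600
  ) %>%
  ungroup() %>%
  mutate(Hosp_ID = cumsum(new_visit)) %>%
  select(-prev_max_discharge, -new_visit)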

How to replace missing value in time series data by looping?

I'm trying to write a loop to replace missing time series data with value == 0.
This is my data:
df
Times value
05-03-2018 09:00:00 1
05-03-2018 09:01:26 2
05-03-2018 09:04:28 1
05-03-2018 09:07:05 2
05-03-2018 09:09:05 1
and my desired output is:
Times value
05-03-2018 09:00:00 1
05-03-2018 09:01:26 2
05-03-2018 09:02:00 0
05-03-2018 09:03:00 0
05-03-2018 09:04:28 1
05-03-2018 09:05:00 0
05-03-2018 09:06:00 0
05-03-2018 09:07:05 2
05-03-2018 09:08:00 0
05-03-2018 09:09:05 1
Missing minutes in the data should be created and assigned a value of 0.
What should I do? Create a new dummy table with the missing minutes, or build a sequence and loop over it?
You could do this with the dplyr and padr packages. padr is very useful for extending datetime series between two dates or adding missing values.
library(dplyr)
library(padr)
df1 %>%
  thicken(interval = "min") %>%                                # roll time series up to minutes
  pad(by = "Times_min") %>%                                    # add missing minute intervals
  fill_by_value(value) %>%                                     # fill missing values with 0
  mutate(Times = if_else(is.na(Times), Times_min, Times)) %>%  # fill NA's in Times column
  select(-Times_min)                                           # drop column no longer needed
pad applied on the interval: min
Times value
1 2018-03-05 09:00:00 1
2 2018-03-05 09:01:26 2
3 2018-03-05 09:02:00 0
4 2018-03-05 09:03:00 0
5 2018-03-05 09:04:28 1
6 2018-03-05 09:05:00 0
7 2018-03-05 09:06:00 0
8 2018-03-05 09:07:05 2
9 2018-03-05 09:08:00 0
10 2018-03-05 09:09:05 1
Data:
df1 <- structure(list(Times = structure(c(1520240400, 1520240486, 1520240668,
1520240825, 1520240945), class = c("POSIXct", "POSIXt"), tzone = "UTC"),
value = c(1, 2, 1, 2, 1)), row.names = c(NA, -5L), class = "data.frame")
You could create a second 'complete' data frame and merge them together.
dif <- diff(as.numeric(range(df1$Times)))
df1 <- merge(df1,
             data.frame(Times = as.POSIXct(0:(dif/60)*60,
                                           origin = df1[1, 1], tz = "UTC")),
             all = TRUE)
Then replace resulting NAs with 0.
df1[is.na(df1$value), 2] <- 0
Finally remove the duplicates.
df1 <- df1[-which(duplicated(strftime(df1$Times, format="%M"))) + 1, ]
Yields:
> df1
Times value
1 2018-03-05 09:00:00 1
3 2018-03-05 09:01:26 2
4 2018-03-05 09:02:00 0
5 2018-03-05 09:03:00 0
7 2018-03-05 09:04:28 1
8 2018-03-05 09:05:00 0
9 2018-03-05 09:06:00 0
11 2018-03-05 09:07:05 2
12 2018-03-05 09:08:00 0
14 2018-03-05 09:09:05 1
library(tidyverse)
library(lubridate)
library(magrittr)
Recreate your data:
df <- tibble(
  Times = c("05-03-2018 09:00:00", "05-03-2018 09:01:26",
            "05-03-2018 09:04:28", "05-03-2018 09:07:05",
            "05-03-2018 09:09:05"),
  value = c(1, 2, 1, 2, 1)
)
Code
Parse your Times variable to datetime:
df$Times %<>% parse_datetime("%d-%m-%Y %H:%M:%S")
Create a new variable join that is truncated to the minute:
df %<>% mutate(join = floor_date(Times, unit = "minute"))
Create a new data frame with one variable, also called join, containing every minute in your range:
all <- tibble(
  join = seq(as_datetime(first(df$Times)), as_datetime(last(df$Times)), by = 60)
)
Join both data frames:
result <- left_join(all, df)
Add the "missing minutes" to your Times variable:
result$Times[is.na(result$Times)] <- result$join[is.na(result$Times)]
Replace the NAs by 0:
result$value[is.na(result$value)] <- 0
Remove the join variable:
result %>%
  select(-join)
Result
# A tibble: 10 x 2
Times value
<dttm> <dbl>
1 2018-03-05 09:00:00 1
2 2018-03-05 09:01:26 2
3 2018-03-05 09:02:00 0
4 2018-03-05 09:03:00 0
5 2018-03-05 09:04:28 1
6 2018-03-05 09:05:00 0
7 2018-03-05 09:06:00 0
8 2018-03-05 09:07:05 2
9 2018-03-05 09:08:00 0
10 2018-03-05 09:09:05 1
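A further alternative sketch (an addition, reusing the df1 data frame from the earlier answers): tidyr::complete() can expand the minute grid in one step, using the same floor-to-minute trick as above.
library(dplyr)
library(tidyr)
library(lubridate)
df1 %>%
  mutate(minute = floor_date(Times, "minute")) %>%
  complete(minute = seq(min(minute), max(minute), by = "min"),
           fill = list(value = 0)) %>%         # new rows get value 0
  mutate(Times = coalesce(Times, minute)) %>%  # fill in the missing timestamps
  select(Times, value)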

Select value from time range dataframe in R

I have a dataframe of datetimes
tdata_df <- data.frame(timestamp=seq(c(ISOdate(2018,4,20)), by = (60*229), length.out = 6))
tdata_df
timestamp
1 2018-04-20 21:00:00
2 2018-04-21 00:49:00
3 2018-04-21 04:38:00
4 2018-04-21 08:27:00
5 2018-04-21 12:16:00
6 2018-04-21 16:05:00
Then I would like to get values from this time range table:
time_range_df <- data.frame(start = c("08:30","11:35","15:10","05:00"),
                            end = c("11:29","15:09","02:29","08:29"),
                            value = c(1,2,3,4))
time_range_df
start end value
1 08:30 11:29 1
2 11:35 15:09 2
3 15:10 02:29 3
4 05:00 08:29 4
like this:
timestamp value
1 2018-04-20 21:00:00 3
2 2018-04-21 00:49:00 3
3 2018-04-21 04:38:00 NA
4 2018-04-21 08:27:00 4
5 2018-04-21 12:16:00 2
6 2018-04-21 16:05:00 3
Any help would be greatly appreciated.
The sqldf package provides greater flexibility for joins in such cases. The approach is:
Change the times in time_range_df to offsets from midnight.
Add a column in tdata_df representing the time elapsed since midnight.
Join both data frames on overlapping time since midnight.
library(lubridate)
time_range_df$start <- as.numeric(seconds(hm(time_range_df$start)))
time_range_df$end <- as.numeric(seconds(hm(time_range_df$end)))
tdata_df$timeSinceMidNigh <- as.numeric(seconds(hms(format(ymd_hms(tdata_df$timestamp),
                                                           format = "%H:%M:%S"))))
library(sqldf)
sqlquery <- "SELECT D1.timestamp, Q.value FROM tdata_df D1
             LEFT JOIN (SELECT * FROM tdata_df D, time_range_df R
                        WHERE (R.start < R.end AND D.timeSinceMidNigh BETWEEN R.start AND R.end) OR
                              (R.start > R.end AND D.timeSinceMidNigh BETWEEN R.start AND 86400) OR
                              (R.start > R.end AND D.timeSinceMidNigh BETWEEN 0 AND R.end)) Q
             ON D1.timestamp = Q.timestamp"
sqldf(sqlquery)
# timestamp value
# 1 2018-04-20 13:00:00 2
# 2 2018-04-20 16:49:00 3
# 3 2018-04-20 20:38:00 3
# 4 2018-04-21 00:27:00 3
# 5 2018-04-21 04:16:00 NA
# 6 2018-04-21 08:05:00 4
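The same three-case test can also be written without SQL. A rowwise dplyr sketch (an adaptation, not part of the original answer, reusing the seconds-since-midnight columns created above):
library(dplyr)
tdata_df %>%
  rowwise() %>%
  mutate(value = {
    t <- timeSinceMidNigh
    hit <- with(time_range_df,
                (start < end & t >= start & t <= end) |   # normal range
                (start > end & (t >= start | t <= end)))  # range wrapping past midnight
    if (any(hit)) time_range_df$value[which(hit)[1]] else NA_real_
  }) %>%
  ungroup()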

Consolidating rows by max and min dates

I have a dataset that looks like this.
id1 = c(1,1,1,1,1,1,1,1,2,2)
id2 = c(3,3,3,3,3,3,3,3,3,3)
lat = c(-62.81559,-62.82330, -62.78693,-62.70136, -62.76476,-62.48157,-62.49064,-62.45838,42.06258,42.06310)
lon = c(-61.15518, -61.14885,-61.17801,-61.00363, -59.14270, -59.22009, -59.32967, -59.04125 ,154.70579, 154.70625)
start_date= as.POSIXct(c('2016-03-24 15:30:00', '2016-03-24 15:30:00','2016-03-24 23:40:00','2016-03-25 12:50:00','2016-03-29 18:20:00','2016-06-01 02:40:00','2016-06-01 08:00:00','2016-06-01 16:30:00','2016-07-29 20:20:00','2016-07-29 20:20:00'), tz = 'UTC')
end_date = as.POSIXct(c('2016-03-24 23:40:00', '2016-03-24 18:50:00','2016-03-25 03:00:00','2016-03-25 19:20:00','2016-04-01 03:30:00','2016-06-02 01:40:00','2016-06-01 14:50:00','2016-06-02 01:40:00','2016-07-30 07:00:00','2016-07-30 07:00:00'),tz = 'UTC')
speed = c(2.9299398, 2.9437502, 0.0220565, 0.0798409, 1.2824859, 1.8685429, 3.7927680, 1.8549291, 0.8140249,0.8287073)
df = data.frame(id1, id2, lat, lon, start_date, end_date, speed)
id1 id2 lat lon start_date end_date speed
1 1 3 -62.81559 -61.15518 2016-03-24 15:30:00 2016-03-24 23:40:00 2.9299398
2 1 3 -62.82330 -61.14885 2016-03-24 15:30:00 2016-03-24 18:50:00 2.9437502
3 1 3 -62.78693 -61.17801 2016-03-24 23:40:00 2016-03-25 03:00:00 0.0220565
4 1 3 -62.70136 -61.00363 2016-03-25 12:50:00 2016-03-25 19:20:00 0.0798409
5 1 3 -62.76476 -59.14270 2016-03-29 18:20:00 2016-04-01 03:30:00 1.2824859
6 1 3 -62.48157 -59.22009 2016-06-01 02:40:00 2016-06-02 01:40:00 1.8685429
7 1 3 -62.49064 -59.32967 2016-06-01 08:00:00 2016-06-01 14:50:00 3.7927680
8 1 3 -62.45838 -59.04125 2016-06-01 16:30:00 2016-06-02 01:40:00 1.8549291
9 2 3 42.06258 154.70579 2016-07-29 20:20:00 2016-07-30 07:00:00 0.8140249
10 2 3 42.06310 154.70625 2016-07-29 20:20:00 2016-07-30 07:00:00 0.8287073
The actual dataset is larger. What I would like to do is consolidate this dataset based on date ranges, grouped by id1 and id2, such that if the date/time range on one row is within 12 hours of the next date/time range (ABS(end_date[1] - start_date[2]) < 12 hrs), the rows are consolidated, with the new start_date being the earliest date and the end_date being the latest. All other values (lat, lon, speed) are averaged. This is in some sense a 'deduping' effort, as rows that are within 12 hours actually represent the same 'event'. For the above example the final result would be:
id1 id2 lat lon start_date end_date speed
1 1 3 -62.7818 -61.12142 2016-03-24 15:30:00 2016-03-25 19:20:00 1.493897
2 1 3 -62.76476 -59.14270 2016-03-29 18:20:00 2016-04-01 03:30:00 1.2824859
3 1 3 -62.47686 -59.197 2016-06-01 02:40:00 2016-06-02 01:40:00 2.505413
4 2 3 42.06284 154.706 2016-07-29 20:20:00 2016-07-30 07:00:00 0.8213661
With the first four rows consolidated (into row 1), the 5th row left alone (row 2), rows 6-8 consolidated (row 3), and rows 9-10 consolidated (row 4).
I have been trying to do this with dplyr group_by and summarize, but I can't seem to get the date ranges to come out correctly.
Hopefully someone can determine a simple means of solving the problem. Extra points if you know how to do it in SQL ;-) so I can dedupe before even pulling this into R.
Here is a first very naive implementation. Warning: it is slow, not pretty and still missing the start and end dates in the output! Note that it expects the rows to be ordered by date and time. If that's not the case in the data set, you can do it in R or SQL first. Sorry that I can't think of a dplyr or SQL solution. I'd also like to see those two, if anyone has got an idea.
dedupe <- function(df) {
counter = 1
temp_vector = unlist(df[1, ])
summarized_df = df[0, c(1, 2, 3, 4, 7)]
colnames(summarized_df) = colnames(df)[c(1, 2, 3, 4, 7)]
summarized_df$counter = NULL
for (i in 2:nrow(df)) {
if (((abs(difftime(df[i, "start_date"], df[i - 1, "end_date"], units = "h")) <
12) ||
abs(difftime(df[i, "start_date"], df[i - 1, "start_date"], units = "h")) <
12) &&
df[i, "id1"] == df[i - 1, "id1"] &&
df[i, "id2"] == df[i - 1, "id2"]) {
#group events because id is the same and time range overlap
#sum up columns and select maximum end_date
temp_vector[c(3, 4, 7)] = temp_vector[c(3, 4, 7)] + unlist(df[i, c(3, 4, 7)])
temp_vector["end_date"] = max(temp_vector["end_date"], df[i, "end_date"])
counter = counter + 1
if (i == nrow(df)) {
#in the last iteration we need to create a new group
summarized_df[nrow(summarized_df) + 1, c(1, 2)] = df[i, c(1, 2)]
summarized_df[nrow(summarized_df), 3:5] = temp_vector[c(3, 4, 7)] / counter
summarized_df[nrow(summarized_df), "counter"] = counter
}
} else {
#new event so we calculate group statistics for temp_vector and reset its value as well as counter
summarized_df[nrow(summarized_df) + 1, c(1, 2)] = df[i, c(1, 2)]
summarized_df[nrow(summarized_df), 3:5] = temp_vector[c(3, 4, 7)] / counter
summarized_df[nrow(summarized_df), "counter"] = counter
counter = 1
temp_vector[c(3, 4, 7)] = unlist(df[i, c(3, 4, 7)])
}
}
return(summarized_df)
}
Function call
> dedupe(df)
id1 id2 lat lon speed counter
5 1 3 -62.78179 -61.12142 1.4938968 4
6 1 3 -62.76476 -59.14270 1.2824859 1
9 2 3 -62.47686 -59.19700 2.5054133 3
10 2 3 42.06284 154.70602 0.8213661 2
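Since the question explicitly asked for a dplyr (or SQL) version, here is a sketch of the same consolidation (an addition, not part of the answer above); the gap test uses the running maximum end_date so overlapping and nested rows land in one group:
library(dplyr)
df %>%
  arrange(id1, id2, start_date) %>%
  group_by(id1, id2) %>%
  mutate(
    prev_end = lag(cummax(as.numeric(end_date))),      # latest end so far (seconds)
    gap_hrs  = (as.numeric(start_date) - prev_end) / 3600,
    grp      = cumsum(is.na(gap_hrs) | gap_hrs >= 12)  # new group after a 12h+ gap
  ) %>%
  group_by(id1, id2, grp) %>%
  summarise(
    lat = mean(lat), lon = mean(lon),
    start_date = min(start_date), end_date = max(end_date),
    speed = mean(speed), .groups = "drop"
  )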
This can be easily achieved by using insurancerating::reduce():
df |>
  insurancerating::reduce(begin = start_date, end = end_date, id1, id2,
                          agg_cols = c(lat, lon, speed), agg = "mean",
                          min.gapwidth = 12 * 3600)
#> id1 id2 index end_date start_date lat lon
#> 1 1 3 0 2016-03-25 19:20:00 2016-03-24 15:30:00 -62.78180 -61.12142
#> 2 1 3 1 2016-04-01 03:30:00 2016-03-29 18:20:00 -62.76476 -59.14270
#> 3 1 3 2 2016-06-02 01:40:00 2016-06-01 02:40:00 -62.47686 -59.19700
#> 4 2 3 0 2016-07-30 07:00:00 2016-07-29 20:20:00 42.06284 154.70602
#> speed
#> 1 1.4938969
#> 2 1.2824859
#> 3 2.5054133
#> 4 0.8213661
Created on 2022-06-13 by the reprex package (v2.0.1)
