RcppRoll: n = number of months instead of number of observations

I ran into an issue with the RcppRoll package. I want to use it to sum the values of the past 3 months, but sometimes there is no data for 1 or more of those months. With n = 3 it considers the last three observations rather than the last 3 months. I couldn't find a solid solution, so I am trying my luck here. Thank you in advance for any suggestions.
P.S. I prefer to work with data.table and RcppRoll as my dataset is large and I am familiar with those.
Code:
library("data.table")
library("RcppRoll")
test = data.table(id = rep(1, 8),date = c("2015-01","2015-02","2015-03","2015-04","2015-08","2015-09","2015-10","2015-11"), value = 1:8)
test = test[, var:= roll_sumr(value, n = 3, na.rm = TRUE), by = id]
id date value var
1: 1 2015-01 1 NA
2: 1 2015-02 2 NA
3: 1 2015-03 3 6
4: 1 2015-04 4 9
5: 1 2015-08 5 12
6: 1 2015-09 6 15
7: 1 2015-10 7 18
8: 1 2015-11 8 21
Expected output
preferred_outcome <- data.table(id = rep(1, 8),
                                date = c("2015-01","2015-02","2015-03","2015-04",
                                         "2015-08","2015-09","2015-10","2015-11"),
                                value = 1:8,
                                var = c(NA, NA, 6, 9, NA, NA, 18, 21))
id date value var
1: 1 2015-01 1 NA
2: 1 2015-02 2 NA
3: 1 2015-03 3 6
4: 1 2015-04 4 9
5: 1 2015-08 5 NA
6: 1 2015-09 6 NA
7: 1 2015-10 7 18
8: 1 2015-11 8 21

Define ym of class yearmon, then check whether the prior and second-prior ym values are exactly one and two months back; if so use roll_sumr, otherwise return NA.
library(zoo)
ym <- test[, as.yearmon(date)]
# keep the rolling sum only where the two preceding rows are the two preceding months
test[, roll := ifelse(ym - 1/12 == shift(ym) & ym - 2/12 == shift(ym, 2),
                      roll_sumr(value, 3, na.rm = TRUE), NA), by = id]
giving:
> test
id date value roll
1: 1 2015-01 1 NA
2: 1 2015-02 2 NA
3: 1 2015-03 3 6
4: 1 2015-04 4 9
5: 1 2015-08 5 NA
6: 1 2015-09 6 NA
7: 1 2015-10 7 18
8: 1 2015-11 8 21
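One caveat: the ym - 1/12 comparison relies on exact arithmetic on fractional yearmon values. If that worries you, the same check can be done on integer month counts; a minimal sketch on the same test table (m and roll2 are hypothetical names):
# Sketch: compare whole-month indices instead of fractional yearmon values
m <- test[, round(12 * as.numeric(as.yearmon(date)))]  # whole-month index
test[, roll2 := ifelse(m - 1 == shift(m) & m - 2 == shift(m, 2),
                       roll_sumr(value, 3, na.rm = TRUE), NA), by = id]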

You can add the missing months first and then perform the rolling sum. Afterwards, the added months can be removed again:
library(data.table)
library(RcppRoll)
library(zoo)
test <- data.table(id = rep(1, 8),
                   date = c("2015-01","2015-02","2015-03","2015-04",
                            "2015-08","2015-09","2015-10","2015-11"),
                   value = 1:8)
test[, date := as.yearmon(date)]
# complete the monthly sequence (the example has a single id)
allMonths <- seq(from = as.Date(min(test$date)), to = as.Date(max(test$date)), by = "month")
df2 <- data.table(id = 1, date = as.yearmon(allMonths))
df3 <- merge(test, df2, by = c("id", "date"), all = TRUE)
# default na.rm = FALSE: a window covering a filled-in, valueless month yields NA
df3[, var := roll_sumr(value, n = 3), by = id]
df3 <- df3[!is.na(value)]   # remove the helper months again
df3
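If the data held several ids, the month grid would have to be built per id. A minimal sketch with CJ() (data.table's cross-join helper), assuming every id should span the same overall date range:
# Sketch: full id x month grid, then the same merge / roll / clean-up steps
months <- seq(from = as.Date(min(test$date)), to = as.Date(max(test$date)), by = "month")
grid <- CJ(id = unique(test$id), date = months)
grid[, date := as.yearmon(date)]
df3 <- merge(test, grid, by = c("id", "date"), all = TRUE)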

Related

Using match on multiple criteria to generate value in R

I currently have the following data format:
df = data.frame(c(rep("A", 12), rep("B", 12)), rep(1:12, 2), seq(-12, 11))
colnames(df) = c("station", "month", "mean")
df
df_master = data.frame(c(rep("A", 10), rep("B", 10)), rep(c(27:31, 1:5), 2), rep(c(rep(1, 5), rep(2, 5)), 2), rep(seq(-4,5), 2))
colnames(df_master) = c("station", "day", "month", "value")
df_master
Effectively, df is a monthly average value for each station, and I want to compute a new variable in the df_master data set giving each daily observation's difference from the monthly mean. I have managed to do this with an overall average including all the data, but since the mean values vary between stations, I would like to make the new variable station specific.
I have tried the following code to match the monthly value, but this currently doesn't account for cross station differences:
df_master$mean = df$mean[match(df_master$month, df$month)]
df_master = df_master %>% mutate(diff = value - mean)
How can I progress this further so that the averages are taken per station?
With dplyr, using a left join:
library(dplyr)
left_join(df_master, df, by = c('station', 'month')) %>%
  mutate(monthdiff = value - mean) %>%
  select(-mean)
If you convert them to data.tables, you can add the difference column with an update join, joining df_master with df on the condition that the values for both station and month are equal.
library(data.table)
setDT(df_master)
setDT(df)
df_master[df, on = .(station, month),
          diff_monthmean := value - i.mean]
df_master
# station day month value diff_monthmean
# 1: A 27 1 -4 8
# 2: A 28 1 -3 9
# 3: A 29 1 -2 10
# 4: A 30 1 -1 11
# 5: A 31 1 0 12
# 6: A 1 2 1 12
# 7: A 2 2 2 13
# 8: A 3 2 3 14
# 9: A 4 2 4 15
# 10: A 5 2 5 16
# 11: B 27 1 -4 -4
# 12: B 28 1 -3 -3
# 13: B 29 1 -2 -2
# 14: B 30 1 -1 -1
# 15: B 31 1 0 0
# 16: B 1 2 1 0
# 17: B 2 2 2 1
# 18: B 3 2 3 2
# 19: B 4 2 4 3
# 20: B 5 2 5 4
Another option could be:
transform(df_master,
          diff = value - merge(df_master, df, by = c('station', 'month'), all.x = TRUE)$mean)
Or, using match with interaction:
transform(df_master,
          diff = value - df$mean[match(interaction(df_master[c("month", "station")]),
                                       interaction(df[c("month", "station")]))])

Filter data.table with another data.table with different column names

I have this dataset:
library(data.table)
dt <- data.table(
  record = c(1:20),
  area = rep(LETTERS[1:4], c(4, 6, 3, 7)),
  score = c(1,1:3,2:3,1,1,1,2,2,1,2,1,1,1,1,1:3),
  cluster = c("X", "Y", "Z")[c(1,1:3,3,2,1,1:3,1,1:3,3,3,3,1:3)]
)
and I have used the solution from this post to create this summary:
dt_summary <-
  dt[, .N, keyby = .(area, score, cluster)
  ][, {
      idx = frank(-N, ties.method = 'min') == 1
      NN = sum(N)
      .(
        cluster_mode = cluster[idx],
        cluster_pct = 100*N[idx]/NN,
        cluster_freq = N[idx],
        record_freq = NN
      )
    }, by = .(area, score)]
dt_score_1 <- dt_summary[score == 1]
setnames(dt_score_1, "area", "zone")
I would like to use the results from dt_score_1 to filter dt based on the area/zone and cluster/cluster_mode. So in a new data.table, the only rows taken from dt for area A should belong to cluster X, for area D they should be cluster Z etc.
If I'm understanding the question correctly, this is a merge of dt with dt_score_1 under the conditions area = zone and cluster = cluster_mode.
dt[dt_score_1, on = .(area = zone, cluster = cluster_mode)]
# record area score cluster i.score cluster_pct cluster_freq record_freq
# 1: 1 A 1 X 1 100.00000 2 2
# 2: 2 A 1 X 1 100.00000 2 2
# 3: 7 B 1 X 1 66.66667 2 3
# 4: 8 B 1 X 1 66.66667 2 3
# 5: 11 C 2 X 1 100.00000 1 1
# 6: 12 C 1 X 1 100.00000 1 1
# 7: 14 D 1 Z 1 80.00000 4 5
# 8: 15 D 1 Z 1 80.00000 4 5
# 9: 16 D 1 Z 1 80.00000 4 5
# 10: 17 D 1 Z 1 80.00000 4 5
# 11: 20 D 3 Z 1 80.00000 4 5
For a more detailed explanation of join-as-filter, see the link below posted by @Frank:
Perform a semi-join with data.table
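If you want the filtered rows of dt without the extra columns brought in by dt_score_1, the semi-join idiom from that link can be sketched like this (idx is just a hypothetical helper name):
# Semi-join sketch: which = TRUE returns the matching row numbers of dt,
# so indexing with them keeps dt's columns untouched
idx <- dt[dt_score_1, on = .(area = zone, cluster = cluster_mode),
          which = TRUE, nomatch = 0]
dt[idx]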

Count if between date range based on groups

I'm stuck trying to find a relatively simple way to count occurrences within a date range by group using R. I get the idea there has to be an easier way than what I'm trying.
I have over 6,000 groups; each group has anywhere from 1 to 100 IDs, each with a start date and an end date anywhere from Jan 1, 1990 to today. I want to make a data frame with one group per column and one day per row, counting the number of IDs active per day from April 1, 2013 until March 31, 2018. For obvious reasons, using COUNTIFS in Excel will not cut it.
I was trying to use this question as a starting point, as such:
df1 <- data.frame(group = c(1,1,2,3,3),
                  id = c(1,2,1,1,2),
                  startdate = c("2016-01-01","2016-04-04","2016-03-02","2016-08-01","2016-04-01"),
                  enddate = c("2016-04-04","2999-01-01","2016-05-02","2016-08-05","2999-01-01"))
report <- data.frame(date = seq(from = as.Date("2016-04-01"), by = "1 day", length.out = 7))
report <- cbind(report, matrix(data = NA, nrow = 7, ncol = 3))
names(report) <- c('date', as.vector(unique(df1$group)))
daily <- function(i, ...) {
  report[, i+1] <- sapply(report$date, function(x)
    sum(as.Date(df1$startdate) < as.Date(x) &
        as.Date(df1$enddate) > as.Date(x) &
        df1$group == unique(df1$group)[i]))
}
for (i in unique(df1$group))
  daily(i)
However, this doesn't seem to do anything (nor does it throw errors). Is there an easier way to do this? Am I way off base? Any help is appreciated for this non-programmer!
Additional help requested: I'm trying to modify Jaap's code in the answer below to include group start and group end times, so that the data table displays an NA when the group is not active.
Example data:
df2 <- data.frame(group = c(1,1,2,3,3),
                  groupopendate = c("2016-04-02","2016-04-02","2016-04-01","2016-04-02","2016-04-02"),
                  groupclosedate = c("2016-04-08","2016-04-08","2016-04-10","2016-04-09","2016-04-09"),
                  id = c(1,2,1,1,2),
                  startdate = c("2016-04-02","2016-04-04","2016-04-03","2016-04-02","2016-04-05"),
                  enddate = c("2016-04-04","2016-04-06","2016-04-10","2016-04-08","2016-04-08"))
Jaap's solution gives me this:
active grp1 grp2 grp3
1: 2016-04-02 1 0 1
2: 2016-04-03 1 1 1
3: 2016-04-04 1 1 1
4: 2016-04-05 1 1 2
5: 2016-04-06 0 1 2
6: 2016-04-07 0 1 2
However, what I want is such:
active grp1 grp2 grp3
1: 2016-04-01 NA 0 NA
2: 2016-04-02 1 0 1
3: 2016-04-03 1 1 1
4: 2016-04-04 1 1 1
5: 2016-04-05 1 1 1
6: 2016-04-06 1 1 2
7: 2016-04-07 0 1 2
8: 2016-04-08 NA 1 0
9: 2016-04-09 NA 1 NA
10: 2016-04-10 NA NA NA
Any help is appreciated!
A possible alternative solution using data.table:
# load the package & convert 'df1' to a data.table
library(data.table)
setDT(df1)
# convert the date columns to Date format
# (not needed if they already are dates)
df1[, `:=` (startdate = as.Date(startdate), enddate = as.Date(enddate))]
# create a new data.table with the 'active' days
DT <- data.table(active = seq(from = as.Date("2016-04-01"), by = "day", length.out = 7))
# use a join and dcast to get the desired result
DT[df1,
   on = .(active > startdate, active < enddate),
   allow.cartesian = TRUE,
   nomatch = 0,
   .(active = x.active, group, id)
][, dcast(.SD, active ~ paste0("grp", group), value.var = "id", fun.aggregate = length)]
which gives:
active grp1 grp2 grp3
1: 2016-04-01 1 1 0
2: 2016-04-02 1 1 1
3: 2016-04-03 1 1 1
4: 2016-04-04 0 1 1
5: 2016-04-05 1 1 1
6: 2016-04-06 1 1 1
7: 2016-04-07 1 1 1
NOTE: I've used paste0("grp", group) instead of just group in the dcast step as it leads to better column names (it is better not to use plain numeric values as column names).
With regard to your additional example, you could solve that as follows:
setDT(df2)
df2[, c(2:3, 5:6) := lapply(.SD, as.Date), .SDcols = c(2:3, 5:6)]
DT <- data.table(active = seq(from = min(df2$groupopendate),
                              to = max(df2$groupclosedate),
                              by = "day"))
df2new <- df2[, .(active = seq.Date(startdate, enddate, by = "day")), by = .(group, id)
][, .N, by = .(group, active)
][df2[, .(active = seq.Date(groupopendate[1], groupclosedate[.N] - 1, by = "day")), by = .(group)],
  on = .(group, active)
][is.na(N), N := 0
][, dcast(.SD, active ~ paste0("grp", group))]
nms <- setdiff(names(df2new), "active")
DT[df2new, on = .(active), (nms) := mget(paste0("i.", nms))][]
which gives:
> DT
active grp1 grp2 grp3
1: 2016-04-01 NA 0 NA
2: 2016-04-02 1 0 1
3: 2016-04-03 1 1 1
4: 2016-04-04 2 1 1
5: 2016-04-05 1 1 2
6: 2016-04-06 1 1 2
7: 2016-04-07 0 1 2
8: 2016-04-08 NA 1 2
9: 2016-04-09 NA 1 NA
10: 2016-04-10 NA 1 NA
I've figured it out! As usual, as soon as you post a question, you figure out the answer. I was overcomplicating it by wrapping the work in a function, when I could just put the sapply in the for loop. (The assignment inside daily() only modified a local copy of report, which is why nothing seemed to happen.)
If anyone is interested:
for (i in unique(df1$group)) {
  report[, i+1] <-
    sapply(report$date, function(x)
      sum(as.Date(df1$startdate) < as.Date(x) &
          as.Date(df1$enddate) > as.Date(x) &
          df1$group == unique(df1$group)[i]))
}
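Alternatively, the function version works if it returns the counts and the assignment happens at the call site. A minimal sketch of that fix, avoiding the local-copy pitfall:
# Sketch: daily() returns the counts; the caller assigns them, so the
# global report actually gets updated
daily <- function(i) {
  sapply(report$date, function(x)
    sum(as.Date(df1$startdate) < as.Date(x) &
        as.Date(df1$enddate) > as.Date(x) &
        df1$group == unique(df1$group)[i]))
}
for (i in unique(df1$group)) report[, i + 1] <- daily(i)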

Carry / use value from previous group

data:
structure(list(id = c(1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 4, 5),
ax = c("a", "a", "b", "b", "b", "b", "b", "b", "c", "c",
"d", "d", "e"), time = c(1, 3, 0, 2, 4, 5, 6, 8, 7, 9, 10,
11, 12)), .Names = c("id", "ax", "time"), class = c("data.table",
"data.frame"), row.names = c(NA, -13L))
looks like:
id ax time
1: 1 a 1
2: 1 a 3
3: 2 b 0
4: 2 b 2
5: 2 b 4
6: 2 b 5
7: 2 b 6
8: 2 b 8
9: 3 c 7
10: 3 c 9
11: 4 d 10
12: 4 d 11
13: 5 e 12
I want to have the max of the previous group next to the current group:
desired output:
id ax time newCol
1: 1 a 1 NA
2: 1 a 3 NA
3: 2 b 0 3
4: 2 b 2 3
5: 2 b 4 3
6: 2 b 5 3
7: 2 b 6 3
8: 2 b 8 3
9: 3 c 7 8
10: 3 c 9 8
11: 4 d 10 9
12: 4 d 11 9
13: 5 e 12 11
Is it also possible to have the value of the "previous-previous" group?
Interested in base R, data.table, and tidyverse solutions.
note:
Can be grouped by EITHER id or ax. The example is a little redundant here.
A data.table solution:
# per-group maxima, lagged by one group, then joined back onto dtt
dtt.max <- dtt[, .(max = max(time)), by = ax]
dtt.max[, max.prev := shift(max)]
dtt[dtt.max, newCol := i.max.prev, on = 'ax']
# > dtt
# id ax time newCol
# 1: 1 a 1 NA
# 2: 1 a 3 NA
# 3: 2 b 0 3
# 4: 2 b 2 3
# 5: 2 b 4 3
# 6: 2 b 5 3
# 7: 2 b 6 3
# 8: 2 b 8 3
# 9: 3 c 7 8
# 10: 3 c 9 8
# 11: 4 d 10 9
# 12: 4 d 11 9
# 13: 5 e 12 11
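For the "previous-previous" group, the same idea extends naturally: lag the per-group maxima by two. A sketch building on the dtt.max table above (max.prev2 and newCol2 are hypothetical names):
# Sketch: shift(..., n = 2) reaches back two groups instead of one
dtt.max[, max.prev2 := shift(max, n = 2)]
dtt[dtt.max, newCol2 := i.max.prev2, on = 'ax']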
A data.table solution using id + 1: grouping by id + 1 labels each group's max with the id of the following group, so the merge attaches the previous group's max.
library(data.table)
merge(d, setDT(d)[, max(time), id + 1], all.x = TRUE)
Here is a dplyr approach. The key here is to group and ungroup when necessary:
df %>%
  group_by(ax) %>%
  mutate(new = time[n()]) %>%
  ungroup() %>%
  mutate(new = lag(new)) %>%
  group_by(ax) %>%
  mutate(new = new[1])
# A tibble: 13 x 4
# Groups: ax [5]
id ax time new
<dbl> <chr> <dbl> <dbl>
1 1. a 1. NA
2 1. a 3. NA
3 2. b 0. 3.
4 2. b 2. 3.
5 2. b 4. 3.
6 2. b 5. 3.
7 2. b 6. 3.
8 2. b 8. 3.
9 3. c 7. 8.
10 3. c 9. 8.
11 4. d 10. 9.
12 4. d 11. 9.
13 5. e 12. 11.
Assuming id is the same as group:
dfr <- dfr %>% group_by(id) %>% mutate(groupmax = max(time))
dfr$old_group_max <- dfr$groupmax[match(dfr$id - 1, dfr$id)]
The antepenultimate group is left as an exercise :-)
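(For completeness, the same match() trick can reach back two groups, assuming consecutive ids; a sketch:)
# Sketch: match id - 2 to fetch the max of the group before the previous one
dfr$old_group_max2 <- dfr$groupmax[match(dfr$id - 2, dfr$id)]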
1) This uses no packages. It computes the maximum for each group, giving Ag, and then lags it, giving LagMax. Finally, it left joins that back into the original data frame DF using merge:
Ag <- aggregate(time ~ id, DF, max)
LagMax <- transform(Ag, lagmax = c(NA, head(time, -1)), time = NULL)
merge(DF, LagMax, by = "id", all.x = TRUE)
giving:
id ax time lagmax
1 1 a 1 NA
2 1 a 3 NA
3 2 b 0 3
4 2 b 2 3
5 2 b 4 3
6 2 b 5 3
7 2 b 6 3
8 2 b 8 3
9 3 c 7 8
10 3 c 9 8
11 4 d 10 9
12 4 d 11 9
13 5 e 12 11
2) This sorts time within id so that we know that the maximum is the last value in each id group.
o <- order(factor(DF$id, levels = unique(DF$id)), DF$time)
Time <- DF$time[o]
lagmax <- function(r) if (r[1] == 1) NA else Time[r[1] - 1]
transform(DF, lagmax = ave(seq_along(id), id, FUN = lagmax))
In the question the time values are already sorted within id and if that is known to be the case the above could be shortened to:
lagmax <- function(r) if (r[1] == 1) NA else DF$time[r[1] - 1]
transform(DF, lagmax = ave(seq_along(id), id, FUN = lagmax))
3) This one-liner is a data.table translation of (2):
library(data.table)
DT <- copy(DF) # don't overwrite DF
setDT(DT)[, g:=rleid(id)][, lagmax := DT$time[.I[1]-1], keyby = c("g", "id")]
In the sample data in the question time is sorted within id and if that were known to be the case we could use the following shorter code in place of the last line above
setDT(DT)[, lagmax := DT$time[.I[1]-1], by = id]

Split contents of column into two rows, for conversion to STRUCTURE format

I am trying to separate the contents of columns into two rows, duplicating the row names. Each variable consists of a two-digit number (11, 12, 13, 14, 21, 22, etc.) or an NA. This is for conversion to STRUCTURE format, a common population genetics format.
I have this:
population X354045 X430045 X995019
Crater <NA> 11 22
Teton 11 31 11
I would like to have this:
population X354045 X430045 X995019
Crater <NA> 1 2
Crater <NA> 1 2
Teton 1 3 1
Teton 1 1 1
This is a data.table question, so I would just suggest the built-in tstrsplit function.
Reading your data:
library(data.table)
DT <- fread('population X354045 X430045 X995019
Crater NA 11 22
Teton 11 31 11')
Solution (if you have a data.frame, use setDT(DT) in order to convert to a data.table)
DT[, lapply(.SD, function(x) unlist(tstrsplit(x, ""))), by = population]
# population X354045 X430045 X995019
# 1: Crater NA 1 2
# 2: Crater NA 1 2
# 3: Teton 1 3 1
# 4: Teton 1 1 1
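Note that tstrsplit returns character values, so the split columns come back as character. If integer columns are needed afterwards, one extra pass converts them; a sketch (res and cols are hypothetical names):
# Sketch: convert the split character columns back to integer
res <- DT[, lapply(.SD, function(x) unlist(tstrsplit(x, ""))), by = population]
cols <- names(res)[-1]
res[, (cols) := lapply(.SD, as.integer), .SDcols = cols]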
Ok, so here is how I would do it. Let's create some data:
vector <- c(10, 11, 12, NA, 13, 14, 15)
First, we need a function that allows you to break each two-digit number into its two digits (and NAs into two NAs):
as.numeric(sapply(vector, function(x) (x %% c(1e2,1e1)) %/% c(1e1,1e0)))
# 1 0 1 1 1 2 NA NA 1 3 1 4 1 5
Now all we have to do is apply this to every relevant column:
DF <- data.frame(population = c("Crater", "Teton"), X354045 = c(NA, 11), X430045 = c(11, 31), X995019 = c(22, 11))
DF2 <- apply(DF[-1], 2, function(y)
  as.numeric(sapply(y, function(x) (x %% c(1e2, 1e1)) %/% c(1e1, 1e0))))
Finally, we just combine it with the new population column:
population <- as.character(rep(DF$population, each = 2))
DF3 <- cbind(population, data.frame(DF2))
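DF3 then matches the desired layout (output reconstructed by hand from the steps above, worth re-running to confirm):
#   population X354045 X430045 X995019
# 1     Crater      NA       1       2
# 2     Crater      NA       1       2
# 3      Teton       1       3       1
# 4      Teton       1       1       1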
dd <- read.table(header = TRUE, text = 'population X354045 X430045 X995019
Crater NA 11 22
Teton 11 31 11')
nr <- nrow(dd)
dd <- dd[rep(1:2, each = nr), ]
# population X354045 X430045 X995019
# 1 Crater NA 11 22
# 1.1 Crater NA 11 22
# 2 Teton 11 31 11
# 2.1 Teton 11 31 11
dd[, -1] <- lapply(dd[, -1], function(x) {
idx <- (seq_along(x) %% 2 == 0) + 1L
substr(x, idx, idx)
})
# population X354045 X430045 X995019
# 1 Crater <NA> 1 2
# 1.1 Crater <NA> 1 2
# 2 Teton 1 3 1
# 2.1 Teton 1 1 1
Or just
dd <- dd[rep(1:2, each = nr), ]
dd[, -1] <- lapply(dd[, -1], function(x)
Vectorize(substr)(x, rep(1:2, nr), rep(1:2, nr)))
would work
And the same idea in data.table, thanks to @DavidArenburg:
library('data.table')
dd <- read.table(header = TRUE, text = 'population X354045 X430045 X995019
Crater NA 11 22
Teton 11 31 11')
setDT(dd)[rep(1:2, each = .N), lapply(.SD, substr, 1:2, 1:2), by = population]
# population X354045 X430045 X995019
# 1: Crater NA 1 2
# 2: Crater NA 1 2
# 3: Teton 1 3 1
# 4: Teton 1 1 1
Or similarly, but avoiding the by part:
dd <- setDT(dd)[rep(1:2, each = .N)]
dd[, 2:4 := dd[, lapply(.SD, substr, 1:2, 1:2), .SDcols = -1]]
which should be pretty fast/efficient if you are working with a large data set
