Fill NA in a time series only to a limited number - r

Is there a way to fill NAs forward in a zoo or xts object, but only up to a limited number of NAs? In other words, fill up to 3 consecutive NAs, and then keep the NAs from the 4th value onward until the next valid number.
Something like this.
library(zoo)
x <- zoo(1:20, Sys.Date() + 1:20)
x[c(2:4, 6:10, 13:18)] <- NA
x
2014-09-20 2014-09-21 2014-09-22 2014-09-23 2014-09-24 2014-09-25 2014-09-26
1 NA NA NA 5 NA NA
2014-09-27 2014-09-28 2014-09-29 2014-09-30 2014-10-01 2014-10-02 2014-10-03
NA NA NA 11 12 NA NA
2014-10-04 2014-10-05 2014-10-06 2014-10-07 2014-10-08 2014-10-09
NA NA NA NA 19 20
The desired output, with a variable n = 3, would be:
2014-09-20 2014-09-21 2014-09-22 2014-09-23 2014-09-24 2014-09-25 2014-09-26
1 1 1 1 5 5 5
2014-09-27 2014-09-28 2014-09-29 2014-09-30 2014-10-01 2014-10-02 2014-10-03
5 NA NA 11 12 12 12
2014-10-04 2014-10-05 2014-10-06 2014-10-07 2014-10-08 2014-10-09
12 NA NA NA 19 20
I have tried many combinations with na.locf(x, maxgap = 3) etc. without much success. I can create a loop to get the desired output, but I was wondering whether there is a vectorized way of achieving this.
fillInTheBlanks <- function(v, n = 3) {
  result <- v
  counter0 <- 1
  for (i in 2:length(v)) {
    value <- v[i]
    if (is.na(value)) {
      if (counter0 > n) {
        result[i] <- v[i]
      } else {
        result[i] <- result[i - 1]
        counter0 <- counter0 + 1
      }
    } else {
      result[i] <- v[i]
      counter0 <- 1
    }
  }
  return(result)
}
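Applying the loop to the example series reproduces the desired output (values are pulled out with coredata and re-wrapped with the original index):
zoo(fillInTheBlanks(coredata(x)), index(x))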
Thanks

Here's another way:
l <- cumsum(! is.na(x))
c(NA, x[! is.na(x)])[replace(l, ave(l, l, FUN=seq_along) > 4, 0) + 1]
# [1] 1 1 1 1 5 5 5 5 NA NA 11 12 12 12 12 NA NA NA 19 20
edit: my previous answer required that x have no duplicates. The current answer does not.
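The hard-coded 4 above is n + 1 for n = 3; a parameterized sketch of the same idea (my wrapper, not part of the original answer):
fill_n <- function(x, n = 3) {
  l <- cumsum(!is.na(x))
  c(NA, x[!is.na(x)])[replace(l, ave(l, l, FUN = seq_along) > n + 1, 0) + 1]
}
fill_n(x)  # identical to the result above when n = 3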
Benchmarks:
library(microbenchmark)
x <- rep(x, length.out = 1e4)
plourde <- function(x) {
  l <- cumsum(!is.na(x))
  c(NA, x[!is.na(x)])[replace(l, ave(l, l, FUN = seq_along) > 4, 0) + 1]
}
agstudy <- function(x) {
  unlist(sapply(split(coredata(x), cumsum(!is.na(x))),
                function(sx) {
                  if (length(sx) > 3)
                    sx[2:4] <- rep(sx[1], 3)
                  else sx <- rep(sx[1], length(sx))
                  sx
                }))
}
microbenchmark(plourde(x), agstudy(x))
# Unit: milliseconds
# expr min lq median uq max neval
# plourde(x) 5.30 5.591 6.409 6.774 57.13 100
# agstudy(x) 16.04 16.249 16.454 17.516 20.64 100

And another idea that, unless I've missed something, seems valid:
na_locf_until = function(x, n = 3)
{
  wnn = which(!is.na(x))
  inds = sort(c(wnn, (wnn + n + 1)[which((wnn + n + 1) < c(wnn[-1], length(x)))]))
  c(rep(NA, wnn[1] - 1),
    as.vector(x)[rep(inds, c(diff(inds), length(x) - inds[length(inds)] + 1))])
}
na_locf_until(x)
#[1] 1 1 1 1 5 5 5 5 NA NA 11 12 12 12 12 NA NA NA 19 20

Without using na.locf: the idea is to split your xts by groups of non-missing values, then for each group replace only the first 3 values (after the non-missing one) with the first value. It is a loop, but since it is only applied per group, it should be faster than a simple loop over all the values.
zz <- unlist(sapply(split(coredata(x), cumsum(!is.na(x))),
                    function(sx) {
                      if (length(sx) > 3)
                        sx[2:4] <- rep(sx[1], 3)
                      else sx <- rep(sx[1], length(sx))
                      sx
                    }))
## re-create the zoo object, since the algorithm above is applied only to the values
zoo(zz,index(x))
2014-09-20 2014-09-21 2014-09-22 2014-09-23 2014-09-24 2014-09-25 2014-09-26 2014-09-27 2014-09-28 2014-09-29 2014-09-30 2014-10-01 2014-10-02
1 1 1 1 5 5 5 5 NA NA 11 12 12
2014-10-03 2014-10-04 2014-10-05 2014-10-06 2014-10-07 2014-10-08 2014-10-09
12 12 NA NA NA 19 20

The cleanest way to implement this in data.table is probably using the join syntax:
na.omit(dt)[dt, on = .(date), roll = +3, .(date, x_filled = x, x = i.x)]
date x_filled x
1: 2019-02-14 1 1
2: 2019-02-15 1 NA
3: 2019-02-16 1 NA
4: 2019-02-17 1 NA
5: 2019-02-18 5 5
6: 2019-02-19 5 NA
7: 2019-02-20 5 NA
8: 2019-02-21 5 NA
9: 2019-02-22 NA NA
10: 2019-02-23 NA NA
11: 2019-02-24 11 11
12: 2019-02-25 12 12
13: 2019-02-26 12 NA
14: 2019-02-27 12 NA
15: 2019-02-28 12 NA
16: 2019-03-01 NA NA
17: 2019-03-02 NA NA
18: 2019-03-03 NA NA
19: 2019-03-04 19 19
20: 2019-03-05 20 20
* This solution depends on the date column being contiguous (one row per day, with no gaps).
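If the date column is not contiguous, one possible workaround (a sketch of my own, not part of the answer) is to roll on a row counter instead of the date, so the limit of 3 counts rows rather than calendar days:
dt[, rn := .I]  # hypothetical helper column: plain row number
na.omit(dt)[dt, on = .(rn), roll = +3, .(date = i.date, x_filled = x, x = i.x)]
dt[, rn := NULL]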

From playing around in data.table comes this hacky solution:
np1 <- 3 + 1
dt[,
x_filled := x[c(rep(1, min(np1, .N)), rep(NA, max(0, .N - np1)))],
by = cumsum(!is.na(x))]
# Or slightly simplified:
dt[,
x_filled := ifelse(rowid(x) < 4, x[1], x[NA]),
by = cumsum(!is.na(x))]
> dt
date x x_filled
1: 2019-02-14 1 1
2: 2019-02-15 NA 1
3: 2019-02-16 NA 1
4: 2019-02-17 NA 1
5: 2019-02-18 5 5
6: 2019-02-19 NA 5
7: 2019-02-20 NA 5
8: 2019-02-21 NA 5
9: 2019-02-22 NA NA
10: 2019-02-23 NA NA
11: 2019-02-24 11 11
12: 2019-02-25 12 12
13: 2019-02-26 NA 12
14: 2019-02-27 NA 12
15: 2019-02-28 NA 12
16: 2019-03-01 NA NA
17: 2019-03-02 NA NA
18: 2019-03-03 NA NA
19: 2019-03-04 19 19
20: 2019-03-05 20 20
We build on the fact that subsetting vectors with NA returns NA.
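A quick illustration of that behaviour:
v <- c(10L, 20L, 30L)
v[c(1, NA, 3)]  # 10 NA 30
v[NA]           # a single logical NA is recycled, so this gives NA NA NA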
Data/Packages
library(zoo)
library(data.table)
x <- zoo(1:20, Sys.Date() + 1:20)
x[c(2:4, 6:10, 13:18)] <- NA
dt <- data.table(date = index(x), x = as.integer(x))

Related

How to search upwards a column for a value based on whether another column is NA or not?

I need to find the previous date for which value is not NA and then also use the value on that row. I have tried to use shift, but I ran into a problem: shift works well for row 9 but not when there are consecutive non-NA values of type, such as on rows 5 and 6.
dtihave = data.table(date = as.Date(c("2020-01-01", "2020-02-01", "2020-03-03", "2020-04-02", "2020-05-09", "2020-06-10", "2020-07-18", "2020-08-23", "2020-09-09")),
type = c(1,1,NA,NA,1,1,NA,NA,1),
value = c(7,NA,6,8,NA,NA,5,9,NA))
> dtihave
date type value
1: 2020-01-01 1 7
2: 2020-02-01 1 NA
3: 2020-03-03 NA 6
4: 2020-04-02 NA 8
5: 2020-05-09 1 NA
6: 2020-06-10 1 NA
7: 2020-07-18 NA 5
8: 2020-08-23 NA 9
9: 2020-09-09 1 NA
dtiwant = data.table(date = as.Date(c("2020-01-01", "2020-02-01", "2020-03-03", "2020-04-02", "2020-05-09", "2020-06-10", "2020-07-18", "2020-08-23", "2020-09-09")),
type = c(1,1,NA,NA,1,1,NA,NA,1),
value = c(7,NA,6,8,NA,NA,5,9,NA),
iwantdate = c(NA, as.Date("2020-01-01"), NA, NA, as.Date("2020-04-02"), as.Date("2020-04-02"), NA, NA, as.Date("2020-08-23")),
iwantvalue = c(NA,7,NA,NA,8,8,NA,NA,9))
dtiwant[, iwantdate := as.Date(iwantdate, origin = "1970-01-01")]
> dtiwant
date type value iwantdate iwantvalue
1: 2020-01-01 1 7 <NA> NA
2: 2020-02-01 1 NA 2020-01-01 7
3: 2020-03-03 NA 6 <NA> NA
4: 2020-04-02 NA 8 <NA> NA
5: 2020-05-09 1 NA 2020-04-02 8
6: 2020-06-10 1 NA 2020-04-02 8
7: 2020-07-18 NA 5 <NA> NA
8: 2020-08-23 NA 9 <NA> NA
9: 2020-09-09 1 NA 2020-08-23 9
Here is my current progress using shift, but I need row 6's iwantdate to be "2020-04-02". The number of shifts I need is unknown, so I cannot just use n = 2 in shift.
dtprogress = copy(dtihave)
dtprogress[, iwantdate := ifelse(!is.na(type) & is.na(value), shift(date), NA)]
dtprogress[, iwantdate := ifelse(!is.na(type) & !is.na(value), date, iwantdate)]
dtprogress[, iwantdate := as.Date(iwantdate, origin = "1970-01-01")]
> dtprogress
date type value iwantdate
1: 2020-01-01 1 7 2020-01-01
2: 2020-02-01 1 NA 2020-01-01
3: 2020-03-03 NA 6 <NA>
4: 2020-04-02 NA 8 <NA>
5: 2020-05-09 1 NA 2020-04-02
6: 2020-06-10 1 NA 2020-05-09
7: 2020-07-18 NA 5 <NA>
8: 2020-08-23 NA 9 <NA>
9: 2020-09-09 1 NA 2020-08-23
You could do:
dtihave[, idx := cummax((!is.na(value)) * .I) * NA^!is.na(value)][,
c('want_date', 'want_value') := lapply(.SD, '[', idx),
.SDcols = c('date', 'value')][, idx:=NULL]
dtihave
date type value want_date want_value
1: 2020-01-01 1 7 <NA> NA
2: 2020-02-01 1 NA 2020-01-01 7
3: 2020-03-03 NA 6 <NA> NA
4: 2020-04-02 NA 8 <NA> NA
5: 2020-05-09 1 NA 2020-04-02 8
6: 2020-06-10 1 NA 2020-04-02 8
7: 2020-07-18 NA 5 <NA> NA
8: 2020-08-23 NA 9 <NA> NA
9: 2020-09-09 1 NA 2020-08-23 9
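To unpack the helper index, here is a step-by-step breakdown of the expression above (my illustration, using the value column directly):
v <- dtihave$value
(!is.na(v)) * seq_along(v)           # row number where value is present, 0 elsewhere
cummax((!is.na(v)) * seq_along(v))   # carry the last such row number forward
NA ^ !is.na(v)                       # 1 where value is NA (NA^0 is 1), NA where it is present
# the product is, for each row with a missing value, the row index of the
# closest previous non-missing value, and NA everywhere else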
With tidyverse. Hopefully this solves the grouping as well, i.e. just add %>% group_by(...) before mutate and you are good to go:
dtihave %>%
mutate(val_na = !is.na(value),
idx = nafill(na_if(row_number() * val_na, 0), "locf"),
idx = idx * NA ^ val_na,
date1 = date[idx], value1 = value[idx],
val_na = NULL, idx = NULL)
You can use lag to get previous values, e.g.
library(dplyr)
dtihave %>%
mutate(iwantdate = ifelse(is.na(value), lag(date), NA) %>% as.Date(., origin = "1970-01-01"),
iwantvalue = ifelse(is.na(value), lag(value), NA))
date type value iwantdate iwantvalue
1: 2020-01-01 1 7 <NA> NA
2: 2020-02-01 1 NA 2020-01-01 7
3: 2020-03-03 NA 6 <NA> NA
4: 2020-04-02 NA 8 <NA> NA
5: 2020-05-09 1 NA 2020-04-02 8
6: 2020-06-10 1 NA 2020-05-09 NA
7: 2020-07-18 NA 5 <NA> NA
8: 2020-08-23 NA 9 <NA> NA
9: 2020-09-09 1 NA 2020-08-23 9

R data.table replace missing value by next non missing value

I have a data.table whose age column contains missing values; rdate is in Date format. I want to fill each missing age by finding the next non-missing age and rdate for that horsenum, then computing the missing age as the next non-missing age minus the ceiling of the year difference between that non-missing rdate and this record's rdate. I assume the next non-missing rdate is a birthday, which is why I take the ceiling of the year difference. I also want to keep rdate.fill in Date format. How can I write this in data.table code?
My idea is to calculate age.fill this way, but I get an error:
library(lubridate)
data[, rdate.fill := ifelse(is.na(age), as.Date(rdate[na.lacf(age)]), NA), by=horsenum]
data[, age.fill := ifelse(is.na(age), ind4- ceiling(time_length(difftime(rdate.fill, rdate, "years"), age), by=horsenum]
input
index rdate horsenum age ind4
1: 14704 2009-03-01 K123 NA 10
2: 14767 2009-03-01 K212 NA 9
3: 39281 2011-10-09 K123 NA 10
4: 39561 2011-10-19 K212 NA 9
5: 74560 2015-04-07 K212 NA 9
6: 77972 2015-09-06 K123 10 NA
7: 79111 2015-10-10 K212 9 NA
8: 84233 2016-03-28 K212 10 NA
structure(list(index = c(14704L, 14767L, 39281L, 39561L, 74560L,
77972L, 79111L, 84233L), rdate = structure(c(14304, 14304, 15256,
15266, 16532, 16684, 16718, 16888), class = "Date"), horsenum = c("K123",
"K212", "K123", "K212", "K212", "K123", "K212", "K212"), age = c(NA,
NA, NA, NA, NA, 10, 9, 10), ind4 = c(10, 9, 10, 9, 9, NA, NA,
NA)), row.names = c(NA, -8L), class = c("data.table", "data.frame"))
output
index rdate horsenum age ind4 rdate.fill age.fill
1: 14704 2009-03-01 K123 NA 10 2015-09-06 3
2: 14767 2009-03-01 K212 NA 9 2015-10-10 2
3: 39281 2011-10-09 K123 NA 10 2015-09-06 6
4: 39561 2011-10-19 K212 NA 9 2015-10-10 5
5: 74560 2015-04-07 K212 NA 9 2015-10-10 8
6: 77972 2015-09-06 K123 10 NA 10
7: 79111 2015-10-10 K212 9 NA 9
8: 84233 2016-03-28 K212 10 NA 10
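For reference, a rough worked check of the formula for row 1 (my illustration, approximating the year difference with days/365.25): K123's next non-missing age is 10 on 2015-09-06.
yrs <- as.numeric(difftime(as.Date("2015-09-06"), as.Date("2009-03-01"), units = "days")) / 365.25
10 - ceiling(yrs)
# [1] 3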
It's not clear to me how age.fill is calculated differently for rows 2 and 4 as compared to rows 1 and 3, respectively. But this should get you closer to your needs:
library(data.table) #data.table_1.12.4
DT[, rdate.fill := nafill(fifelse(is.na(age), as.Date(NA), rdate), "nocb"), horsenum][,
age.fill := fifelse(is.na(age), ind4 - ceiling(lubridate::time_length(difftime(rdate.fill, rdate), "years")), age), horsenum]
output:
index rdate horsenum age ind4 rdate.fill age.fill
1: 14704 2009-03-01 K123 NA 10 2015-09-06 3
2: 14767 2009-03-01 K212 NA 9 2015-10-10 2
3: 39281 2011-10-09 K123 NA 10 2015-09-06 6
4: 39561 2011-10-19 K212 NA 9 2015-10-10 5
5: 74560 2015-04-07 K212 NA 9 2015-10-10 8
6: 77972 2015-09-06 K123 10 NA 2015-09-06 10
7: 79111 2015-10-10 K212 9 NA 2015-10-10 9
8: 84233 2016-03-28 K212 10 NA 2016-03-28 10
data[,age.fill := nafill(age,'nocb'),by=horsenum][,
rdate.fill:=ifelse(is.na(age),rdate[which.min(age.fill==age)],rdate),by=horsenum][,
age.fill:=unclass(age.fill - round((rdate.fill-rdate)/365))
]
index rdate horsenum age ind4 age.fill rdate.fill
1: 14704 2009-03-01 K123 NA 10 3 2015-09-06
2: 14767 2009-03-01 K212 NA 9 2 2015-10-10
3: 39281 2011-10-09 K123 NA 10 6 2015-09-06
4: 39561 2011-10-19 K212 NA 9 5 2015-10-10
5: 74560 2015-04-07 K212 NA 9 8 2015-10-10
6: 77972 2015-09-06 K123 10 NA 10 2015-09-06
7: 79111 2015-10-10 K212 9 NA 9 2015-10-10
8: 84233 2016-03-28 K212 10 NA 10 2016-03-28
Your algorithm systematically underestimates age. For example, horse K212's estimated age on 2015-04-07 (row 5) is 8. However, we know K212's age on 2016-03-28 is 10 (row 8), so K212 must be 9 on 2015-04-07, not 8. Here I address this problem by calculating an estimated birthdate from each non-NA rdate, then calculating the earliest estimated birthdate for each horse.
library(data.table)
data=data.table(index=c(14704L,14767L,39281L,39561L,74560L,77972L,79111L,84233L),rdate=structure(c(14304,14304,15256,15266,16532,16684,16718,16888),class="Date"),horsenum=c("K123","K212","K123","K212","K212","K123","K212","K212"),age=c(NA,NA,NA,NA,NA,10,9,10))
lt = data[!is.na(age),as.POSIXlt(rdate)]
lt$year = lt$year - data[!is.na(age),age]
data[!is.na(age),bday:=as.Date(lt)]
data[,bday:=min(bday,na.rm=T),horsenum]
data[,age.fill:=floor(as.numeric(rdate-bday)/365)]
data[order(index)]
Output:
index rdate horsenum age bday age.fill
1: 14704 2009-03-01 K123 NA 2005-09-06 3
2: 14767 2009-03-01 K212 NA 2006-03-28 2
3: 39281 2011-10-09 K123 NA 2005-09-06 6
4: 39561 2011-10-19 K212 NA 2006-03-28 5
5: 74560 2015-04-07 K212 NA 2006-03-28 9
6: 77972 2015-09-06 K123 10 2005-09-06 10
7: 79111 2015-10-10 K212 9 2006-03-28 9
8: 84233 2016-03-28 K212 10 2006-03-28 10
Note: this algorithm could be improved. Consider K212 is 9 on 2015-10-10 and 10 on 2016-03-28. This means that K212's actual birthday is after 10-10 and before 3-28. Instead of assuming 3-28, we could assume it is halfway between 10-10 and 3-28, or, more specifically, if there is more than one estimated birthdate, calculate both the max and the min possible birthdate for each horse, then find the date that's halfway between max - 1 year and min.
The approach below is slightly different:
It calculates, based on the given ages, the possible range of the horse's birthday. It then uses this window to calculate the minimum and maximum age a horse can have on the given rdate.
So, the more info you have on the horse's ages, the smaller the window of possible birthdays, and the bigger the chance that the minimum estimated age equals the maximum estimated age (if they are the same, you know the age of the horse for sure).
Here we go:
library( data.table )
library( lubridate ) #for the %m+% and %m-% operators
library( intervals ) #to calculate with intervals and find overlaps
library( eeptools ) #for the age_calc function: calculates the age given a date and a birthday (respects leap years, etc.)
#read sample data
DT <- fread("
index rdate horsenum age ind4
14704 2009-03-01 K123 NA 10
14767 2009-03-01 K212 NA 9
39281 2011-10-09 K123 NA 10
39561 2011-10-19 K212 NA 9
74560 2015-04-07 K212 NA 9
77972 2015-09-06 K123 10 NA
79111 2015-10-10 K212 9 NA
84233 2016-03-28 K212 10 NA")
#convert dates to POSIXct
DT[, rdate := as.POSIXct(rdate) ]
#set keys
setkey( DT, horsenum, rdate, age )
#calculate bandwidth date of birth (dob) based on age and date
DT[!is.na( age ), dob_min := as.integer( rdate %m-% lubridate::years(age + 1) %m+% lubridate::days(1) ) ]
DT[!is.na( age ), dob_max := as.integer( rdate %m-% lubridate::years(age) ) ]
#function to get the overlap of birthday intervals
myfun <- function( y ) {
all_intervals <- intervals::Intervals( as.matrix( y ), check_valid = TRUE )
int_min <- all_intervals[1]
for (i in 1:nrow(all_intervals) ) int_min <- interval_intersection( all_intervals[1], all_intervals[i] )
as.data.table( int_min )
}
#get range of possible date of birth for each horsenum
dob_range <- DT[ !is.na(age), myfun( .SD ), by = .(horsenum), .SDcols = c("dob_min", "dob_max") ]
dob_range <- dob_range[, .(horsenum, dob_from = as.POSIXct(V1, origin = "1970-01-01"),
dob_to = as.POSIXct(V2, origin = "1970-01-01"))]
#use found ranges of birthday to estimate ages
#first join dob-ranges by horsenum
DT[ dob_range, `:=`( dob_from = i.dob_from, dob_to = i.dob_to), on = .(horsenum)]
#now calculate ages (minimum and maximum)
DT[, age_min := floor( eeptools::age_calc( as.Date(dob_to), as.Date(rdate), units= "years" ) )]
DT[, age_max := floor( eeptools::age_calc( as.Date(dob_from), as.Date(rdate), units= "years" ) )]
#remove helper columns
DT[, `:=`( dob_min = NULL, dob_max = NULL, dob_from = NULL, dob_to = NULL)]
# index rdate horsenum age ind4 age_min age_max
# 1: 14704 2009-03-01 K123 NA 10 3 4
# 2: 39281 2011-10-09 K123 NA 10 6 7
# 3: 77972 2015-09-06 K123 10 NA 10 10
# 4: 14767 2009-03-01 K212 NA 9 2 3
# 5: 39561 2011-10-19 K212 NA 9 5 6
# 6: 74560 2015-04-07 K212 NA 9 9 9
# 7: 79111 2015-10-10 K212 9 NA 9 9
# 8: 84233 2016-03-28 K212 10 NA 10 10

R: Populate only next two NA values (grouped by individual) [duplicate]

Day difference between rows related to other row that is not NA

Suppose I have a data frame (df) like this:
Date x
20.01.2016 34
21.01.2016 28
22.01.2016 NA
23.01.2016 NA
24.01.2016 56
25.01.2016 NA
26.01.2016 28
I want to add a column (z) to this data frame, like so:
Date x z
20.01.2016 34 -
21.01.2016 28 1
22.01.2016 NA NA
23.01.2016 NA NA
24.01.2016 56 3
25.01.2016 NA NA
26.01.2016 28 2
where z shows the day difference between the related row's date and closest previous date (where x is not NA).
For example for the date 24.01.2016 the closest previous date is 21.01.2016 where x is not NA. So the day difference of these two dates is 3.
How can I do this using R?
I will be very glad for any help. Thanks a lot.
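For convenience, a sketch of the data above as code (my reconstruction; Date is left as character so it can be converted as in the answers below):
df <- data.frame(Date = c("20.01.2016", "21.01.2016", "22.01.2016", "23.01.2016",
                          "24.01.2016", "25.01.2016", "26.01.2016"),
                 x = c(34, 28, NA, NA, 56, NA, 28),
                 stringsAsFactors = FALSE)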
Considering that your date variable is of class Date (i.e. df$Date <- as.Date(df$Date, format = '%d.%m.%Y')), then:
df$z[!is.na(df$x)] <- c(NA, diff.difftime(df$Date[!is.na(df$x)]))
df
# Date x z
#1 2016-01-20 34 NA
#2 2016-01-21 28 1
#3 2016-01-22 NA NA
#4 2016-01-23 NA NA
#5 2016-01-24 56 3
#6 2016-01-25 NA NA
#7 2016-01-26 28 2
We can use data.table
library(data.table)
setDT(df)[, Date := as.IDate(Date, "%d.%m.%Y")][!is.na(x), z := Date - shift(Date)]
df
# Date x z
#1: 2016-01-20 34 NA
#2: 2016-01-21 28 1
#3: 2016-01-22 NA NA
#4: 2016-01-23 NA NA
#5: 2016-01-24 56 3
#6: 2016-01-25 NA NA
#7: 2016-01-26 28 2

Find row of the next instance of the value in R

I have two columns Time and Event. There are two events A and B. Once an event A takes place, I want to find when the next event B occurs. Column Time_EventB is the desired output.
This is the data frame:
df <- data.frame(Event = sample(c("A", "B", ""), 20, replace = TRUE), Time = paste("t", seq(1,20)))
What is the code in R for finding the next instance of a value (B in this case)?
Once the instance of B is found, what is the code to return the value of the corresponding Time column?
The code should be something like this:
data$Time_EventB <- ifelse(data$Event == "A", <Code for returning time of next instance of B>, "")
In Excel this can be done using VLOOKUP.
Here's a simple solution:
set.seed(1)
df <- data.frame(Event = sample(c("A", "B", ""),size=20, replace=T), time = 1:20)
as <- which(df$Event == "A")
bs <- which(df$Event == "B")
next_b <- sapply(as, function(a) {
  diff <- bs - a
  if (all(diff < 0)) return(NA)
  bs[min(diff[diff > 0]) == diff]
})
df$next_b <- NA
df$next_b[as] <- df$time[next_b]
> df
Event time next_b
1 A 1 2
2 B 2 NA
3 B 3 NA
4 4 NA
5 A 5 8
6 6 NA
7 7 NA
8 B 8 NA
9 B 9 NA
10 A 10 14
11 A 11 14
12 A 12 14
13 13 NA
14 B 14 NA
15 15 NA
16 B 16 NA
17 17 NA
18 18 NA
19 B 19 NA
20 20 NA
Here's an attempt using a "rolling join" from the data.table package:
library(data.table)
setDT(df)
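# roll = -Inf rolls backwards: each row is matched to the nearest B time at or
# after its own time (next observation carried backward); nextb is then set to
# NA for rows whose Event is not "A"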
df[Event=="B", .(time, nextb=time)][df, on="time", roll=-Inf][Event != "A", nextb := NA][]
# time nextb Event
# 1: 1 2 A
# 2: 2 NA B
# 3: 3 NA B
# 4: 4 NA
# 5: 5 8 A
# 6: 6 NA
# 7: 7 NA
# 8: 8 NA B
# 9: 9 NA B
#10: 10 14 A
#11: 11 14 A
#12: 12 14 A
#13: 13 NA
#14: 14 NA B
#15: 15 NA
#16: 16 NA B
#17: 17 NA
#18: 18 NA
#19: 19 NA B
#20: 20 NA
Using data borrowed from @thc.
