I have a data frame with dates and numbers called df. I have another data frame with start and end dates called date_ranges.
My goal is to filter/subset df so that it only shows the rows whose dates fall between the start and end dates in each row of date_ranges. Here is my code so far:
df_date <- as.Date(as.Date('2010-01-01'):as.Date('2010-04-30'), origin = "1970-01-01")
df_numbers <- c(1:120)
df <- data.frame(df_date, df_numbers)
start_dates <- as.Date(c("2010-01-06", "2010-02-01", '2010-04-15'))
end_dates <- as.Date(c("2010-01-23", "2010-02-06", '2010-04-29'))
date_ranges <- data.frame(start_dates, end_dates)
# Attempting to filter df by start and end dates
for (i in range(date_ranges$start_dates)){
  for (j in range(date_ranges$end_dates)){
    print(
      df %>%
        filter(between(df_date, i, j)))
  }
}
The first and third results of the nested for loop are what I want, but not the second. The first and third give me the dates and values of df between their respective start and end dates, but the second result spans from the earliest date to the latest date. How can I fix this loop to exclude the second result?
A tidyverse approach could be to create a sequence of dates between start_dates and end_dates and join with df to keep only the dates which lie in each range.
library(dplyr)
date_ranges %>%
  mutate(df_date = purrr::map2(start_dates, end_dates, seq, "day")) %>%
  tidyr::unnest(df_date) %>%
  select(-start_dates, -end_dates) %>%
  left_join(df, by = 'df_date')
# A tibble: 39 x 2
# df_date df_numbers
# <date> <int>
# 1 2010-01-06 6
# 2 2010-01-07 7
# 3 2010-01-08 8
# 4 2010-01-09 9
# 5 2010-01-10 10
# 6 2010-01-11 11
# 7 2010-01-12 12
# 8 2010-01-13 13
# 9 2010-01-14 14
#10 2010-01-15 15
# … with 29 more rows
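If your dplyr is recent enough, the same result can be had with a non-equi join, which avoids materialising the full date sequences (a sketch, assuming dplyr >= 1.1.0 for join_by()):
library(dplyr)
df %>%
  inner_join(date_ranges,
             by = join_by(between(df_date, start_dates, end_dates))) %>%
  select(df_date, df_numbers)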
You can try looping through the indices instead:
for (i in seq_along(date_ranges$start_dates)){
  print(
    df %>%
      filter(between(df_date, date_ranges$start_dates[i], date_ranges$end_dates[i])))
}
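If you want the subsets as one object rather than printed output, the same row-wise iteration can be collected with purrr (a sketch; range_id is a hypothetical label column):
library(dplyr)
library(purrr)
results <- pmap(date_ranges, function(start_dates, end_dates) {
  filter(df, between(df_date, start_dates, end_dates))
})
bind_rows(results, .id = "range_id")  # one data frame, tagged by range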
Base R solution:
# Your data creation can be simplified:
df <- data.frame(df_date = seq.Date(as.Date('2010-01-01', "%Y-%m-%d"), as.Date('2010-04-30', "%Y-%m-%d"), by = 1),
                 df_numbers = c(1:120))
# Store start and end date vectors to filter the data.frame:
start_dates <- as.Date(c("2010-01-06", "2010-02-01", '2010-04-15'))
end_dates <- as.Date(c("2010-01-23", "2010-02-06", '2010-04-29'))
# Subset the data to extract records whose dates match a start or end date:
df[df$df_date %in% c(start_dates, end_dates),]
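Note that %in% only keeps rows whose dates exactly equal one of the start or end dates. If you instead want every row inside each range in base R, one option is Map() (a sketch):
ranges <- Map(function(s, e) df[df$df_date >= s & df$df_date <= e, ],
              start_dates, end_dates)
do.call(rbind, ranges)  # stack the three subsets into one data frame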
I want to count, for each row, the number of rows within a certain time range after grouping by id. For instance, take a 1-month window around each datetime entry in the column "cleaned_date".
head(data$cleaned_date)
[1] "2004-10-11 CDT" "2008-09-10 CDT" "2011-10-25 CDT" "2011-12-31 CST"
The dates are in POSIXct format.
For the first entry, I need to count the number of rows within the time from 2004-09-11 to 2004-11-11, for the second entry, count the number of rows within the time from 2008-08-10 to 2008-10-10, so on and so forth.
I used roughly the following code
data %>%
  group_by(id) %>%
  filter(cleaned_date %within%
           interval(cleaned_date - 24 * 60 * 60 * 30, cleaned_date + 24 * 60 * 60 * 30)) %>%
  mutate(counts = n())
But it does not seem to work and I got counts as an empty column. Any help would be appreciated, thanks!
A reproducible example can be the following:
The input is
cleaned_date id
1 2008-09-11 A
2 2008-09-10 B
3 2008-09-30 B
4 2011-10-25 A
5 2011-11-14 A
And I want the output to be
cleaned_date id counts
1 2008-09-11 A 1
2 2008-09-10 B 2
3 2008-09-30 B 2
4 2011-10-25 A 2
5 2011-11-14 A 2
For the first entry, I want to count the rows in the timeframe 2008-08-11 to 2008-10-11; row 2 falls in that window, but since we group by "id" and it has a different id, it does not count. For the second entry, I want to count the rows in the timeframe 2008-08-10 to 2008-10-10; rows 2 and 3 satisfy this, so the count is 2. For the third entry, I want to count the rows in the timeframe 2008-08-30 to 2008-10-30; rows 2 and 3 satisfy again, and so on.
Note that the actual dataset I would like to operate on has millions of rows, so it might be more efficient to use tidyverse rather than base R.
Perhaps not the most elegant solution.
# input data. Dates as character vector
input <- data.frame(
  cleaned_date = c("2008-09-11", "2008-09-10", "2008-09-30", "2011-10-25", "2011-11-14"),
  id = c("A", "B", "B", "A", "A")
)
# function to create a date window n months around specified date
window <- function(x, n = 1){
  x <- rep(as.POSIXlt(x), 2)
  x[1]$mon <- x[1]$mon - n
  x[2]$mon <- x[2]$mon + n
  return(format(seq(from = x[1], to = x[2], by = "day"), format = "%Y-%m-%d"))
}
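For instance, a quick check of the helper:
head(window("2008-09-11"), 3)
# "2008-08-11" "2008-08-12" "2008-08-13"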
# find counts for each row
input$counts <- unlist(lapply(1:nrow(input), function(x){
  length(which((input$cleaned_date %in% window(input$cleaned_date[x])) & input$id == input$id[x]))
}))
input
cleaned_date id counts
1 2008-09-11 A 1
2 2008-09-10 B 2
3 2008-09-30 B 2
4 2011-10-25 A 2
5 2011-11-14 A 2
Edit for large datasets:
# dummy dataset with 1,000,000 rows
years <- c(2000:2020)
months <- c(1:12)
days <- c(1:20)
n <- 1000000
dates <- paste(sample(years, size = n, replace = T), sample(months, size = n, replace = T),
               sample(days, size = n, replace = T), sep = "-")
groups <- sample(c("A","B","C"), size = n, replace = T)
input <- data.frame(
  cleaned_date = dates,
  id = groups
)
input$cleaned_date <- format(as.POSIXlt(input$cleaned_date), format="%Y-%m-%d")
# sort data by date; this is required so the appended counts line up with the rows, and it also gives a small boost in performance
input <- input[order(input$cleaned_date),]
counts <- NULL
#pb <- progress::progress_bar$new(total = length(unique(input$cleaned_date)))
t1 <- Sys.time()
# split up vectorization for each unique date.
for(date in unique(input$cleaned_date)){
  #pb$tick()
  w <- window(date)
  tmp <- input[which(input$cleaned_date %in% w),]
  tmp_counts <- unlist(lapply(which(tmp$cleaned_date == date), function(x){
    length(which(tmp$id == tmp$id[x]))
  }))
  counts <- c(counts, tmp_counts)
}
# add counts to dataset
input$counts <- counts
# optional, re-order data to original format
input <- input[order(as.numeric(rownames(input))),]
print(Sys.time() - t1)
Time difference of 3.247204 mins
If you want to go faster, you can run the loop in parallel
library(foreach)
library(doParallel)
cores <- detectCores()
cl <- makeCluster(cores[1] - 1)
registerDoParallel(cl)
dates = unique(input$cleaned_date)
t1 <- Sys.time()
counts <- foreach(i = 1:length(dates), .combine = "c") %dopar% {
  w <- window(dates[i])
  tmp <- input[which(input$cleaned_date %in% w),]
  tmp_counts <- unlist(lapply(which(tmp$cleaned_date == dates[i]), function(x){
    length(which(tmp$id == tmp$id[x]))
  }))
  tmp_counts
}
stopCluster(cl)
input$counts <- counts
input <- input[order(as.numeric(rownames(input))),]
print(Sys.time() - t1)
Time difference of 37.37211 secs
Note, I'm running this on a MacBook Pro with a 2.3 GHz Quad-Core Intel Core i7 and 16 GB of RAM.
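On macOS/Linux you could also register a fork-based backend, which avoids copying input to each worker; the foreach loop above stays unchanged (a sketch; on Windows you still need a PSOCK cluster like the one above):
library(doParallel)
registerDoParallel(cores = parallel::detectCores() - 1)  # forked workers on Unix-alikes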
It is still hard to determine exactly what you're trying to accomplish, but this will at least get you counts for a specified date range:
df %>%
  group_by(id) %>%
  filter(cleaned_date >= "2008-08-11" & cleaned_date <= "2008-10-11") %>%
  mutate(counts = n())
Will give us:
cleaned_date id counts
<date> <chr> <int>
1 2008-09-11 A 1
2 2008-09-10 B 2
3 2008-09-30 B 2
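If you need the count for every row's own window rather than one fixed range, a per-row version can be sketched with purrr, using a plain ±30-day window as a stand-in for "1 month":
library(dplyr)
library(purrr)

df <- tibble(
  cleaned_date = as.Date(c("2008-09-11", "2008-09-10", "2008-09-30",
                           "2011-10-25", "2011-11-14")),
  id = c("A", "B", "B", "A", "A")
)

df %>%
  group_by(id) %>%
  mutate(counts = map_int(cleaned_date,
                          ~ sum(abs(cleaned_date - .x) <= 30))) %>%  # count rows of this id within 30 days
  ungroup()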
I have 900 files named like 20120412_bwDD2yYa.txt. The first part up to the _ is in the year-month-day format. Some days have multiple files associated with them.
I'd like to use the dates extracted from the file names to compile a time series where the dates are on the x axis and the number of files is on the y axis.
How can I do this?
Here is a solution with Base R. Since the question does not include a reproducible example, we'll simulate the file names, parse out the dates, and create the counts by date.
# use list.files() to extract files from directory
files <- list.files(path = "./data", pattern = "\\.txt$", full.names = FALSE)
# simulate result from list.files()
files <- c("20120101_aaa.txt", "20120101_bbb.txt", "20120102_ccc.txt")
# extract dates from file names
date <- as.Date(substr(files, 1, 8), "%Y%m%d")
df <- data.frame(date, count = rep(1, length(date)))
aggregate(count ~ date, data = df, sum)
...and the output:
date count
1 2012-01-01 2
2 2012-01-02 1
dplyr solution
A solution with dplyr::summarise() looks like this:
files <- list.files(path = "./data", pattern = "\\.txt$", full.names = FALSE)
# simulate result from list.files()
files <- c("20120101_aaa.txt","20120101_bbb.txt","20120102_ccc.txt")
library(dplyr)
data.frame(date = as.Date(substr(files, 1, 8), "%Y%m%d")) %>%
  group_by(date) %>%
  summarise(count = n())
# A tibble: 2 x 2
date count
<date> <int>
1 2012-01-01 2
2 2012-01-02 1
Accounting for dates with no files
In response to a comment on my answer, here is a solution that fills in gaps in the file list where there are days with 0 files. We take the minimum and maximum dates from the file list and create a data frame containing the sequence of dates. Then we left_join() this with the previously aggregated data, and recode NA values for count to 0.
# create a gap in dates with files
files <- c("20120101_aaa.txt","20120101_bbb.txt","20120102_ccc.txt",
"20120104_aaa.txt","20120104_aab.txt","20120104_aac.txt")
library(dplyr)
data.frame(date = as.Date(substr(files, 1, 8), "%Y%m%d")) %>%
  group_by(date) %>%
  summarise(count = n()) -> fileCounts
# create df with all dates, left_join() and recode NA to 0
data.frame(date = as.Date(min(fileCounts$date):max(fileCounts$date),
                          origin = "1970-01-01")) %>%
  left_join(., fileCounts) %>%
  mutate(count = if_else(is.na(count), 0, as.numeric(count)))
...and the output:
Joining, by = "date"
date count
1 2012-01-01 2
2 2012-01-02 1
3 2012-01-03 0
4 2012-01-04 3
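tidyr::complete() can do the same gap filling in one step (a sketch, reusing the fileCounts frame from above):
library(tidyr)
fileCounts %>%
  complete(date = seq(min(date), max(date), by = "day"),
           fill = list(count = 0))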
You can use table to count frequencies and then stack it to get a dataframe.
Using @Len Greski's files:
files <- c("20120101_aaa.txt","20120101_bbb.txt","20120102_ccc.txt")
stack(table(as.Date(sub('_.*', '', files),"%Y%m%d")))[2:1]
# ind values
#1 2012-01-01 2
#2 2012-01-02 1
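To finish the original task, the counts can be plotted as a time series; here is a minimal sketch with ggplot2, assuming the simulated files from above:
library(ggplot2)
files <- c("20120101_aaa.txt", "20120101_bbb.txt", "20120102_ccc.txt")
counts <- as.data.frame(table(as.Date(substr(files, 1, 8), "%Y%m%d")))
names(counts) <- c("date", "count")
counts$date <- as.Date(counts$date)
ggplot(counts, aes(x = date, y = count)) +
  geom_col() +
  labs(x = "Date", y = "Number of files")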
I need to sum two time series tables.
library(lubridate)
library(timetk)
df1 <- data.frame(date = c("20191228","20191229","20191230","20191231"), value = c(1,2,3,4), income = c(12,14,51,12))
df1$date <- ymd(df1$date)
df1ts <- tk_xts(df1, date_var = date)
df2 <- data.frame(date = c("20191226","20191227","20191228","20191229"), value = c(4,5,6,7), income = c(14,11,53,13))
df2$date <- ymd(df2$date)
df2ts <- tk_xts(df2, date_var = date)
I want to get this
df3ts      value
2019-12-26     4
2019-12-27     5
2019-12-28     7
2019-12-29     9
2019-12-30     3
2019-12-31     4
What kind of function do I need to apply to get this?
You can rbind the dataframes and use aggregate.
library("tidyverse")
rbind(df1,df2) %>%
aggregate(. ~ date, ., FUN=sum)
If you only want the column value, replace the . on the left-hand side of the formula with value (i.e. value ~ date).
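If you want to stay in xts after the tk_xts() conversion, a sketch of the same idea: merge the two series over the union of dates with fill = 0, then add the matching columns (merge() de-duplicates the names to value and value.1):
library(xts)
m <- merge(df1ts, df2ts, fill = 0)      # union of all dates; 0 where a series has no row
df3ts <- m[, "value"] + m[, "value.1"]  # element-wise sum of the two value columns
colnames(df3ts) <- "value"
df3ts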
I have this data frame:
Data <- c("2013-08-03", "2013-09-04", "2013-09-08", "2013-09-12", "2013-11-01")
prezzi <- c(1,2,3,4,5)
t <- data.frame(Data, prezzi)
Time <- "2013-09-11"
I want to filter the data frame to get the row with the nearest date before a given date Time.
The desired output is:
Data Prezzi
2013-09-08 3
I tried this code:
filter(Data == t$Data[which.min(abs(Time - as.Date(t$Data)))])
but it doesn't work. The result is 2013-09-12, but I want the nearest previous date (2013-09-08).
We can subtract 1 from the which.min. If 'Data' is not arranged, we may need to order it first
t1 <- t[order(as.Date(t$Data)),]
i1 <- with(t1, which.min(abs(as.Date(Time) - as.Date(Data)))-1)
t1[i1, , drop = FALSE]
# Data prezzi
#3 2013-09-08 3
If we are using dplyr
library(dplyr)
t %>%
  mutate(Data = as.Date(Data)) %>%
  arrange(Data) %>%
  slice(which.min(abs(as.Date(Time) - Data)) - 1)
# Data prezzi
#1 2013-09-08 3
Update
If the OP is not selecting based on the previous row, but simply wants the latest date at or before 'Time', then use findInterval
Time <- "2013-09-09"
t[findInterval(as.Date(Time), as.Date(t$Data)),]
# Data prezzi
#3 2013-09-08 3
Less straightforward than @akrun's answer: you can filter first for dates below Time and then keep the maximum date in the subset:
library(dplyr)
t %>% filter(as.Date(Data) < as.Date(Time)) %>%
filter(as.Date(Data) == max(as.Date(Data)))
Data prezzi
1 2013-09-08 3
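With more recent dplyr (>= 1.0.0), the second filter() can be written as slice_max() (a sketch):
library(dplyr)
t %>%
  filter(as.Date(Data) < as.Date(Time)) %>%
  slice_max(as.Date(Data), n = 1)   # keep the latest remaining date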
1) Using base R only this gives the largest date prior to Time:
tail(subset(t, Time > format(Data)), 1)
## Data prezzi
## 3 2013-09-08 3
2) or using dplyr:
t %>%
  filter(Time > format(Data)) %>%
  slice(n())
3) or using sqldf:
library(sqldf)
fn$sqldf("select max(Data) Data, prezzi from t
where '$Time' > Data")
## Data prezzi
## 1 2013-09-08 3
One way in base R would be :
#Convert to date class
t$Data <- as.Date(t$Data)
Time <- as.Date("2013-09-11")
#Get difference between date and time
inds <- t$Data - Time
#Select the one with minimum difference
t[inds == max(inds[inds < 0 ]), ]
# Data prezzi
#3 2013-09-08 3
I have 2 dataframes, one representing daily sales figures of different stores (df1) and one representing when each store has been audited (df2). I need to create a new dataframe displaying sales information from each site taken 1 week before each audit (i.e. the information in df2). Some example data, firstly for the daily sales figures from different stores across a certain period:
Dates <- as.data.frame(seq(as.Date("2015/12/30"), as.Date("2016/4/7"),"day"))
Sales <- as.data.frame(matrix(sample(0:50, 30*10, replace=TRUE), ncol=3))
df1 <- cbind(Dates,Sales)
colnames(df1) <- c("Dates","Site.A","Site.B","Site.C")
And for the dates of each audit across different stores:
Store<- c("Store.A","Store.A","Store.B","Store.C","Store.C")
Audit_Dates <- as.data.frame(as.POSIXct(c("2016/1/4","2016/3/1","2016/2/1","2016/2/1","2016/3/1")))
df2 <- as.data.frame(cbind(Store,Audit_Dates ))
colnames(df2) <- c("Store","Audit_Dates")
Of note is that there will be an uneven number of dates within each output (i.e. there may not be a full week's worth of information prior to some store audits). I have previously asked a question addressing a similar problem: Creating a dataframe from an lapply function with different numbers of rows. Below is an answer from that question which would work if I were to consider information from only one store:
library(lubridate)
##Data input
Store.A_Dates <- as.data.frame(seq(as.Date("2015/12/30"), as.Date("2016/4/7"),"day"))
Store.A_Sales <- as.data.frame(matrix(sample(0:50, 10*10, replace=TRUE), ncol=1))
Store.A_df1 <- cbind(Store.A_Dates,Store.A_Sales)
colnames(Store.A_df1) <- c("Store.A_Dates","Store.A_Sales")
Store.A_df2 <- as.Date(c("2016/1/3","2016/3/1"))
##Output
Store.A_output <- lapply(Store.A_df2, function(x) {
  Store.A_df1[difftime(Store.A_df1[,1], x - days(7)) >= 0 & difftime(Store.A_df1[,1], x) <= 0, ]
})
n1 <- max(sapply(Store.A_output, nrow))
output <- data.frame(lapply(Store.A_output, function(x) x[seq_len(n1),]))
But I don't know how I would get this for multiple sites.
Try this:
# Renamed vars for my convenience...
colnames(df1) <- c("t","Store.A","Store.B","Store.C")
colnames(df2) <- c("Store","t")
library(tidyr)
library(dplyr)
# Gather df1 so that df1 and df2 have the same format:
df1 = gather(df1, Store, Sales, -t)
head(df1)
t Store Sales
1 2015-12-30 Store.A 16
2 2015-12-31 Store.A 24
3 2016-01-01 Store.A 8
4 2016-01-02 Store.A 42
5 2016-01-03 Store.A 7
6 2016-01-04 Store.A 46
# This lapply call does not iterate over actual values, just indexes, which allows
# you to subset the data comfortably:
r <- lapply(1:nrow(df2), function(i) {
  audit.t = df2[i, "t"]                        # time of audit
  audit.s = df1[, "Store"] == df2[i, "Store"]  # store audited
  df = df1[audit.s, ]                          # data from audited store
  df[, "audited"] = audit.t                    # add extra column with audit date
  week_before = difftime(df[, "t"], audit.t - (7*24*3600)) >= 0
  week_audit = difftime(df[, "t"], audit.t) <= 0
  df[week_before & week_audit, ]
})
Does this give you the proper subsets?
Also, to summarise your results:
r = do.call("rbind", r) %>%
  group_by(audited, Store) %>%
  summarise(sales = sum(Sales))
r
audited Store sales
<time> <chr> <int>
1 2016-01-04 Store.A 97
2 2016-02-01 Store.B 156
3 2016-02-01 Store.C 226
4 2016-03-01 Store.A 115
5 2016-03-01 Store.C 187
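With dplyr >= 1.1.0 the lapply() step can also be replaced by a non-equi join over the long-format df1 and renamed df2 from above (a sketch; audits and week_before are hypothetical names):
library(dplyr)
audits <- df2 %>%
  transmute(Store, audited = as.Date(t), week_before = as.Date(t) - 7)

df1 %>%
  inner_join(audits, by = join_by(Store, between(t, week_before, audited))) %>%
  group_by(audited, Store) %>%
  summarise(sales = sum(Sales), .groups = "drop")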