R loop/lapply, cumulative totals with group by

I am trying to create new variables in my data set that are cumulative totals which restart based on other variables (using group by). I want these to be new columns in the data set, and this is the part I am struggling with.
Using the data below, I want to create cumulative Sale and Profit columns that will restart for every Product and Product_Cat grouping.
The code below partly gives me what I need, but the variables are not new variables; instead it overwrites the existing Sale/Profit. What am I getting wrong? I imagine this is simple but I haven't found anything.
Note: I'm using lapply as my real data set has 40+ variables that I need to create calculations for.
DT <- setDT(Data)[,lapply(.SD, cumsum), by = .(Product,Product_Cat) ]
Data for example:
Product <- c('A','A','A','B','B','B','C','C','C')
Product_Cat <- c('S1','S1','S2','C1','C1','C1','D1','E1','F1')
Sale <- c(10,15,5,20,15,10,5,5,5)
Profit <- c(2,4,2,6,8,2,4,6,8)
Sale_Cum <- c(10,25,5,20,35,45,5,5,5)
Profit_Cum <- c(2,6,2,6,14,16,4,6,8)
Data <- data.frame(Product,Product_Cat,Sale,Profit)
Desired_Data <- data.frame(Product,Product_Cat,Sale,Profit,Sale_Cum,Profit_Cum)

This doesn't use the group by per se but I think it achieves what you're looking for in that it is easily extensible to many columns:
D2 <- data.frame(lapply(Data[,c(3,4)], cumsum))
names(D2) <- gsub("$", "_cum", names(Data[,c(3,4)]))
Data <- cbind(Data, D2)
If you have 40+ columns just change the c(3,4) to include all the columns you're after.
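For instance, with many columns you could also select every value column by name rather than by position (a small sketch along the same lines, assuming Product and Product_Cat are the only non-value columns):
val_cols <- setdiff(names(Data), c("Product", "Product_Cat"))
D2 <- data.frame(lapply(Data[val_cols], cumsum))  # cumulative sums, no grouping yet
names(D2) <- paste0(val_cols, "_cum")
Data <- cbind(Data, D2)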
EDIT:
I forgot that the OP wanted it to reset for each category. In that case, you can modify your original code:
D2 <- setDT(Data)[,lapply(.SD, cumsum), by = .(Product,Product_Cat) ]
names(D2)[c(-1,-2)] <- gsub("$", "_cum", names(Data)[c(-1,-2)])
cbind(Data, D2[,c(-1,-2)])

library(data.table)
setDT(Data)
cols <- names(Data)[3:4]
Data[, paste0(cols, '_cumsum') := lapply(.SD, cumsum)
     , by = .(Product, Product_Cat)
     , .SDcols = cols]
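The := adds the cumulative columns by reference, so the original Sale and Profit columns are kept. With the sample data the result should match Desired_Data above (apart from the _cumsum suffix used here):
Data
#    Product Product_Cat Sale Profit Sale_cumsum Profit_cumsum
# 1:       A          S1   10      2          10             2
# 2:       A          S1   15      4          25             6
# 3:       A          S2    5      2           5             2
# 4:       B          C1   20      6          20             6
# 5:       B          C1   15      8          35            14
# 6:       B          C1   10      2          45            16
# 7:       C          D1    5      4           5             4
# 8:       C          E1    5      6           5             6
# 9:       C          F1    5      8           5             8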

Data:
structure(list(Product = structure(c(1L, 1L, 1L, 2L, 2L, 2L,
3L, 3L, 3L), .Label = c("A", "B", "C"), class = "factor"), Product_Cat = structure(c(5L,
5L, 6L, 1L, 1L, 1L, 2L, 3L, 4L), .Label = c("C1", "D1", "E1",
"F1", "S1", "S2"), class = "factor"), Sale = c(10L, 15L, 5L,
20L, 15L, 10L, 5L, 5L, 5L), Profit = c(2L, 4L, 2L, 6L, 8L, 2L,
4L, 6L, 8L), Sale_Cum = c(10, 25, 5, 20, 35, 45, 5, 5, 5), Profit_Cum = c(2,
6, 2, 6, 14, 16, 4, 6, 8)), .Names = c("Product", "Product_Cat",
"Sale", "Profit", "Sale_Cum", "Profit_Cum"), row.names = c(NA,
-9L), class = "data.frame")
We can iteratively slice the data frame based on Product and Product_Cat, and for each iteration assign the output produced by cumsum() to Sale_Cum and Profit_Cum:
cols <- c('Sale', 'Profit')
x <- Data  # the example data frame from the question
for (column in cols){
  x[, paste0(column, '_Cum')] <- 0
  for (p in unique(x$Product)){
    for (pc in unique(x$Product_Cat)){
      x[x$Product == p & x$Product_Cat == pc, paste0(column, '_Cum')] <-
        cumsum(x[x$Product == p & x$Product_Cat == pc, column])
    }
  }
}
print(x)
# Product Product_Cat Sale Profit Sale_Cum Profit_Cum
# 1 A S1 10 2 10 2
# 2 A S1 15 4 25 6
# 3 A S2 5 2 5 2
# 4 B C1 20 6 20 6
# 5 B C1 15 8 35 14
# 6 B C1 10 2 45 16
# 7 C D1 5 4 5 4
# 8 C E1 5 6 5 6
# 9 C F1 5 8 5 8

Here is some pretty poor code that does everything step by step
#sample data
d<-sample(1:10)
f<-sample(1:10)
p<-c("f","f","f","f","q","q","q","w","w","w")
pc<-c("c","c","d","d","d","v","v","v","b","b")
cc<-data.table(p,pc,d,f)
#storing the values that are overwritten first.
three<-cc[,3]
four<- cc[,4]
#applying your function
dt <- setDT(cc)[,lapply(.SD,cumsum), by=.(p,pc)]
#binding the stored values to your function and renaming everything.
x<-cbind(dt,three,four)
colnames(x)[5]<-"sale"
colnames(x)[6]<-"profit"
colnames(x)[4]<-"CumSale"
colnames(x)[3]<-"CumProfit"
#reordering the columns
xx<-x[,c("p","pc","profit","sale","CumSale","CumProfit")]
xx

Related

Calculating multiple variables in a column based on comparing dates in rows as well as column headers

I am unable to think of an easy way to do this.
Sample data is:
set.seed(101)
b=sample(seq(as.Date("2010/1/1"), as.Date("2010/1/10"), "days"), 3)
f1=data.frame(a=1:length(b), b=b)
col_names=paste(c('x', 'y'), sort(rep(seq(as.Date("2010/1/1"), as.Date("2010/1/10"), "days"), 2)), sep = '')
set.seed((102))
f2 <- data.frame(matrix(sample(0:5,30, replace = T), ncol = length(col_names), nrow = nrow(f1)))
names(f2)=col_names
f3=data.frame(f1, f2)
or
dput(f3)
structure(list(a = 1:3, b = structure(c(14613, 14610, 14615), class = "Date"),
x2010.01.01 = c(3L, 2L, 4L), y2010.01.01 = c(3L, 0L, 2L),
x2010.01.02 = c(5L, 1L, 5L), y2010.01.02 = c(2L, 5L, 4L),
x2010.01.03 = c(4L, 2L, 3L), y2010.01.03 = c(5L, 4L, 2L),
x2010.01.04 = c(5L, 5L, 5L), y2010.01.04 = c(3L, 3L, 3L),
x2010.01.05 = c(1L, 2L, 0L), y2010.01.05 = c(2L, 2L, 2L),
x2010.01.06 = c(3L, 2L, 4L), y2010.01.06 = c(3L, 0L, 2L),
x2010.01.07 = c(5L, 1L, 5L), y2010.01.07 = c(2L, 5L, 4L),
x2010.01.08 = c(4L, 2L, 3L), y2010.01.08 = c(5L, 4L, 2L),
x2010.01.09 = c(5L, 5L, 5L), y2010.01.09 = c(3L, 3L, 3L),
x2010.01.10 = c(1L, 2L, 0L), y2010.01.10 = c(2L, 2L, 2L)), class = "data.frame", row.names = c(NA,
-3L))
I'm trying to create new columns based on comparing the b date to the column headers. I'm calculating a 1-day average, a 3-day average, and so on.
In the first case the date is 4th Jan, which implies the 1-day column would be x2010.01.04, the 3-day average would include (x2010.01.04, x2010.01.03, x2010.01.02), and so on. This needs to be done for both the x and y variables.
Finally the output should look like:
  a          b oneday_avg_x oneday_avg_y threeday_avg_x threeday_avg_y
1 1 2010-01-04            5            3 (5+4+5)/3≈4.67           3.33
2 2 2010-01-01            2            0              2              0
3 3 2010-01-06            4            2              3           2.33
Let me know if anything is missing.
We can use apply separately for the "x" and "y" values. We strip the leading "x" or "y" from the column names, convert them to Date, and match against the b value of each row. From the apply call we return the value at the matched index along with the mean of the three values up to and including it. As apply converts everything to character, we use type.convert at the end to restore appropriate column classes.
x_cols <- grep("^x", names(f3))
y_cols <- grep("^y", names(f3))
out <- f3[1:2]
out[c("oneday_avg_x", "threeday_avg_x")] <- t(apply(f3[c(2, x_cols)], 1, function(x) {
inds <- match(as.Date(x[[1]]), as.Date(sub("^x", "", names(x)), "%Y.%m.%d"))
c(x[inds], mean(as.numeric(x[(inds - 2) : inds]), na.rm = TRUE))
}))
out[c("oneday_avg_y", "threeday_avg_y")] <- t(apply(f3[c(2, y_cols)], 1, function(x) {
inds <- match(as.Date(x[[1]]), as.Date(sub("^y", "", names(x)), "%Y.%m.%d"))
c(x[inds], mean(as.numeric(x[(inds - 2) : inds]), na.rm = TRUE))
}))
out <- type.convert(out)
out
# a b oneday_avg_x threeday_avg_x oneday_avg_y threeday_avg_y
#1 1 2010-01-04 5 4.6667 3 3.3333
#2 2 2010-01-01 2 2.0000 0 0.0000
#3 3 2010-01-06 4 3.0000 2 2.3333
EDIT
A more scalable solution that can handle multiple-day averages using a single apply:
x_cols <- grep("^x", names(f3))
y_cols <- grep("^y", names(f3))
names(f3)[-(1:2)] <- gsub("\\.", "-", sub(".{1}", "", names(f3)[-(1:2)]))
out <- f3[1:2]
num <- c(1, 3)
new_cols <- c(outer(num, c("x", "y"), function(x, y) paste0(x, "_day_avg_", y)))
out[new_cols] <- t(apply(f3, 1, function(x) {
  x_ind <- match(x[[2]], names(x)[x_cols])
  x_vals <- sapply(num, function(y)
    mean(as.numeric(x[x_cols][max((x_ind - y + 1), 1):x_ind])))
  y_ind <- match(x[[2]], names(x)[y_cols])
  y_vals <- sapply(num, function(y)
    mean(as.numeric(x[y_cols][max((y_ind - y + 1), 1):y_ind])))
  c(x_vals, y_vals)
}))
out
# a b 1_day_avg_x 3_day_avg_x 1_day_avg_y 3_day_avg_y
#1 1 2010-01-04 5 4.666667 3 3.333333
#2 2 2010-01-01 2 2.000000 0 0.000000
#3 3 2010-01-06 4 3.000000 2 2.333333
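To add, say, five-day averages as well, only the num vector needs to change (a hypothetical extension of the code above; the rest of the apply call stays the same):
num <- c(1, 3, 5)
new_cols <- c(outer(num, c("x", "y"), function(x, y) paste0(x, "_day_avg_", y)))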

Number of continuous weeks by group

How do I find the number of continuous weeks by group, counted back from the max date in the dataset?
Say I have this dataframe:
id Week
1 A 2/06/2019
2 A 26/05/2019
3 A 19/05/2019
4 A 12/05/2019
5 A 5/05/2019
6 B 2/06/2019
7 B 26/05/2019
8 B 12/05/2019
9 B 5/05/2019
10 C 26/05/2019
11 C 19/05/2019
12 C 12/05/2019
13 D 2/06/2019
14 D 26/05/2019
15 D 19/05/2019
16 E 2/06/2019
17 E 19/05/2019
18 E 12/05/2019
19 E 5/05/2019
My desired output is:
id count
1: A 5
2: B 2
3: D 3
4: E 1
I am currently converting the dates into a factor to get ordered numbers, and checking against a reference number created from the number of rows in each group.
library(data.table)
df <- structure(list(id = structure(c(1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L,
2L, 3L, 3L, 3L, 4L, 4L, 4L, 5L, 5L, 5L, 5L),
.Label = c("A", "B", "C", "D", "E"), class = "factor"),
Week = structure(c(3L, 4L, 2L, 1L, 5L, 3L, 4L, 1L, 5L, 4L, 2L, 1L, 3L, 4L, 2L, 3L, 2L, 1L, 5L),
.Label = c("12/05/2019", "19/05/2019", "2/06/2019", "26/05/2019", "5/05/2019"), class = "factor")),
class = "data.frame", row.names = c(NA, -19L))
dt <- data.table(df)
dt[, Week_no := as.factor(as.Date(Week, format = "%d/%m/%Y"))]
dt[, Week_no := factor(Week_no)]
dt[, Week_no := as.numeric(Week_no)]
max_no <- max(dt$Week_no)
dt[, Week_ref := max_no:(max_no - .N + 1), by = "id"]
dt[, Week_diff := Week_no - Week_ref]
dt[Week_diff == 0, list(count = .N), by = "id"]
Here's one way to do this:
dt <- dt[, Week := as.Date(Week, format = "%d/%m/%Y")]
ids_having_max <- dt[.(max(Week)), id, on = "Week"]
dt <- dt[.(ids_having_max), on = "id"
][order(-Week), .(count = sum(rleid(c(-7L, diff(Week))) == 1)), by = "id"]
Breaking it into steps:
We leave Week as a date because it can already be compared,
and you can subtract dates to get time differences.
We then get all the ids that contain the maximum date in the whole table.
This is using secondary indices.
We use secondary indices again to filter out those ids that were not part of the previous result
(the dt[.(ids_having_max), on = "id" part).
The last frame is tricky.
We group by id and make sure that rows are ordered by Week in descending order.
Then the logic is as follows.
When you have contiguous weeks,
diff(Week) is always -7 with the chosen sorting.
Computing diff returns a shorter vector because the first result is computed by subtracting the first input element from the second,
so we prepend a -7 to make sure that it is the first element in the input to rleid.
With rleid we assign a 1 to the first -7 and keep the 1 until we see something different from -7.
Something different means weeks stopped being contiguous.
The sum(rleid(c(-7L, diff(Week))) == 1) will simply return how many rows had a rleid equal to 1.
Example of the last part for B:
Differences: -7, -14, -7
After prepending -7: -7, -7, -14, -7
After rleid: 1, 1, 2, 3
From the previous, two had a rleid == 1
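The same check can be reproduced standalone with B's dates from the sample data (a minimal sketch for illustration):
library(data.table)
wk <- as.Date(c("2019-06-02", "2019-05-26", "2019-05-12", "2019-05-05"))  # B's weeks, descending
d <- c(-7L, diff(as.integer(wk)))  # prepend -7 as described above: -7 -7 -14 -7
rleid(d)                           # 1 1 2 3
sum(rleid(d) == 1)                 # 2, the count for B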
Apologies for the dplyr solution, but I presume a similar approach can be achieved more concisely with data.table.
library(dplyr)
df$Week = lubridate::dmy(df$Week)
df %>%
group_by(id) %>%
arrange(id, Week) %>%
# Assign group to each new streak
mutate(new_streak = cumsum(Week != lag(Week, default = 0) + 7)) %>%
add_count(id, new_streak) %>%
slice(n()) # Only keep last week
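For reference, a rough data.table analogue of the same streak idea (a sketch, not the original answer; like the dplyr version it keeps ids that miss the max date, so C would still appear and would need filtering out):
library(data.table)
dt[, Week := as.Date(Week, format = "%d/%m/%Y")]
setorder(dt, id, Week)
# a new streak starts whenever the gap to the previous week is not exactly 7 days
dt[, streak := cumsum(c(TRUE, diff(Week) != 7)), by = id]
dt[, .N, by = .(id, streak)][, .SD[.N], by = id][, .(id, count = N)]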
So I would suggest converting the format of the date column to show the week number ("%W") as follows:
dt[, Week_no := format(as.Date(Week, format = "%d/%m/%Y"),"%W")]
Then find the number of unique week numbers for each id value:
dt[,(length(unique(Week_no))),by="id"]
FULL DISCLOSURE
I realise that when I run this I get a different table than the one you present, as R counts weeks by the week number within the given year.
If this doesn't answer your question, just let me know and I can try to update.

Filtering from changing set in dplyr / data.table instead of for loop

I've got a data.frame with 2 columns: the time of the user request in seconds (ranging from 1 to 86400) and the user ID. After counting the number of requests, unique requests, and IDs of unique requests for every second, I set the time window, say 10 seconds. Now for every second I want to find the list of new unique IDs that appeared in that specific second and didn't appear in the time interval [x-window, x-1] (where x means the specific second), count the number of these IDs, and count the number of unique IDs in the time interval.
I accomplished this using a for loop, but it's really slow. Is there a way to get the same effect using the data.table or dplyr packages (or some other faster technique)?
Simple data:
library(readr)
library(dplyr)
data<- structure(list(Second = c(1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L,
2L, 2L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 5L, 5L, 5L, 5L), UniqueId = c(1L,
1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 2L, 3L, 9L, 10L, 3L, 7L, 11L,
12L, 13L, 1L, 11L, 9L, 14L, 15L)), .Names = c("Second", "UniqueId"
), row.names = c(NA, -23L), class = c("tbl_df", "tbl", "data.frame"
))
>head(data)
# A tibble: 6 × 2
Second UniqueId
<int> <int>
1 1 1
2 1 1
3 1 2
4 1 3
5 1 4
6 1 5
# Statistics per second
data2<- data %>% group_by(Second) %>%
summarise(n_req=n(),n_uniq_req=n_distinct(UniqueId),
Ids=list(unique(UniqueId))) %>%
arrange(Second) %>%
ungroup()
window <- 2 # two second window
data3<-data2 %>% mutate(New_Ids=NA, UniqueUsersInMemory=NA,
NewRequest=NA)
New_Ids=list()
UniqueUsersInMemory=c()
NewRequest=c()
for(i in 1:nrow(data2)){
temp<- unlist(data2[(data2$Second<data2$Second[i] & data2$Second>=data2$Second[i]-window),"Ids"])
New_Ids[[i]]<-setdiff(unlist(data2[i,"Ids"]),temp)
UniqueUsersInMemory[[i]]<-length(unique(temp))
NewRequest[[i]]<-length(unique(New_Ids[[i]]))
}
data3$New_Ids<-New_Ids
data3$UniqueUsersInMemory<-UniqueUsersInMemory
data3$NewRequest<-NewRequest
In the end I'm getting something like this:
After a non-equi join, you can define many summary statistics:
library(data.table)
DT = data.table(data)
setnames(DT, c("s", "id"))
setcolorder(DT, c("id","s"))
setorder(DT)
window = 2
DT[.(id = id, s0 = s - window, s1 = s), on=.(id, s >= s0, s < s1),
.(is_new = .N == 0)
, by=.EACHI][, -2][, .(
n_r = .N,
n_newr = sum(is_new),
n_id = uniqueN(id),
ids = toString(unique(id)),
n_newid = uniqueN(id[is_new]),
newids = toString(unique(id[is_new]))
), keyby=s]
s n_r n_newr n_id ids n_newid newids
1: 1 6 6 5 1, 2, 3, 4, 5 5 1, 2, 3, 4, 5
2: 2 5 3 5 2, 3, 6, 7, 8 3 6, 7, 8
3: 3 4 2 4 3, 7, 9, 10 2 9, 10
4: 4 4 4 4 1, 11, 12, 13 4 1, 11, 12, 13
5: 5 4 2 4 9, 11, 14, 15 2 14, 15
The -2 drops the second column (called s0 in the on=); and the keyby sorts the result by s.
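If you want to inspect the per-request flags before the aggregation, the join step can be run on its own (this is just the first part of the chain above, stored for inspection):
flags <- DT[.(id = id, s0 = s - window, s1 = s), on = .(id, s >= s0, s < s1),
            .(is_new = .N == 0), by = .EACHI]
head(flags)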

Pick a column to multiply with, contingent on value of other variables

I am still taking my first steps with R and have found SO to be a great tool for learning and finding answers to my questions. For this one, though, I did not manage to find any good solution here.
I have a dataframe that can be simplified to this structure:
set.seed(10)
df <- data.frame(v1 = rep(1:2, times=3),
v2 = c("A","B","B","A","B","A"),
v3 = sample(1:6),
xA_1 = sample(1:6),
xA_2 = sample(1:6),
xB_1 = sample(1:6), xB_2 = sample(1:6))
df thus looks like this:
> df
v1 v2 v3 xA_1 xA_2 xB_1 xB_2
1 1 A 4 2 1 3 3
2 2 B 2 6 3 5 4
3 1 B 5 3 2 4 5
4 2 A 3 5 4 2 1
5 1 B 1 4 6 6 2
6 2 A 6 1 5 1 6
I now want R to create a fourth variable, which is dependent on the values of v1 and v2. I achieve this by using the following code:
df <- data.table(df)
df[, v4 := ifelse(v1 == 1 & v2 == "A", v3 * xA_1,
           ifelse(v1 == 1 & v2 == "B", v3 * xB_1,
           ifelse(v1 == 2 & v2 == "A", v3 * xA_2,
           ifelse(v1 == 2 & v2 == "B", v3 * xB_2, v3 * 1))))]
So v4 is created by multiplying v3 with the column whose name contains the v1 and v2 values
(e.g. for row 1: v1=1 and v2=A thus multiply v3=4 with xA_1=2 -> 8).
> df$v4
[1] 8 8 20 12 6 30
Obviously, my ifelse approach is tedious when v1 and v2 in fact have many more distinct values than they do in this example. So I am looking for an efficient way to tell R: if v1 == y & v2 == z, multiply v3 with column xy_z.
I tried writing a for loop, writing a function with y and z as indices, and using the apply function. However, none of this worked as wanted.
I appreciate any ideas!
Here's a base R option:
i <- paste0("x", df$v2, "_", df$v1)
df$v4 <- df$v3 * as.numeric(df[cbind(1:nrow(df), match(i, names(df)))])
For the sample data provided below, it creates a column v4 as:
> df$v4
[1] 25 12 2 6 3 10
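The cbind(1:nrow(df), match(i, names(df))) call builds a two-column row/column index matrix, so the data frame is indexed one cell per row. A tiny standalone illustration of that matrix indexing (hypothetical data, not from the question):
m <- data.frame(a = 1:3, b = 4:6)
m[cbind(1:3, c(1, 2, 1))]  # picks (1,a), (2,b), (3,a): returns 1 5 3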
Or if you want to include the "else" condition to multiply by 1 in case there's no matching column name:
i <- paste0("x", df$v2, "_", df$v1)
tmp <- as.numeric(df[cbind(1:nrow(df), match(i, names(df)))])
df$v4 <- df$v3 * ifelse(is.na(tmp), 1, tmp)
Sample data:
df <- structure(list(v1 = c(1L, 2L, 1L, 2L, 1L, 2L), v2 = structure(c(1L,
2L, 2L, 1L, 2L, 1L), .Label = c("A", "B"), class = "factor"),
v3 = c(5L, 4L, 1L, 6L, 3L, 2L), xA_1 = c(5L, 6L, 3L, 1L,
2L, 4L), xA_2 = c(6L, 4L, 2L, 1L, 3L, 5L), xB_1 = c(4L, 6L,
2L, 5L, 1L, 3L), xB_2 = c(5L, 3L, 2L, 4L, 1L, 6L)), .Names = c("v1",
"v2", "v3", "xA_1", "xA_2", "xB_1", "xB_2"), row.names = c(NA,
-6L), class = "data.frame")
This is a standard "wide" table problem - what you want is harder to do as-is, but easy when the data is "melted":
dt = as.data.table(df)
melt(dt, id.vars = c('v1', 'v2', 'v3'))[variable == paste0('x', v2, '_', v1)
][dt, on = c('v1', 'v2', 'v3'), v3 * value]
#[1] 8 8 20 12 6 30
You can try this:
v4 <- c()
for (i in 1:nrow(df)) {
  col <- paste("x", df$v2[i], "_", df$v1[i], sep = "")
  v4 <- c(v4, df$v3[i] * df[i, col])
}
df$v4 <- v4

Data roll up along with transpose

I want to roll up the data at the unique customer id level, with each observation transposed against it as given below.
Below is the snapshot of my data
basedata <- structure(list(customer = structure(c(1L, 1L, 1L, 1L, 2L, 2L,
2L, 3L, 3L), .Label = c("a", "b", "d"), class = "factor"), obs = c(12L,
11L, 12L, 10L, 3L, 5L, 7L, 8L, 1L)), .Names = c("customer", "obs"
), class = "data.frame", row.names = c(NA, -9L))
Or
customer obs
a 12
a 11
a 12
a 10
b 3
b 5
b 7
d 8
d 1
I want to convert it in the following form
customer obs1 obs2 obs3 obs4
a 12 11 12 10
b 3 5 7 -
d 8 1 - -
I used the following code
basedata$shopping <- unlist(tapply(rawdata$customer, rawdata$customer,
function (x) seq(1, len = length(x))))
reshape(basedata, idvar = "customer", direction = "wide")
It gives the following error
Error in `[.data.frame`(data, , timevar) : undefined columns selected
How can I do it in R and Excel?
Thank you
x <- structure(list(customer = structure(c(1L, 1L, 1L, 1L, 2L, 2L,
2L, 3L, 3L), .Label = c("a", "b", "d"), class = "factor"), obs = c(12L,
11L, 12L, 10L, 3L, 5L, 7L, 8L, 1L)), .Names = c("customer", "obs"
), class = "data.frame", row.names = c(NA, -9L))
I chose to use a couple of extra packages (plyr and reshape2) because I find them easier and more general to use than reshape from the base package.
library(plyr)
library(reshape2)
## add observation number
x2 <- ddply(x,"customer",transform,num=1:length(customer))
## reshape
dcast(x2,customer~num,value.var="obs")
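For the sample data this should give (the column names are the observation numbers produced by dcast):
#   customer  1  2  3  4
# 1        a 12 11 12 10
# 2        b  3  5  7 NA
# 3        d  8  1 NA NA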
A base R way, assuming dat is the data,
> s <- split(dat$obs, dat$customer)
> df <- data.frame(do.call(rbind, lapply(s, function(x){ length(x) <- 4; x })))
> names(df) <- paste0('obs', seq(df))
> df
# obs1 obs2 obs3 obs4
# a 12 11 12 10
# b 3 5 7 NA
# d 8 1 NA NA
If you want the unique customer ID to be a column,
> df2 <- cbind(customer = rownames(df), df)
> rownames(df2) <- seq(nrow(df2))
> df2
# customer obs1 obs2 obs3 obs4
# 1 a 12 11 12 10
# 2 b 3 5 7 NA
# 3 d 8 1 NA NA
I'm assuming that "basedata" and "rawdata" are supposed to be the same (or at least copies of each other). If that's the case, you're simply missing the timevar argument that reshape needs.
Continuing from where you left off:
rawdata$shopping <- unlist(tapply(rawdata$customer, rawdata$customer,
function (x) seq(1, len = length(x))))
## rawdata$shopping <- with(rawdata, ave(customer, customer, FUN = seq_along))
Here's the actual reshaping step:
reshape(rawdata, idvar = "customer", timevar="shopping", direction = "wide")
# customer obs.1 obs.2 obs.3 obs.4
# 1 a 12 11 12 10
# 5 b 3 5 7 NA
# 8 d 8 1 NA NA
