Data roll-up with transpose in R

I want to roll up the data at the unique customer ID level, with each observation transposed against it as given below.
Below is a snapshot of my data:
basedata <- structure(list(customer = structure(c(1L, 1L, 1L, 1L, 2L, 2L,
2L, 3L, 3L), .Label = c("a", "b", "d"), class = "factor"), obs = c(12L,
11L, 12L, 10L, 3L, 5L, 7L, 8L, 1L)), .Names = c("customer", "obs"
), class = "data.frame", row.names = c(NA, -9L))
Or
customer obs
a 12
a 11
a 12
a 10
b 3
b 5
b 7
d 8
d 1
I want to convert it into the following form:
customer  obs1  obs2  obs3  obs4
a           12    11    12    10
b            3     5     7     -
d            8     1     -     -
I used the following code
basedata$shopping <- unlist(tapply(rawdata$customer, rawdata$customer,
function (x) seq(1, len = length(x))))
reshape(basedata, idvar = "customer", direction = "wide")
It gives the following error
Error in `[.data.frame`(data, , timevar) : undefined columns selected
How can I do this in R and Excel?
Thank you.

x <- structure(list(customer = structure(c(1L, 1L, 1L, 1L, 2L, 2L,
2L, 3L, 3L), .Label = c("a", "b", "d"), class = "factor"), obs = c(12L,
11L, 12L, 10L, 3L, 5L, 7L, 8L, 1L)), .Names = c("customer", "obs"
), class = "data.frame", row.names = c(NA, -9L))
I chose to use a couple of extra packages (plyr and reshape2) because I find them easier and more general to use than reshape from the base package.
library(plyr)
library(reshape2)
## add observation number
x2 <- ddply(x,"customer",transform,num=1:length(customer))
## reshape
dcast(x2,customer~num,value.var="obs")
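For reference, the dcast call should spread the observation numbers into columns named after num, roughly:
#   customer  1  2  3  4
# 1        a 12 11 12 10
# 2        b  3  5  7 NA
# 3        d  8  1 NA NA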

A base R way, assuming dat is the data,
> s <- split(dat$obs, dat$customer)
> df <- data.frame(do.call(rbind, lapply(s, function(x){ length(x) <- 4; x })))
> names(df) <- paste0('obs', seq(df))
> df
# obs1 obs2 obs3 obs4
# a 12 11 12 10
# b 3 5 7 NA
# d 8 1 NA NA
If you want the unique customer ID to be a column,
> df2 <- cbind(customer = rownames(df), df)
> rownames(df2) <- seq(nrow(df2))
> df2
# customer obs1 obs2 obs3 obs4
# 1 a 12 11 12 10
# 2 b 3 5 7 NA
# 3 d 8 1 NA NA

I'm assuming that "basedata" and "rawdata" are supposed to be the same (or at least copies of each other). If that's the case, you're simply not specifying what the timevar argument for reshape should be.
Continuing from where you left off:
rawdata$shopping <- unlist(tapply(rawdata$customer, rawdata$customer,
function (x) seq(1, len = length(x))))
## rawdata$shopping <- with(rawdata, ave(customer, customer, FUN = seq_along))
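At this point rawdata should hold a within-customer counter, something like the following (note that this relies on the rows already being sorted by customer, as in the example data):
#   customer obs shopping
# 1        a  12        1
# 2        a  11        2
# 3        a  12        3
# 4        a  10        4
# 5        b   3        1
# 6        b   5        2
# 7        b   7        3
# 8        d   8        1
# 9        d   1        2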
Here's the actual reshaping step:
reshape(rawdata, idvar = "customer", timevar="shopping", direction = "wide")
# customer obs.1 obs.2 obs.3 obs.4
# 1 a 12 11 12 10
# 5 b 3 5 7 NA
# 8 d 8 1 NA NA

Related

R loop/lapply, cumulative totals with group by

I am trying to create new variables in my data set that are cumulative totals which restart based on other variables (using group by)… I want these to be new columns in the data set and this is the part I am struggling with...
Using the data below, I want to create cumulative Sale and Profit columns that will restart for every Product and Product_Cat grouping.
The code below partly gives me what I need, but the result isn't added as new variables; instead it overwrites the existing Sale/Profit columns... what am I getting wrong? I imagine this is simple, but I haven't found anything.
Note: I'm using lapply as my real data set has 40+ variables that I need to create calculations for.
DT <- setDT(Data)[,lapply(.SD, cumsum), by = .(Product,Product_Cat) ]
Data for example:
Product <- c('A','A','A','B','B','B','C','C','C')
Product_Cat <- c('S1','S1','S2','C1','C1','C1','D1','E1','F1')
Sale <- c(10,15,5,20,15,10,5,5,5)
Profit <- c(2,4,2,6,8,2,4,6,8)
Sale_Cum <- c(10,25,5,20,35,45,5,5,5)
Profit_Cum <- c(2,6,2,6,14,16,4,6,8)
Data <- data.frame(Product,Product_Cat,Sale,Profit)
Desired_Data <- data.frame(Product,Product_Cat,Sale,Profit,Sale_Cum,Profit_Cum)
This doesn't use the group by per se but I think it achieves what you're looking for in that it is easily extensible to many columns:
D2 <- data.frame(lapply(Data[,c(3,4)], cumsum))
names(D2) <- gsub("$", "_cum", names(Data[,c(3,4)]))
Data <- cbind(Data, D2)
If you have 40+ columns just change the c(3,4) to include all the columns you're after.
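For example, here is a sketch assuming the columns to accumulate run from column 3 through the last column (num_cols is just an illustrative name; adjust the range to whatever you actually need):
num_cols <- 3:ncol(Data)
D2 <- data.frame(lapply(Data[, num_cols], cumsum))
names(D2) <- paste0(names(Data)[num_cols], "_cum")
Data <- cbind(Data, D2)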
EDIT:
I forgot that the OP wanted it to reset for each category. In that case, you can modify your original code:
DT <- setDT(Data)[, lapply(.SD, cumsum), by = .(Product, Product_Cat)]
names(DT)[c(-1, -2)] <- gsub("$", "_cum", names(Data)[c(-1, -2)])
cbind(Data, DT[, c(-1, -2)])
library(data.table)
setDT(Data)
cols <- names(Data)[3:4]
Data[, paste0(cols, '_cumsum') := lapply(.SD, cumsum)
, by = .(Product, Product_Cat)
, .SDcols = cols]
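If there are many value columns, cols doesn't have to be typed out by hand; one option (assuming every non-grouping column should be accumulated) is:
cols <- setdiff(names(Data), c('Product', 'Product_Cat'))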
Data:
structure(list(Product = structure(c(1L, 1L, 1L, 2L, 2L, 2L,
3L, 3L, 3L), .Label = c("A", "B", "C"), class = "factor"), Product_Cat = structure(c(5L,
5L, 6L, 1L, 1L, 1L, 2L, 3L, 4L), .Label = c("C1", "D1", "E1",
"F1", "S1", "S2"), class = "factor"), Sale = c(10L, 15L, 5L,
20L, 15L, 10L, 5L, 5L, 5L), Profit = c(2L, 4L, 2L, 6L, 8L, 2L,
4L, 6L, 8L), Sale_Cum = c(10, 25, 5, 20, 35, 45, 5, 5, 5), Profit_Cum = c(2,
6, 2, 6, 14, 16, 4, 6, 8)), .Names = c("Product", "Product_Cat",
"Sale", "Profit", "Sale_Cum", "Profit_Cum"), row.names = c(NA,
-9L), class = "data.frame")
We can iteratively slice the data frame (called x here) by Product and Product_Cat and, for each slice, assign the output of cumsum() to Sale_Cum and Profit_Cum:
cols <- c('Sale', 'Profit')
for (column in cols) {
  x[, paste0(column, '_Cum')] <- 0
  for (p in unique(x$Product)) {
    for (pc in unique(x$Product_Cat)) {
      x[x$Product == p & x$Product_Cat == pc, paste0(column, '_Cum')] <-
        cumsum(x[x$Product == p & x$Product_Cat == pc, column])
    }
  }
}
print(x)
# Product Product_Cat Sale Profit Sale_Cum Profit_Cum
# 1 A S1 10 2 10 2
# 2 A S1 15 4 25 6
# 3 A S2 5 2 5 2
# 4 B C1 20 6 20 6
# 5 B C1 15 8 35 14
# 6 B C1 10 2 45 16
# 7 C D1 5 4 5 4
# 8 C E1 5 6 5 6
# 9 C F1 5 8 5 8
Here is some pretty poor code that does everything step by step
library(data.table)
# sample data
d <- sample(1:10)
f <- sample(1:10)
p <- c("f", "f", "f", "f", "q", "q", "q", "w", "w", "w")
pc <- c("c", "c", "d", "d", "d", "v", "v", "v", "b", "b")
cc <- data.table(p, pc, d, f)
# store the original values before the cumsum step overwrites them
three <- cc[, 3]
four <- cc[, 4]
# apply your function
dt <- setDT(cc)[, lapply(.SD, cumsum), by = .(p, pc)]
# bind the stored originals back on and rename everything consistently
x <- cbind(dt, three, four)
colnames(x)[3] <- "CumSale"
colnames(x)[4] <- "CumProfit"
colnames(x)[5] <- "sale"
colnames(x)[6] <- "profit"
# reorder the columns
xx <- x[, c("p", "pc", "sale", "profit", "CumSale", "CumProfit")]
xx

Subsetting a data frame according to recursive rows and creating a column for ordering

Consider the sample data
df <-
structure(
list(
id = c(1L, 1L, 1L, 1L, 2L, 2L, 3L),
A = c(20L, 12L, 13L, 8L, 11L, 21L, 17L),
B = c(1L, 1L, 0L, 0L, 1L, 0L, 0L)
),
.Names = c("id", "A", "B"),
class = "data.frame",
row.names = c(NA,-7L)
)
Each id (stored in column 1) has a varying number of entries for columns A and B. In the example data, there are four observations with id = 1. I am looking for a way to subset this data in R so that there are at most 3 entries for each id, and then to create another column (labelled C) giving the order of the observations within each id. The expected output would look like:
df <-
structure(
list(
id = c(1L, 1L, 1L, 2L, 2L, 3L),
A = c(20L, 12L, 13L, 11L, 21L, 17L),
B = c(1L, 1L, 0L, 1L, 0L, 0L),
C = c(1L, 2L, 3L, 1L, 2L, 1L)
),
.Names = c("id", "A", "B","C"),
class = "data.frame",
row.names = c(NA,-6L)
)
Your help is much appreciated.
Like this?
library(data.table)
dt <- as.data.table(df)
dt[, C := seq(.N), by = id]
dt <- dt[C <= 3,]
dt
# id A B C
# 1: 1 20 1 1
# 2: 1 12 1 2
# 3: 1 13 0 3
# 4: 2 11 1 1
# 5: 2 21 0 2
# 6: 3 17 0 1
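The counter and the filter can also be chained into one expression; this is just a stylistic variant of the same approach:
dt <- as.data.table(df)[, C := seq_len(.N), by = id][C <= 3]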
Here is one option with dplyr, taking the top 3 values based on A (following the comments of @Ronak Shah).
library(dplyr)
df %>%
  group_by(id) %>%
  top_n(n = 3, wt = A) %>% # top 3 values based on A
  mutate(C = rank(id, ties.method = "first")) # C consists of the order of each id
# A tibble: 6 x 4
# Groups: id [3]
id A B C
<int> <int> <int> <int>
1 1 20 1 1
2 1 12 1 2
3 1 13 0 3
4 2 11 1 1
5 2 21 0 2
6 3 17 0 1

Conditional data manipulation using data.table in R

I have 2 dataframes, testx and testy
testx
testx <- structure(list(group = 1:2), .Names = "group", class = "data.frame", row.names = c(NA,
-2L))
testy
testy <- structure(list(group = c(1L, 1L, 1L, 2L, 2L, 2L, 3L, 3L, 3L),
time = c(1L, 3L, 4L, 1L, 4L, 5L, 1L, 5L, 7L), value = c(50L,
52L, 10L, 4L, 84L, 2L, 25L, 67L, 37L)), .Names = c("group",
"time", "value"), class = "data.frame", row.names = c(NA, -9L
))
Based on this topic, I add missing time values using the following code, which works perfectly.
data <- setDT(testy, key='time')[, .SD[J(min(time):max(time))], by = group]
Now I would like to only add these missing time values IF the value for group appears in testx. In this example, I thus only want to add missing time values for groups matching the values for group in the file testx.
data <- setDT(testy, key='time')[,if(testy[group %in% testx[, group]]) .SD[J(min(time):max(time))], by = group]
The error I get is "undefined columns selected". I looked here, here and here, but I don't see why my code isn't working. I am doing this on large datasets, which is why I prefer using data.table.
You don't need to refer to testy when you are already inside testy[] with by = group; using group directly as a variable gives the correct result. You need an extra else branch that returns .SD unchanged for groups not in testx if you want to keep all records in testy:
testy[, {if(group %in% testx$group) .SD[J(min(time):max(time))] else .SD}, by = group]
# group time value
# 1: 1 1 50
# 2: 1 2 NA
# 3: 1 3 52
# 4: 1 4 10
# 5: 2 1 4
# 6: 2 2 NA
# 7: 2 3 NA
# 8: 2 4 84
# 9: 2 5 2
# 10: 3 1 25
# 11: 3 5 67
# 12: 3 7 37

Sum the values of groups of 4 contiguous columns in R

Starting from a table of 372 columns and 12,000 rows in R, I need to create a new table whose columns contain, for each row, the sum of columns 1:4, then 5:8, then 9:12, and so on up to column 372 of the original table. Here is a short example:
Input:
m = structure(c(3L, 1L, 2L, 6L, 3L, 1L, 1L, 8L, 1L, 5L, 2L, 1L, 3L, 7L,
1L, 1L), .Dim = c(2L, 8L), .Dimnames = list(c("r1", "r2"), c("a", "b",
"c", "d", "e", "f", "g", "h")))
Which looks like this:
a b c d e f g h
r1 3 2 3 1 1 2 3 1
r2 1 6 1 8 5 1 7 1
Expected output:
A B
r1 9 7
r2 16 14
So, A = a+b+c+d, and B = e+f+g+h. Easy to do with a small table in Excel. Columns a-d correspond to one group, e-h to another, if that helps.
The question is currently underspecified, but supposing you have a matrix...
m = structure(c(3L, 1L, 2L, 6L, 3L, 1L, 1L, 8L, 1L, 5L, 2L, 1L, 3L,
7L, 1L, 1L), .Dim = c(2L, 8L), .Dimnames = list(c("r1", "r2"),
c("a", "b", "c", "d", "e", "f", "g", "h")))
Make your column mapping:
map = data.frame(old = colnames(m), new = rep(LETTERS, each=4, length.out=ncol(m)))
old new
1 a A
2 b A
3 c A
4 d A
5 e B
6 f B
7 g B
8 h B
And then rowsum by it:
res = rowsum(t(m), map$new)
r1 r2
A 9 16
B 7 14
We have to transpose the data with t here because R has rowsum but no colsum. You can transpose it back afterwards, like t(res).
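So, transposing back should line up with the expected output (the grouped column names come from map$new):
t(res)
#     A  B
# r1  9  7
# r2 16 14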
A base R solution, suppose df is your data frame:
ncols <- ncol(df)  # 8 in this example
do.call(cbind, lapply(seq(1, ncols, 4), function(i) rowSums(df[i:(i+3)])))
# [,1] [,2]
# r1 9 7
# r2 16 14
Another way:
df <- data.frame(t(matrix(colSums(matrix(t(df), nrow=4)),nrow=nrow(df))))
## X1 X2
##1 9 7
##2 16 14
First, transpose the data to a 4 x (ncol(df)/4 * nrow(df)) matrix, where each column now holds one group of four values from a single row of the original data frame.
Then sum each column using colSums.
Finally, transpose the data back to a data frame with the original number of rows.
You can do this in a vectorised way if you transform your original data to a matrix with 4 columns, then use rowSums on that, and then transform it back to match the rows of the original data frame. Here it is in one long command
df <- read.table(header = TRUE, text = "a b c d e f g h
3 2 3 1 1 2 3 1
1 6 1 8 5 1 7 1")
matrix(rowSums(matrix(as.vector(t(as.matrix(df))),
ncol = 4, byrow = TRUE)), ncol = ncol(df) / 4, byrow = TRUE)
# [,1] [,2]
#[1,] 9 7
#[2,] 16 14
Edit: To preserve the row names, if e.g. rownames(df) <- c("r1", "r2"), just apply them to the resulting matrix (the row order is preserved), ie run rownames(result) <- rownames(df).

R- How to merge multiple dataframes of different lengths?

I have been stuck with this issue for a while now and need some help.
I am reading the following files (there can be more than 3 files) into data frames.
My input files look like the following:
file1:
someName someMOD someID
A T754(P),M691(O),S692(P),S694(P),S739(P),S740(P),S759(P),S762(P) 1
B S495(P) 2
C S162(P),Q159(D) 3
D S45(P),C47(C),S48(P),S26(P) 4
E S18(P) 5
file2:
someName someMOD someID
C S162(P),Q159(D) 3
D S45(P),C47(C),S48(P),S26(P) 4
F S182(P) 6
E S18(P) 5
Z Q100(P) 9
A T754(P),M691(O),S694(P),S739(P),S740(P) 1
file3:
someName someMOD someID
A T754(P),M691(O),S692(P),S694(P),S739(P),S740(P),S759(P) 1
B S495(P) 2
D S45(P),C47(C),S48(P),S26(P) 4
E S18(P) 5
F S182(P) 6
L Z182(P) 8
C S162(P),Q159(D) 3
My Code:
fileList <- dir(pattern = "*.xls")
i <- 1
j <- 1
a <- list()
mybigtable <- data.frame()
for (f in 1:length(fileList)) {
  fileName <- fileList[f]
  X <- read.xls(fileName)
  if (regexpr("Drug_Rep", fileName)[1] > 0) {
    a[[i]] <- X
    i <- i + 1
  } else {
    # Don't do anything
  }
}
# Now I want to merge my data frames
mymerge <- function(x, y)
  merge(x, y, by = c("someName", "someID"), all = TRUE)
Reduce(mymerge, a)  # passing my list of data frames 'a'
I did dput() on my 'a' list:
list(structure(list(someName = structure(c(1L, 2L, 4L, 5L, 6L,
7L, 3L), .Label = c("A", "B", "C", "D", "E", "F", "L"), class = "factor"),
someMOD = structure(c(6L, 5L, 4L, 2L, 3L, 7L, 1L), .Label = c("S162(P),Q159(D)",
"S18(P)", "S182(P)", "S45(P),C47(C),S48(P),S26(P)", "S495(P)",
"T754(P),M691(O),S692(P),S694(P),S739(P),S740(P),S759(P)",
"Z182(P)"), class = "factor"), someID = c(1L, 2L, 4L, 5L,
6L, 8L, 3L)), .Names = c("someName", "someMOD", "someID"), class = "data.frame", row.names = c(NA,
-7L)), structure(list(someName = structure(1:5, .Label = c("A",
"B", "C", "D", "E"), class = "factor"), someMOD = structure(c(5L,
4L, 1L, 3L, 2L), .Label = c("S162(P),Q159(D)", "S18(P)", "S45(P),C47(C),S48(P),S26(P)",
"S495(P)", "T754(P),M691(O),S692(P),S694(P),S739(P),S740(P),S759(P),S762(P)"
), class = "factor"), someID = 1:5), .Names = c("someName", "someMOD",
"someID"), class = "data.frame", row.names = c(NA, -5L)), structure(list(
someName = structure(c(2L, 3L, 5L, 4L, 6L, 1L), .Label = c("A",
"C", "D", "E", "F", "Z"), class = "factor"), someMOD = structure(c(2L,
5L, 4L, 3L, 1L, 6L), .Label = c("Q100(P)", "S162(P),Q159(D)",
"S18(P)", "S182(P)", "S45(P),C47(C),S48(P),S26(P)", "T754(P),M691(O),S694(P),S739(P),S740(P)"
), class = "factor"), someID = c(3L, 4L, 6L, 5L, 9L, 1L)), .Names = c("someName",
"someMOD", "someID"), class = "data.frame", row.names = c(NA,
-6L)))
What is my mistake in populating a list? Any help is really appreciated.
I am just trying to get an output like the following:
The problem with the code I gave you before is that merge gets confused if there are any duplicate column names, and you're merging more than 3 datasets. You'll have to rename your someMOD columns so they don't clash. A for loop works as well as anything for this purpose.
dupvars <- which(!names(a[[1]]) %in% c("someName", "someID"))
for(i in seq_along(a))
names(a[[i]])[dupvars] <- paste0(names(a[[i]])[dupvars], i)
# and then merge
Reduce(mymerge, a)
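After the renaming, the merged result should have one row per someName/someID combination and one someMOD column per input file; a quick sanity check (merged is just an illustrative name for the Reduce result):
merged <- Reduce(mymerge, a)
names(merged)
# [1] "someName" "someID"   "someMOD1" "someMOD2" "someMOD3"
nrow(merged)
# [1] 8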
Perhaps the problem is that you're actually not trying to merge in the standard sense, but reshape. In this case, you can rbind all the data.frames together after adding a "time" variable, and use dcast from "reshape2" to get what you're after:
Add a "time" variable and rbind the data.frames together
temp <- do.call(rbind,
                lapply(seq_along(a),
                       function(x) data.frame(a[[x]], time = x)))
head(temp)
# someName someMOD someID time
# 1 A T754(P),M691(O),S692(P),S694(P),S739(P),S740(P),S759(P) 1 1
# 2 B S495(P) 2 1
# 3 D S45(P),C47(C),S48(P),S26(P) 4 1
# 4 E S18(P) 5 1
# 5 F S182(P) 6 1
# 6 L Z182(P) 8 1
Transform the data.frame from a "long" format to a "wide" format
library(reshape2)
dcast(temp, someName + someID ~ time, value.var="someMOD")
# someName someID 1
# 1 A 1 T754(P),M691(O),S692(P),S694(P),S739(P),S740(P),S759(P)
# 2 B 2 S495(P)
# 3 C 3 S162(P),Q159(D)
# 4 D 4 S45(P),C47(C),S48(P),S26(P)
# 5 E 5 S18(P)
# 6 F 6 S182(P)
# 7 L 8 Z182(P)
# 8 Z 9 <NA>
# 2
# 1 T754(P),M691(O),S692(P),S694(P),S739(P),S740(P),S759(P),S762(P)
# 2 S495(P)
# 3 S162(P),Q159(D)
# 4 S45(P),C47(C),S48(P),S26(P)
# 5 S18(P)
# 6 <NA>
# 7 <NA>
# 8 <NA>
# 3
# 1 T754(P),M691(O),S694(P),S739(P),S740(P)
# 2 <NA>
# 3 S162(P),Q159(D)
# 4 S45(P),C47(C),S48(P),S26(P)
# 5 S18(P)
# 6 S182(P)
# 7 <NA>
# 8 Q100(P)
