Is there an elegant/fast R way to combine all pairs of columns in a data.frame?
For example, using mapply() and paste() we can turn this data.frame:
mydf <- data.frame(a.1 = letters, a.2 = 26:1, b.1 = letters, b.2 = 1:26)
head(mydf)
a.1 a.2 b.1 b.2
1 a 26 a 1
2 b 25 b 2
3 c 24 c 3
4 d 23 d 4
5 e 22 e 5
6 f 21 f 6
into this data.frame:
mydf2 <- mapply(function(x, y) {
paste(x, y, sep = ".")},
mydf[ ,seq(1, ncol(mydf), by = 2)],
mydf[ ,seq(2, ncol(mydf), by = 2)])
head(mydf2)
a.1 b.1
[1,] "a.26" "a.1"
[2,] "b.25" "b.2"
[3,] "c.24" "c.3"
[4,] "d.23" "d.4"
[5,] "e.22" "e.5"
[6,] "f.21" "f.6"
However, this feels clumsy and is a bit slow when applied to big datasets. Any suggestions, perhaps using a Hadley package?
EDIT:
The ideal solution would easily scale to large numbers of columns, such that the names of the columns would not need to be included in the function call. Thanks!
It's amusing to note that the OP's solution appears to be the fastest one:
f1 <- function(mydf) {
mapply(function(x, y) {
paste(x, y, sep = ".")},
mydf[ ,seq(1, ncol(mydf), by = 2)],
mydf[ ,seq(2, ncol(mydf), by = 2)])
}
f.thelatemail <- function(mydf) {
mapply(paste,mydf[c(TRUE,FALSE)],mydf[c(FALSE,TRUE)],sep=".")
}
require(dplyr)
f.on_the_shores_of_linux_sea <- function(mydf) {
transmute(mydf,x1=paste0( a.1,'.', a.2),x2=paste0( b.1,'.', b.2))
}
f.jazurro <- function(mydf) {
odd <- seq(1, ncol(mydf), 2);
lapply(odd, function(x) paste(mydf[,x], mydf[,x+1], sep = ".")) %>%
do.call(cbind,.)
}
library(data.table)
f.akrun <- function(mydf) {
res <- as.data.table(matrix(, ncol=ncol(mydf)/2, nrow=nrow(mydf)))
indx <- seq(1, ncol(mydf), 2)
setDT(mydf)
for(j in seq_along(indx)){
set(res, i=NULL, j=j, value= paste(mydf[[indx[j]]],
mydf[[indx[j]+1]], sep='.'))
}
res
}
mydf <- data.frame(a.1 = letters, a.2 = 26:1, b.1 = letters, b.2 = 1:26)
mydf <- mydf[rep(1:nrow(mydf),5000),]
library(rbenchmark)
benchmark(f1(mydf),f.thelatemail(mydf),f.on_the_shores_of_linux_sea(mydf),f.jazurro(mydf),f.akrun(mydf))
Results:
# test replications elapsed relative user.self sys.self user.child sys.child
# 5 f.akrun(mydf) 100 14.000 75.269 13.673 0.296 0 0
# 4 f.jazurro(mydf) 100 0.388 2.086 0.314 0.071 0 0
# 3 f.on_the_shores_of_linux_sea(mydf) 100 15.585 83.790 15.293 0.280 0 0
# 2 f.thelatemail(mydf) 100 26.416 142.022 25.736 0.639 0 0
# 1 f1(mydf) 100 0.186 1.000 0.169 0.017 0 0
[Updated Benchmark]
I've added one solution from #thelatemail, which I missed in the original answer, and one solution from #akrun:
f.thelatemail2 <- function(mydf) {
data.frame(Map(paste,mydf[c(TRUE,FALSE)],mydf[c(FALSE,TRUE)],sep="."))
}
f.akrun2 <- function(mydf) {
setDT(mydf)
indx <- as.integer(seq(1, ncol(mydf), 2))
mydf2 <- copy(mydf)
for(j in indx){
set(mydf2, i=NULL, j=j, value= paste(mydf2[[j]],
mydf2[[j+1]], sep="."))
}
mydf2[,indx, with=FALSE]
}
Benchmark:
library(rbenchmark)
benchmark(f1(mydf),f.thelatemail(mydf), f.thelatemail2(mydf), f.on_the_shores_of_linux_sea(mydf),f.jazurro(mydf),f.akrun(mydf),f.akrun2(mydf))
# test replications elapsed relative user.self sys.self user.child sys.child
# 6 f.akrun(mydf) 100 13.247 69.356 12.897 0.340 0 0
# 7 f.akrun2(mydf) 100 12.746 66.733 12.405 0.339 0 0
# 5 f.jazurro(mydf) 100 0.327 1.712 0.254 0.073 0 0
# 4 f.on_the_shores_of_linux_sea(mydf) 100 16.347 85.586 15.838 0.445 0 0
# 2 f.thelatemail(mydf) 100 26.307 137.733 25.536 0.708 0 0
# 3 f.thelatemail2(mydf) 100 15.938 83.445 15.136 0.750 0 0
# 1 f1(mydf) 100 0.191 1.000 0.156 0.036 0 0
I'm not sure this is the best approach; see if the code below gives any speed improvement.
require(dplyr)
transmute(mydf,x1=paste0( a.1,'.', a.2),x2=paste0( b.1,'.', b.2))
Answer updated based on comment :-)
An option using set from data.table. It should be fast for large datasets, as it modifies by reference and avoids the overhead of [.data.table. This assumes the columns are ordered so that each pair sits next to each other.
library(data.table)
res <- as.data.table(matrix(, ncol=ncol(mydf)/2, nrow=nrow(mydf)))
indx <- seq(1, ncol(mydf), 2)
setDT(mydf)
for(j in seq_along(indx)){
set(res, i=NULL, j=j, value= paste(mydf[[indx[j]]],
mydf[[indx[j]+1]], sep='.'))
}
head(res)
# V1 V2
#1: a.26 a.1
#2: b.25 b.2
#3: c.24 c.3
#4: d.23 d.4
#5: e.22 e.5
#6: f.21 f.6
Instead of creating a new result dataset, we can also update the same or a copy of the original dataset. There will be some warnings about type conversion, but I guess this would be a bit faster (not benchmarked)
setDT(mydf)
mydf2 <- copy(mydf)
for(j in indx){
set(mydf2, i=NULL, j=j, value= paste(mydf2[[j]],
mydf2[[j+1]], sep="."))
}
mydf2[,indx, with=FALSE]
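A small hedged follow-up, in case you want cleaner names on that result: the selected columns keep their original a.1/b.1 names, so you could rename them afterwards, e.g.
res2 <- mydf2[, indx, with = FALSE]
setnames(res2, sub("\\.1$", "", names(res2)))  # e.g. a.1 -> a, b.1 -> b
res2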
Benchmarks
I tried the benchmarks on slightly bigger data with many columns.
data
set.seed(24)
d1 <- as.data.frame(matrix(sample(letters,500*10000, replace=TRUE),
ncol=500), stringsAsFactors=FALSE)
set.seed(4242)
d2 <- as.data.frame(matrix(sample(1:200,500*10000,
replace=TRUE), ncol=500))
d3 <- cbind(d1,d2)
mydf <- d3[,order(c(1:ncol(d1), 1:ncol(d2)))]
mydf1 <- copy(mydf)
Comparing f1 and f.jazurro (the fastest, from #Marat Talipov's post) with f.akrun2:
library(microbenchmark)
microbenchmark(f1(mydf), f.jazurro(mydf), f.akrun2(mydf1),
unit='relative', times=20L)
#Unit: relative
#            expr      min        lq     mean   median       uq      max neval cld
#        f1(mydf) 3.420448 2.3217708 2.714495 2.653178 2.819952 2.736376    20   b
# f.jazurro(mydf) 1.000000 1.0000000 1.000000 1.000000 1.000000 1.000000    20  a
# f.akrun2(mydf1) 1.204488 0.8015648 1.031248 1.042262 1.097136 1.066671    20  a
Here, f.jazurro is slightly better than f.akrun2. I think it would be an interesting comparison to increase the group size, number of rows, etc.
For what it's worth seven years later, here is a trick using the glue package and eval() + parse(). I don't know how it compares to the other answers, but it works pretty well for me.
mydf <- data.frame(a.1 = letters, a.2 = 26:1, b.1 = letters, b.2 = 1:26)
library(tidyr)  # unite() below comes from tidyr
mydf2 <- mydf
vars <- c('a', 'b')
eval(parse(text = glue::glue('mydf2 <- mydf2 |> unite({vars}, c(`{vars}.1`, `{vars}.2`), na.rm = T, sep = ".")')))
mydf2
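For clarity, a sketch of what the glue()/parse() call above expands to (two generated statements, which eval() then runs in sequence; unite() comes from tidyr, loaded above):
# roughly equivalent hand-written version of the generated code
mydf2 <- mydf2 |> unite(a, c(`a.1`, `a.2`), na.rm = TRUE, sep = ".")
mydf2 <- mydf2 |> unite(b, c(`b.1`, `b.2`), na.rm = TRUE, sep = ".")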
Say I have a data.frame like this:
X1 X2 X3
1 A B A
2 A C B
3 B A B
4 A A C
I would like to count the occurrences of A, B, C, etc. in each column, and return the result as
A_count B_count C_count
X1 3 1 0
X2 2 1 1
X3 1 2 1
I'm sure this question has a thousand duplicates, but I can't seem to find an answer that works for me :(
By running
apply(mydata, 2, table)
I get something like
$X1
B A
1 3
$X2
A C B
2 1 1
But it's not exactly what I want and if I try to build it back into a data frame, it doesn't work because I don't get the same number of columns for every row (like $X1 above where there are no C's).
What am I missing?
Many thanks!
You can convert each column to a factor with the levels common to all columns, then tabulate. I would also recommend using lapply() instead of apply(), as apply() is intended for matrices.
df <- read.table(text = "X1 X2 X3
1 A B A
2 A C B
3 B A B
4 A A C", h=T)
do.call(
rbind,
lapply(df, function(x) table(factor(x, levels=levels(unlist(df)))))
)
# A B C
# X1 3 1 0
# X2 2 1 1
# X3 1 2 1
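If you want the exact column names from the question (A_count, B_count, C_count), a small hedged follow-up on the result above:
res <- do.call(
  rbind,
  lapply(df, function(x) table(factor(x, levels=levels(unlist(df)))))
)
out <- as.data.frame.matrix(res)            # keep the wide count matrix as a data frame
names(out) <- paste0(names(out), "_count")  # A -> A_count, etc.
out
#    A_count B_count C_count
# X1       3       1       0
# X2       2       1       1
# X3       1       2       1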
Suppose your data frame is x; I would simply do:
do.call(rbind, tapply(unlist(x, use.names = FALSE),
rep(1:ncol(x), each = nrow(x)),
table))
# A B C
#1 3 1 0
#2 2 1 1
#3 1 2 1
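If you prefer the original column names as row names rather than 1/2/3, a small hedged follow-up (the groups come out in column order, so the names line up):
res <- do.call(rbind, tapply(unlist(x, use.names = FALSE),
                             rep(1:ncol(x), each = nrow(x)),
                             table))
rownames(res) <- names(x)
res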
Benchmarking
# a function to generate toy data
# `k` factor levels
# `n` row
# `p` columns
datsim <- function(n, p, k) {
as.data.frame(replicate(p, sample(LETTERS[1:k], n, TRUE), simplify = FALSE),
col.names = paste0("X",1:p), stringsAsFactors = TRUE)
}
# try `n = 100`, `p = 500` and `k = 3`
x <- datsim(100, 500, 3)
## DirtySockSniffer's answer
system.time(do.call(rbind, lapply(x, function(u) table(factor(u, levels=levels(unlist(x)))))))
# user system elapsed
# 21.240 0.068 21.365
## my answer
system.time(do.call(rbind, tapply(unlist(x, use.names = FALSE), rep(1:ncol(x), each = nrow(x)), table)))
# user system elapsed
# 0.108 0.000 0.111
Dirty's answer can be improved by computing the common levels only once:
## improved DirtySockSniffer's answer
system.time({clevels <- levels(unlist(x, use.names = FALSE));
do.call(rbind, lapply(x, function(u) table(factor(u, levels=clevels))))})
# user system elapsed
# 0.108 0.000 0.108
Also consider user20650's answer:
## Let's try a large `n`, `p`, `k`
x <- datsim(200, 5000, 5)
system.time(t(table(stack(lapply(x, as.character)))))
# user system elapsed
# 0.592 0.052 0.646
While my answer does:
system.time(do.call(rbind, tapply(unlist(x, use.names = FALSE), rep(1:ncol(x), each = nrow(x)), table)))
# user system elapsed
# 1.844 0.056 1.904
Improved Dirty's answer does:
system.time({clevels <- levels(unlist(x, use.names = FALSE));
do.call(rbind, lapply(x, function(u) table(factor(u, levels=clevels))))})
# user system elapsed
# 1.240 0.012 1.263
I have a variable length list which looks like this
chr [1:249] "1" "29.12" "2" "20.78" "3" "12.09" ...
chr [1:200] "1" "20.45" "3" "10.56" "4" "12.34" ...
chr [1:213] "2" "12.20" "3" "19.93" "5" "23.05" ...
The values in odd positions ("1", "3", "4", etc.) identify variables with a specific meaning, while the values in even positions are the values of the variable named immediately before them. E.g. in the second element of the list, the variable "3" has the value "10.56".
I'm trying to convert this into a data frame with values like "10.56" going into the correct column of the data frame i.e. column "3". This is the code I am using
e <- unlist(d[[k]]) ## d is my list. k is the index for a for loop
pos_index <- seq(1, length(e), 2) ## gives positions for the variables
val_index <- seq(2, length(e), 2) ## gives positions for corresponding values
df_index <- as.numeric(e[pos_index])
## Populate a pre-defined data frame at calculated positions
CNNIBN_DF[k, df_index] <- as.numeric(e[val_index])
The data frame should look something like this
X1 X2 X3 X4 X5
1 29.12 20.78 12.09 NA NA
2 20.45 NA 10.56 12.34 NA
3 NA 12.20 19.93 NA 23.05
This works but takes a long time; system.time for 1000 entities gives this:
user system elapsed
57.64 0.06 58.14
The list itself has 33k entities with each entity having 200+ elements. I have tried the same operation using just for loops but both tend to take about the same time.
Is there a faster way to do this? I'm using a win32 machine with 4 GB of RAM running an Intel Core i3 M350 CPU @ 2.27 GHz.
Thanks in advance!
Akrun already posted some of the many possible alternatives; I'll just add a more explicit approach that does as little work as possible (using akrun's "lst"):
ulst = unlist(lst)
cols = seq(1, length(ulst), 2)
inds = cbind(row = rep(seq_along(lst), lengths(lst) %/% 2),
col = as.integer(ulst[cols]))
vals = as.numeric(ulst[-cols])
ans = matrix(, max(inds[, "row"]), max(inds[, "col"]))
ans[inds] = vals
# [,1] [,2] [,3] [,4] [,5]
#[1,] 29.12 20.78 12.09 NA NA
#[2,] 20.45 NA 10.56 12.34 NA
#[3,] NA 12.20 19.93 NA 23.05
Given your goal, you don't necessarily need a "data.frame", but the "matrix" is easily converted to one. Also, it might be worth looking into whether you could change how your data is built/fetched so as to avoid this awkward format.
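For instance, a minimal sketch of that conversion (the X1..X5 names are just to mirror the data frame shown in the question):
df_out <- as.data.frame(ans)
names(df_out) <- paste0("X", seq_len(ncol(df_out)))
df_out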
Try
lst1 <- lapply(lst, function(x) { x<- as.numeric(x)
indx <- c(TRUE, FALSE)
v1 <- tabulate(x[indx])
is.na(v1) <- v1==0
v1[!is.na(v1)] <- x[!indx]
v1 })
setNames(do.call(rbind.data.frame,lapply(lst1, `length<-`,
max(lengths(lst1)))), paste0('X', 1:5))
# X1 X2 X3 X4 X5
#1 29.12 20.78 12.09 NA NA
#2 20.45 NA 10.56 12.34 NA
#3 NA 12.20 19.93 NA 23.05
Or
m1 <- do.call(rbind,Map(function(x,y) cbind(x,matrix(as.numeric(y),
nrow=length(y)/2, byrow=TRUE)), seq_along(lst), lst))
m2 <- matrix(NA, ncol=max(m1[,2]), nrow=length(lst))
m2[m1[,-3]] <- m1[,3]
We can use sparseMatrix from Matrix
library(Matrix)
d1 <- setNames(as.data.frame(m1), c('Row', 'Col', 'Value'))
with(d1, sparseMatrix(Row, Col, x=Value))
#3 x 5 sparse Matrix of class "dgCMatrix"
#[1,] 29.12 20.78 12.09 . .
#[2,] 20.45 . 10.56 12.34 .
#[3,] . 12.20 19.93 . 23.05
which can be converted to matrix by as.matrix.
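A small hedged caveat on that conversion: the unstored cells come back as 0 rather than NA, so if none of your real values are exactly zero you may want to convert them afterwards, e.g.:
m3 <- as.matrix(with(d1, sparseMatrix(Row, Col, x=Value)))
m3[m3 == 0] <- NA   # structural zeros back to NA (only safe if 0 is never a real value)
m3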
Or
library(tidyr)
library(dplyr)
d1 <- unnest(lst, group)
d2 <- bind_cols(slice(d1, seq(1, n(), by=2)), slice(d1, seq(2, n(), by=2))[2])
colnames(d2)[3] <- 'val'
spread(d2, x, val) %>%
select(-group)
# 1 2 3 4 5
#1 29.12 20.78 12.09 <NA> <NA>
#2 20.45 <NA> 10.56 12.34 <NA>
#3 <NA> 12.20 19.93 <NA> 23.05
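Note that this tidyr/dplyr result keeps the values as character (the list elements were character strings); a small hedged follow-up to convert them to numeric:
res <- spread(d2, x, val) %>%
  select(-group)
res[] <- lapply(res, as.numeric)   # character values become numeric; NAs stay NA
res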
Or
library(data.table)#v1.9.5+
library(reshape2)
dcast(setDT(melt(lst))[, list(indx= value[c(TRUE, FALSE)],
value=value[c(FALSE, TRUE)]) ,L1], L1~paste0('X', indx), value.var='value')
# L1 X1 X2 X3 X4 X5
#1: 1 29.12 20.78 12.09 NA NA
#2: 2 20.45 NA 10.56 12.34 NA
#3: 3 NA 12.20 19.93 NA 23.05
Benchmarks
For a 1000 entities list,
set.seed(42)
lst <- lapply(1:1000, function(i) {v1 <- sample(50:200)[1L]
v2 <- sample(1:200, v1, replace=FALSE)
as.character(c(rbind(v2, rnorm(v1))))})
system.time({
m1 <- do.call(rbind,Map(function(x,y) cbind(x,matrix(as.numeric(y),
nrow=length(y)/2, byrow=TRUE)), seq_along(lst), lst))
m2 <- matrix(NA, ncol=max(m1[,2]), nrow=length(lst))
m2[m1[,-3]] <- m1[,3]
})
# user system elapsed
# 0.064 0.004 0.067
system.time({
m1 <- do.call(rbind,Map(function(x,y) cbind(x,matrix(as.numeric(y),
nrow=length(y)/2, byrow=TRUE)), seq_along(lst), lst))
d1 <- setNames(as.data.frame(m1), c('Row', 'Col', 'Value'))
with(d1, sparseMatrix(Row, Col, x=Value))
})
# user system elapsed
# 0.068 0.003 0.070
system.time({d1 <- unnest(lst, group)
d2 <- bind_cols(slice(d1, seq(1, n(), by=2)),
slice(d1, seq(2, n(), by=2))[2])
colnames(d2)[3] <- 'val'
res <- spread(d2, x, val) %>%
select(-group)})
# user system elapsed
# 0.259 0.002 0.261
The first method is slightly slower:
system.time({
lst1 <- lapply(lst, function(x) { x<- as.numeric(x)
indx <- c(TRUE, FALSE)
v1 <- tabulate(x[indx])
is.na(v1) <- v1==0
v1[!is.na(v1)] <- x[!indx]
v1 })
setNames(do.call(rbind.data.frame,lapply(lst1, `length<-`,
max(lengths(lst1)))), paste0('X', 1:5))
})
# user system elapsed
#1.459 0.004 1.463
On a 33000 list
set.seed(42)
lst <- lapply(1:33000, function(i) {v1 <- sample(50:200)[1L]
v2 <- sample(1:200, v1, replace=FALSE)
as.character(c(rbind(v2, rnorm(v1))))})
system.time({
m1 <- do.call(rbind,Map(function(x,y) cbind(x,matrix(as.numeric(y),
nrow=length(y)/2, byrow=TRUE)), seq_along(lst), lst))
m2 <- matrix(NA, ncol=max(m1[,2]), nrow=length(lst))
m2[m1[,-3]] <- m1[,3]
})
# user system elapsed
# 6.160 0.102 6.260
#alexis_laz method is faster
system.time({
ulst = unlist(lst)
cols = seq(1, length(ulst), 2)
inds = cbind(row = rep(seq_along(lst), lengths(lst) %/% 2),
col = as.integer(ulst[cols]))
vals = as.numeric(ulst[-cols])
ans = matrix(, max(inds[, "row"]), max(inds[, "col"]))
ans[inds] = vals
})
# user system elapsed
# 2.421 0.041 2.460
data
lst <- list(c('1', '29.12', '2', '20.78', '3', '12.09'), c('1', '20.45',
'3', '10.56', '4', '12.34'), c('2', '12.20', '3', '19.93', '5', '23.05'))
I want to reshape my data frame from long to wide format, and I lose some data that I'd like to keep.
For the following example:
df <- data.frame(Par1 = unlist(strsplit("AABBCCC","")),
Par2 = unlist(strsplit("DDEEFFF","")),
ParD = unlist(strsplit("foo,bar,baz,qux,bla,xyz,meh",",")),
Type = unlist(strsplit("pre,post,pre,post,pre,post,post",",")),
Val = c(10,20,30,40,50,60,70))
# Par1 Par2 ParD Type Val
# 1 A D foo pre 10
# 2 A D bar post 20
# 3 B E baz pre 30
# 4 B E qux post 40
# 5 C F bla pre 50
# 6 C F xyz post 60
# 7 C F meh post 70
library(reshape2)
dfw <- dcast(df,
formula = Par1 + Par2 ~ Type,
value.var = "Val",
fun.aggregate = mean)
# Par1 Par2 post pre
# 1 A D 20 10
# 2 B E 40 30
# 3 C F 65 50
This is almost what I need, but I would additionally like to have:
some field keeping the data from the ParD field (for example, as a single merged string),
the number of observations used for aggregation.
i.e. I would like the resulting data.frame to be as follows:
# Par1 Par2 post pre Num.pre Num.post ParD
# 1 A D 20 10 1 1 foo_bar
# 2 B E 40 30 1 1 baz_qux
# 3 C F 65 50 1 2 bla_xyz_meh
I would be grateful for any ideas. For example, I tried to solve the second task by writing in dcast: fun.aggregate=function(x) c(Val=mean(x),Num=length(x)) - but this causes an error.
Late to the party, but here's another alternative using data.table:
require(data.table)
dt <- data.table(df, key=c("Par1", "Par2"))
dt[, list(pre=mean(Val[Type == "pre"]),
post=mean(Val[Type == "post"]),
pre.num=length(Val[Type == "pre"]),
post.num=length(Val[Type == "post"]),
ParD = paste(ParD, collapse="_")),
by=list(Par1, Par2)]
# Par1 Par2 pre post pre.num post.num ParD
# 1: A D 10 20 1 1 foo_bar
# 2: B E 30 40 1 1 baz_qux
# 3: C F 50 65 1 2 bla_xyz_meh
[from Matthew] +1 Some minor improvements to save repeating the same ==, and to demonstrate local variables inside j.
dt[, list(pre=mean(Val[.pre <- Type=="pre"]), # save .pre
post=mean(Val[.post <- Type=="post"]), # save .post
pre.num=sum(.pre), # reuse .pre
post.num=sum(.post), # reuse .post
ParD = paste(ParD, collapse="_")),
by=list(Par1, Par2)]
# Par1 Par2 pre post pre.num post.num ParD
# 1: A D 10 20 1 1 foo_bar
# 2: B E 30 40 1 1 baz_qux
# 3: C F 50 65 1 2 bla_xyz_meh
dt[, { .pre <- Type=="pre" # or save .pre and .post up front
.post <- Type=="post"
list(pre=mean(Val[.pre]),
post=mean(Val[.post]),
pre.num=sum(.pre),
post.num=sum(.post),
ParD = paste(ParD, collapse="_")) }
, by=list(Par1, Par2)]
# Par1 Par2 pre post pre.num post.num ParD
# 1: A D 10 20 1 1 foo_bar
# 2: B E 30 40 1 1 baz_qux
# 3: C F 50 65 1 2 bla_xyz_meh
And if a list column is ok rather than a paste, then this should be faster :
dt[, { .pre <- Type=="pre"
.post <- Type=="post"
list(pre=mean(Val[.pre]),
post=mean(Val[.post]),
pre.num=sum(.pre),
post.num=sum(.post),
ParD = list(ParD)) } # list() faster than paste()
, by=list(Par1, Par2)]
# Par1 Par2 pre post pre.num post.num ParD
# 1: A D 10 20 1 1 foo,bar
# 2: B E 30 40 1 1 baz,qux
# 3: C F 50 65 1 2 bla,xyz,meh
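If you end up needing the pasted form anyway, the list column can be collapsed afterwards (a hedged sketch, assuming the result above was assigned to res), so you keep the faster list() aggregation and only paste once per group:
res[, ParD := sapply(ParD, paste, collapse = "_")]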
A solution in 2 steps using ddply (I am not happy with it, but I get the result):
library(plyr)
dat <- ddply(df,.(Par1,Par2),function(x){
data.frame(ParD=paste(paste(x$ParD),collapse='_'),
Num.pre =length(x$Type[x$Type =='pre']),
Num.post = length(x$Type[x$Type =='post']))
})
merge(dfw,dat)
Par1 Par2 post pre ParD Num.pre Num.post
1 A D 2.0 1 foo_bar 1 1
2 B E 4.0 3 baz_qux 1 1
3 C F 6.5 5 bla_xyz_meh 1 2
You could do a merge of two dcasts and an aggregate, here all wrapped into one large expression mostly to avoid having intermediate objects hanging around afterwards:
Reduce(merge, list(
dcast(df, formula = Par1+Par2~Type, value.var="Val",
fun.aggregate=mean),
setNames(dcast(df, formula = Par1+Par2~Type, value.var="Val",
fun.aggregate=length), c("Par1", "Par2", "Num.post",
"Num.pre")),
aggregate(df["ParD"], df[c("Par1", "Par2")], paste, collapse="_")
))
I'll post mine, but agstudy's puts it to shame:
step1 <- with(df, split(df, list(Par1, Par2)))
step2 <- step1[sapply(step1, nrow) > 0]
step3 <- lapply(step2, function(x) {
piece1 <- tapply(x$Val, x$Type, mean)
piece2 <- tapply(x$Type, x$Type, length)
names(piece2) <- paste0("Num.", names(piece2))
out <- x[1, 1:2]
out[, 3:6] <- c(piece1, piece2)
names(out)[3:6] <- names(c(piece1, piece2))
out$ParD <- paste(unique(x$ParD), collapse="_")
out
})
data.frame(do.call(rbind, step3), row.names=NULL)
Yielding:
Par1 Par2 post pre Num.post Num.pre ParD
1 A D 2.0 1 1 1 foo_bar
2 B E 4.0 3 1 1 baz_qux
3 C F 6.5 5 2 1 bla_xyz_meh
What a great opportunity to benchmark!
Below are some runs of the plyr method (as suggested by #agstudy) compared with the data.table method (as suggested by #Arun)
using different sample sizes (N = 900, 2700, 10800)
Summary:
The data.table method outperforms the plyr method by a factor of 7.5
#-------------------#
# M E T H O D S #
#-------------------#
# additional methods below, in the updates
# Method 1 -- suggested by #agstudy
plyrMethod <- quote({
dfw<-dcast(df,
formula = Par1+Par2~Type,
value.var="Val",
fun.aggregate=mean)
dat <- ddply(df,.(Par1,Par2),function(x){
data.frame(ParD=paste(paste(x$ParD),collapse='_'),
Num.pre =length(x$Type[x$Type =='pre']),
Num.post = length(x$Type[x$Type =='post']))
})
merge(dfw,dat)
})
# Method 2 -- suggested by #Arun
dtMethod <- quote(
dt[, list(pre=mean(Val[Type == "pre"]),
post=mean(Val[Type == "post"]),
Num.pre=length(Val[Type == "pre"]),
Num.post=length(Val[Type == "post"]),
ParD = paste(ParD, collapse="_")),
by=list(Par1, Par2)]
)
# Method 3 -- suggested by #regetz
reduceMethod <- quote(
Reduce(merge, list(
dcast(df, formula = Par1+Par2~Type, value.var="Val",
fun.aggregate=mean),
setNames(dcast(df, formula = Par1+Par2~Type, value.var="Val",
fun.aggregate=length), c("Par1", "Par2", "Num.post",
"Num.pre")),
aggregate(df["ParD"], df[c("Par1", "Par2")], paste, collapse="_")
))
)
# Method 4 -- suggested by #Ramnath
castddplyMethod <- quote(
reshape::cast(Par1 + Par2 + ParD ~ Type,
data = ddply(df, .(Par1, Par2), transform,
ParD = paste(ParD, collapse = "_")),
fun = c(mean, length)
)
)
# SAMPLE DATA #
#-------------#
library(data.table)
library(plyr)
library(reshape2)
library(rbenchmark)
# for Par1, ParD
LLL <- apply(expand.grid(LETTERS, LETTERS, LETTERS, stringsAsFactors=FALSE), 1, paste0, collapse="")
lll <- apply(expand.grid(letters, letters, letters, stringsAsFactors=FALSE), 1, paste0, collapse="")
# max size is 17568 with current sample data setup, ie: floor(length(LLL) / 18) * 18
size <- 17568
size <- 10800
size <- 900
set.seed(1)
df<-data.frame(Par1=rep(LLL[1:(size/2)], times=rep(c(2,2,3), size)[1:(size/2)])[1:(size)]
, Par2=rep(lll[1:(size/2)], times=rep(c(2,2,3), size)[1:(size/2)])[1:(size)]
, ParD=sample(unlist(lapply(c("f", "b"), paste0, lll)), size, FALSE)
, Type=rep(c("pre","post"), size/2)
, Val =sample(seq(10,100,10), size, TRUE)
)
dt <- data.table(df, key=c("Par1", "Par2"))
# Confirming Same Results #
#-------------------------#
# Evaluate
DF1 <- eval(plyrMethod)
DF2 <- eval(dtMethod)
# Convert to DF and sort columns and sort ParD levels, for use in identical
colOrder <- sort(names(DF1))
DF1 <- DF1[, colOrder]
DF2 <- as.data.frame(DF2)[, colOrder]
DF2$ParD <- factor(DF2$ParD, levels=levels(DF1$ParD))
identical((DF1), (DF2))
# [1] TRUE
#-------------------------#
RESULTS
#--------------------#
# BENCHMARK #
#--------------------#
benchmark(plyr=eval(plyrMethod), dt=eval(dtMethod), reduce=eval(reduceMethod), castddply=eval(castddplyMethod),
replications=5, columns=c("relative", "test", "elapsed", "user.self", "sys.self", "replications"),
order="relative")
# SAMPLE SIZE = 900
relative test elapsed user.self sys.self replications
1.000 reduce 0.392 0.375 0.018 5
1.003 dt 0.393 0.377 0.016 5
7.064 plyr 2.769 2.721 0.047 5
8.003 castddply 3.137 3.030 0.106 5
# SAMPLE SIZE = 2,700
relative test elapsed user.self sys.self replications
1.000 dt 1.371 1.327 0.090 5
2.205 reduce 3.023 2.927 0.102 5
7.291 plyr 9.996 9.644 0.377 5
# SAMPLE SIZE = 10,800
relative test elapsed user.self sys.self replications
1.000 dt 8.678 7.168 1.507 5
2.769 reduce 24.029 23.231 0.786 5
6.946 plyr 60.277 52.298 7.947 5
13.796 castddply 119.719 113.333 10.816 5
# SAMPLE SIZE = 17,568
relative test elapsed user.self sys.self replications
1.000 dt 27.421 13.042 14.470 5
4.030 reduce 110.498 75.853 34.922 5
5.414 plyr 148.452 105.776 43.156 5
Update : Added results for baseMethod1
# Used only sample size of 90, as it was taking long
relative test elapsed user.self sys.self replications
1.000 dt 0.044 0.043 0.001 5
7.773 plyr 0.342 0.339 0.003 5
65.614 base1 2.887 2.866 0.028 5
Where
baseMethod1 <- quote({
step1 <- with(df, split(df, list(Par1, Par2)))
step2 <- step1[sapply(step1, nrow) > 0]
step3 <- lapply(step2, function(x) {
piece1 <- tapply(x$Val, x$Type, mean)
piece2 <- tapply(x$Type, x$Type, length)
names(piece2) <- paste0("Num.", names(piece2))
out <- x[1, 1:2]
out[, 3:6] <- c(piece1, piece2)
names(out)[3:6] <- names(c(piece1, piece2))
out$ParD <- paste(unique(x$ParD), collapse="_")
out
})
data.frame(do.call(rbind, step3), row.names=NULL)
})
Update 2: Added keying the data.table as part of the timing
Adding the keying step to the benchmark for fairness, as per #MatthewDowle's comment.
However, presumably, if data.table is used it will replace the data.frame entirely, and hence the keying will occur once rather than once per call to this procedure.
dtMethod.withkey <- quote({
dt <- data.table(df, key=c("Par1", "Par2"))
dt[, list(pre=mean(Val[Type == "pre"]),
post=mean(Val[Type == "post"]),
Num.pre=length(Val[Type == "pre"]),
Num.post=length(Val[Type == "post"]),
ParD = paste(ParD, collapse="_")),
by=list(Par1, Par2)]
})
# SAMPLE SIZE = 10,800
relative test elapsed user.self sys.self replications
1.000 dt 9.155 7.055 2.137 5
1.043 dt.withkey 9.553 7.245 2.353 5
3.567 reduce 32.659 31.196 1.586 5
6.703 plyr 61.364 54.080 7.600 5
Update 3: Benchmarking #MD's edits to #Arun's original answer
dtMethod.MD1 <- quote(
dt[, list(pre=mean(Val[.pre <- Type=="pre"]), # save .pre
post=mean(Val[.post <- Type=="post"]), # save .post
pre.num=sum(.pre), # reuse .pre
post.num=sum(.post), # reuse .post
ParD = paste(ParD, collapse="_")),
by=list(Par1, Par2)]
)
dtMethod.MD2 <- quote(
dt[, { .pre <- Type=="pre" # or save .pre and .post up front
.post <- Type=="post"
list(pre=mean(Val[.pre]),
post=mean(Val[.post]),
pre.num=sum(.pre),
post.num=sum(.post),
ParD = paste(ParD, collapse="_")) }
, by=list(Par1, Par2)]
)
dtMethod.MD3 <- quote(
dt[, { .pre <- Type=="pre"
.post <- Type=="post"
list(pre=mean(Val[.pre]),
post=mean(Val[.post]),
pre.num=sum(.pre),
post.num=sum(.post),
ParD = list(ParD)) } # list() faster than paste()
, by=list(Par1, Par2)]
)
benchmark(dt.M1=eval(dtMethod.MD1), dt.M2=eval(dtMethod.MD2), dt.M3=eval(dtMethod.MD3), dt=eval(dtMethod),
replications=5, columns=c("relative", "test", "elapsed", "user.self", "sys.self", "replications"),
order="relative")
#--------------------#
Comparing the different data.table methods amongst themselves
# SAMPLE SIZE = 900
relative test elapsed user.self sys.self replications
1.000 dt.M3 0.198 0.197 0.001 5 <~~~ "list()" Method
1.242 dt.M1 0.246 0.243 0.004 5
1.253 dt.M2 0.248 0.242 0.007 5
1.884 dt 0.373 0.367 0.007 5
# SAMPLE SIZE = 17,568
relative test elapsed user.self sys.self replications
1.000 dt.M3 33.492 24.487 9.122 5 <~~~ "list()" Method
1.086 dt.M1 36.388 11.442 25.086 5
1.086 dt.M2 36.388 10.845 25.660 5
1.126 dt 37.701 13.256 24.535 5
Comparing MD3 ("list" method) with MD1 (best of DT non-list methods)
Using a clean session (i.e., removing the string cache).
_Note: I ran the following twice, in a fresh session each time, with practically identical results. I then re-ran it in the *same* session with reps=5; the results were very different._
benchmark(dt.M1=eval(dtMethod.MD1), dt.M3=eval(dtMethod.MD3), replications=1, columns=c("relative", "test", "elapsed", "user.self", "sys.self", "replications"), order="relative")
# SAMPLE SIZE=17,568; CLEAN SESSION
relative test elapsed user.self sys.self replications
1.000 dt.M1 8.885 4.260 4.617 1
1.633 dt.M3 14.506 12.821 1.677 1
# SAMPLE SIZE=17,568; *SAME* SESSION
relative test elapsed user.self sys.self replications
1.000 dt.M1 33.443 10.200 23.226 5
1.048 dt.M3 35.060 26.127 8.915 5
#--------------------#
New benchmarks against previous methods
_Note: Not using the "list" method, as its results are not the same as those of the other methods._
# SAMPLE SIZE = 900
relative test elapsed user.self sys.self replications
1.000 dt.M1 0.254 0.247 0.008 5
1.705 reduce 0.433 0.425 0.010 5
11.280 plyr 2.865 2.842 0.031 5
# SAMPLE SIZE = 17,568
relative test elapsed user.self sys.self replications
1.000 dt.M1 24.826 10.427 14.458 5
4.348 reduce 107.935 70.107 38.314 5
5.942 plyr 147.508 106.958 41.083 5
A one-step solution combining reshape::cast with plyr::ddply:
library(reshape)
library(plyr)
cast(Par1 + Par2 + ParD ~ Type, data = ddply(df, .(Par1, Par2), transform,
ParD = paste(ParD, collapse = "_")), fun = c(mean, length))
NOTE that the dcast function in reshape2 does not allow multiple aggregate functions to be passed, while the cast function in reshape does.
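As a hedged aside for readers on newer versions: data.table's own dcast (>= 1.9.6) does accept a list of aggregation functions, which gets the means and counts in one call (the ParD string would still need a separate step):
library(data.table)
dcast(as.data.table(df), Par1 + Par2 ~ Type, value.var = "Val",
      fun.aggregate = list(mean, length))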
I believe this base R solution is comparable with #Arun's data.table solution. (Which isn't to say I would prefer it; that code is much simpler!)
baseMethod2 <- quote({
is <- unname(split(1:nrow(df), with(df, paste(Par1, Par2, sep="\b"))))
i1 <- sapply(is, `[`, 1)
out <- with(df, data.frame(Par1=Par1[i1], Par2=Par2[i1]))
js <- lapply(is, function(i) split(i, df$Type[i]))
out$post <- sapply(js, function(j) mean(df$Val[j$post]))
out$pre <- sapply(js, function(j) mean(df$Val[j$pre]))
out$Num.pre <- sapply(js, function(j) length(j$pre))
out$Num.post <- sapply(js, function(j) length(j$post))
out$ParD <- sapply(is, function(x) paste(df$ParD[x], collapse="_"))
out
})
Using #RicardoSaporta's timing code with 900, 2700, and 10,800, respectively:
> relative test elapsed user.self sys.self replications
3 1.000 baseMethod2 0.230 0.229 0 5
1 1.130 dt 0.260 0.257 0 5
2 8.752 plyr 2.013 2.006 0 5
> relative test elapsed user.self sys.self replications
3 1.000 baseMethod2 0.877 0.872 0 5
1 1.068 dt 0.937 0.934 0 5
2 8.060 plyr 7.069 7.043 0 5
> relative test elapsed user.self sys.self replications
1 1.000 dt 6.232 6.178 0.031 5
3 1.085 baseMethod2 6.763 6.683 0.054 5
2 7.263 plyr 45.261 44.983 0.104 5
Trying to wrap different aggregation expressions into a self-contained function (expressions should yield atomic values)...
multi.by <- function(X, INDEX,...) {
expressions <- substitute(...())
duplicates <- duplicated(INDEX)
res <- do.call(rbind,sapply(split(X,cumsum(!duplicates),drop=T), function(part)
sapply(expressions,eval,part,simplify=F),simplify=F))
if (is.data.frame(INDEX)) res <- cbind(INDEX[!duplicates,],res)
else rownames(res) <- INDEX[!duplicates]
res
}
multi.by(df,df[,1:2],
pre=mean(Val[Type=="pre"]),
post=mean(Val[Type=="post"]),
Num.pre=sum(Type=="pre"),
Num.post=sum(Type=="post"),
ParD=paste(ParD, collapse="_"))
I have the matrix
m <- matrix(1:9, nrow = 3, ncol = 3, byrow = TRUE,dimnames = list(c("s1", "s2", "s3"),c("tom", "dick","bob")))
tom dick bob
s1 1 2 3
s2 4 5 6
s3 7 8 9
#and the data frame
current<-c("tom", "dick","harry","bob")
replacement<-c("x","y","z","b")
df<-data.frame(current,replacement)
current replacement
1 tom x
2 dick y
3 harry z
4 bob b
#I need to replace the existing names i.e. df$current with df$replacement if
#colnames(m) are equal to df$current thereby producing the following matrix
m <- matrix(1:9, nrow = 3, ncol = 3, byrow = TRUE,dimnames = list(c("s1", "s2", "s3"),c("x", "y","b")))
x y b
s1 1 2 3
s2 4 5 6
s3 7 8 9
Any advice? Should I use an 'if' loop? Thanks.
You can use which to match the colnames from m with the values in df$current. Then, when you have the indices, you can subset the replacement colnames from df$replacement.
colnames(m) = df$replacement[which(df$current %in% colnames(m))]
In the above:
%in% returns TRUE or FALSE for each element of the first object, indicating whether it matches any element of the second.
which(df$current %in% colnames(m)) identifies the indices (in this case, the row numbers) of the matched names.
df$replacement[...] subsets the column df$replacement, returning only the rows matched in the previous step.
A slightly more direct way to find the indices is to use match:
> id <- match(colnames(m), df$current)
> id
[1] 1 2 4
> colnames(m) <- df$replacement[id]
> m
x y b
s1 1 2 3
s2 4 5 6
s3 7 8 9
As discussed below, %in% is generally more intuitive to use, and the difference in efficiency is marginal unless the sets are relatively large, e.g.
> n <- 50000 # size of full vector
> m <- 10000 # size of subset
> query <- paste("A", sort(sample(1:n, m)))
> names <- paste("A", 1:n)
> all.equal(which(names %in% query), match(query, names))
[1] TRUE
> library(rbenchmark)
> benchmark(which(names %in% query))
test replications elapsed relative user.self sys.self user.child sys.child
1 which(names %in% query) 100 0.267 1 0.268 0 0 0
> benchmark(match(query, names))
test replications elapsed relative user.self sys.self user.child sys.child
1 match(query, names) 100 0.172 1 0.172 0 0 0
There is a nice explanation here describing how to eliminate duplicates in a data frame by picking the maximum variable.
I can also see how this can be applied to pick the duplicate with the minimum variable.
My question now is: how do I get the mean of all duplicates?
for example:
z <- data.frame(id=c(1,1,2,2,3,4),var=c(2,4,1,3,5,2))
# id var
# 1 2
# 1 4
# 2 1
# 2 3
# 3 5
# 4 2
I would like the output:
# id var
# 1 3 mean(2,4)
# 2 2 mean(1,3)
# 3 5
# 4 2
My current code is:
averages<-do.call(rbind,lapply(split(z,z$id),function(chunk) mean(chunk$var)))
z<-z[order(z$id),]
z<-z[!duplicated(z$id),]
z$var<-averages
My code runs very slowly and takes about 10 times longer than the method for picking the maximum. How do I optimize this code?
Here is a faster solution using data.table
library(data.table)
z <- data.frame(id=sample(letters, 6e5, replace = TRUE),var = rnorm(6e5))
fn1 <- function(z){
z$var <- ave(z$var, z$id, FUN=mean)
return(unique(z))
}
fn2 <- function(z) {
t(sapply(split(z,z$id), function(x) sapply(x,mean)))
}
fn3 <- function(z){
data.table(z)[,list(var = mean(var)), 'id']
}
library(rbenchmark)
benchmark(f1 <- fn1(z), f2 <- fn2(z), f3 <- fn3(z), replications = 2)
test replications elapsed relative user.self sys.self
1 f1 <- fn1(z) 2 3.619 8.455607 3.331 0.242
2 f2 <- fn2(z) 2 0.586 1.369159 0.365 0.220
3 f3 <- fn3(z) 2 0.428 1.000000 0.341 0.086
I think split() and unsplit() are one way.
dupMean <- function(x)
{
result <- split(x[, 2], x[, 1])
result <- lapply(result, mean)
result <- unsplit(result, unique(x[, 1]))
return(result)
}
Or, to save a line with plyr:
require(plyr)
dupMean <- function(x)
{
result <- split(x[, 2], x[, 1])
result <- laply(result, mean)
return(result)
}
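A small usage sketch, shown with the first (split()/unsplit()) definition of dupMean; either version gives the same means for this example. It rebuilds the id/var output from the question, since dupMean returns only the vector of group means aligned with unique(x[, 1]):
z <- data.frame(id = c(1,1,2,2,3,4), var = c(2,4,1,3,5,2))
data.frame(id = unique(z$id), var = dupMean(z))
#   id var
# 1  1   3
# 2  2   2
# 3  3   5
# 4  4   2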
Update:
Just out of curiosity, here is a comparison of the various functions suggested. Ramnath's (fn3) looks to be the winner on my computer.
require(plyr)
require(data.table)
require(rbenchmark)
fn1 <- function(z){
z$var <- ave(z$var, z$id, FUN=mean)
return(unique(z))
}
fn2 <- function(z) {
t(sapply(split(z,z$id), function(x) sapply(x,mean)))
}
fn3 <- function(z){
data.table(z)[,list(var = mean(var)), 'id']
}
fn4 <- function(x)
{
result <- t(sapply(split(x,x$id), function(y) sapply(y,mean)))
return(result)
}
fn5 <- function(x)
{
x$var <- ave(x$var, x$id, FUN=mean)
x <- unique(x)
return(x)
}
fn6 <- function(x)
{
result <- do.call(rbind,lapply(split(x,x$id),function(chunk) mean(chunk$var)))
return(data.frame(id = unique(x[, 1]), var = result))
}
fn7 <- function(x)
{
result <- split(x[, 2], x[, 1])
result <- lapply(result, mean)
result <- unsplit(result, unique(x[, 1]))
return(data.frame(id = unique(x[, 1]), var = result))
}
fn8 <- function(x)
{
result <- split(x[, 2], x[, 1])
result <- laply(result, mean)
return(data.frame(id = unique(x[, 1]), var = result))
}
z <- data.frame(id = rep(c(1,1,2,2,3,4,5,6,6,7), 1e5), var = rnorm(1e6))
benchmark(f1 <- fn1(z), f2 <- fn2(z), f3 <- fn3(z), f4 <- fn4(z), f5 <- fn5(z), f6 <- fn6(z), f7 <- fn7(z), f8 <- fn8(z), replications = 2)
Result:
test replications elapsed relative user.self sys.self
1 f1 <- fn1(z) 2 13.45 20.692308 13.27 0.15
2 f2 <- fn2(z) 2 3.54 5.446154 3.43 0.09
3 f3 <- fn3(z) 2 0.65 1.000000 0.54 0.10
4 f4 <- fn4(z) 2 3.62 5.569231 3.50 0.09
5 f5 <- fn5(z) 2 13.57 20.876923 13.25 0.25
6 f6 <- fn6(z) 2 3.53 5.430769 3.36 0.14
7 f7 <- fn7(z) 2 3.34 5.138462 3.28 0.03
8 f8 <- fn8(z) 2 3.34 5.138462 3.26 0.03
I would use a combination of ave and unique:
z <- data.frame(id=rep(c(1,1,2,2,3,4),1e5),var=rnorm(6e5))
z$var <- ave(z$var, z$id, FUN=mean)
z <- unique(z)
UPDATE: after actually timing the solution, here's something that's a little faster.
z <- data.frame(id=rep(c(1,1,2,2,3,4),1e5),var=rnorm(6e5))
system.time({
averages <- t(sapply(split(z,z$id), function(x) sapply(x,mean)))
})
# user system elapsed
# 1.32 0.00 1.33
system.time({
z$var <- ave(z$var, z$id, FUN=mean)
z <- unique(z)
})
# user system elapsed
# 4.33 0.02 4.37
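Note that the split()/sapply() version returns a matrix whose id column is the group mean of id (i.e. the id itself, since it is constant within a group); a small hedged sketch to get back a plain id/var data frame:
averages <- t(sapply(split(z, z$id), function(x) sapply(x, mean)))
data.frame(id = averages[, "id"], var = averages[, "var"], row.names = NULL)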