Convert a dataframe to a presence/absence matrix in R

I have a table in which each row has an unequal number of elements, stored as strings:
File1 A B C
File2 A B D
File3 E F
I want to convert it into the following format:
A B C D E F
File1 1 1 1 0 0 0
File2 1 1 0 1 0 0
File3 0 0 0 0 1 1
I tried to do it using reshape2 but was not successful.
Sample data:
mydata <- structure(list(V1 = c("File1", "File2", "File3"),
V2 = c("A", "A", "E"), V3 = c("B", "B", "F"),
V4 = c("C", "D", "")),
.Names = c("V1", "V2", "V3", "V4"),
class = "data.frame", row.names = c(NA, -3L))

One possibility:
library(reshape2)
df2 <- melt(mydata, id.var = "V1")
with(df2, table(V1, value))
# value
# V1 A B C D E F
# File1 1 1 1 0 0 0
# File2 1 1 0 1 0 0
# File3 0 0 0 0 1 1

A reasonably efficient approach is to use the (presently) non-exported charMat function from my "splitstackshape" package. Since it's not exported, you will have to use ::: to access it.
library(splitstackshape)
cbind(mydata[1], splitstackshape:::charMat(
split.default(mydata[-1], sequence(ncol(mydata)-1)), fill=0))
# V1 V1 A B C D E F
# 1 File1 0 1 1 1 0 0 0
# 2 File2 0 1 1 0 1 0 0
# 3 File3 1 0 0 0 0 1 1
Under the hood, charMat makes use of matrix indexing to process everything pretty efficiently. Step-by-step, this is what charMat does.
X <- split.default(mydata[-1], sequence(ncol(mydata)-1))
len <- length(X)
vec <- unlist(X, use.names=FALSE)
lvl <- sort(unique(vec))
out <- matrix(0L, nrow = len, ncol = length(lvl), dimnames = list(NULL, lvl))
i.idx <- rep(seq.int(len), vapply(X, length, integer(1L)))
j.idx <- match(vec, lvl)
out[cbind(i.idx, j.idx)] <- 1
out
# (the first, unnamed column is the "" level picked up from the blank cell in mydata)
#      A B C D E F
# [1,] 0 1 1 1 0 0 0
# [2,] 0 1 1 0 1 0 0
# [3,] 1 0 0 0 0 1 1
That looks like a mouthful, but it is actually quite a fast operation, made faster by using the charMat function :-)
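For illustration, here is the same matrix-indexing idea wrapped as a small standalone helper. This is only a sketch (presence_matrix is a made-up name, not the actual splitstackshape:::charMat), applied to mydata by pasting and re-splitting the value columns:
presence_matrix <- function(lst, fill = 0L) {
  vec <- unlist(lst, use.names = FALSE)
  lvl <- sort(unique(vec[vec != ""]))              # drop empty strings
  out <- matrix(fill, nrow = length(lst), ncol = length(lvl),
                dimnames = list(NULL, lvl))
  i <- rep(seq_along(lst), lengths(lst))           # row index for each value
  j <- match(vec, lvl)                             # column index for each value
  keep <- !is.na(j)
  out[cbind(i[keep], j[keep])] <- 1L
  out
}
cbind(mydata[1], presence_matrix(strsplit(trimws(do.call(paste, mydata[-1])), " ", fixed = TRUE)))
#      V1 A B C D E F
# 1 File1 1 1 1 0 0 0
# 2 File2 1 1 0 1 0 0
# 3 File3 0 0 0 0 1 1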
Update: Benchmarks
The following benchmarks compare Henrik's answer with my charMat answer, and also adapt Henrik's answer to use "data.table" instead, for better efficiency.
Two tests were run. The first is on a similar dataset with 90K rows, and the second on one with 900K rows.
Here's the sample data:
biggerdata <- do.call(rbind, replicate(30000, mydata, simplify = FALSE))
biggerdata$V1 <- make.unique(biggerdata$V1)
dim(biggerdata)
# [1] 90000 4
evenBigger <- do.call(rbind, replicate(10, biggerdata, simplify = FALSE))
evenBigger$V1 <- make.unique(evenBigger$V1)
dim(evenBigger)
# [1] 900000 4
Here are the functions to benchmark:
fun1 <- function(indf) {
cbind(indf[1], splitstackshape:::charMat(
split.default(indf[-1], sequence(ncol(indf)-1)), fill=0))
}
library(reshape2)
fun2 <- function(indf) {
df2 <- melt(indf, id.var = "V1")
with(df2, table(V1, value))
}
library(data.table)
library(reshape2)
DT <- data.table(biggerdata)
DT2 <- data.table(evenBigger)
fun3 <- function(inDT) {
DTL <- melt(inDT, id.vars="V1")
dcast.data.table(DTL, V1 ~ value, fun.aggregate=length)
}
And the results of the benchmarking.
library(microbenchmark)
microbenchmark(fun1(biggerdata), fun2(biggerdata), fun3(DT), times = 20)
# Unit: milliseconds
# expr min lq median uq max neval
# fun1(biggerdata) 185.3652 199.8725 289.0206 308.5826 327.4185 20
# fun2(biggerdata) 1453.8791 1605.6053 1639.8567 1758.3984 1797.2229 20
# suppressMessages(fun3(DT)) 469.8979 570.4664 586.4715 598.6229 675.2961 20
microbenchmark(fun1(evenBigger), fun2(evenBigger), fun3(DT2), times = 5)
# Unit: seconds
# expr min lq median uq max neval
# fun1(evenBigger) 1.871611 1.896351 2.071355 2.140580 2.464569 5
# fun2(evenBigger) 26.911523 27.212910 27.363442 27.469812 27.938178 5
# fun3(DT2) 7.103615 7.131603 7.141908 7.205006 7.218321 5

Related

How to compare with values adjacent in a sequence in the same group

Let's say I have something like this:
set.seed(0)
the.df <- data.frame( x=rep(letters[1:3], each=4),
n=rep(0:3, 3),
val=round(runif(12)))
the.df
x n val
1 a 0 1
2 a 1 0
3 a 2 0
4 a 3 1
5 b 0 1
6 b 1 0
7 b 2 1
8 b 3 1
9 c 0 1
10 c 1 1
11 c 2 0
12 c 3 0
Within each x, starting from n==2 (going from small to large), I want to set val to 0 if the previous val (in terms of n) is 0; otherwise, leave it as is.
For example, in the subset x=="b", I first ignore the two rows where n < 2. Now, in Row 7, because the previous val is 0 (the.df$val[the.df$x=="b" & the.df$n==1]), I set val to 0 (the.df$val[the.df$x=="b" & the.df$n==2] <- 0). Then on Row 8, now that val for the previous n is 0 (we just set it), I also want to set val here to 0 (the.df$val[the.df$x=="b" & the.df$n==3] <- 0).
Imagine that the data.frame is not sorted. Therefore procedures that depend on the order would require a sort. I also can't assume that adjacent rows exist (e.g., the row the.df[the.df$x=="a" & the.df$n==1, ] might be missing).
The trickiest part seems to be evaluating val in sequence. I can do this using a loop but I imagine that it would be inefficient (I have millions of rows). Is there a way I can do this more efficiently?
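For concreteness, this is the straightforward loop version alluded to above (a sketch only, using the previous existing row within each group); its output matches the wanted column shown in the edit below:
df <- the.df[order(the.df$x, the.df$n), ]
for (g in unique(df$x)) {
  idx <- which(df$x == g)                 # rows of this group, in n order
  for (k in seq_along(idx)[-1]) {
    i <- idx[k]; prev <- idx[k - 1]
    if (df$n[i] >= 2 && df$val[prev] == 0) df$val[i] <- 0
  }
}
df$val
# [1] 1 0 0 0 1 0 0 0 1 1 0 0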
EDIT: wanted output
the.df
x n val wanted
1 a 0 1 1
2 a 1 0 0
3 a 2 0 0
4 a 3 1 0
5 b 0 1 1
6 b 1 0 0
7 b 2 1 0
8 b 3 1 0
9 c 0 1 1
10 c 1 1 1
11 c 2 0 0
12 c 3 0 0
Also, I don't mind making new columns (e.g., putting the wanted values there).
Using data.table I would try the following
library(data.table)
setDT(the.df)[order(n),
val := if(length(indx <- which(val[2:.N] == 0L)))
c(val[1:(indx[1L] + 1L)], rep(0L, .N - (indx[1L] + 1L))),
by = x]
the.df
# x n val
# 1: a 0 1
# 2: a 1 0
# 3: a 2 0
# 4: a 3 0
# 5: b 0 1
# 6: b 1 0
# 7: b 2 0
# 8: b 3 0
# 9: c 0 1
# 10: c 1 1
# 11: c 2 0
# 12: c 3 0
This simultaneously orders the data by n (since, as you said, it isn't ordered in real life) and recreates val conditionally (if the condition isn't satisfied, val is left untouched).
Hopefully in the near future this will be implemented and then the code could potentially be
setDT(the.df)[order(n), val[n > 2] := if(val[2L] == 0) 0L, by = x]
which could be a great improvement both performance- and syntax-wise.
A base R approach might be
df <- the.df[order(the.df$x, the.df$n),]
df$val <- ave(df$val, df$x, FUN=fun)
As for fun, #DavidArenburg's answer in plain R and written a bit more poetically might be
fun0 <- function(v) {
idx <- which.max(v[2:length(v)] == 0L) + 1L
if (length(idx))
v[idx:length(v)] <- 0L
v
}
It seems like a good idea to formulate the solution as an independent function first, because then it is easy to test. fun0 fails for some edge cases, e.g.,
> fun0(0)
[1] 0 0 0
> fun0(1)
[1] 0 0 0
> fun0(c(1, 1))
[1] 1 0
A better version is
fun1 <- function(v) {
tst <- tail(v, -1) == 0L
if (any(tst)) {
idx <- which.max(tst) + 1L
v[idx:length(v)] <- 0L
}
v
}
And even better, following #Arun
fun <- function(v)
if (length(v) > 2) c(v[1], cummin(v[-1])) else v
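For instance, applied to each group's val (ordered by n) from the example data, fun reproduces the wanted output:
fun(c(1, 0, 0, 1))  # group a
# [1] 1 0 0 0
fun(c(1, 0, 1, 1))  # group b
# [1] 1 0 0 0
fun(c(1, 1, 0, 0))  # group c
# [1] 1 1 0 0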
This is competitive (same order of magnitude) with the data.table solution, with ordering and return occurring in less than 1 s for the ~10M-row data.frame of #m-dz's timings. At a second for millions of rows, it doesn't seem worthwhile to pursue further optimization.
Nonetheless, when there are a very large number of small groups (e.g., 2M each of size 5) an improvement is to avoid the tapply() function call by using group identity to offset the minimum. For instance,
df <- df[order(df$x, df$n),]
grp <- match(df$x, unique(df$x)) # strictly sequential groups
keep <- duplicated(grp) # ignore the first of each group
df$val[keep] <- cummin(df$val[keep] - grp[keep]) + grp[keep]
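To see why the offset trick does not leak zeros across groups, here are the intermediate quantities on the example data (computed on a fresh sorted copy, before the assignment above):
df0 <- the.df[order(the.df$x, the.df$n), ]
grp0 <- match(df0$x, unique(df0$x))
keep0 <- duplicated(grp0)
rbind(val    = df0$val[keep0],
      offset = df0$val[keep0] - grp0[keep0],
      runmin = cummin(df0$val[keep0] - grp0[keep0]),
      result = cummin(df0$val[keep0] - grp0[keep0]) + grp0[keep0])
#        [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9]
# val       0    0    1    0    1    1    1    0    0
# offset   -1   -1    0   -2   -1   -1   -2   -3   -3
# runmin   -1   -1   -1   -2   -2   -2   -2   -3   -3
# result    0    0    0    0    0    0    1    0    0
Note column 7 (the first kept row of group c, where val is 1): the running minimum carried in from group b is -2, which maps back to 1 after adding the group index, so the zeros in group b do not leak into group c.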
Hmmm, should be pretty efficient if you switch to data.table...
library(data.table)
# Define the.df as a data.table (or use data.table::setDT() function)
set.seed(0)
the.df <- data.table(
x = rep(letters[1:3], each = 4),
n = rep(0:3, 3),
val = round(runif(12))
)
m_dz <- function() {
setorder(the.df, x, n)
repeat{
# Get IDs of rows to change
# ids <- which(the.df[, (n > 1) & (val == 1) & (shift(val, 1L, type = "lag") == 0)])
ids <- the.df[(n > 1) & (val == 1) & (shift(val, 1L, type = "lag") == 0), , which = TRUE]
# If no IDs break
if(length(ids) == 0){
break
}
# Set val to 0
# for (i in ids) set(the.df, i = i, j = "val", value = 0)
set(the.df, i = ids, j = "val", value = 0)
}
return(the.df)
}
Edit: The function above was slightly modified thanks to #jangorecki's suggestions, i.e. it uses which = TRUE and set(the.df, i = ids, j = "val", value = 0), which made the timings much more stable (no very high max timings).
Edit: timing comparison with #David Arenburg's answer on a slightly bigger table, m_dz() updated (#FoldedChromatin's answer skipped because of different results).
My function is slightly faster in terms of median and upper quantile, but there is quite a big spread in timings (see max...); I cannot figure out why. Hopefully the timing methodology is correct (returning the result to a different object etc.).
Anything bigger will kill my PC :(
set.seed(0)
groups_ids <- replicate(300, paste(sample(LETTERS, 5, replace=TRUE), collapse = ""))
size1 <- length(unique(groups_ids))
size2 <- round(1e7/size1)
df1 <- data.table(
x = rep(groups_ids, each = size2),
n = rep(0:(size2-1), size1),
val = round(runif(size1*size2))
)
df2 <- copy(df1)
# m-dz
m_dz <- function() {
setorder(df1, x, n)
repeat{
ids <- df1[(n > 1) & (val == 1) & (shift(val, 1L, type = "lag") == 0), , which = TRUE]
if(length(ids) == 0){
break
}
set(df1, i = ids, j = "val", value = 0)
}
return(df1)
}
# David Arenburg
DavidArenburg <- function() {
setorder(df2, x, n)
df2[, val := if(length(indx <- which.max(val[2:.N] == 0) + 1L)) c(val[1:indx], rep(0L, .N - indx)), by = x]
return(df2)
}
library(microbenchmark)
microbenchmark(
res1 <- m_dz(),
res2 <- DavidArenburg(),
times = 100
)
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# res1 <- m_dz() 247.4136 268.5005 363.0117 288.4216 312.7307 7071.0960 100 a
# res2 <- DavidArenburg() 270.6074 281.3935 314.7864 303.5229 328.1210 525.8095 100 a
identical(res1, res2)
# [1] TRUE
Edit: (Old) results for even bigger table:
set.seed(0)
groups_ids <- replicate(300, paste(sample(LETTERS, 5, replace=TRUE), collapse = ""))
size1 <- length(unique(groups_ids))
size2 <- round(1e8/size1)
# Unit: seconds
# expr min lq mean median uq max neval cld
# res1 <- m_dz() 5.599855 5.800264 8.773817 5.923721 6.021132 289.85107 100 a
# res2 <- m_dz2() 5.571911 5.836191 9.047958 5.970952 6.123419 310.65280 100 a
# res3 <- DavidArenburg() 9.183145 9.519756 9.714105 9.723325 9.918377 10.28965 100 a
Why not just use by
> set.seed(0)
> the.df <- data.frame( x=rep(letters[1:3], each=4),
n=rep(0:3, 3),
val=round(runif(12)))
> the.df
x n val
1 a 0 1
2 a 1 0
3 a 2 0
4 a 3 1
5 b 0 1
6 b 1 0
7 b 2 1
8 b 3 1
9 c 0 1
10 c 1 1
11 c 2 0
12 c 3 0
> Mod.df<-by(the.df,INDICES=the.df$x,function(x){
x$val[x$n==2]=0
Which=which(x$n==2 & x$val==0)+1
x$val[Which]=0
x})
> do.call(rbind,Mod.df)
x n val
a.1 a 0 1
a.2 a 1 0
a.3 a 2 0
a.4 a 3 0
b.5 b 0 1
b.6 b 1 0
b.7 b 2 0
b.8 b 3 0
c.9 c 0 1
c.10 c 1 1
c.11 c 2 0
c.12 c 3 0

Most efficient way to turn factor matrix into binary (indicator) matrix in R

I can think of several ways to turn a matrix (data frame) of this type:
dat = data.frame(
x1 = rep(c('a', 'b'), 100),
x2 = rep(c('x', 'y'), 100)
)
head(dat)
x1 x2
1 a x
2 b y
3 a x
4 b y
5 a x
6 b y
Into a binary (indicator) matrix (or data frame) like this:
a b x y
1 0 1 0
0 1 0 1
...
(This structure is, of course, trivial and only for illustrative purpose!)
Many thanks!
We can use table
tbl <- table(rep(1:nrow(dat),2),unlist(dat))
head(tbl, 2)
# a b x y
# 1 1 0 1 0
# 2 0 1 0 1
Or a possibly efficient option would be
library(Matrix)
sM <- sparse.model.matrix(~ -1 + x1 +x2, dat,
contrasts.arg = lapply(dat, contrasts, contrasts = FALSE))
colnames(sM) <- sub(".*\\d", "", colnames(sM))
head(sM, 2)
# 2 x 4 sparse Matrix of class "dgCMatrix"
# a b x y
#1 1 . 1 .
#2 . 1 . 1
It can be converted to a regular binary matrix with as.matrix:
head(as.matrix(sM),2)
# a b x y
#1 1 0 1 0
#2 0 1 0 1
There are some good solutions posted already, but none are optimal for performance. We can optimize performance by looping over each input column, and then looping over each factor level index within each input column and doing a straight integer comparison of the factor indexes. It's not the most concise or elegant piece of code, but it's fairly straightforward and fast:
do.call(cbind,lapply(dat,function(col)
`colnames<-`(do.call(cbind,lapply(seq_along(levels(col)),function(i)
as.integer(as.integer(col)==i)
)),levels(col))
));
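For readers who find the one-liner hard to parse, here is the same logic spelled out with intermediate variables (a sketch; it assumes the columns are factors, as they are in the question under the old stringsAsFactors = TRUE default):
indicator_cols <- lapply(dat, function(col) {
  idx <- as.integer(col)                           # factor level index per row
  m <- vapply(seq_along(levels(col)),
              function(i) as.integer(idx == i),    # straight integer comparison
              integer(length(col)))
  colnames(m) <- levels(col)                       # one 0/1 column per level
  m
})
do.call(cbind, indicator_cols)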
Performance:
library(Matrix);
library(data.table);
library(microbenchmark);
bgoldst <- function(dat) do.call(cbind,lapply(dat,function(col) `colnames<-`(do.call(cbind,lapply(seq_along(levels(col)),function(i) as.integer(as.integer(col)==i))),levels(col))));
akrun1 <- function(dat) table(rep(1:nrow(dat),2),unlist(dat));
akrun2 <- function(dat) sparse.model.matrix(~-1+x1+x2,dat,contrasts.arg=lapply(dat,contrasts,contrasts=FALSE));
davidar <- function(dat) { dat[,rowid:=.I]; dcast(melt(dat,id='rowid'),rowid~value,length); }; ## requires a data.table
dataminer <- function(dat) t(apply(dat,1,function(x) as.numeric(unique(unlist(dat))%in%x)));
N <- 100L; dat <- data.frame(x1=rep(c('a','b'),N),x2=rep(c('x','y'),N)); datDT <- setDT(copy(dat));
identical(unname(bgoldst(dat)),matrix(as.vector(akrun1(dat)),ncol=4L));
## [1] TRUE
identical(unname(bgoldst(dat)),unname(matrix(as.integer(as.matrix(akrun2(dat))),ncol=4L)));
## [1] TRUE
identical(bgoldst(dat),as.matrix(davidar(datDT)[,rowid:=NULL]));
## [1] TRUE
identical(unname(bgoldst(dat)),matrix(as.integer(dataminer(dat)),ncol=4L));
## [1] TRUE
N <- 100L;
dat <- data.frame(x1=rep(c('a','b'),N),x2=rep(c('x','y'),N)); datDT <- setDT(copy(dat));
microbenchmark(bgoldst(dat),akrun1(dat),akrun2(dat),davidar(datDT),dataminer(dat));
## Unit: microseconds
## expr min lq mean median uq max neval
## bgoldst(dat) 67.570 92.374 106.2853 99.6440 121.2405 188.596 100
## akrun1(dat) 581.182 652.386 773.6300 690.6605 916.4625 1192.299 100
## akrun2(dat) 4429.208 4836.119 5554.5902 5145.3135 5977.0990 11263.537 100
## davidar(datDT) 5064.273 5498.555 6104.7621 5664.9115 6203.9695 11713.856 100
## dataminer(dat) 47577.729 49529.753 55217.3726 53190.8940 60041.9020 74346.268 100
N <- 1e4L;
dat <- data.frame(x1=rep(c('a','b'),N),x2=rep(c('x','y'),N)); datDT <- setDT(copy(dat));
microbenchmark(bgoldst(dat),akrun1(dat),akrun2(dat),davidar(datDT));
## Unit: milliseconds
## expr min lq mean median uq max neval
## bgoldst(dat) 1.775617 1.820949 2.299493 1.84725 1.972124 8.362336 100
## akrun1(dat) 38.954524 41.109257 48.409613 45.60304 52.147633 162.365472 100
## akrun2(dat) 16.915832 17.762799 21.288200 19.20164 23.775180 46.494055 100
## davidar(datDT) 36.151684 38.366715 42.875940 42.38794 45.916937 58.695008 100
N <- 1e5L;
dat <- data.frame(x1=rep(c('a','b'),N),x2=rep(c('x','y'),N)); datDT <- setDT(copy(dat));
microbenchmark(bgoldst(dat),akrun1(dat),akrun2(dat),davidar(datDT));
## Unit: milliseconds
## expr min lq mean median uq max neval
## bgoldst(dat) 17.16473 22.97654 35.01815 26.76662 31.75562 152.6188 100
## akrun1(dat) 501.72644 626.14494 671.98315 680.91152 727.88262 828.8313 100
## akrun2(dat) 212.12381 242.65505 298.90254 272.28203 357.65106 429.6023 100
## davidar(datDT) 368.04924 461.60078 500.99431 511.54921 540.39358 638.3840 100
If you have a data.frame as you are showing (not a matrix), you could as well recast the data
library(data.table)
setDT(dat)[, rowid := .I] # Creates a row index
res <- dcast(melt(dat, id = "rowid"), rowid ~ value, length) # long/wide format
head(res)
# rowid a b x y
# 1 1 1 0 1 0
# 2 2 0 1 0 1
# 3 3 1 0 1 0
# 4 4 0 1 0 1
# 5 5 1 0 1 0
# 6 6 0 1 0 1
Some benchmarks
dat = data.frame(
x1 = rep(c('a', 'b'), 1e3),
x2 = rep(c('x', 'y'), 1e3)
)
library(data.table)
library(Matrix)
library(microbenchmark)
dat2 <- copy(dat)
microbenchmark("akrun1 : " = table(rep(1:nrow(dat),2),unlist(dat)),
"akrun2 : " = sparse.model.matrix(~ -1 + x1 +x2, dat, contrasts.arg = lapply(dat, contrasts, contrasts = FALSE)),
"DatamineR : " = t(apply(dat,1, function(x) as.numeric(unique(unlist(dat)) %in% x))),
"David Ar : " = {setDT(dat2)[, rowid := .I] ; dcast(melt(dat2, id = "rowid"), rowid ~ value, length)},
times = 10L)
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# akrun1 : 3.826075 4.061904 6.654399 5.165376 11.26959 11.82029 10 a
# akrun2 : 5.269531 5.713672 8.794434 5.943422 13.34118 20.01961 10 a
# DatamineR : 3199.336286 3343.774160 3410.618547 3385.756972 3517.22133 3625.70909 10 b
# David Ar : 8.092769 8.254682 11.030785 8.465232 15.44893 19.83914 10 a
The apply solution is highly inefficient and will take forever on a bigger data set. Comparing on a bigger data set, excluding the apply solution:
dat = data.frame(
x1 = rep(c('a', 'b'), 1e4),
x2 = rep(c('x', 'y'), 1e4)
)
dat2 <- copy(dat)
microbenchmark("akrun1 : " = table(rep(1:nrow(dat),2),unlist(dat)),
"akrun2 : " = sparse.model.matrix(~ -1 + x1 +x2, dat, contrasts.arg = lapply(dat, contrasts, contrasts = FALSE)),
#"DatamineR : " = t(apply(dat,1, function(x) as.numeric(unique(unlist(dat)) %in% x))),
"David Ar : " = {setDT(dat2)[, rowid := .I] ; dcast(melt(dat2, id = "rowid"), rowid ~ value, length)},
times = 100L)
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# akrun1 : 38.66744 41.27116 52.97982 42.72534 47.17203 161.0420 100 b
# akrun2 : 17.02006 18.93534 27.27582 19.35580 20.72022 153.2397 100 a
# David Ar : 34.15915 37.91659 46.11050 38.58536 41.40412 149.0038 100 b
Seems like the Matrix package shines for bigger data sets.
It's probably worth comparing different scenarios with more columns/unique values too.
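For reference, here is a sketch of how such a wider test set could be generated (hypothetical sizes; factor columns). Note that the formula-based approaches above would need their formulas adjusted for the extra columns.
set.seed(1)
n_rows <- 1e4; n_cols <- 10; n_levels <- 50
cols <- lapply(seq_len(n_cols), function(i)
  factor(sample(paste0("lvl", seq_len(n_levels)), n_rows, replace = TRUE)))
names(cols) <- paste0("x", seq_len(n_cols))
dat_wide <- as.data.frame(cols)
str(dat_wide[1:2])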
One alternative using apply
head(t(apply(dat,1, function(x) as.numeric(unique(unlist(dat)) %in% x))))
[,1] [,2] [,3] [,4]
[1,] 1 0 1 0
[2,] 0 1 0 1
[3,] 1 0 1 0
[4,] 0 1 0 1
[5,] 1 0 1 0
[6,] 0 1 0 1

Find frequencies of combinations where the data.frame needs to be parsed

I'm sure there's a simple solution to this, but I can't figure it out!! Suppose I have a dataframe that has the following information:
aaa<-c("A,B","B,C","B,D,E")
vvv<-c("101","101,102","102,103,104")
data_h<-data.frame(aaa,vvv)
data_h
aaa vvv
1 A,B 101
2 B,C 101,102
3 B,D,E 102,103,104
Desired output is a frequency map of individual hits, for subsequent analysis in a heat map. So:
101 102 103 104
A 1 0 0 0
B 2 2 1 1
C 1 1 0 0
D 0 1 1 1
E 0 1 1 1
How do I make this transformation? I've seen many similar examples, but none where the contents of the data-frame need to be parsed.
The goal is to ultimately use heatmap or something similar on the output table to visualize the correlation between "aaa" and "vvv".
Here is a base R solution in 4 lines of code. First we define a function, spl, which splits a comma-separated string into a vector of its fields. eg takes two string arguments, applies spl to each of them, and then creates a grid from the results of the splitting. Finally we apply eg to each row of data_h, rbind the results together and tabulate them with xtabs:
spl <- function(x) strsplit(as.character(x), ",")[[1]]
eg <- function(aaa, vvv) expand.grid(aaa = spl(aaa), vvv = spl(vvv))
dd <- do.call("rbind", Map(eg, data_h$aaa, data_h$vvv))
xtabs(data = dd)
The result is:
vvv
aaa 101 102 103 104
A 1 0 0 0
B 2 2 1 1
C 1 1 0 0
D 0 1 1 1
E 0 1 1 1
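To see what one of the intermediate eg() calls produces, take the second row of data_h:
eg("B,C", "101,102")
#   aaa vvv
# 1   B 101
# 2   C 101
# 3   B 102
# 4   C 102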
dcast. Alternately, replace the last line of code above (the one with the xtabs) with:
library(reshape2)
dcast(dd, aaa ~ vvv, fun = length, value.var = "vvv")
in which case the result is:
aaa 101 102 103 104
1 A 1 0 0 0
2 B 2 2 1 1
3 C 1 1 0 0
4 D 0 1 1 1
5 E 0 1 1 1
tapply. Another alternative would be tapply (however, it will fill in empty cells with NA rather than 0):
tapply(1:nrow(dd), dd, length)
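If 0s are needed instead of NAs from the tapply() route, a small follow-up (not part of the original answer) is to overwrite them:
tab <- tapply(seq_len(nrow(dd)), dd, length)
tab[is.na(tab)] <- 0L
tab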
ADDED Alternatives. Some improvements.
The shape of the data.frame suggests using the splitstackshape package. But I don't know this package very well, so I just use it to reshape the data and then compute the frequencies by hand using table:
library(splitstackshape)
data_h_split <- concat.split.multiple(data_h,1:2)
# aaa_1 aaa_2 aaa_3 vvv_1 vvv_2 vvv_3
# 1 A B <NA> 101 NA NA
# 2 B C <NA> 101 102 NA
# 3 B D E 102 103 104
Once you have the data in this format (no commas, regular columns), it is easy to compute frequencies using table (you could also use tapply or reshape):
table(cbind.data.frame(ff= unlist(data_h_split[1:3]),
xx= unlist(data_h_split[4:6])))
xx
ff 101 102 103 104
A 1 0 0 0
B 1 1 0 0
C 0 1 0 0
D 0 0 1 0
0 0 0 0
E 0 0 0 1
Ananda's edit
Here's a multi-step approach to get "splitstackshape" to work for this.
library(splitstackshape)
## Split the "vvv" column first, and reshape at the same time
x <- concat.split.multiple(data_h, split.cols="vvv", ",", "long")
## Add an ID column
x$id <- 1:nrow(x)
## Split the "aaa" column next, again reshaping as we do so
x <- concat.split.multiple(x[complete.cases(x), ], split.cols="aaa", ",", "long")
## Use `table` with `droplevels`
with(droplevels(x), table(aaa, vvv))
# vvv
# aaa 101 102 103 104
# A 1 0 0 0
# B 2 2 1 1
# C 1 1 0 0
# D 0 1 1 1
# E 0 1 1 1
My concat.split.multiple function is in great need of a rewrite to improve its efficiency. I've done some work on that in my cSplit function, which might be useful if you have a particularly large dataset.
Here's how I would solve your given problem with cSplit:
table(
cSplit(
cSplit(data_h, splitCols = 2, sep = ",",
direction = "long", makeEqual = FALSE),
splitCols = 1, sep = ",", direction = "long",
makeEqual = FALSE))
# vvv
# aaa 101 102 103 104
# A 1 0 0 0
# B 2 2 1 1
# C 1 1 0 0
# D 0 1 1 1
# E 0 1 1 1
It seems to be pretty efficient too...
First, the functions to test:
fun1 <- function() table(cSplit(cSplit(df, 2, ",", "long", FALSE), 1, ",", "long", FALSE))
fun2 <- function() {
spl <- function(x) strsplit(as.character(x), ",")[[1]]
eg <- function(aaa, vvv) expand.grid(aaa = spl(aaa), vvv = spl(vvv))
dd <- do.call("rbind", Map(eg, df$A, df$V))
xtabs(data = dd)
}
Second, some sample data. Change Nrow and re-generate to see the effect on different-sized data.frames.
set.seed(1)
Nrow <- 100
aaa <- 100:200
vvv <- LETTERS
maxA <- 10
maxV <- 10
Aaa <- sample(maxA, Nrow, TRUE)
Vvv <- sample(maxV, Nrow, TRUE)
A <- vapply(seq_along(Aaa), function(x)
paste(sample(aaa, Aaa[x], TRUE), collapse = ","), character(1L))
V <- vapply(seq_along(Vvv), function(x)
paste(sample(vvv, Vvv[x], TRUE), collapse = ","), character(1L))
df <- data.frame(A, V)
head(df)
# A V
# 1 127,122,152 E,E,O,S,W,S,M
# 2 127,118,152,156 V,A,Z,Q
# 3 113,125,172,197,110,177 L,A,T
# 4 195,182,131,165,196,196,134,126,116,132 F,Z,X,S,T,M,W,E,Q,H
# 5 151,193,151 L,B,E,B,Y,I,N
# 6 126,104,142,186,135,113,137,163,139 Q,G,N
Compare the two approaches to make sure the results are the same:
X <- fun1()
Y <- fun2()
all(X == Y[dimnames(X)[[1]], dimnames(X)[[2]]])
# [1] TRUE
Benchmark (on 100 rows).
library(microbenchmark)
## Nrow = 100
microbenchmark(fun1(), fun2(), times = 10)
# Unit: milliseconds
# expr min lq median uq max neval
# fun1() 7.263802 7.326237 7.440843 7.868905 10.26451 10
# fun2() 62.869130 64.046836 68.525880 73.595061 80.02027 10
Benchmark (on 1000 rows).
## Nrow = 1000
microbenchmark(fun1(), fun2(), times = 10)
# Unit: milliseconds
# expr min lq median uq max neval
# fun1() 19.2303 20.21857 23.14337 26.97776 35.56338 10
# fun2() 775.6586 815.01639 835.98951 852.47804 888.15345 10

Conditional Statements; creating new binary variable

I have a pair of binary variables (1's and 0's), and my professor wants me to create a new binary variable that takes the value 1 if both of the previous variables have the value 1 (i.e., x,y=1) and takes the value zero otherwise.
How would I do this in R?
Thanks!
JMC
Here's one example with some sample data to play with:
set.seed(1)
A <- sample(0:1, 10, replace = TRUE)
B <- sample(0:1, 10, replace = TRUE)
A
# [1] 0 0 1 1 0 1 1 1 1 0
B
# [1] 0 0 1 0 1 0 1 1 0 1
as.numeric(A + B == 2)
# [1] 0 0 1 0 0 0 1 1 0 0
as.numeric(rowSums(cbind(A, B)) == 2)
# [1] 0 0 1 0 0 0 1 1 0 0
as.numeric(A == 1 & B == 1)
# [1] 0 0 1 0 0 0 1 1 0 0
Update (to introduce some more alternatives and share a link and a benchmark)
set.seed(1)
A <- sample(0:1, 1e7, replace = TRUE)
B <- sample(0:1, 1e7, replace = TRUE)
fun1 <- function() ifelse(A == 1 & B == 1, 1, 0)
fun2 <- function() as.numeric(A + B == 2)
fun3 <- function() as.numeric(A & B)
fun4 <- function() as.numeric(A == 1 & B == 1)
fun5 <- function() as.numeric(rowSums(cbind(A, B)) == 2)
library(microbenchmark)
microbenchmark(fun1(), fun2(), fun3(), fun4(), fun5(), times = 5)
# Unit: milliseconds
# expr min lq median uq max neval
# fun1() 4842.8559 4871.7072 5022.3525 5093.5932 10424.6589 5
# fun2() 220.8336 220.9867 226.1167 229.1225 472.4408 5
# fun3() 440.7427 445.9342 461.0114 462.6184 488.6627 5
# fun4() 604.1791 613.9284 630.4838 645.2146 682.4689 5
# fun5() 373.8088 373.8532 373.9460 435.0385 1084.6227 5
As can be seen, ifelse is indeed much slower than the other approaches mentioned here. See this SO question and answer for some more details about the efficiency of ifelse.

Split a string column into several dummy variables

As a relatively inexperienced user of the data.table package in R, I've been trying to process one text column into a large number of indicator columns (dummy variables), with a 1 in each column indicating that a particular sub-string was found within the string column. For example, I want to process this:
ID String
1 a$b
2 b$c
3 c
into this:
ID String a b c
1 a$b 1 1 0
2 b$c 0 1 1
3 c 0 0 1
I have figured out how to do the processing, but it takes longer to run than I would like, and I suspect that my code is inefficient. A reproducible version of my code with dummy data is below. Note that in the real data, there are over 2000 substrings to search for, each substring is roughly 30 characters long, and there may be up to a few million rows. If need be, I can parallelize and throw lots of resources at the problem, but I want to optimize the code as much as possible. I have tried running Rprof, which suggested no obvious (to me) improvements.
set.seed(10)
elements_list <- c(outer(letters, letters, FUN = paste, sep = ""))
random_string <- function(min_length, max_length, separator) {
selection <- paste(sample(elements_list, ceiling(runif(1, min_length, max_length))), collapse = separator)
return(selection)
}
dt <- data.table(id = c(1:1000), messy_string = "")
dt[ , messy_string := random_string(2, 5, "$"), by = id]
create_indicators <- function(search_list, searched_string) {
y <- rep(0, length(search_list))
for(j in 1:length(search_list)) {
x <- regexpr(search_list[j], searched_string)
x <- x[1]
y[j] <- ifelse(x > 0, 1, 0)
}
return(y)
}
timer <- proc.time()
indicators <- matrix(0, nrow = nrow(dt), ncol = length(elements_list))
for(n in 1:nrow(dt)) {
indicators[n, ] <- dt[n, create_indicators(elements_list, messy_string)]
}
indicators <- data.table(indicators)
setnames(indicators, elements_list)
dt <- cbind(dt, indicators)
proc.time() - timer
user system elapsed
13.17 0.08 13.29
EDIT
Thanks for the great responses--all much superior to my method. The results of some speed tests are below, with slight modifications to each function to use 0L and 1L in my own code, to store the results in separate tables by method, and to standardize the ordering. These are elapsed times from single speed tests (rather than medians from many tests), but the larger runs each take a long time.
Number of rows in dt     2K     10K     50K    250K      1M
OP                     28.6   149.2   717.0
eddi                    5.1    24.6   144.8  1950.3
RS                      1.8     6.7    29.7   171.9   702.5
Original GT             1.4     7.4    57.5   809.4
Modified GT             0.7     3.9    18.1   115.2   473.9
GT4                     0.1     0.4    2.26    16.9    86.9
Pretty clearly, the modified version of GeekTrader's approach is best. I'm still a bit vague on what each step is doing, but I can go over that at my leisure. Although somewhat out of bounds of the original question, if anyone wants to explain what GeekTrader and Ricardo Saporta's methods are doing more efficiently, it would be appreciated both by me and probably by anyone who visits this page in the future. I'm particularly interested to understand why some methods scale better than others.
*****EDIT # 2*****
I tried to edit GeekTrader's answer with this comment, but that seems not to work. I made two very minor modifications to the GT3 function, to a) order the columns, which adds a small amount of time, and b) replace 0 and 1 with 0L and 1L, which speeds things up a bit. Call the resulting function GT4. Table above edited to add times for GT4 at different table sizes. Clearly the winner by a mile, and it has the added advantage of being intuitive.
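For reference, GT4 is not shown explicitly anywhere below, so here is a sketch of what it presumably looks like: funcGT3 (shown in the next answer) with the two tweaks described above, ordered result columns and integer literals. The match()-based coordinate lookup is my own simplification, not necessarily the exact code that was timed.
funcGT4 <- function() {
  # (a) ordered result columns
  resCol <- sort(unique(dt[, unlist(strsplit(messy_string, split = "\\$"))]))
  # (b) preallocate with the integer literal 0L
  mat <- matrix(0L, nrow = nrow(dt), ncol = length(resCol),
                dimnames = list(NULL, resCol))
  ll <- strsplit(dt[, messy_string], split = "\\$")
  coords <- cbind(rep(seq_along(ll), lengths(ll)), match(unlist(ll), resCol))
  # (b) ... and fill with 1L
  mat[coords] <- 1L
  cbind(dt, mat)
}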
UPDATE : VERSION 3
Found an even faster way. This function is also highly memory efficient.
The primary reason the previous function was slow is the copying/assignment happening inside the lapply loop, as well as the rbinding of the result.
In the following version, we preallocate a matrix of the appropriate size and then change values at the appropriate coordinates, which makes it very fast compared to the other looping versions.
funcGT3 <- function() {
#Get list of column names in result
resCol <- unique(dt[, unlist(strsplit(messy_string, split="\\$"))])
#Get dimension of result
nresCol <- length(resCol)
nresRow <- nrow(dt)
#Create empty matrix with dimensions same as desired result
mat <- matrix(rep(0, nresRow * nresCol), nrow = nresRow, dimnames = list(as.character(1:nresRow), resCol))
#split each messy_string by $
ll <- strsplit(dt[,messy_string], split="\\$")
#Get coordinates of mat which we need to set to 1
coords <- do.call(rbind, lapply(1:length(ll), function(i) cbind(rep(i, length(ll[[i]])), ll[[i]] )))
#Set mat to 1 at appropriate coordinates
mat[coords] <- 1
#Bind the mat to original data.table
return(cbind(dt, mat))
}
result <- funcGT3() #result for 1000 rows in dt
result
ID messy_string zn tc sv db yx st ze qs wq oe cv ut is kh kk im le qg rq po wd kc un ft ye if zl zt wy et rg iu
1: 1 zn$tc$sv$db$yx 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2: 2 st$ze$qs$wq 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3: 3 oe$cv$ut$is 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
4: 4 kh$kk$im$le$qg 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
5: 5 rq$po$wd$kc 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0
---
996: 996 rp$cr$tb$sa 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
997: 997 cz$wy$rj$he 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0
998: 998 cl$rr$bm 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
999: 999 sx$hq$zy$zd 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1000: 1000 bw$cw$pw$rq 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
Benchmark against version 2 suggested by Ricardo (this is for 250K rows of data):
Unit: seconds
expr min lq median uq max neval
GT2 104.68672 104.68672 104.68672 104.68672 104.68672 1
GT3 15.15321 15.15321 15.15321 15.15321 15.15321 1
VERSION 1
Following is version 1 of the suggested answer:
set.seed(10)
elements_list <- c(outer(letters, letters, FUN = paste, sep = ""))
random_string <- function(min_length, max_length, separator) {
selection <- paste(sample(elements_list, ceiling(runif(1, min_length, max_length))), collapse = separator)
return(selection)
}
dt <- data.table(ID = c(1:1000), messy_string = "")
dt[ , messy_string := random_string(2, 5, "$"), by = ID]
myFunc <- function() {
ll <- strsplit(dt[,messy_string], split="\\$")
COLS <- do.call(rbind,
lapply(1:length(ll),
function(i) {
data.frame(
ID= rep(i, length(ll[[i]])),
COL = ll[[i]],
VAL= rep(1, length(ll[[i]]))
)
}
)
)
res <- as.data.table(tapply(COLS$VAL, list(COLS$ID, COLS$COL), FUN = length ))
dt <- cbind(dt, res)
for (j in names(dt))
set(dt,which(is.na(dt[[j]])),j,0)
return(dt)
}
create_indicators <- function(search_list, searched_string) {
y <- rep(0, length(search_list))
for(j in 1:length(search_list)) {
x <- regexpr(search_list[j], searched_string)
x <- x[1]
y[j] <- ifelse(x > 0, 1, 0)
}
return(y)
}
OPFunc <- function() {
indicators <- matrix(0, nrow = nrow(dt), ncol = length(elements_list))
for(n in 1:nrow(dt)) {
indicators[n, ] <- dt[n, create_indicators(elements_list, messy_string)]
}
indicators <- data.table(indicators)
setnames(indicators, elements_list)
dt <- cbind(dt, indicators)
return(dt)
}
library(plyr)
plyrFunc <- function() {
indicators = do.call(rbind.fill, sapply(1:dim(dt)[1], function(i)
dt[i,
data.frame(t(as.matrix(table(strsplit(messy_string,
split = "\\$")))))
]))
dt = cbind(dt, indicators)
#dt[is.na(dt)] = 0 #THIS DOESN'T WORK. USING FOLLOWING INSTEAD
for (j in names(dt))
set(dt,which(is.na(dt[[j]])),j,0)
return(dt)
}
BENCHMARK
system.time(res <- myFunc())
## user system elapsed
## 1.01 0.00 1.01
system.time(res2 <- OPFunc())
## user system elapsed
## 21.58 0.00 21.61
system.time(res3 <- plyrFunc())
## user system elapsed
## 1.81 0.00 1.81
VERSION 2 : Suggested by Ricardo
I'm posting this here instead of in my answer, as the framework is really #GeekTrader's. -Rick
myFunc.modified <- function() {
ll <- strsplit(dt[,messy_string], split="\\$")
## MODIFICATIONS:
# using `rbindlist` instead of `do.call(rbind.. )`
COLS <- rbindlist( lapply(1:length(ll),
function(i) {
data.frame(
ID= rep(i, length(ll[[i]])),
COL = ll[[i]],
VAL= rep(1, length(ll[[i]])),
# MODIFICATION: Not coercing to factors
stringsAsFactors = FALSE
)
}
)
)
# MODIFICATION: Preserve as matrix, the output of tapply
res2 <- tapply(COLS$VAL, list(COLS$ID, COLS$COL), FUN = length )
# FLATTEN into a data.table
resdt <- data.table(r=c(res2))
# FIND & REPLACE NA's of single column
resdt[is.na(r), r:=0L]
# cbind with dt, a matrix, with the same attributes as `res2`
cbind(dt,
matrix(resdt[[1]], ncol=ncol(res2), byrow=FALSE, dimnames=dimnames(res2)))
}
### Benchmarks:
orig = quote({dt <- copy(masterDT); myFunc()})
modified = quote({dt <- copy(masterDT); myFunc.modified()})
microbenchmark(Modified = eval(modified), Orig = eval(orig), times=20L)
# Unit: milliseconds
# expr min lq median uq max
# 1 Modified 895.025 971.0117 1011.216 1189.599 2476.972
# 2 Orig 1953.638 2009.1838 2106.412 2230.326 2356.802
# split the `messy_string` and create a long table, keeping track of the id
DT2 <- setkey(DT[, list(val=unlist(strsplit(messy_string, "\\$"))), by=list(ID, messy_string)], "val")
# add the columns, initialize to 0
DT2[, c(elements_list) := 0L]
# warning expected, re: adding a large amount of columns
# iterate over each value in elements_list, assigning 1's as appropriate
for (el in elements_list)
DT2[el, c(el) := 1L]
# sum by ID
DT2[, lapply(.SD, sum), by=list(ID, messy_string), .SDcols=elements_list]
Note that we are carrying along the messy_string column since it is cheaper than leaving it behind and then joining on ID to get it back.
If you don't need it in the final output, just delete it above.
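Alternatively, it can simply be dropped from the result afterwards, e.g. (res being a hypothetical name for the aggregated result):
res <- DT2[, lapply(.SD, sum), by = list(ID, messy_string), .SDcols = elements_list]
res[, messy_string := NULL]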
Benchmarks:
Creating the sample data:
# sample data, using OP's example
set.seed(10)
N <- 1e6 # number of rows
elements_list <- c(outer(letters, letters, FUN = paste, sep = ""))
messy_string_vec <- random_string_fast(N, 2, 5, "$") # Create the messy strings in a single shot.
masterDT <- data.table(ID = c(1:N), messy_string = messy_string_vec, key="ID") # create the data.table
Side Note
It is significantly faster to create the random strings all at once and assign the results as a single column
than to call the function N times and assign each, one by one.
# Faster way to create the `messy_string` 's
random_string_fast <- function(N, min_length, max_length, separator) {
ints <- seq(from=min_length, to=max_length)
replicate(N, paste(sample(elements_list, sample(ints)), collapse=separator))
}
Comparing Four Methods:
this answer -- "DT.RS"
#eddi's answer -- "Plyr.eddi"
#GeekTrader's answer -- DT.GT
#GeekTrader's answer with some modifications -- DT.GT_Mod
Here is the setup:
library(data.table); library(plyr); library(microbenchmark)
# data.table method - RS
usingDT.RS <- quote({DT <- copy(masterDT);
DT2 <- setkey(DT[, list(val=unlist(strsplit(messy_string, "\\$"))), by=list(ID, messy_string)], "val"); DT2[, c(elements_list) := 0L]
for (el in elements_list) DT2[el, c(el) := 1L]; DT2[, lapply(.SD, sum), by=list(ID, messy_string), .SDcols=elements_list]})
# data.table method - GeekTrader
usingDT.GT <- quote({dt <- copy(masterDT); myFunc()})
# data.table method - GeekTrader, modified by RS
usingDT.GT_Mod <- quote({dt <- copy(masterDT); myFunc.modified()})
# plyr method from below
usingPlyr.eddi <- quote({dt <- copy(masterDT); indicators = do.call(rbind.fill, sapply(1:dim(dt)[1], function(i) dt[i, data.frame(t(as.matrix(table(strsplit(messy_string, split = "\\$"))))) ]));
dt = cbind(dt, indicators); dt[is.na(dt)] = 0; dt })
Here are the benchmark results:
microbenchmark( usingDT.RS=eval(usingDT.RS), usingDT.GT=eval(usingDT.GT), usingDT.GT_Mod=eval(usingDT.GT_Mod), usingPlyr.eddi=eval(usingPlyr.eddi), times=5L)
On smaller data:
N = 600
Unit: milliseconds
expr min lq median uq max
1 usingDT.GT 1189.7549 1198.1481 1200.6731 1202.0972 1203.3683
2 usingDT.GT_Mod 581.7003 591.5219 625.7251 630.8144 650.6701
3 usingDT.RS 2586.0074 2602.7917 2637.5281 2819.9589 3517.4654
4 usingPlyr.eddi 2072.4093 2127.4891 2225.5588 2242.8481 2349.6086
N = 1,000
Unit: seconds
expr min lq median uq max
1 usingDT.GT 1.941012 2.053190 2.196100 2.472543 3.096096
2 usingDT.RS 3.107938 3.344764 3.903529 4.010292 4.724700
3 usingPlyr 3.297803 3.435105 3.625319 3.812862 4.118307
N = 2,500
Unit: seconds
expr min lq median uq max
1 usingDT.GT 4.711010 5.210061 5.291999 5.307689 7.118794
2 usingDT.GT_Mod 2.037558 2.092953 2.608662 2.638984 3.616596
3 usingDT.RS 5.253509 5.334890 6.474915 6.740323 7.275444
4 usingPlyr.eddi 7.842623 8.612201 9.142636 9.420615 11.102888
N = 5,000
expr min lq median uq max
1 usingDT.GT 8.900226 9.058337 9.233387 9.622531 10.839409
2 usingDT.GT_Mod 4.112934 4.293426 4.460745 4.584133 6.128176
3 usingDT.RS 8.076821 8.097081 8.404799 8.800878 9.580892
4 usingPlyr.eddi 13.260828 14.297614 14.523016 14.657193 16.698229
# dropping the slower two from the tests:
microbenchmark( usingDT.RS=eval(usingDT.RS), usingDT.GT=eval(usingDT.GT), usingDT.GT_Mod=eval(usingDT.GT_Mod), times=6L)
N = 10,000
Unit: seconds
expr min lq median uq max
1 usingDT.GT_Mod 8.426744 8.739659 8.750604 9.118382 9.848153
2 usingDT.RS 15.260702 15.564495 15.742855 16.024293 16.249556
N = 25,000
... (still running)
-----------------
Functions Used in benchmarking:
# original random string function
random_string <- function(min_length, max_length, separator) {
selection <- paste(sample(elements_list, ceiling(runif(1, min_length, max_length))), collapse = separator)
return(selection)
}
# GeekTrader's function
myFunc <- function() {
ll <- strsplit(dt[,messy_string], split="\\$")
COLS <- do.call(rbind,
lapply(1:length(ll),
function(i) {
data.frame(
ID= rep(i, length(ll[[i]])),
COL = ll[[i]],
VAL= rep(1, length(ll[[i]]))
)
}
)
)
res <- as.data.table(tapply(COLS$VAL, list(COLS$ID, COLS$COL), FUN = length ))
dt <- cbind(dt, res)
for (j in names(dt))
set(dt,which(is.na(dt[[j]])),j,0)
return(dt)
}
# Improvements to #GeekTrader's `myFunc` -RS
myFunc.modified <- function() {
ll <- strsplit(dt[,messy_string], split="\\$")
## MODIFICATIONS:
# using `rbindlist` instead of `do.call(rbind.. )`
COLS <- rbindlist( lapply(1:length(ll),
function(i) {
data.frame(
ID= rep(i, length(ll[[i]])),
COL = ll[[i]],
VAL= rep(1, length(ll[[i]])),
# MODIFICATION: Not coercing to factors
stringsAsFactors = FALSE
)
}
)
)
# MODIFICATION: Preserve as matrix, the output of tapply
res2 <- tapply(COLS$VAL, list(COLS$ID, COLS$COL), FUN = length )
# FLATTEN into a data.table
resdt <- data.table(r=c(res2))
# FIND & REPLACE NA's of single column
resdt[is.na(r), r:=0L]
# cbind with dt, a matrix, with the same attributes as `res2`
cbind(dt,
matrix(resdt[[1]], ncol=ncol(res2), byrow=FALSE, dimnames=dimnames(res2)))
}
### Benchmarks comparing the two versions of GeekTrader's function:
orig = quote({dt <- copy(masterDT); myFunc()})
modified = quote({dt <- copy(masterDT); myFunc.modified()})
microbenchmark(Modified = eval(modified), Orig = eval(orig), times=20L)
# Unit: milliseconds
# expr min lq median uq max
# 1 Modified 895.025 971.0117 1011.216 1189.599 2476.972
# 2 Orig 1953.638 2009.1838 2106.412 2230.326 2356.802
Here's a somewhat newer approach, using cSplit_e() from the splitstackshape package.
library(splitstackshape)
cSplit_e(dt, split.col = "String", sep = "$", type = "character",
mode = "binary", fixed = TRUE, fill = 0)
# ID String String_a String_b String_c
#1 1 a$b 1 1 0
#2 2 b$c 0 1 1
#3 3 c 0 0 1
Here's a ~10x faster version using rbind.fill.
library(plyr)
indicators = do.call(rbind.fill, sapply(1:dim(dt)[1], function(i)
dt[i,
data.frame(t(as.matrix(table(strsplit(messy_string,
split = "\\$")))))
]))
dt = cbind(dt, indicators)
# dt[is.na(dt)] = 0
# faster NA replace (thanks geektrader)
for (j in names(dt))
set(dt, which(is.na(dt[[j]])), j, 0L)
Here is an approach using rapply and table.
I'm sure there would be a slightly faster approach than using table here, but this is still slightly faster than myFunc.modified from #ricardo's answer.
# a copy with enough column pointers available
dtr <- alloc.col(copy(dt) ,1000L)
rapplyFun <- function(){
ll <- strsplit(dtr[, messy_string], '\\$')
Vals <- rapply(ll, classes = 'character', f= table, how = 'replace')
Names <- unique(rapply(Vals, names))
dtr[, (Names) := 0L]
for(ii in seq_along(Vals)){
for(jj in names(Vals[[ii]])){
set(dtr, i = ii, j = jj, value =Vals[[ii]][jj])
}
}
}
microbenchmark(myFunc.modified(), rapplyFun(),times=5)
Unit: milliseconds
# expr min lq median uq max neval
# myFunc.modified() 395.1719 396.8706 399.3218 400.6353 401.1700 5
# rapplyFun() 308.9103 309.5763 309.9368 310.2971 310.3463 5
Here's another solution, that constructs a sparse matrix object instead of what you have. This shaves off a lot of time AND memory.
It produces ordered results and, even with conversion to data.table, it's faster than GT3 with 0L and 1L and without reordering (this could be because I use a different method for arriving at the required coordinates - I didn't go through the GT3 algo). However, if you don't convert and keep it as a sparse matrix, it's about 10-20x faster than GT3 (and has a much smaller memory footprint).
library(Matrix)
strings = strsplit(dt$messy_string, split = "$", fixed = TRUE)
element.map = data.table(el = elements_list, n = seq_along(elements_list), key = "el")
tmp = data.table(n = seq_along(strings), each = unlist(lapply(strings, length)))
rows = tmp[, rep(n, each = each), by = n][, V1]
cols = element.map[J(unlist(strings))][,n]
dt.sparse = sparseMatrix(rows, cols, x = 1,
dims = c(max(rows), length(elements_list)))
# optional, should be avoided until absolutely necessary
dt = cbind(dt, as.data.table(as.matrix(dt.sparse)))
setnames(dt, c('id', 'messy_string', elements_list))
The idea is to split the strings, then use a data.table as a map object to map each substring to its correct column position. From there it's just a matter of figuring out the rows correctly and filling in the matrix.
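A tiny self-contained illustration of the row/column indexing idea, on the 3-row example from the top of the question (using match() for the column lookup here instead of the keyed data.table join, just to keep the sketch short):
library(Matrix)
library(data.table)
small <- data.table(ID = 1:3, String = c("a$b", "b$c", "c"))
parts <- strsplit(small$String, "$", fixed = TRUE)
lvls  <- sort(unique(unlist(parts)))             # "a" "b" "c"
rows  <- rep(seq_along(parts), lengths(parts))   # 1 1 2 2 3
cols  <- match(unlist(parts), lvls)              # 1 2 2 3 3
m <- sparseMatrix(rows, cols, x = 1, dims = c(nrow(small), length(lvls)),
                  dimnames = list(NULL, lvls))
cbind(small, as.data.table(as.matrix(m)))
#    ID String a b c
# 1:  1    a$b 1 1 0
# 2:  2    b$c 0 1 1
# 3:  3      c 0 0 1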
