data.table: apply a function to a portion of a table - r

I want to apply a function to a portion of a table.
With a data.frame, there's no problem:
df <- data.frame(name = paste("a", 1:10, sep = "-"),
                 x = 1:10,
                 y = rep(1:5),
                 z = rep(1:2, each = 5))
df[2:5, -1] <- scale(df[2:5, -1], center = c(1,2,3), scale = c(4,5,6))
But data.table complains:
dt <- data.table(name = paste("a", 1:10, sep = "-"),
                 x = 1:10,
                 y = rep(1:5),
                 z = rep(1:2, each = 5))
dt[2:5, -1] <- scale(dt[2:5, -1], center = c(1,2,3), scale = c(4,5,6))
Error in `[<-.data.table`(`*tmp*`, 2:5, -1, value = c(0.25, 0.5, 0.75, :
  Item 1 of column numbers in j is -1 which is outside range [1,ncol=4]. Use column names instead in j to add new columns.
What is the correct way in data.table? Thanks!

data.table needs a bit more work to apply scale:
library(data.table)
cols <- names(dt)[-1]
# scale() returns numeric, so convert the integer columns first
dt[, (cols) := lapply(.SD, as.numeric), .SDcols = cols]
# scale each column of rows 2:5 with its matching center/scale value
dt[2:5, (cols) := Map(scale, .SD, c(1,2,3), c(4,5,6)), .SDcols = cols]
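A quick sanity check (a sketch, assuming R >= 4.0 so that name is character in both objects): the data.table result should now agree with the data.frame result from the question.
# compare with the data.frame result from the question,
# ignoring attributes left behind by scale()
all.equal(as.data.frame(dt), df, check.attributes = FALSE)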

Related

R fast cosine distance between consecutive rows of a data.table

How can I efficiently calculate distances between (almost) consecutive rows of a large-ish (~4m rows) data.table? I've outlined my current approach, but it is very slow. My actual data has up to a few hundred columns. I need to calculate lags and leads for future use, so I create those columns and use them to calculate the distances.
library(data.table)
library(proxy)

set_shift_col <- function(df, shift_dir, shift_num, data_cols, byvars = NULL){
  df[, (paste0(data_cols, "_", shift_dir, shift_num)) :=
       shift(.SD, shift_num, fill = NA, type = shift_dir),
     byvars, .SDcols = data_cols]
}

set_shift_dist <- function(dt, shift_dir, shift_num, data_cols){
  stopifnot(shift_dir %in% c("lag", "lead"))
  shift_str <- paste0(shift_dir, shift_num)
  dt[, (paste0("dist", "_", shift_str)) := as.numeric(
       proxy::dist(
         rbindlist(list(
           .SD[, data_cols, with = FALSE],
           .SD[, paste0(data_cols, "_", shift_str), with = FALSE]
         ), use.names = FALSE),
         method = "cosine")
     ), 1:nrow(dt)]
}
n <- 10000
test_data <- data.table(a = rnorm(n), b = rnorm(n), c = rnorm(n), d = rnorm(n))
cols <- c("a", "b", "c", "d")
set_shift_col(test_data, "lag", 1, cols)
set_shift_col(test_data, "lag", 2, cols)
set_shift_col(test_data, "lead", 1, cols)
set_shift_col(test_data, "lead", 2, cols)
set_shift_dist(test_data, "lag", 1, cols)
I'm sure this is a very inefficient approach, any suggestions would be appreciated!
You aren't using the vectorisation efficiencies of the proxy::dist function: rather than calling it once per row, you can get all the distances you need from a single call.
Try this replacement function and compare the speed:
set_shift_dist2 <- function(dt, shift_dir, shift_num, data_cols){
  stopifnot(shift_dir %in% c("lag", "lead"))
  shift_str <- paste0(shift_dir, shift_num)
  dt[, (paste0("dist2", "_", shift_str)) := proxy::dist(
       .SD[, data_cols, with = FALSE],
       .SD[, paste0(data_cols, "_", shift_str), with = FALSE],
       method = "cosine",
       pairwise = TRUE
  )]
}
You could also do it in one go, without storing shifted copies of the data in the table:
test_data[, dist_lag1 := proxy::dist(
  .SD,
  as.data.table(shift(.SD, 1)),
  pairwise = TRUE,
  method = 'cosine'
), .SDcols = c('a', 'b', 'c', 'd')]
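As a quick check (a sketch, assuming the code above has been run in order), the vectorised variants should agree with each other, including the NA in the first row where no lag exists:
set_shift_dist2(test_data, "lag", 1, cols)
# dist_lag1 (one-go version) and dist2_lag1 (set_shift_dist2) should match
test_data[, all.equal(dist_lag1, dist2_lag1)]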

combining separate data.table calls into one by group call

I'm trying to improve the efficiency of the following simple data.table syntax by combining it into one call, rather than repeatedly calling by = "group".
# data
library(data.table)
DT <- data.table(group = c(rep("a", 40), rep("b", 40)),
                 other = rnorm(80),
                 num = c(1:80))

# reduce this to one "by" call
DT[, c1 := ifelse(num <= 7, NA, num), by = "group"]
DT[, sprintf("c%d", 2:10) := shift(c1, 1:9, type = 'lag'), by = "group"]
DT[, d1 := shift(c10, 1, type = 'lag'), by = "group"]
DT[, sprintf("d%d", 2:10) := shift(d1, 1:9, type = 'lag'), by = "group"]
DT[, e1 := shift(d10, 1, type = 'lag'), by = "group"]
DT[, sprintf("e%d", 2:10) := shift(e1, 1:9, type = 'lag'), by = "group"]
Something like this (not valid syntax, but it shows the intent):
DT[, .(c1 := ifelse(num <= 7, NA, num),
       sprintf("c%d", 2:10) := shift(c1, 1:9, type = 'lag'),
       d1 := shift(c10, 1, type = 'lag'),
       sprintf("d%d", 2:10) := shift(d1, 1:9, type = 'lag'),
       e1 := shift(d10, 1, type = 'lag'),
       sprintf("e%d", 2:10) := shift(e1, 1:9, type = 'lag')), by = "group"]
Edit:
This is similar to, but slightly different from, this question, as the variables created here are not independent of one another.
Any suggestions?
Thanks!
Here is an option:
ix <- 2L:10L
m <- 1L:9L
DT[, c(sprintf("c%d", ix), sprintf("d%d", ix), sprintf("e%d", ix)) := {
  c1 = replace(num, num <= 7L, NA_integer_)
  lc = shift(c1, m)
  d1 = shift(lc[[9L]])
  ld = shift(d1, m)
  e1 = shift(ld[[9L]])
  c(lc, ld, shift(e1, m))
}, group]
You can write a function:
f <- function(num) {
  c1 <- ifelse(num <= 7, NA, num)
  cl <- shift(c1, 1:9, type = 'lag')
  names(cl) <- sprintf("c%d", 2:10)
  d1 <- shift(cl[9], 1, type = 'lag')
  dl <- shift(d1, 1:9, type = 'lag')
  names(dl) <- sprintf("d%d", 2:10)
  e1 <- shift(dl[9], 1, type = 'lag')
  el <- shift(e1, 1:9, type = 'lag')
  names(el) <- sprintf("e%d", 2:10)
  c(c1 = list(c1), cl, d1 = d1, dl, e1 = e1, el)  # list of desired columns
}

x <- DT[, f(num), by = group]   # apply it by group
DT <- cbind(DT, x[, -'group'])  # add to the initial data
Maybe this will be faster. The function could probably be written better; just make sure it returns a list with your desired column names.
You can call by once, using the facts that (1) the list returned in j becomes the columns of the result data.table, and (2) curly braces can be used for intermediate calculations in j.
Because the default value of the type argument of shift is "lag", I did not specify it.
Note that the last line inside the curly braces, lst, is the only object returned.
DT[, {
  nms = paste0(rep(c("c", "d", "e"), each = 10), 1:10)
  lst = setNames(vector("list", 30), nms)
  lst[["c1"]] = ifelse(num <= 7, NA, num)
  lst[sprintf("c%d", 2:10)] = shift(lst[["c1"]], 1:9)
  lst[["d1"]] = shift(lst[["c10"]], 1)
  lst[sprintf("d%d", 2:10)] = shift(lst[["d1"]], 1:9)
  lst[["e1"]] = shift(lst[["d10"]], 1)
  lst[sprintf("e%d", 2:10)] = shift(lst[["e1"]], 1:9)
  lst
}, by = group]
The output contains 30 columns: c1, ..., c10, d1, ..., d10, and e1, ..., e10.
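If you want the 30 columns added to DT by reference instead of returned as a new table, the same curly-brace body should also work with := (a sketch, not from the original answer; nms is hoisted out of j so it can name the assignment targets):
nms <- paste0(rep(c("c", "d", "e"), each = 10), 1:10)
DT[, (nms) := {
  lst = setNames(vector("list", 30), nms)
  lst[["c1"]] = ifelse(num <= 7, NA, num)
  lst[sprintf("c%d", 2:10)] = shift(lst[["c1"]], 1:9)
  lst[["d1"]] = shift(lst[["c10"]], 1)
  lst[sprintf("d%d", 2:10)] = shift(lst[["d1"]], 1:9)
  lst[["e1"]] = shift(lst[["d10"]], 1)
  lst[sprintf("e%d", 2:10)] = shift(lst[["e1"]], 1:9)
  lst
}, by = group]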

Paste multiple column values with column name with empty values

dt <- data.table(v1 = c("", "17-Sep-2019"),
                 v2 = c("", "17-Sep-2019"))
cols <- paste0("v", 1:2)
dt[, do.call(paste, Map(function(x, y) paste(x, y, sep = ':'),
                        lubridate::dmy(.SD),
                        toupper(gsub(".*(\\(.*\\)).*", "\\1", names(.SD))))),
   .SDcols = cols]
I want the code to return c(NA, "2019-09-17:V1 2019-09-17:V2")
Maybe something like:
dt <- data.table(v1 = c("", "17-Sep-2019"), v2 = c("", "17-Sep-2019"))
cols <- paste0("v", 1:2)
dt[, fifelse(v1 != "",
             do.call(paste, Map(function(x, v) paste0(as.Date(x, "%d-%b-%Y"), ":", toupper(v)), .SD, cols)),
             NA_character_), .SDcols = cols]
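As written, the NA test keys on v1 alone. If a row should only collapse to NA when every column is empty, a variant (a sketch, not from the original answer) could build the test with rowSums:
dt[, fifelse(rowSums(.SD != "") > 0,
             do.call(paste, Map(function(x, v) paste0(as.Date(x, "%d-%b-%Y"), ":", toupper(v)), .SD, cols)),
             NA_character_), .SDcols = cols]
# note: a column that is empty within an otherwise non-empty row would still
# render as "NA:V1", so mixed rows would need extra handling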

Efficient way to paste multiple column pairs in R data.table

I'm looking for an efficient way to paste/combine multiple pairs of adjacent columns at once using data.table. My feeble attempt is slow and not so elegant:
library(data.table)
dt <- data.table(ids = 1:3,
                 x1 = c("A", "B", "C"),
                 x2 = 1:3,
                 y1 = c("D", "E", "F"),
                 y2 = 4:6,
                 z1 = c("G", "H", "I"),
                 z3 = 7:9)

paste.pairs <- function(x, sep = "-"){
  xx <- unlist(x)
  x.len <- length(x)
  r <- rep(NA, x.len / 2)
  s <- seq(1, x.len, by = 2)
  for(i in 1:(x.len / 2)) {
    # pair adjacent values: columns 2i-1 and 2i
    r[i] <- paste(xx[s[i]], xx[s[i] + 1], sep = sep)
  }
  return(as.list(r))
}
dt[, paste.pairs(.SD), by = "ids"]
Is there a better way?
An option with Map, creating the column indices with seq:
i1 <- seq(1, length(dt) - 1, 2)
i2 <- seq(2, length(dt) - 1, 2)
dt[, Map(paste,
         .SD[, i1, with = FALSE], .SD[, i2, with = FALSE],
         MoreArgs = list(sep = "-")),
   by = "ids"]
Another option would be to split by the names of the dataset and then paste:
data.frame(lapply(split.default(dt[, -1, with = FALSE],
                                sub("\\d+$", "", names(dt)[-1])),
                  function(x) do.call(paste, c(x, sep = "-"))))
#     x   y   z
# 1 A-1 D-4 G-7
# 2 B-2 E-5 H-8
# 3 C-3 F-6 I-9
Or another option is with melt/dcast:
dcast(melt(dt, id.var = 'ids')[, paste(value, collapse = "-"),
                               .(grp = sub("\\d+", "", variable), ids)],
      ids ~ grp, value.var = 'V1')
A solution using matrices:
# create matrices from the columns you want to paste together
m1 <- as.matrix(dt[, c(2, 4, 6)])
m2 <- as.matrix(dt[, c(3, 5, 7)])
# paste the matrices element-by-element, and convert the result back to a data.table
as.data.table(matrix(paste(m1, m2, sep = "-"), nrow = nrow(m1), dimnames = dimnames(m1)))
Should run pretty fast, and is very readable and easy to adapt.
output
# x1 y1 z1
# 1: A-1 D-4 G-7
# 2: B-2 E-5 H-8
# 3: C-3 F-6 I-9
benchmarks
microbenchmark::microbenchmark(
  wimpel = {
    # create matrices
    m1 <- as.matrix(dt[, c(2, 4, 6)])
    m2 <- as.matrix(dt[, c(3, 5, 7)])
    # paste the matrices element-by-element, and convert to data.table
    as.data.table(matrix(paste(m1, m2, sep = "-"), nrow = nrow(m1), dimnames = dimnames(m1)))
  },
  akrun_df = {
    data.frame(lapply(split.default(dt[, -1, with = FALSE],
                                    sub("\\d+$", "", names(dt)[-1])),
                      function(x) do.call(paste, c(x, sep = "-"))))
  },
  akrun_map = {
    i1 <- seq(2, length(dt), 2)
    i2 <- seq(3, length(dt), 2)
    dt[, Map(paste, .SD[, i1, with = FALSE], .SD[, i2, with = FALSE], MoreArgs = list(sep = "-"))]
  },
  akrun_dcast = {
    dcast(melt(dt, id.var = 'ids')[, paste(value, collapse = "-"),
                                   .(grp = sub("\\d+", "", variable), ids)],
          ids ~ grp, value.var = 'V1')
  },
  times = 10)
# Unit: microseconds
# expr min lq mean median uq max neval
# wimpel 303.072 315.122 341.2417 319.1895 327.775 531.429 10
# akrun_df 1022.790 1028.515 1251.7812 1069.1850 1172.519 2779.460 10
# akrun_map 742.013 751.051 785.6059 778.1650 799.855 884.812 10
# akrun_dcast 4104.719 4175.215 4414.6596 4348.7430 4650.911 4939.221 10
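If the hard-coded column positions c(2,4,6) and c(3,5,7) bother you, they can be derived from the table instead (a sketch, assuming the adjacent-pair layout of dt above):
# odd/even data columns, skipping the ids column
i1 <- seq(2, ncol(dt), by = 2)   # x1, y1, z1
i2 <- seq(3, ncol(dt), by = 2)   # x2, y2, z3
m1 <- as.matrix(dt[, i1, with = FALSE])
m2 <- as.matrix(dt[, i2, with = FALSE])
as.data.table(matrix(paste(m1, m2, sep = "-"), nrow = nrow(m1), dimnames = dimnames(m1)))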

Subset data.table columns independently

I'm starting with the table dt below and trying to subset its columns by the list keys:
library(data.table)
set.seed(123)

randomchar <- function(n, w){
  chararray <- replicate(w, sample(c(letters, LETTERS), n, replace = TRUE))
  apply(chararray, 1, paste0, collapse = "")
}

dt <- data.table(x = randomchar(1000, 3),
                 y = randomchar(1000, 3),
                 z = randomchar(1000, 3),
                 key = c("x", "y", "z"))

keys <- with(dt, list(x = sample(x, 501),
                      y = sample(y, 500),
                      z = sample(z, 721)))
I can get the result I want by using a loop:
desired <- copy(dt)
for(i in seq_along(keys)){
  keyname <- names(keys)[i]
  desired <- desired[get(keyname) %in% keys[[i]]]
}
desired
The question is: is there a more data.table-idiomatic way to do this subset?
I tried using CJ, dt[CJ(keys)], but it takes a very long time.
What about building a mask and filtering dt on this mask:
dt[Reduce(`&`, Map(function(key, col) col %in% key, keys, dt)),]
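A quick way to confirm the mask reproduces the loop result (a sketch; all.equal has a data.table method that ignores internal attributes):
masked <- dt[Reduce(`&`, Map(function(key, col) col %in% key, keys, dt))]
all.equal(desired, masked)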
