I have the following data.table:
DT <- data.table(A = c(rep("aa",2),rep("bb",2)),
B = c(rep("H",2),rep("Na",2)),
Low = c(0,3,1,1),
High = c(8,10,9,8),
Time =c("0,1,2,3,4,5,6,7,8,9,10","0,1,2,3,4,5,6,7,8,9,10","0,1,2,3,4,5,6,7,8,9,10","0,1,2,3,4,5,6,7,8,9,10"),
Intensity = c("0,0,0,0,561464,0,0,0,0,0,0","0,0,0,6548,5464,5616,0,0,0,68716,0","5658,12,6548,6541,8,5646854,54565,56465,546,65,0","0,561464,0,0,0,0,0,0,0,0,0")
)
and use this code to extract the highest number of consecutive Intensity values above a certain value. For a more detailed explanation of how this calculation works, please see Reading and counting of consecutive points:
newCols <- do.call(rbind, Map(function(u, v, x, y) {
u1 <- as.numeric(u)
v1 <- as.numeric(v)
lb <- which.min(abs(x - u1))
ub <- which.min(abs(y - u1))
v3 <- as.numeric(v[(lb+1):(ub-1)])
i3 = with(rle(v3 > min(as.numeric(v[c(lb, ub)]))),
pmax(max(lengths[values]), 0))
data.frame(Consec.Points.base = i3)
},
strsplit(DT$Time, ","), strsplit(DT$Intensity, ","), DT$Low, DT$High))
DT <- cbind(DT, newCols)
I was wondering how it would be possible, instead of getting the length in Consec.Points.base, to extract the actual points (Time and Intensity) of that longest run as two vectors?
Thanks a lot in advance!
I think this answers your question, but let me know if I made a mistake, or something needs more thought/clarification.
DT <- data.table(A = c(rep("aa",2),rep("bb",2)),
B = c(rep("H",2),rep("Na",2)),
Low = c(0,3,1,1),
High = c(8,10,9,8),
Time =c("0,1,2,3,4,5,6,7,8,9,10","0,1,2,3,4,5,6,7,8,9,10","0,1,2,3,4,5,6,7,8,9,10","0,1,2,3,4,5,6,7,8,9,10"),
Intensity = c("0,0,0,0,561464,0,0,0,0,0,0","0,0,0,6548,5464,5616,0,0,0,68716,0","5658,12,6548,6541,8,5646854,54565,56465,546,65,0","0,561464,0,0,0,0,0,0,0,0,0")
)
# unique identifier
DT[, i := .I]
# re-structure
DT2 <- DT[, .(Time = as.numeric(strsplit(Time, ",")[[1]]),
Intensity = as.numeric(strsplit(Intensity, ",")[[1]])), by = i]
DT2 <- merge(DT2, DT[, .(i,A,B,Low,High)], by="i")
DT2 <- DT2[between(Time, Low, High, incbounds = FALSE),]
DT2[, IntensityGood := Intensity != min(Intensity), by=i]
# encode each part of sequence with its own value, if not FALSE
encoder <- function(x){
  rle.response <- rle(x)
  v2 <- rep(0, length(rle.response$values))
  v2[rle.response$values != FALSE] <- which(rle.response$values != FALSE)
  rep(v2, rle.response$lengths)
}
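To see what the encoder does, here is a tiny example I added for illustration: each run of non-FALSE values is tagged with the position of that run in the full run sequence, while FALSE runs stay 0.
encoder(c(FALSE, TRUE, TRUE, FALSE, TRUE))
# [1] 0 2 2 0 4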
DT2[, encodeI := encoder(IntensityGood), by = i]
# remove groups that are all 0; these can easily be handled separately
DT3 <- DT2[, test := all(encodeI==0), by=i][test==FALSE,][, test:=NULL]
# get count - can infer missing are 0
count <- DT3[encodeI!=0, .(max(table(encodeI))), by = i]
# get sequence
findMaxDt <- DT3[encodeI != 0, .N, by=.(i, encodeI)]
DT3 <- merge(DT3, findMaxDt, by=c("i", "encodeI"))
DT3 <- DT3[, Best := N==max(N), by=i]
DT3[Best==TRUE, .(list(Intensity)), by=i]
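Since the question asks for both Time and Intensity, the same idea can return both as list columns; a small extension along the same lines (my addition, so double-check it against your data):
DT3[Best == TRUE, .(Time = list(Time), Intensity = list(Intensity)), by = i]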
I would like to build a structure which, for each record, stores a string, an index and a numeric value. I would like to be able to access the numeric value by querying the data structure with either the index or the string. The data structure is small (on the order of 30 records) but it must be accessed and modified many times (possibly even a million times).

Normally I would just use a data frame, but given the efficiency requirements, do you think there would be a better (faster) way? Judging by the syntax below, I have the impression that my_struct needs to be accessed twice for each operation (read or write): maybe it's not a big deal, but I wonder whether expert R coders, when efficiency is a constraint, would use this code or something different.
# define data structure
my_struct <- data.frame(index = c(3:14,24), variable = c("Pin", "Pout", "Tout", "D", "L", "mu", "R","K","c","omega","alpha","beta","gamma"), value = runif(13), stringsAsFactors = FALSE)
# examples of read/write statements
my_struct$value[my_struct$variable == "Pin"]
my_struct$value[my_struct$index %in% c(3:14)]
my_struct$value[my_struct$index %in% c(3,5)] <- rnorm(2)
The data.table package supports indices and has nice syntax for read and write:
library(data.table)
dat <- data.table(index = c(3:14,24), variable = c("Pin", "Pout", "Tout", "D", "L", "mu", "R","K","c","omega","alpha","beta","gamma"), value = runif(13))
setindex(dat, index)
setindex(dat, variable)
# read
dat[ index %in% 3:4, value ]
# write
dat[ index %in% 3:4, value := 2:3 ]
To see how the index works, add verbose = TRUE, like dat[ index %in% 3:4, value := 2:3, verbose = TRUE ] and read the vignettes. (Indices are covered in the fourth one.)
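The question also wants access by the string; for completeness, a hedged sketch of the equivalent read and write by variable name using on= (which can make use of the secondary index on variable):
# read by string key
dat["Pin", value, on = "variable"]
# write by string key
dat[c("Pin", "Pout"), value := rnorm(2), on = "variable"]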
Benchmark for OP's example
library(microbenchmark)
datDF = data.frame(dat)
n_idx = 2L
idxcol = "variable"
idx = sample(dat[[idxcol]], n_idx)
v = rnorm(length(idx))
e = substitute(idxcol %in% idx, list(idxcol = as.name(idxcol)))
microbenchmark(
DT = dat[eval(e), value := v ],
DF = datDF$value[ datDF[[idxcol]] %in% idx ] <- v
)
# Unit: microseconds
# expr min lq mean median uq max neval
# DT 449.694 473.136 487.17583 481.042 487.0065 1049.193 100
# DF 27.742 30.239 44.21525 36.065 38.4225 854.723 100
So for a small table like this, data.table is actually slower. I'd still go with it for the (in my opinion) nicer syntax. Note that dplyr has no syntax for updating a subset of rows.
With a large table, you'd see the benchmark reversed:
dat = data.table(variable = do.call(paste0, CJ(LETTERS, LETTERS, LETTERS, LETTERS)))
dat[, index := .I ]
dat[, value := rnorm(.N) ]
setindex(dat, index)
setindex(dat, variable)
datDF = data.frame(dat)
n_idx = 2L
idxcol = "variable"
idx = sample(dat[[idxcol]], n_idx)
v = rnorm(length(idx))
e = substitute(idxcol %in% idx, list(idxcol = as.name(idxcol)))
microbenchmark(
DT = dat[eval(e), value := v ],
DF = datDF$value[ datDF[[idxcol]] %in% idx ] <- v
)
# Unit: microseconds
# expr min lq mean median uq max neval
# DT 471.887 492.5545 701.7914 757.766 817.827 1647.582 100
# DF 17387.134 17729.3280 23750.6721 22629.490 25912.309 83057.928 100
Note: The DF way can also be written datDF$value[ match(idx, datDF[[idxcol]]) ] <- v, but I'm seeing about the same timing.
I am trying to calculate a measure of association between all variables in a data.table. (This is not a stats question, but as an aside: the variables are all factors, and the measure is Cramér's V.)
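(For reference, for an r x k contingency table with n observations, Cramér's V is sqrt((chi-squared / n) / min(r - 1, k - 1)); that is what the code below computes from the chisq.test output.)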
Example dataset:
p = 50; n = 1e5; # actual dataset has p > 1e3, n > 1e5, much wider but barely longer
set.seed(1234)
obs <- as.data.table(
data.frame(
cbind( matrix(sample(c(LETTERS[1:4],NA), n*(p/2), replace=TRUE),
nrow=n, ncol=p/2),
matrix(sample(c(letters[1:6],NA), n*(p/2), replace=TRUE),
nrow=n, ncol=p/2) ),
stringsAsFactors=TRUE ) )
I am currently using the split-apply-combine approach, which involves looping (via plyr::adply) through all pairs of indices and returning one row for each pair. (I attempted to parallelize adply but failed.)
# Calculate Cramér's V between all variables -- my kludgey approach
pairs <- t( combn(ncol(obs), 2) ) # nx2 matrix contains indices of upper triangle of df
# library('doParallel') # I tried to parallelize -- bonus points for help here (Win 7)
# cl <- makeCluster(8)
# registerDoParallel(cl)
library('plyr')
out <- adply(pairs, 1, function(ix) {
complete_cases <- obs[,which(complete.cases(.SD)), .SDcols=ix]
chsq <- chisq.test(x= dcast(data = obs[complete_cases, .SD, .SDcols=ix],
formula = paste( names(obs)[ix], collapse='~'),
value.var = names(obs)[ix][1], # arbitrary
fun.aggregate=length)[,-1, with=FALSE] )
return(data.table(index_1 = ix[1],
var_1 = names(obs)[ix][1],
index_2 = ix[2],
var_2 = names(obs)[ix][2],
cramers_v = sqrt(chsq$statistic /
(sum(chsq$observed) *
(pmin(nrow(chsq$observed),
ncol(chsq$observed) ) -1 ) )
) )
)
})[,-1] #}, .parallel = TRUE)[,-1] # using .parallel returns Error in do.ply(i) :
# task 1 failed - "object 'obs' not found"
out <- data.table(out) # adply won't return a data.table
# stopCluster(cl)
What are my options for speeding up this calculation? My challenge is in passing the row-wise operation on pairs into the column-wise calculations in obs. I am wondering if it is possible to generate the column pairs directly into J, but the Force is just not strong enough with this data.table padawan.
First, I would convert the data to 'long' format, as follows:
obs[, id := 1:n]
mobs <- melt(obs, id.vars = 'id')
Next, set the key on the data.table: setkeyv(mobs, 'id').
Finally, iterate through the variables and do the calculations on pairs:
out <- list()
for(i in 1:p) {
vari <- paste0('X', i)
tmp <- mobs[mobs[variable == vari]]
nn <- tmp[!(is.na(value) | is.na(i.value)), list(i.variable = i.variable[1], nij = length(id)), keyby = list(variable, value, i.value)]
cj <- nn[, CJ(value = value, i.value = i.value, sorted = FALSE, unique = TRUE), by = variable]
setkeyv(cj, c('variable', 'value', 'i.value'))
nn <- nn[cj]
nn[is.na(nij), nij := 0]
nn[, ni := sum(nij), by = list(variable, i.value)]
nn[, nj := sum(nij), by = list(variable, value)]
nn[, c('n', 'r', 'k') := list(sum(nij), length(unique(i.value)), length(unique(value))), by = variable]
out[[i]] <- nn[, list(i.variable = vari, cramers_v = sqrt(sum((nij - ni * nj / n) ^ 2 / (ni * nj / n)) / (n[1] * min(k[1] - 1, r[1] - 1)))), by = variable]
}
out <- rbindlist(out)
This way you need to iterate through the variables only once. As you can see, I also wouldn't use chisq.test and would write the computations myself.
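If a pairwise matrix view is more convenient than the long output, the result can be reshaped afterwards; a quick sketch, assuming the out built above:
# one row per variable, one column per paired variable
dcast(out, variable ~ i.variable, value.var = "cramers_v")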
I'm looking for a faster solution to the problem below. I'll illustrate the problem with a small example and then provide the code to simulate large data, as that's the point of this question. My actual problem size is a list of length = 1 million entries.
Say, I've two lists as shown below:
x <- list(c(82, 18), c(35, 50, 15))
y <- list(c(1,2,3,55,90), c(37,38,95))
Properties of x and y:
Each element of the list x always sums up to 100.
Each element of y will always be sorted and its values will always be between 1 and 100.
The problem:
Now, what I'd like is this. Taking x[[1]] and y[[1]], I'd like to find the count of numbers in y[[1]] that are 1) <= 82 and 2) > 82 and <= 100. That would be c(4, 1), because the numbers <= 82 are c(1,2,3,55) and the number between 83 and 100 is c(90). Similarly for x[[2]] and y[[2]], the result is c(0, 2, 1). That is, the answer should be:
[[1]]
[1] 4 1
[[2]]
[1] 0 2 1
Let me know if this is still unclear.
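To restate the counting rule on the tiny example above, here is a base-R check (an illustration only, not a proposed fast solution; findInterval with left.open = TRUE makes values equal to a cumulative break fall into the lower interval, matching the <= rule):
mapply(function(b, a) tabulate(findInterval(b, cumsum(a), left.open = TRUE) + 1L, nbins = length(a)),
       y, x, SIMPLIFY = FALSE)
# [[1]]
# [1] 4 1
#
# [[2]]
# [1] 0 2 1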
Simulated data with 1 million entries
set.seed(1)
N <- 100
n <- 1e6
len <- sample(2:3, n, TRUE)
x <- lapply(seq_len(n), function(ix) {
probs <- sample(100:1000, len[ix])
probs <- probs/sum(probs)
oo <- round(N * probs)
if (sum(oo) != 100) {
oo[1] <- oo[1] + (100 - sum(oo))
}
oo
})
require(data.table)
ss <- sample(1:10, n, TRUE)
dt <- data.table(val=sample(1:N, sum(ss), TRUE), grp=rep(seq_len(n), ss))
setkey(dt, grp, val)
y <- dt[, list(list(val)),by=grp]$V1
What I've done so far:
Using mapply (slow):
I first thought of using rank with ties.method="first" and mapply (the obvious choice with 2 lists), and tried this:
tt1 <- mapply(y, x, FUN=function(a,b) {
tt <- rank(c(a, cumsum(b)), ties="first")[-(1:length(a))]; c(tt[1]-1, diff(tt)-1)
})
Although this works just fine, it takes a lot of time on 1M entries. I think the overhead of computing rank and diff that many times adds to it. This takes 241 seconds!
Therefore, I decided to try and overcome the usage of rank and diff by using data.table and sorting with a "group" column. I came up with a longer but much faster solution shown below:
Using data.table (faster):
xl <- sapply(x, length)
yl <- sapply(y, length)
xdt <- data.table(val=unlist(x, use.names=FALSE), grp=rep(seq_along(xl), xl), type = "x")
xdt[, cumval := cumsum(val), by=grp]
ydt <- data.table(val=unlist(y, use.names=FALSE), grp=rep(seq_along(yl), yl), type = "y")
tt2 <-rbindlist(list(ydt, xdt[, list(cumval, grp, type)]))
setkey(tt2, grp, val)
xdt.pos <- which(tt2$type == "x")
tt2[, type.x := 0L][xdt.pos, type.x := xdt.pos]
tt2 <- tt2[xdt.pos][tt2[, .N, by = grp][, N := cumsum(c(0, head(N, -1)))]][, sub := type.x - N]
tt2[, val := xdt$val]
# time consuming step
tt2 <- tt2[, c(sub[1]-1, sub[2:.N] - sub[1:(.N-1)] - 1), by = grp]
tt2 <- tt2[, list(list(V1)),by=grp]$V1
This takes 26 seconds, so it's about 9 times faster. I'm wondering if it's possible to get much more of a speedup, as I'll have to compute this repeatedly on 5-10 such lists of 1 million elements. Thank you.
Here's another data.table approach. Edit: I added a (dirty?) hack that speeds this up and makes it ~2x faster than the OP's data.table solution.
# compile the data.table's, set appropriate keys
xl <- sapply(x, length)
yl <- sapply(y, length)
xdt <- data.table(val=unlist(x, use.names=FALSE), grp=rep(seq_along(xl), xl))
xdt[, cumval := cumsum(val), by=grp]
ydt <- data.table(val=unlist(y, use.names=FALSE), grp=rep(seq_along(yl), yl))
# hack #0, set key but prevent sorting, since we know data is already sorted
setattr(ydt, 'sorted', c('grp', 'val'))
# by setting the key in y to val and in x to cumval we can
# leverage the rolling joins
setattr(xdt, 'sorted', c('grp', 'cumval')) # hack #1 set key, but prevent sorting
vals = xdt[, cumval.copy := cumval][ydt, roll = -Inf]
# hack #2, same deal as above
# we know that the order of cumval and cumval.copy is the same
# so let's convince data.table in that
setattr(vals, 'sorted', c('grp', 'cumval.copy'))
# compute the counts and fill in the missing 0's
# for when there is no y in the appropriate x interval
tt2 = vals[, .N, keyby = list(grp, cumval.copy)][xdt][is.na(N), N := 0L]
# convert to list
tt2 = tt2[order(grp, cumval.copy), list(list(N)), by = grp]$V1
This is about 25% faster but outputs a matrix rather than a list. You may be able to use apply/sapply to make it work with a list (saving as a list was slowing it down).
c = matrix(0, length(x), 100)
for (j in 1:length(x)) {
  a = -1
  b = 0
  for (i in 1:length(x[[j]])) {
    a = b
    b = b + x[[j]][i]
    # count y values strictly above the previous cumulative break and <= the current one
    c[j, i] = sum((a < y[[j]]) * (y[[j]] <= b))
  }
}
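If the output is needed as a list like in the question, one way (a small sketch I'm adding, which trims each row of c to the length of the corresponding element of x) is:
res <- lapply(seq_along(x), function(j) c[j, seq_along(x[[j]])])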
I am trying to apply functions over lagged/forwarded versions of a variable. I use data.table extensively and I even have working code, but knowing the power of data.table I think there must be a simpler way to achieve the same thing, possibly with better performance (I create a lot of variables inside the functions). Below is the working code of the functions (also available at https://gist.github.com/tomaskrehlik/5262087#file-gistfile1-r ):
# Lag-function lags the given variable by the date_variable
lag_variable <- function(data, variable, lags, date_variable = c("Date")) {
  if (lags == 0) {
    return(data)
  }
  if (lags>0) {
    name <- "lag"
  } else {
    name <- "forward"
  }
  require(data.table)
  setkeyv(data, date_variable)
  if (lags>0) {
    data[,index:=seq(1:.N)]
  } else {
    data[,index:=rev(seq(1:.N))]
  }
  setkeyv(data, "index")
  lags <- abs(lags)
  position <- which(names(data)==variable)
  for ( j in 1:lags ) {
    lagname <- paste(variable,"_",name,j,sep="")
    lag <- paste("data[, ",lagname,":=data[list(index-",j,"), ",variable,", roll=TRUE][[",position,"L]]]", sep = "")
    eval(parse( text = lag ))
  }
  setkeyv(data, date_variable)
  data[,index:=NULL]
}
# window_func applies the function to the lagged or forwarded variables created by lag_variable
window_func <- function(data, func.name, variable, direction = "window", steps, date_variable = c("Date"), clean = TRUE) {
  require(data.table)
  require(stringr)
  transform <- match.fun(func.name)
  l <- length(names(data))
  if (direction == "forward") {
    lag_variable(data, variable, -steps, date_variable)
    cols <- which((!(is.na(str_match(names(data), paste(variable,"_forward(",paste(1:steps,collapse="|"),")",sep=""))[,1])))*1==1)
  } else {
    if (direction == "backward") {
      lag_variable(data, variable, steps, date_variable)
      cols <- which((!(is.na(str_match(names(data), paste(variable,"_lag(",paste(1:steps,collapse="|"),")",sep=""))[,1])))*1==1)
    } else {
      if (direction == "window") {
        lag_variable(data, variable, -steps, date_variable)
        lag_variable(data, variable, steps, date_variable)
        cols <- which((!(is.na(str_match(names(data), paste(variable,"_lag(",paste(1:steps,collapse="|"),")",sep=""))[,1])))*1==1)
        cols <- c(cols, which((!(is.na(str_match(names(data), paste(variable,"_forward(",paste(1:steps,collapse="|"),")",sep=""))[,1])))*1==1))
      } else {
        stop("The direction must be either backward, forward or window.")
      }
    }
  }
  data[, transf := apply(data[, cols, with=FALSE], 1, transform)]
  if (clean) {
    data[, cols := NULL, with=FALSE]
  }
  return(data)
}
# Typical use:
# I have a data.table DT with variables Date (class IDate), value1, value2
# I want to get cumulative sum of next five days
# window_func(DT, "sum", "value1", direction = "forward", steps = 5)
Edit: Sample data can be created by:
a <- data.table(Date = 1:1000, value = rnorm(1000))
For each Date (which here is just an integer for the sake of the example; the exact type does not matter much), I want to compute the sum of the next ten observations. To run the code and get the output, do:
window_func(data = a, func.name = "sum", variable = "value",
direction = "forward", steps = 10, date_variable = "Date", clean = TRUE)
The function first takes the variable and creates ten lagged variables (using the lag_variable function), then applies the function across those columns for each row and cleans up after itself. The code is bloated because I sometimes need to apply functions only to lagged observations, sometimes to forward observations, and sometimes to both, which I call a window.
Any suggestions on how to implement this better? My code seems far too big for what it does.
I'm not sure about the rest of your function, but you can get your lagged sum rather efficiently as follows (here lag is the window length, e.g. 10 for the next ten observations):
a[ , lagSum :=
a[, list(sum=sum(value)), by=list(grp=(Date+lag-i) %/% lag)] [grp!=0, sum]
, by=list(i=Date %% lag)]
e.g.:
set.seed(1)
a <- data.table(Date = 1:1000, value = rnorm(1000))
lag <- 10
a[ , lagSum :=
a[, list(sum=sum(value)), by=list(grp=(Date+lag-i) %/% lag)] [grp!=0, sum]
, by=list(i=Date %% lag)]
> a
Date value lagSum
1: 1 -0.6264538 1.32202781
2: 2 0.1836433 3.46026279
3: 3 -0.8356286 3.66646270
4: 4 1.5952808 3.88085074
5: 5 0.3295078 0.07087005
---
996: 996 -0.3132929 -3.79332038
997: 997 -0.8806707 -3.48002750
998: 998 -0.4192869 -2.59935677
999: 999 -1.4827517 -2.18006988
1000: 1000 -0.6973182 -1.88854602
Confirming correct values:
# first n values
n <- 5
for (i in seq(n))
a[seq(i, length.out=10), print(sum(value))]
# [1] 1.322028
# [1] 3.460263
# [1] 3.666463
# [1] 3.880851
# [1] 0.07087005
BENCHMARKS (against for loop, so not quite fair)
set.seed(1)
a <- data.table(Date = 1:1000, value = rnorm(1000))
system.time({ a[ , lagSum :=
a[, list(sum=sum(value)), by=list(grp=(Date+lag-i) %/% lag)] [grp!=0, sum]
, by=list(i=Date %% lag)]
})
# user system elapsed
# 0.049 0.001 0.056
set.seed(1)
a <- data.table(Date = 1:1000, value = rnorm(1000))
system.time({ for (i in seq(nrow(a)-lag+1))
a[seq(i, length.out=10), lagSum := sum(value)]})
# user system elapsed
# 1.526 0.019 2.220
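As a side note (not part of the original answer, and assuming a data.table version recent enough to provide frollsum): the same forward rolling sum over ten observations can be written directly, given rows already ordered by Date:
# sum of the current value and the following nine (left-aligned window)
a[, lagSum2 := frollsum(value, n = 10, align = "left")]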