Speed up Tidyverse Calculation of Multiple Quantiles

I have this great little function, summarise_posterior (given below), as part of my package driver (available here).
The function is great and super useful. The one problem is that I have been working with larger and larger data and it can be very slow. In short, my question is: is there a tidyverse-esque way of speeding this up while still retaining the key flexibility of this function (see examples in the documentation)?
At least one key speed-up could come from computing the quantiles in a single call rather than calling the quantile function over and over; the current implementation is probably re-sorting the same vector again and again.
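(For reference, a single quantile() call accepts a vector of probabilities, so all five quantiles can come from one call and one sort; the snippet below only illustrates that call, it is not itself the fix:)
x <- rnorm(1e5)
quantile(x, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))  # five named quantiles from a single call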
#' Shortcut for summarize variable with quantiles and mean
#'
#' @param data tidy data frame
#' @param var variable name (unquoted) to be summarised
#' @param ... other expressions to pass to summarise
#'
#' @return data.frame
#' @export
#' @details Notation: \code{pX} refers to the \code{X}\% quantile
#' @import dplyr
#' @importFrom stats quantile
#' @importFrom rlang quos quo UQ
#' @examples
#' d <- data.frame("a"=sample(1:10, 50, TRUE),
#'                 "b"=rnorm(50))
#'
#' # Summarize posterior for b over grouping of a and also calculate
#' # minimum of b (in addition to normal statistics returned)
#' d <- dplyr::group_by(d, a)
#' summarise_posterior(d, b, mean.b = mean(b), min=min(b))
summarise_posterior <- function(data, var, ...){
  qvar <- enquo(var)
  qs <- quos(...)
  data %>%
    summarise(p2.5 = quantile(!!qvar, prob=0.025),
              p25 = quantile(!!qvar, prob=0.25),
              p50 = quantile(!!qvar, prob=0.5),
              mean = mean(!!qvar),
              p75 = quantile(!!qvar, prob=0.75),
              p97.5 = quantile(!!qvar, prob=0.975),
              !!!qs)
}
Rcpp back-end options are also more than welcome.
Thanks!

Here's a solution that makes use of nesting to avoid calling quantile multiple times. Any time you need to store a vector of results inside summarize, simply wrap it inside list. Afterwards, you can unnest these results, pair them up against their names, and use spread to put them in separate columns:
# needs dplyr, tidyr (unnest, spread), stringr (str_c) and rlang (ensym)
summarise_posterior2 <- function(data, var, ...){
  qvar <- ensym(var)
  vq <- c(0.025, 0.25, 0.5, 0.75, 0.975)
  summarise(data, .qq = list(quantile(!!qvar, vq, names=FALSE)),
            .nms = list(str_c("p", vq*100)), mean = mean(!!qvar), ...) %>%
    unnest %>% spread(.nms, .qq)
}
This doesn't give you nearly the same speed-up as @jay.sf's solution:
d <- data.frame("a"=sample(1:10, 5e5, TRUE), "b"=rnorm(5e5))
microbenchmark::microbenchmark( f1 = summarise_posterior(d, b, mean.b = mean(b), min=min(b)),
f2 = summarise_posterior2(d, b, mean.b = mean(b), min=min(b)) )
# Unit: milliseconds
# expr min lq mean median uq max neval
# f1 49.06697 50.81422 60.75100 52.43030 54.17242 200.2961 100
# f2 29.05209 29.66022 32.32508 30.84492 32.56364 138.9579 100
but it will work correctly with group_by and inside nested functions (whereas substitute-based solutions will break when nested).
r1 <- d %>% dplyr::group_by(a) %>% summarise_posterior(b, mean.b = mean(b), min=min(b))
r2 <- d %>% dplyr::group_by(a) %>% summarise_posterior2(b, mean.b = mean(b), min=min(b))
all_equal( r1, r2 ) # TRUE
If you profile the code, you can see where the major hang ups are
Rprof()
for( i in 1:100 )
d %>% dplyr::group_by(a) %>% summarise_posterior2(b, mean.b = mean(b), min=min(b))
Rprof(NULL)
summaryRprof()$by.self %>% head
# self.time self.pct total.time total.pct
# ".Call" 1.84 49.73 3.18 85.95
# "sort.int" 0.94 25.41 1.12 30.27
# "eval" 0.08 2.16 3.64 98.38
# "tryCatch" 0.08 2.16 1.44 38.92
# "anyNA" 0.08 2.16 0.08 2.16
# "structure" 0.04 1.08 0.08 2.16
The .Call corresponds mainly to the C++ backend of dplyr, while sort.int is the worker behind quantile(). @jay.sf's solution gains a major speedup by decoupling from dplyr, but it also loses the associated flexibility (e.g., integration with group_by). Ultimately, it's up to you to decide which is more important.
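(If you are willing to leave dplyr semantics behind but still want grouping, a data.table version is one middle ground; the sketch below is illustrative only, and dt_posterior is a made-up name, not part of either answer.)
library(data.table)
# illustrative sketch: grouped quantiles + mean via data.table
dt_posterior <- function(data, var, by = NULL) {
  vq <- c(0.025, 0.25, 0.5, 0.75, 0.975)
  as.data.table(data)[, {
    x <- .SD[[var]]                      # the column named by `var`
    q <- quantile(x, vq, names = FALSE)  # one call, one sort per group
    c(setNames(as.list(q), paste0("p", vq * 100)), list(mean = mean(x)))
  }, by = by, .SDcols = var]
}
dt_posterior(d, "b", by = "a")   # grouped result, comparable to the dplyr versions above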

Why not something like this?
summarise_posterior2 <- function(data, x, ...){
  x <- deparse(substitute(x))
  nm <- deparse(substitute(...))
  M <- matrix(unlist(data[, x]), ncol=length(data[, x]))
  qs <- t(sapply(list(...), do.call, list(M)))
  `rownames<-`(cbind(p2.5 = quantile(M, prob=0.025),
                     p25 = quantile(M, prob=0.25),
                     p50 = quantile(M, prob=0.5),
                     mean = mean(M),
                     p75 = quantile(M, prob=0.75),
                     p97.5 = quantile(M, prob=0.975), qs), NULL)
}
> summarise_posterior2(df1, X4, mean=mean, mean=mean, min=min)
p2.5 p25 p50 mean p75 p97.5 mean mean min
[1,] 28.2 30 32 32 34 35.8 32 32 28
# > summarise_posterior(df1, X4, mean.b = mean(X4), min=min(X4))
# p2.5 p25 p50 mean p75 p97.5 mean.b min
# 1 28.2 30 32 32 34 35.8 32 28
Runs six times faster:
> microbenchmark::microbenchmark(orig.fun=summarise_posterior(df1, X4, max(X4), min(X4)),
+ new.fun=summarise_posterior2(df1, X4, max=max, min=min))
Unit: microseconds
expr min lq mean median uq max neval
orig.fun 4289.541 4324.490 4514.1634 4362.500 4411.225 8928.316 100
new.fun 716.071 734.694 802.9949 755.867 778.317 4759.439 100
Data
df1 <- data.frame(matrix(1:144, 9, 16))
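(A side note on the trick above: sapply(list(...), do.call, list(M)) applies each function passed through ... to the full vector; a tiny standalone illustration:)
M <- 1:10
t(sapply(list(max = max, min = min), do.call, list(M)))
#      max min
# [1,]  10   1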


Looking for an apply, tidyr or dplyr solution to a nested for loop situation in R

Weirdly for this one, I think it's easier to start by viewing the df.
# reproducible data
quantiles <- c("50","90")
var = c("w","d")
df = data.frame(a=runif(20,0.01,.5), b=runif(20,0.02,.5), c=runif(20,0.03,.5), e=runif(20,0.04,.5),
                q50=runif(20,1,5), q90=runif(20,10,50))
head(df)
I want to automate a function that I've created (below) to calculate vars using different combinations of values from my df.
For example, the calculation of w needs to use a and b, and d needs to use c and e such that w = a *q ^ b and d = c * q ^ e. Further, q is a quantile, so I actually want w50, w90, etc., which will correspond to q50, q90 etc. from the df.
The tricky part as I see it is setting the condition to use a & b vs. c & e without using nested loops.
I have a function to calculate vars using the appropriate columns, however I can't get all the pieces together efficiently.
# function to calculate the w, d
calc_wd <- function(df, col_name, col1, col2, col3){
  # Calculate and create new column col_name for each combo of var and quantile, e.g. "w_50", "d_50", etc.
  df[[col_name]] <- df[[col1]] * (df[[col2]] ^ (df[[col3]]))
  df
}
I can get this to work for a single case, but not by automating the coefficient swap... you'll see I specify "a" and "b" below.
wd<-c("w_","d_")
make_wd_list<-apply(expand.grid(wd, quantiles), 1, paste,collapse="")
calc_wd(df, make_wd_list[1], "a", paste0("q", sapply(strsplit(make_wd_list[1], "_"), tail, 1)), "b")
Alternatively, I have tried to make this work using nested for loops, but can't seem to append the data correctly. And it's ugly.
var=c("w","d")
dataf<-data.frame()
for(j in unique(var)){
if(j=="w"){
coeff1="a"
coeff2="b"
}else if(j=="d"){
coeff1="c"
coeff1="e"
}
print(coeff1)
print(coeff2)
for(k in unique(quantiles)){
dataf<-calc_wd(df,paste0(j,k),coeff1,paste0("q",k),coeff2)
dataf[k,j]=rbind(df,dataf) #this aint right. tried to do.call outside, etc.
}
}
In the end, I'm looking to have new columns with w_50, w_90, etc., which use q50, q90 and the corresponding coefficients as defined originally.
One approach I find easy to type is using purrr::pmap. I like this because when you use with(list(...), ), you can access the columns of your data.frame by name. Additionally, you can supply extra arguments.
library(purrr)
pmap_df(df, quant = "q90", ~with(list(...),{
list(w = a * get(quant) ^ b, d = c * get(quant) ^ e)
}))
## A tibble: 20 x 2
# w d
# <dbl> <dbl>
# 1 0.239 0.295
# 2 0.152 0.392
# 3 0.476 0.828
# 4 0.344 0.236
# 5 0.439 1.00
You could combine this with, for example, a second map call to iterate over the quantiles.
library(dplyr)
map(setNames(quantiles,quantiles),
~ pmap_df(df, quant = paste0("q",.x),
~ with(list(...),{list(w = a * get(quant) ^ b, d = c * get(quant) ^ e)}))
) %>% do.call(cbind,.)
# 50.w 50.d 90.w 90.d
#1 0.63585897 0.11045837 1.7276019 0.1784987
#2 0.17286184 0.22033649 0.2333682 0.5200265
#3 0.32437528 0.72502654 0.5722203 1.4490065
#4 0.68020897 0.33797621 0.8749206 0.6179557
#5 0.73516886 0.38481785 1.2782923 0.4870877
Then assigning a custom function is trivial.
calcwd <- function(df, quantiles){
  map(setNames(quantiles, quantiles),
      ~ pmap_df(df, quant = paste0("q", .x),
                ~ with(list(...), {list(w = a * get(quant) ^ b, d = c * get(quant) ^ e)}))
  ) %>% do.call(cbind, .)
}
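For example, with the quantiles vector from the question, the call is just (a quick usage sketch):
quantiles <- c("50", "90")
result <- calcwd(df, quantiles)
head(result)   # columns 50.w, 50.d, 90.w, 90.d, as in the output above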
I love @Ian's answer for the completeness and the use of classics like with and do.call. I'm late to the scene with my solution, but since I have been trying to get better with rowwise operations (including the use of rowwise), I thought I would offer up a less elegant but simpler and faster solution using just mutate, formula.tools and map_dfc.
library(dplyr)
library(purrr)
require(formula.tools)
# same type example data plus a much larger version in df2 for
# performance testing
df <- data.frame(a = runif(20, 0.01, .5),
b = runif(20, 0.02, .5),
c = runif(20, 0.03, .5),
e = runif(20, 0.04, .5),
q50 = runif(20,1,5),
q90 = runif(20,10,50)
)
df2 <- data.frame(a = runif(20000, 0.01, .5),
b = runif(20000, 0.02, .5),
c = runif(20000, 0.03, .5),
e = runif(20000, 0.04, .5),
q50 = runif(20000,1,5),
q90 = runif(20000,10,50)
)
# from your original post
quantiles <- c("q50", "q90")
wd <- c("w_", "d_")
make_wd_list <- apply(expand.grid(wd, quantiles),
1,
paste, collapse = "")
make_wd_list
#> [1] "w_q50" "d_q50" "w_q90" "d_q90"
# an empty list to hold our formulas
eqn_list <- vector(mode = "list",
length = length(make_wd_list)
)
# populate the list makes it very extensible to more outcomes
# or to more quantile levels
for (i in seq_along(make_wd_list)) {
  if (substr(make_wd_list[[i]], 1, 1) == "w") {
    eqn_list[[i]] <- as.formula(paste(make_wd_list[[i]], "~ a * ", substr(make_wd_list[[i]], 3, 5), " ^ b"))
  } else if (substr(make_wd_list[[i]], 1, 1) == "d") {
    eqn_list[[i]] <- as.formula(paste(make_wd_list[[i]], "~ c * ", substr(make_wd_list[[i]], 3, 5), " ^ e"))
  }
}
# formula.tools helps us grab both left and right sides
add_column <- function(df, equation){
  df <- transmute_(df, rhs(equation))
  colnames(df)[ncol(df)] <- as.character(lhs(equation))
  return(df)
}
result <- map_dfc(eqn_list, ~ add_column(df = df, equation = .x))
#> w_q50 d_q50 w_q90 d_q90
#> 1 0.10580863 0.29136904 0.37839737 0.9014040
#> 2 0.34798729 0.35185585 0.64196417 0.4257495
#> 3 0.79714122 0.37242915 1.57594506 0.6198531
#> 4 0.56446922 0.43432160 1.07458217 1.1082825
#> 5 0.26896574 0.07374273 0.28557366 0.1678035
#> 6 0.36840408 0.72458466 0.72741030 1.2480547
#> 7 0.64484009 0.69464045 1.93290705 2.1663690
#> 8 0.43336109 0.21265672 0.46187366 0.4365486
#> 9 0.61340404 0.47528697 0.89286358 0.5383290
#> 10 0.36983212 0.53292900 0.53996112 0.8488402
#> 11 0.11278412 0.12532491 0.12486156 0.2413191
#> 12 0.03599639 0.25578020 0.04084221 0.3284659
#> 13 0.26308183 0.05322304 0.87057854 0.1817630
#> 14 0.06533586 0.22458880 0.09085436 0.3391683
#> 15 0.11625845 0.32995233 0.12749040 0.4730407
#> 16 0.81584442 0.07733376 2.15108243 0.1041342
#> 17 0.38198254 0.60263861 0.68082354 0.8502999
#> 18 0.51756058 0.43398089 1.06683204 1.3397900
#> 19 0.34490492 0.13790601 0.69168711 0.1580659
#> 20 0.39771037 0.33286225 1.32578056 0.4141457
microbenchmark::microbenchmark(result <- map_dfc(eqn_list, ~ add_column(df = df2, equation = .x)), times = 10)
#> Unit: milliseconds
#>                                                                expr      min       lq     mean  median       uq      max neval
#>  result <- map_dfc(eqn_list, ~add_column(df = df2, equation = .x)) 10.58004 11.34603 12.56774 11.6257 13.24273 16.91417    10
The mutate and formula solution is about fifty times faster, although both rip through 20,000 rows in less than a second.
Created on 2020-04-30 by the reprex package (v0.3.0)

repeated measures bootstrap stats, grouped by multiple factors

I have a data frame that looks like this, but obviously with many more rows etc:
df <- data.frame(id=c(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2),
cond=c('A', 'A', 'B', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B'),
comm=c('X', 'Y', 'X', 'Y', 'X', 'Y', 'X', 'Y','X', 'Y', 'X', 'Y', 'X', 'Y', 'X', 'Y'),
measure=c(0.8, 1.1, 0.7, 1.2, 0.9, 2.3, 0.6, 1.1, 0.7, 1.3, 0.6, 1.5, 1.0, 2.1, 0.7, 1.2))
So we have 2 factors (each with 2 levels, thus 4 combinations) and one continuous measure. We also have a repeated measures design in that we have multiple measures within each cell that correspond to the same id.
I've attempted to first solve the groupby issue, then the bootstrap issue, then combine the two, but am pretty much stuck...
Stats, grouped by the 2 factors
I can get multiple summary stats for each of the 4 cells by:
summary_stats <- aggregate(df$measure,
by = list(df$cond, df$comm),
function(x) c(mean = mean(x), median = median(x), sd = sd(x)))
print(summary_stats)
resulting in
Group.1 Group.2 x.mean x.median x.sd
1 A X 0.85000000 0.85000000 0.12909944
2 B X 0.65000000 0.65000000 0.05773503
3 A Y 1.70000000 1.70000000 0.58878406
4 B Y 1.25000000 1.20000000 0.17320508
This is great as we are getting multiple stats for each of the 4 cells.
But what I'd really like is the 95% bootstrap CI's, for each stat, for each of the 4 cells. I don't mind if I have to run a final solution once per statistic (e.g. mean, median, etc.), but bonus points for doing it all in one go.
Bootstrap for repeated measures
Can't quite make this work, but what I want is 95% bootstrap CI's, done in a way which is appropriate for this repeated measures design. Unless I'm mistaken, I want to select bootstrap samples on the basis of id (not on the basis of rows of the dataframe), then calculate a summary measure (e.g. mean) for each of the 4 cells.
library(boot)
myfunc <- function(data, indices) {
# select bootstrap sample to index into `id`
d <- data[data$id==indicies,]
return(c(mean=mean(d), median=median(d), sd = sd(d)))
}
bresults <- boot(data = CO2$uptake, statistic = myfunc, R = 1000)
Q1: I'm getting errors in selecting the bootstrap sample by id, i.e. the line d <- data[ data$id==indicies, ]
Combining bootstrap and the groupby 2 factors
Q2: I have no intuition of how to gel the two approaches together to achieve the final desired result. My only idea is to put the aggregate call in myfunc, to repeatedly calculate cell stats under each bootstrap replicate, but I'm out of my comfort zone with R here.
With your two questions, you have two issues:
How to bootstrap (resample) your data in such a way that you resample based on id, rather than rows
How to perform separate bootstraps for the four groups in your 2x2 design
One easy way to do this would be by using the following packages (all part of the tidyverse):
dplyr for manipulating your data (in particular, summarising the data you have for each id) and also for the neat %>% forward pipe operator, which supplies the result of an expression as the first argument to the next expression so you can chain commands
broom for doing an operation for each group in your dataframe
boot (which you already use) for the bootstrapping
Load the packages:
library(dplyr)
library(broom)
library(boot)
First of all, to make sure that when we resample, we either include a subject as a whole or not at all, I would save the various values each subject has as a list:
df <- df %>%
group_by(id, cond, comm) %>%
summarise(measure=list(measure)) %>%
ungroup()
Now the dataframe has fewer rows (4 per ID), and the variable measure is not numeric anymore (instead, it's a list). This means we can just use the indices that boot provides (solving issue 1), but also that we'll have to "unlist" it when we actually want to do calculations with it, so your function now becomes:
myfunc <- function(data, indices) {
data <- data[indices,]
return(c(mean=mean(unlist(data$measure)),
median=median(unlist(data$measure)),
sd = sd(unlist(data$measure))))
}
Now that we can simply use boot to resample each row, we can think about how to do it neatly per group. This is where the broom package comes in: together with dplyr's do(), you can run an operation for each group in your data frame and store the results in a tidy dataframe, with a row for each of your groups and a column for the values that your function produces. So we simply group the dataframe again, and then call do(tidy(...)), with a . instead of the name of our variable. This hopefully solves issue 2 for you!
bootresults <- df %>%
group_by(cond, comm) %>%
do(tidy(boot(data = ., statistic = myfunc, R = 1000)))
This produces:
# Groups: cond, comm [4]
cond comm term statistic bias std.error
<fctr> <fctr> <chr> <dbl> <dbl> <dbl>
1 A X mean 0.85000000 0.000000000 5.280581e-17
2 A X median 0.85000000 0.000000000 5.652979e-17
3 A X sd 0.12909944 -0.004704999 4.042676e-02
4 A Y mean 1.70000000 0.000000000 1.067735e-16
5 A Y median 1.70000000 0.000000000 1.072347e-16
6 A Y sd 0.58878406 -0.005074338 7.888294e-02
7 B X mean 0.65000000 0.000000000 0.000000e+00
8 B X median 0.65000000 0.000000000 0.000000e+00
9 B X sd 0.05773503 0.000000000 0.000000e+00
10 B Y mean 1.25000000 0.001000000 7.283065e-02
11 B Y median 1.20000000 0.027500000 7.729634e-02
12 B Y sd 0.17320508 -0.030022214 5.067446e-02
Hopefully this is what you'd like to see!
If you want to then use the values from this dataframe a bit more, you can use other dplyr functions to select which rows in this table you look at. For example, to look at the bootstrapped standard error of the standard deviation of your measure for condition A / X, you can do the following:
bootresults %>% filter(cond=='A', comm=='X', term=='sd') %>% pull(std.error)
I hope that helps!
For a bootstrap with a cluster variable, here's a solution without additional packages. I didn't use the boot package though.
Part 1: Bootstrap
This function draws a random sample from a set of clustered observations.
.clusterSample <- function(x, id){
  # draw cluster ids with replacement, then stack all rows belonging to each drawn id
  boot.id <- sample(unique(id), replace=T)
  out <- lapply(boot.id, function(i) x[id %in% i, ])
  return( do.call("rbind", out) )
}
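As a quick sanity check (a usage sketch with the df from the question), ids are kept or dropped as whole blocks:
set.seed(1)
boot.df <- .clusterSample(df, df$id)
table(boot.df$id)   # counts come in multiples of 8 because each id is resampled whole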
Part 2: Bootstrap estimates and CIs
The next function draws multiple samples and applies the same aggregate statement to each of them. The bootstrap estimates and CIs are then obtained by mean and quantile.
clusterBoot <- function(data, formula, cluster, R=1000, alpha=.05, FUN){
  # cluster variable
  cls <- model.matrix(cluster, data)[,2]
  template <- aggregate(formula, .clusterSample(data, cls), FUN)
  var <- which( names(template)==all.vars(formula)[1] )
  grp <- template[, -var, drop=F]
  val <- template[, var]
  x <- vapply( 1:R, FUN=function(r) aggregate(formula, .clusterSample(data, cls), FUN)[, var],
               FUN.VALUE=val )
  if(is.vector(x)) dim(x) <- c(1,1,length(x))
  if(is.matrix(x)) dim(x) <- c(nrow(x),1,ncol(x))
  # bootstrap estimates
  est <- apply( x, 1:2, mean )
  lo  <- apply( x, 1:2, function(i) quantile(i, alpha/2) )
  up  <- apply( x, 1:2, function(i) quantile(i, 1-alpha/2) )
  colnames(lo) <- paste0(colnames(lo), ".lo")
  colnames(up) <- paste0(colnames(up), ".up")
  return( cbind(grp, est, lo, up) )
}
Note the use of vapply. I use it because I prefer working with arrays over lists. Note also that I used the formula interface to aggregate, which I also like better.
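(A tiny illustration of that choice: vapply checks every result against FUN.VALUE, so the output shape is guaranteed rather than merely simplified when possible.)
stats_for <- function(i) quantile(rnorm(10), c(.25, .75))
sapply(1:3, stats_for)                           # happens to simplify to a 2 x 3 matrix
vapply(1:3, stats_for, FUN.VALUE = numeric(2))   # enforced: always a 2 x 3 numeric matrix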
Part 3: Examples
It can be used with any kind of stats, basically, even without grouping variables. Some examples include:
myStats <- function(x) c(mean = mean(x), median = median(x), sd = sd(x))
clusterBoot(data=df, formula=measure~cond+comm, cluster=~id, R=10, FUN=myStats)
# cond comm mean median sd mean.lo median.lo sd.lo mean.up median.up sd.up
# 1 A X 0.85 0.850 0.11651125 0.85 0.85 0.05773503 0.85 0.85 0.17320508
# 2 B X 0.65 0.650 0.05773503 0.65 0.65 0.05773503 0.65 0.65 0.05773503
# 3 A Y 1.70 1.700 0.59461417 1.70 1.70 0.46188022 1.70 1.70 0.69282032
# 4 B Y 1.24 1.215 0.13856406 1.15 1.15 0.05773503 1.35 1.35 0.17320508
clusterBoot(data=df, formula=measure~cond+comm, cluster=~id, R=10, FUN=mean)
# cond comm est .lo .up
# 1 A X 0.85 0.85 0.85
# 2 B X 0.65 0.65 0.65
# 3 A Y 1.70 1.70 1.70
# 4 B Y 1.25 1.15 1.35
clusterBoot(data=df, formula=measure~1, cluster=~id, R=10, FUN=mean)
# est .lo .up
# 1 1.1125 1.0875 1.1375

Large raster frequency table / counts

I am trying to calculate the frequency/count of pixel values of a raster in R using freq().
Create two example rasters for comparison:
library(raster)
RastSmall <- raster(nrow=70, ncol=70)
RastBig <- raster(nrow=7000, ncol=7000)
set.seed(0)
RastSmall[] <- round(runif(ncell(RastSmall), 1, 5))
RastBig[] <- round(runif(ncell(RastBig), 1, 5))
Get the pixel count using freq()
freq(RastBig)
value count
[1,] 1 6540000
[2,] 2 12150000
[3,] 3 12140000
[4,] 4 11720000
[5,] 5 6450000
However, it is a fairly large file and takes extremely long, i.e. up to hours. Is there a faster way in R?
Here the speed difference for a small and a large raster:
system.time(freq(RastSmall))
user system elapsed
0.008 0.000 0.004
system.time(freq(RastBig))
user system elapsed
40.484 0.964 41.445
Is there a way to speed this up? Alternatively, can this be done from the command line using something like the GDAL tools?
I did exactly that last week; however, I couldn't find any faster way to do it in R. I tried to do it with the RQGIS package by calling r.report from GRASS. It works, but it was slower than the native R function. Maybe you'll have better luck. Here is my code with GRASS in case you want to try it:
library(RQGIS)
monqgis <- set_env("C:\\Mrnmicro\\Applic\\OSGeo4W")
find_algorithms(search_term = "report", qgis_env = monqgis)
get_usage(alg = "grass7:r.report", qgis_env = monqgis)
params <- get_args_man(alg = "grass7:r.report", qgis_env = monqgis)
get_usage(alg = "grass7:r.report", qgis_env = monqgis)
params$map <- classif # classif is my own classified raster layer (not defined in this snippet)
params$units <- 5
params$rawoutput <- "C:\\temp\\outputRQGIS_raw"
params$html <- "C:\\temp\\outputRQGIS"
system.time(asas <- run_qgis(alg = "grass7:r.report", params=params,load_output = params$OUTPUT, qgis_env = monqgis))
Not an amazing saving, but if you getValues() from your raster and then run the base::table function, it saves about 20%. My raster was about 500 million cells.
# read in raster to obtain frequency table
r <- raster("./path/myraster.tif")
# perform tests; traditional freq() vs. getValues() & table()
require(microbenchmark)
mbm <- microbenchmark(
Freq = {freqf <- freq(r,useNA="no");
freq.df <- data.frame(CODE=freqf[,1], N=freqf[,2])},
GetVals = {v <- getValues(r);
vt <- table(v);
getval.df <- data.frame(CODE=as.numeric(names(vt)),N=as.numeric(as.matrix(vt)))},
times=5
)
mbm
Unit: seconds
expr min lq mean median uq max neval
Freq 191.1649 191.8001 198.8567 192.5256 193.0986 225.6942 5
GetVals 153.5552 154.8776 156.9173 157.0539 159.0400 160.0598 5
# check the routines have identical results
identical(freq.df,getval.df)
[1] TRUE
Bit of a saving, I guess.
(N.B. the reason i make the data frames is that I go on to process the data that comes out of the frequency analysis)
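(As an aside, when the cell values are known to be small positive integer codes, base::tabulate avoids table()'s factor handling and can shave off a bit more. A sketch under that assumption, not benchmarked here:)
v <- getValues(r)          # raw cell values; tabulate() silently ignores NAs
counts <- tabulate(v)      # assumes codes are positive integers starting at 1
data.frame(CODE = which(counts > 0), N = counts[counts > 0])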
I think the most effective way to calculate that is by using GetHistogram() from GDAL. Unfortunately, I can't find a way to use it from R. The closest approach is using gdalUtilities::gdalinfo from R with the flag -hist, or hist = TRUE, but the calculation is limited to values between 0 and 255.
Another option is using rasterDT::freqDT, which is faster than regular options. Here is an example:
library(gdalUtilities)
library(raster)
library(rasterDT)
library(microbenchmark)
RastBig <- raster(nrow=7000, ncol=7000)
set.seed(0)
RastBig[] <- round(runif(1:ncell(RastBig), 1, 5))
writeRaster(RastBig, filename = 'C:/temp/RastBig.tif')
mbm <- microbenchmark(times = 50,
freq1 = freq(RastBig),
freq2 = table(RastBig[]),
freq3 = freqDT(RastBig),
freq4 = ({
gdalLog <- capture.output(gdalUtilities::gdalinfo(datasetname = 'C:/temp/RastBig.tif', hist = TRUE));
(bucxml <- as.numeric(sub('buckets.+', '', grep('buckets ', gdalLog, value = TRUE))));
(minxml <- as.numeric(gsub('.+from | to.+', '', grep('buckets ', gdalLog, value = TRUE)) ));
(maxxml <- as.numeric(gsub('.+to |:', '', grep('buckets ', gdalLog, value = TRUE))));
(histxml <- as.numeric(strsplit(split = '[[:space:]]', gsub("^ |^ ", "", gdalLog[grep('buckets', gdalLog)+1]))[[1]]));
labs <- seq(from = minxml, to = maxxml, length.out = bucxml);
df <- data.frame(labs, nwlab = c(ceiling(labs[1]),
round(labs[2:(bucxml-1)]),
floor(labs[bucxml])),
val = histxml);
hist <- aggregate(df$val, by = list(df$nwlab), sum)})
)
Results:
> freq1
value count
[1,] 1 6127755
[2,] 2 12251324
[3,] 3 12249376
[4,] 4 12248938
[5,] 5 6122607
> freq2
1 2 3 4 5
6127755 12251324 12249376 12248938 6122607
> freq3
ID freq
1: 1 6127755
2: 2 12251324
3: 3 12249376
4: 4 12248938
5: 5 6122607
> freq4
Group.1 x
1 1 6127755
2 2 12251324
3 3 12249376
4 4 12248938
5 5 6122607
Unit: milliseconds
expr min lq mean median uq max neval
freq1 58628.486301 59100.539302 59400.304887 59383.913701 59650.412 60841.3975 50
freq2 55912.170401 56663.025202 56954.032395 56919.905051 57202.001 58307.9500 50
freq3 3785.767301 4006.858102 4288.699531 4292.447250 4536.382 4996.0598 50
freq4 7.892201 8.883102 9.255641 9.154001 9.483 15.6072 50
EDIT: using this (reading the raster back from disk and then running freqDT on it) is quite a bit faster than option 3:
rB <- raster('C:/temp/RastBig.tif')
freq3B <- freqDT(rB)

Fastest by column sort in R

I have a data frame full from which I want to take the last column and a column v. I then want to sort both columns on v in the fastest way possible. full is read in from a csv but this can be used for testing (included some NAs for realism):
n <- 200000
full <- data.frame(A = runif(n, 1, 10000), B = floor(runif(n, 0, 1.9)))
full[sample(n, 10000), 'A'] <- NA
v <- 1
I have v as one here, but in reality it could change, and full has many columns.
I have tried sorting data frames, data tables and matrices each with order and sort.list (some ideas taken from this thread). The code for all these:
# DATA FRAME
ord_df <- function() {
  a <- full[c(v, length(full))]
  a[with(a, order(a[1])), ]
}
sl_df <- function() {
  a <- full[c(v, length(full))]
  a[sort.list(a[[1]]), ]
}
# DATA TABLE
require(data.table)
ord_dt <- function() {
  a <- as.data.table(full[c(v, length(full))])
  colnames(a)[1] <- 'values'
  a[order(values)]
}
sl_dt <- function() {
  a <- as.data.table(full[c(v, length(full))])
  colnames(a)[1] <- 'values'
  a[sort.list(values)]
}
# MATRIX
ord_mat <- function() {
  a <- as.matrix(full[c(v, length(full))])
  a[order(a[, 1]), ]
}
sl_mat <- function() {
  a <- as.matrix(full[c(v, length(full))])
  a[sort.list(a[, 1]), ]
}
Time results:
ord_df sl_df ord_dt sl_dt ord_mat sl_mat
Min. 0.230 0.1500 0.1300 0.120 0.140 0.1400
Median 0.250 0.1600 0.1400 0.140 0.140 0.1400
Mean 0.244 0.1610 0.1430 0.136 0.142 0.1450
Max. 0.250 0.1700 0.1600 0.140 0.160 0.1600
Or using microbenchmark (results are in milliseconds):
min lq median uq max
1 ord_df() 243.0647 248.2768 254.0544 265.2589 352.3984
2 ord_dt() 133.8159 140.0111 143.8202 148.4957 181.2647
3 ord_mat() 140.5198 146.8131 149.9876 154.6649 191.6897
4 sl_df() 152.6985 161.5591 166.5147 171.2891 194.7155
5 sl_dt() 132.1414 139.7655 144.1281 149.6844 188.8592
6 sl_mat() 139.2420 146.8578 151.6760 156.6174 186.5416
Seems like ordering the data table wins. There isn't all that much difference between order and sort.list except when using data frames where sort.list is much faster.
In the data table versions I also tried setting v as the key (since the table is then sorted, according to the documentation), but I couldn't get it to work since the contents of that column are not integer.
I would ideally like to speed this up as much as possible since I have to do it many times for different v values. Does anyone know how I might be able to speed this process up even further? Also might it be worth trying an Rcpp implementation? Thanks.
Here's the code I used for timing if it's useful to anyone:
sortMethods <- list(ord_df, sl_df, ord_dt, sl_dt, ord_mat, sl_mat)
require(plyr)
timings <- raply(10, sapply(sortMethods, function(x) system.time(x())[[3]]))
colnames(timings) <- c('ord_df', 'sl_df', 'ord_dt', 'sl_dt', 'ord_mat', 'sl_mat')
apply(timings, 2, summary)
require(microbenchmark)
mb <- microbenchmark(ord_df(), sl_df(), ord_dt(), sl_dt(), ord_mat(), sl_mat())
plot(mb)
I don't know if it's better to put this sort of thing in as an edit, but it seems more like an answer, so here will do. Updated test functions:
n <- 1e7
full <- data.frame(A = runif(n, 1, 10000), B = floor(runif(n, 0, 1.9)))
full[sample(n, 100000), 'A'] <- NA
fdf <- full
fma <- as.matrix(full)
fdt <- as.data.table(full)
setnames(fdt, colnames(fdt)[1], 'values')
# DATA FRAME
ord_df <- function() { fdf[order(fdf[1]), ] }
sl_df <- function() { fdf[sort.list(fdf[[1]]), ] }
# DATA TABLE
require(data.table)
ord_dt <- function() { fdt[order(values)] }
key_dt <- function() {
  setkey(fdt, values)
  fdt
}
# MATRIX
ord_mat <- function() { fma[order(fma[, 1]), ] }
sl_mat <- function() { fma[sort.list(fma[, 1]), ] }
Results (using a different computer, R 2.13.1 and data.table 1.8.2):
ord_df sl_df ord_dt key_dt ord_mat sl_mat
Min. 37.56 20.86 2.946 2.249 20.22 20.21
1st Qu. 37.73 21.15 2.962 2.255 20.54 20.59
Median 38.43 21.74 3.002 2.280 21.05 20.82
Mean 38.76 21.75 3.074 2.395 21.09 20.95
3rd Qu. 39.85 22.18 3.151 2.445 21.48 21.42
Max. 40.36 23.08 3.330 2.797 22.41 21.84
So data.table is the clear winner. Using a key is faster than ordering, and has a nicer syntax as well I'd argue. Thanks for the help everyone.

quick/elegant way to construct mean/variance summary table

I can achieve this task, but I feel like there must be a "best" (slickest, most compact, clearest-code, fastest?) way of doing it and have not figured it out so far ...
For a specified set of categorical factors I want to construct a table of means and variances by group.
generate data:
set.seed(1001)
d <- expand.grid(f1=LETTERS[1:3],f2=letters[1:3],
f3=factor(as.character(as.roman(1:3))),rep=1:4)
d$y <- runif(nrow(d))
d$z <- rnorm(nrow(d))
desired output:
f1 f2 f3 y.mean y.var
1 A a I 0.6502307 0.09537958
2 A a II 0.4876630 0.11079670
3 A a III 0.3102926 0.20280568
4 A b I 0.3914084 0.05869310
5 A b II 0.5257355 0.21863126
6 A b III 0.3356860 0.07943314
... etc. ...
using aggregate/merge:
library(reshape)
m1 <- aggregate(y~f1*f2*f3,data=d,FUN=mean)
m2 <- aggregate(y~f1*f2*f3,data=d,FUN=var)
mvtab <- merge(rename(m1,c(y="y.mean")),
rename(m2,c(y="y.var")))
using ddply/summarise (possibly best but haven't been able to make it work):
mvtab2 <- ddply(subset(d,select=-c(z,rep)),
.(f1,f2,f3),
summarise,numcolwise(mean),numcolwise(var))
results in
Error in output[[var]][rng] <- df[[var]] :
incompatible types (from closure to logical) in subassignment type fix
using melt/cast (maybe best?)
mvtab3 <- cast(melt(subset(d,select=-c(z,rep)),
id.vars=1:3),
...~.,fun.aggregate=c(mean,var))
## now have to drop "variable"
mvtab3 <- subset(mvtab3,select=-variable)
## also should rename response variables
Won't (?) work in reshape2. Explaining ...~. to someone could be tricky!
Here is a solution using data.table
library(data.table)
d2 = data.table(d)
ans = d2[,list(avg_y = mean(y), var_y = var(y)), 'f1, f2, f3']
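A small usage note (not part of the original answer): keyby does the same grouping but also sorts the result by the grouping columns and sets them as the key:
library(data.table)
d2 <- data.table(d)
ans <- d2[, list(avg_y = mean(y), var_y = var(y)), keyby = list(f1, f2, f3)]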
I'm a bit puzzled. Does this not work:
mvtab2 <- ddply(d,.(f1,f2,f3),
summarise,y.mean = mean(y),y.var = var(y))
This gives me something like this:
f1 f2 f3 y.mean y.var
1 A a I 0.6502307 0.095379578
2 A a II 0.4876630 0.110796695
3 A a III 0.3102926 0.202805677
4 A b I 0.3914084 0.058693103
5 A b II 0.5257355 0.218631264
Which is in the right form, but it looks like the values are different from what you specified.
Edit
Here's how to make your version with numcolwise work:
mvtab2 <- ddply(subset(d,select=-c(z,rep)),.(f1,f2,f3),summarise,
y.mean = numcolwise(mean)(piece),
y.var = numcolwise(var)(piece))
You forgot to pass the actual data to numcolwise. And then there's the little ddply trick that each piece is called piece internally. (Which Hadley points out in the comments shouldn't be relied upon as it may change in future versions of plyr.)
(I voted for Joshua's.) Here's an Hmisc::summary.formula solution. The advantage of this for me is that it is well integrated with the Hmisc::latex output "channel".
summary(y ~ interaction(f3,f2,f1), data=d, method="response",
fun=function(y) c(mean.y=mean(y) ,var.y=var(y) ))
#-----output----------
y N=108
+-----------------------+-------+---+---------+-----------+
| | |N |mean.y |var.y |
+-----------------------+-------+---+---------+-----------+
|interaction(f3, f2, f1)|I.a.A | 4|0.6502307|0.095379578|
| |II.a.A | 4|0.4876630|0.110796695|
(Output snipped; the original answer shows the latex -> PDF -> png rendering as an image.)
@joran is spot-on with the ddply answer. Here's how I would do it with aggregate. Note that I avoid the formula interface (it is slower).
aggregate(d$y, d[,c("f1","f2","f3")], FUN=function(x) c(mean=mean(x),var=var(x)))
I'm slightly addicted to speed comparisons even though they're largely irrelevant for me in this situation ...
joran_ddply <- function(d) ddply(d,.(f1,f2,f3),
summarise,y.mean = mean(y),y.var = var(y))
joshulrich_aggregate <- function(d) {
aggregate(d$y, d[,c("f1","f2","f3")],
FUN=function(x) c(mean=mean(x),var=var(x)))
}
formula_aggregate <- function(d) {
aggregate(y~f1*f2*f3,data=d,
FUN=function(x) c(mean=mean(x),var=var(x)))
}
library(data.table)
d2 <- data.table(d)
ramnath_datatable <- function(d) {
d[,list(avg_y = mean(y), var_y = var(y)), 'f1, f2, f3']
}
library(Hmisc)
dwin_hmisc <- function(d) {summary(y ~ interaction(f3,f2,f1),
data=d, method="response",
fun=function(y) c(mean.y=mean(y) ,var.y=var(y) ))
}
library(rbenchmark)
benchmark(joran_ddply(d),
joshulrich_aggregate(d),
ramnath_datatable(d2),
formula_aggregate(d),
dwin_hmisc(d))
aggregate is fastest (even faster than data.table, which is a surprise to me, although things might be different with a bigger table to aggregate), even using the formula interface ...
test replications elapsed relative user.self sys.self
5 dwin_hmisc(d) 100 1.235 2.125645 1.168 0.044
4 formula_aggregate(d) 100 0.703 1.209983 0.656 0.036
1 joran_ddply(d) 100 3.345 5.757315 3.152 0.144
2 joshulrich_aggregate(d) 100 0.581 1.000000 0.596 0.000
3 ramnath_datatable(d2) 100 0.750 1.290878 0.708 0.000
(Now I just need Dirk to step up and post an Rcpp solution that is 1000 times faster than anything else ...)
I find the doBy package has some very convenient functions for things like this. For example, the function ?summaryBy is quite handy. Consider:
> summaryBy(y~f1+f2+f3, data=d, FUN=c(mean, var))
f1 f2 f3 y.mean y.var
1 A a I 0.6502307 0.095379578
2 A a II 0.4876630 0.110796695
3 A a III 0.3102926 0.202805677
4 A b I 0.3914084 0.058693103
5 A b II 0.5257355 0.218631264
6 A b III 0.3356860 0.079433136
7 A c I 0.3367841 0.079487973
8 A c II 0.6273320 0.041373836
9 A c III 0.4532720 0.022779672
10 B a I 0.6688221 0.044184575
11 B a II 0.5514724 0.020359289
12 B a III 0.6389354 0.104056229
13 B b I 0.5052346 0.138379070
14 B b II 0.3933283 0.050261804
15 B b III 0.5953874 0.161943989
16 B c I 0.3490460 0.079286849
17 B c II 0.5534569 0.207381592
18 B c III 0.4652424 0.187463143
19 C a I 0.3340988 0.004994589
20 C a II 0.3970315 0.126967554
21 C a III 0.3580250 0.066769484
22 C b I 0.7676858 0.124945402
23 C b II 0.3613772 0.182689385
24 C b III 0.4175562 0.095933470
25 C c I 0.3592491 0.039832864
26 C c II 0.7882591 0.084271963
27 C c III 0.3936949 0.085758343
So the function call is simple, easy to use, and I would say, elegant.
Now, if your primary concern is speed, summaryBy seems to hold up reasonably well--at least with smaller sized tasks (note that I couldn't get the ramnath_datatable function to work for whatever reason):
test replications elapsed relative user.self
4 dwin_hmisc(d) 100 0.50 2.778 0.50
3 formula_aggregate(d) 100 0.23 1.278 0.24
5 gung_summaryBy(d) 100 0.34 1.889 0.35
1 joran_ddply(d) 100 1.34 7.444 1.32
2 joshulrich_aggregate(d) 100 0.18 1.000 0.19
I came across this question and found that the benchmarks were done with small tables, so it's hard to tell which method is really better with only 100 rows.
I also modified the data a bit to make it "unsorted"; this is a more common case, for example when the data comes from a database.
I added a few more data.table trials to see whether setting a key beforehand is faster. It seems that setting the key beforehand doesn't improve performance much, so ramnath's solution appears to be the fastest.
set.seed(1001)
d <- data.frame(f1 = sample(LETTERS[1:3], 30e5, replace = T), f2 = sample(letters[1:3], 30e5, replace = T),
f3 = sample(factor(as.character(as.roman(1:3))), 30e5, replace = T), rep = sample(1:4, replace = T))
d$y <- runif(nrow(d))
d$z <- rnorm(nrow(d))
str(d)
require(Hmisc)
require(plyr)
require(data.table)
d2 = data.table(d)
d3 = data.table(d)
# Set key of d3 to compare how fast it is if the DT is already keyded
setkey(d3,f1,f2,f3)
joran_ddply <- function(d) ddply(d,.(f1,f2,f3),
summarise,y.mean = mean(y),y.var = var(y))
formula_aggregate <- function(d) {
aggregate(y~f1*f2*f3,data=d,
FUN=function(x) c(mean=mean(x),var=var(x)))
}
ramnath_datatable <- function(d) {
d[,list(avg_y = mean(y), var_y = var(y)), 'f1,f2,f3']
}
key_agg_datatable <- function(d) {
setkey(d2,f1,f2,f3)
d[,list(avg_y = mean(y), var_y = var(y)), 'f1,f2,f3']
}
one_key_datatable <- function(d) {
setkey(d2,f1)
d[,list(avg_y = mean(y), var_y = var(y)), 'f1,f2,f3']
}
including_3key_datatable <- function(d) {
d[,list(avg_y = mean(y), var_y = var(y)), 'f1,f2,f3']
}
dwin_hmisc <- function(d) {summary(y ~ interaction(f3,f2,f1),
data=d, method="response",
fun=function(y) c(mean.y=mean(y) ,var.y=var(y) ))
}
require(rbenchmark)
benchmark(joran_ddply(d),
joshulrich_aggregate(d),
ramnath_datatable(d2),
including_3key_datatable(d3),
one_key_datatable(d2),
key_agg_datatable(d2),
formula_aggregate(d),
dwin_hmisc(d)
)
# test replications elapsed relative user.self sys.self
# dwin_hmisc(d) 100 1757.28 252.121 1590.89 165.65
# formula_aggregate(d) 100 433.56 62.204 390.83 42.50
# including_3key_datatable(d3) 100 7.00 1.004 6.02 0.98
# joran_ddply(d) 100 173.39 24.877 119.35 53.95
# joshulrich_aggregate(d) 100 328.51 47.132 307.14 21.22
# key_agg_datatable(d2) 100 24.62 3.532 19.13 5.50
# one_key_datatable(d2) 100 29.66 4.255 22.28 7.34
# ramnath_datatable(d2) 100 6.97 1.000 5.96 1.01
And here is a solution using Hadley Wickham's new dplyr library.
library(dplyr)
d %>% group_by(f1, f2, f3) %>%
summarise(y.mean = mean(y), z.mean = mean(z))
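To match the y.mean/y.var columns in the desired output, the same pattern extends directly (a minor variation on the call above):
library(dplyr)
d %>% group_by(f1, f2, f3) %>%
  summarise(y.mean = mean(y), y.var = var(y))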
