R microbenchmarking: loop vs vector vs parallel is yielding inverted results

I'm having a weird issue and can't identify the root cause. I wrote a simple for-loop version, a vectorized version, and a parallel version of a bootstrap, and I'm running them in a Jupyter environment.
library(parallel) # provides mclapply, used by fn_par

fn_loop <- function(x1, nrep = 1000) {
  n1 <- length(x1)
  B <- nrep
  Tboot <- rep(NA, B)
  for (i in 1:B) {
    xx1 <- sample(x1, n1, replace = TRUE) # sample of size n1 with replacement from x1
    Tboot[i] <- mean(xx1)
  }
  return(Tboot)
}

fn_vec <- function(x1, nrep = 1000) {
  n1 <- length(x1)
  Tboot <- lapply(1:nrep, function(i) { mean(sample(x1, n1, replace = TRUE)) })
  return(Tboot)
}

fn_par <- function(x1, nrep = 1000) {
  n1 <- length(x1)
  Tboot <- mclapply(1:nrep, function(i) { mean(sample(x1, n1, replace = TRUE)) },
                    mc.cores = 8)
  return(Tboot)
}
# Benchmark the for-loop, vectorized, and parallel versions
library(microbenchmark)
# x1 is assumed to be a numeric data vector defined earlier in the session
m <- microbenchmark(times = 1000, # default is 100
                    "loop"     = fn_loop(x1, 1000),
                    "vector"   = fn_vec(x1, 1000),
                    "parallel" = fn_par(x1, 1000))
ggplot2::autoplot(m)
The results in the ggplot2 chart are inverted... the loop takes the least amount of time compared to the parallel version. Can someone please point out my mistake? Thanks.
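For context, this ordering is usually not a bug. Each mean(sample(...)) call takes only microseconds, so the cost of forking workers and collecting their results in mclapply dwarfs the work being distributed; on Windows, mc.cores > 1 is not even supported, and some Jupyter/IRkernel setups handle forking poorly. A standalone sketch of the effect, assuming a POSIX system where mclapply can fork: parallelism only pays off once each task is made artificially expensive.
library(parallel)
library(microbenchmark)

cheap  <- function(i) mean(sample(1:100, 100, replace = TRUE))  # microseconds of work
costly <- function(i) { Sys.sleep(0.01); cheap(i) }             # ~10 ms of work per task

microbenchmark(times = 5,
               serial_cheap    = lapply(1:50, cheap),
               parallel_cheap  = mclapply(1:50, cheap, mc.cores = 8),
               serial_costly   = lapply(1:50, costly),
               parallel_costly = mclapply(1:50, costly, mc.cores = 8))
# typical pattern: parallel_cheap is slower than serial_cheap (fork and
# collection overhead dominates), while parallel_costly wins clearly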

Related

Cannot make sense of the error while using OptimParallel in R

I'm trying to run the function below using optimParallel in R on a certain data set. The code is as follows:
install.packages("optimParallel")
library(parallel)  # base package; no install.packages() needed
library(optimParallel)
library(doParallel)
library(data.table)
library(Rlab)
library(HDInterval)
library(mvtnorm)
library(matrixStats)
library(dplyr)
library(cold)
## Bolus data:
data("bolus")
d1 <- bolus
d1$group <- ifelse(d1$group == "2mg",1,0)
colnames(d1) <- c("index",'group',"time","y")
d2 <- d1 %>% select(index, y, group, time)
colnames(d2) <- c('index','y','x1','x2') ### Final data
## Modification of the objective function:
## Another approach:
dpd_poi <- function(x, fixed = c(rep(FALSE,5))) {
  params <- fixed
  dpd_1 <- function(p) {
    params[!fixed] <- p
    alpha <- params[1]
    beta_0 <- params[2]
    beta_1 <- params[3]
    beta_2 <- params[4]
    rho <- params[5]
    add_pi <- function(d) {
      k <- beta_0 + (d[3]*beta_1) + (d[4]*beta_2)
      k1 <- exp(k) ## for Poisson regression
      d <- cbind(d, k1)
    }
    dat_split <- split(x, f = x$index)
    result <- lapply(dat_split, add_pi)
    result <- rbindlist(result)
    result <- as.data.frame(result)
    colnames(result) <- c('index','y','x1','x2','lamb')
    result_split <- split(result, f = result$index)
    expression <- function(d) {
      bin <- as.data.frame(combn(d$y, 2))
      pr <- as.data.frame(combn(d$lamb, 2))
      ## Evaluation of the probabilities:
      f_jk <- function(u, v) {
        dummy_func <- function(x, y) {
          ppois(x, lambda = y)
        }
        dummy_func_1 <- function(x, y) {
          ppois(x - 1, lambda = y)
        }
        k <- mapply(dummy_func, u, v)
        k_1 <- mapply(dummy_func_1, u, v)
        inv1 <- inverseCDF(as.matrix(k), pnorm)
        inv2 <- inverseCDF(as.matrix(k_1), pnorm)
        mean <- rep(0, 2)
        lower <- inv2
        upper <- inv1
        corr <- diag(2)
        corr[lower.tri(corr)] <- rho
        corr[upper.tri(corr)] <- rho
        prob <- pmvnorm(lower = lower, upper = upper, mean = mean, corr = corr)
        prob <- (1 + (1/alpha))*(prob^alpha)
        ## First expression: (changes for Poisson regression)
        lam <- as.vector(t(v))
        v1 <- rpois(1000, lambda = lam[1])
        v2 <- rpois(1000, lambda = lam[2])
        all_possib <- as.data.frame(rbind(v1, v2))
        new_func <- function(u) {
          k <- mapply(dummy_func, u, v)
          k_1 <- mapply(dummy_func_1, u, v)
          inv1_1 <- inverseCDF(as.matrix(k), pnorm)
          inv2_1 <- inverseCDF(as.matrix(k_1), pnorm)
          mean1 <- rep(0, 2)
          lower1 <- inv2_1
          upper1 <- inv1_1
          corr1 <- diag(2)
          corr1[lower.tri(corr1)] <- rho
          corr1[upper.tri(corr1)] <- rho
          prob1 <- pmvnorm(lower = lower1, upper = upper1, mean = mean1, corr = corr1)
          prob1 <- prob1^(alpha)
        }
        val <- apply(all_possib, 2, new_func)
        val_s <- mean(val) ## approximation
        return(val_s - prob)
      }
      final_res <- mapply(f_jk, bin, pr)
      final_value <- sum(final_res)
    }
    u <- sapply(result_split, expression)
    return(sum(u))
  }
}
## run the objective function:
cl <- makeCluster(25)
setDefaultCluster(cl = cl)
clusterExport(cl, c('d2','val'))
clusterEvalQ(cl, c(library(data.table), library(Rlab), library(HDInterval),
                   library(mvtnorm), library(matrixStats), library(dplyr), library(cold)))
val <- dpd_poi(d2, c(0.5, FALSE, FALSE, FALSE, FALSE))
optimParallel(par = c(beta_0 = 1, beta_1 = 0.1, beta_2 = 1, rho = 0.2), fn = val,
              method = "L-BFGS-B", lower = c(-10,-10,-10,0), upper = c(Inf,Inf,Inf,1))
stopCluster(cl)
After running for some time, it returns the following error:
checkForRemoteErrors(val)
9 nodes produced errors; first error: missing value where TRUE/FALSE needed
However, when I make a minor change in the objective function (pick 2 random numbers from rpois instead of 1000) and run the same code using optim, it converges and gives me a proper result. This is a Monte Carlo simulation, though, and it does not make sense to draw so few Poisson variables. I have to use optimParallel; otherwise it takes way too long to converge. I could also run this code using simulated data.
I'm unable to figure out where the issue truly lies. I would appreciate any help in this regard.
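One generic way to chase down a "missing value where TRUE/FALSE needed" coming from worker nodes, offered as a debugging sketch rather than a fix: wrap the objective so non-finite values are caught and reported before they reach an if() inside pmvnorm or the optimizer. safe_val and the 1e10 penalty below are assumptions, not part of optimParallel; creating the cluster with makeCluster(25, outfile = "") also lets worker messages reach your console.
## hypothetical wrapper; `val` is the objective returned by dpd_poi()
safe_val <- function(p) {
  out <- tryCatch(val(p), error = function(e) NA_real_)
  if (!is.finite(out)) {
    message("non-finite objective at p = ", paste(round(p, 4), collapse = ", "))
    return(1e10)  # large finite penalty so L-BFGS-B can continue
  }
  out
}
## then optimise safe_val instead of val:
## optimParallel(par = c(beta_0 = 1, beta_1 = 0.1, beta_2 = 1, rho = 0.2),
##               fn = safe_val, method = "L-BFGS-B",
##               lower = c(-10, -10, -10, 0), upper = c(Inf, Inf, Inf, 1))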

Speeding up linear model fitting on complete pairwise observations in large sparse matrix in R

I have a numeric data.frame df with 134946 rows x 1938 columns.
99.82% of the data are NA.
For each pair of (distinct) columns "P1" and "P2", I need to find which rows have non-NA values for both and then do some operations on those rows (linear model).
I wrote a script that does this, but it seems quite slow.
This post seems to discuss a related task, but I can't immediately see if or how it can be adapted to my case.
Borrowing the example from that post:
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow=nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.9)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
My script is:
tic = proc.time()
out <- do.call(rbind, sapply(1:(N_ps-1), function(i) {
  if (i/10 == floor(i/10)) {
    cat("\ni = ", i, "\n")
    toc = proc.time()
    show(toc - tic)
  }
  do.call(rbind, sapply((i+1):N_ps, function(j) {
    w <- which(complete.cases(df[,i], df[,j]))
    N <- length(w)
    if (N >= 5) {
      xw <- df[w,i]
      yw <- df[w,j]
      if ((diff(range(xw)) != 0) & (diff(range(yw)) != 0)) {
        s <- summary(lm(yw ~ xw))
        o <- c(i, j, N, s$adj.r.squared,
               s$coefficients[2], s$coefficients[4], s$coefficients[8],
               s$coefficients[1], s$coefficients[3], s$coefficients[7])
      } else {
        o <- c(i, j, N, rep(NA,7))
      }
    } else {o <- NULL}
    return(o)
  }, simplify = F))
}, simplify = F))
toc = proc.time()
show(toc - tic)
This takes about 10 minutes on my machine.
You can imagine what happens when I need to handle a much larger (although more sparse) data matrix. I never managed to finish the calculation.
Question: do you think this could be done more efficiently?
The thing is, I don't know which operations take the most time: the subsetting of df (in which case I would remove duplicated subsetting), the appending of matrix data (in which case I would fill a flat vector and convert it to a matrix at the end), or something else.
Thanks!
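One way to answer the "which operations take more time" question directly is to profile a short slice of the loop with base R's Rprof. A minimal sketch, reusing df from the example above:
Rprof("pairs.prof")
invisible(lapply(1:20, function(i) {   # a few column pairs are enough to profile
  w <- which(complete.cases(df[, i], df[, i + 1]))
  if (length(w) >= 5) summary(lm(df[w, i + 1] ~ df[w, i]))
}))
Rprof(NULL)
head(summaryRprof("pairs.prof")$by.self)  # top functions by self time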
EDIT following up from minem's post
As shown by minem, the speed of this calculation strongly depended on the way linear regression parameters were calculated. Therefore changing that part was the single most important thing to do.
My own further trials showed that:
1) it was essential to use sapply in combination with do.call(rbind, ...) rather than any flat vector to store the data (I am still not sure why; I might make a separate post about this);
2) on the original matrix I am working on, which is much sparser and has a much larger nrows/ncolumns ratio than the one in this example, using the information on the x vector available at the start of each i iteration to reduce the y vector at the start of each j iteration increased the speed by several orders of magnitude, even compared with minem's original script, which was already much better than mine above.
I suppose the advantage comes from filtering out many rows a priori, thus avoiding costly xna & yna operations on very long vectors.
The modified script is the following:
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow = nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.90)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
tic = proc.time()
naIds <- lapply(df, function(x) !is.na(x))
dl <- as.list(df)
rl <- sapply(1:(N_ps - 1), function(i) {
  if ((i-1)/10 == floor((i-1)/10)) {
    cat("\ni = ", i, "\n")
    toc = proc.time()
    show(toc - tic)
  }
  x <- dl[[i]]
  xna <- which(naIds[[i]])
  rl2 <- sapply((i + 1):N_ps, function(j) {
    y <- dl[[j]][xna]
    yna <- which(naIds[[j]][xna])
    w <- xna[yna]
    N <- length(w)
    if (N >= 5) {
      xw <- x[w]
      yw <- y[yna]
      if ((min(xw) != max(xw)) && (min(yw) != max(yw))) {
        # extracts from lm/lm.fit/summary.lm functions
        X <- cbind(1L, xw)
        m <- .lm.fit(X, yw)
        # calculate adj.r.squared
        fitted <- yw - m$residuals
        rss <- sum(m$residuals^2)
        mss <- sum((fitted - mean(fitted))^2)
        n <- length(m$residuals)
        rdf <- n - m$rank
        # rdf <- df.residual
        r.squared <- mss/(mss + rss)
        adj.r.squared <- 1 - (1 - r.squared) * ((n - 1L)/rdf)
        # calculate se & pvals
        p1 <- 1L:m$rank
        Qr <- m$qr
        R <- chol2inv(Qr[p1, p1, drop = FALSE])
        resvar <- rss/rdf
        se <- sqrt(diag(R) * resvar)
        est <- m$coefficients[m$pivot[p1]]
        tval <- est/se
        pvals <- 2 * pt(abs(tval), rdf, lower.tail = FALSE)
        res <- c(m$coefficients[2], se[2], pvals[2],
                 m$coefficients[1], se[1], pvals[1])
        o <- c(i, j, N, adj.r.squared, res)
      } else {
        o <- c(i, j, N, rep(NA, 7))
      }
    } else {o <- NULL}
    return(o)
  }, simplify = F)
  do.call(rbind, rl2)
}, simplify = F)
out2 <- do.call(rbind, rl)
toc = proc.time()
show(toc - tic)
E.g. try with nr=100000; nc=100.
I should probably mention that I tried using indices, i.e.:
naIds <- lapply(df, function(x) which(!is.na(x)))
and then obviously generating w by intersection:
w <- intersect(xna,yna)
N <- length(w)
This however is slower than the above.
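That slowdown is expected: w <- xna & yna is a single vectorized pass over two logical masks, while intersect() has to allocate and match index vectors. A standalone check on synthetic masks (not the df above):
set.seed(1)
xl <- runif(1e6) > 0.9; yl <- runif(1e6) > 0.9   # logical masks, ~10% TRUE
xi <- which(xl);        yi <- which(yl)          # equivalent index vectors
microbenchmark::microbenchmark(
  logical = sum(xl & yl),
  indices = length(intersect(xi, yi))
)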
The largest bottleneck is the lm function, because it performs a lot of checks and additional calculations that you do not necessarily need. So I extracted only the needed parts.
I got this to run in +/- 18 seconds.
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow = nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.9)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
tic = proc.time()
naIds <- lapply(df, function(x) !is.na(x)) # outside loop
dl <- as.list(df) # subsetting list elements is faster than data.frame columns
rl <- sapply(1:(N_ps - 1), function(i) {
  x <- dl[[i]]
  xna <- naIds[[i]] # logical mask of non-NA elements
  rl2 <- sapply((i + 1):N_ps, function(j) {
    y <- dl[[j]]
    yna <- naIds[[j]]
    w <- xna & yna
    N <- sum(w)
    if (N >= 5) {
      xw <- x[w]
      yw <- y[w]
      if ((min(xw) != max(xw)) && (min(yw) != max(yw))) { # faster than diff(range())
        # extracts from lm/lm.fit/summary.lm functions
        X <- cbind(1L, xw)
        m <- .lm.fit(X, yw)
        # calculate adj.r.squared
        fitted <- yw - m$residuals
        rss <- sum(m$residuals^2)
        mss <- sum((fitted - mean(fitted))^2)
        n <- length(m$residuals)
        rdf <- n - m$rank
        # rdf <- df.residual
        r.squared <- mss/(mss + rss)
        adj.r.squared <- 1 - (1 - r.squared) * ((n - 1L)/rdf)
        # calculate se & pvals
        p1 <- 1L:m$rank
        Qr <- m$qr
        R <- chol2inv(Qr[p1, p1, drop = FALSE])
        resvar <- rss/rdf
        se <- sqrt(diag(R) * resvar)
        est <- m$coefficients[m$pivot[p1]]
        tval <- est/se
        pvals <- 2 * pt(abs(tval), rdf, lower.tail = FALSE)
        res <- c(m$coefficients[2], se[2], pvals[2],
                 m$coefficients[1], se[1], pvals[1])
        o <- c(i, j, N, adj.r.squared, res)
      } else {
        o <- c(i, j, N, rep(NA, 7)) # 7 NAs so rows bind to the same width
      }
    } else {o <- NULL}
    return(o)
  }, simplify = F)
  do.call(rbind, rl2)
}, simplify = F)
out2 <- do.call(rbind, rl)
toc = proc.time()
show(toc - tic)
#  user  system elapsed
# 17.94    0.11   18.44
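If you want to confirm that the hand-extracted arithmetic matches the original summary(lm(...)) output, a quick check, assuming both scripts were run on the same seeded data so out and out2 cover the same column pairs:
all.equal(out, out2, check.attributes = FALSE)  # should be TRUE up to floating-point noise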

Script "tennis common opponent" in R

I'm following this article about the "common opponent" approach in tennis; my goal is to script it in the most efficient way possible. Below you can find my code, but it is very slow: calculating the result of a single match takes my laptop about 120 seconds, and I have a dataset of 150k rows to process.
the article: https://core.ac.uk/download/pdf/82518495.pdf
I need your help to clean up and improve my code. Any suggestion is appreciated.
tableA: https://1drv.ms/u/s!At-ZKKnf0H4jafxCX96NLxu00nc
tableB: https://1drv.ms/u/s!At-ZKKnf0H4javHgoPjzfCMXhg4
data_tennis_co: https://1drv.ms/u/s!At-ZKKnf0H4jaJyNkYrr8muff8k
data_tennis_co = read.table("test_co.csv", header=FALSE, sep=",", fill = TRUE)
A = read.table("tableA.csv", header=FALSE, sep=",", fill = TRUE)
B = read.table("tableB.csv", header=FALSE, sep=",", fill = TRUE)
library(foreach); library(doParallel)  # %dopar% used below
library(tictoc)                        # tic()/toc()
#BASIC FUNCTIONS
G <- function(p) p^4*(15-4*p-((10*p^2)/(1-2*p*(1-p))))
d <- function(p, q) p*q*(1-(p*(1-q)+(1-p)*q))^-1
TB <- function(p, q) {
  foreach(i = seq_along(1:28), .combine = sum) %dopar% {
    A[i,1]*p^A[i,2]*(1-p)^A[i,3]*q^A[i,4]*(1-q)^A[i,5]*d(p,q)^A[i,6]
  }
}
S <- function(p, q) {
  res <- foreach(i = seq_along(1:21), .combine = rbind) %dopar% {
    B[i,1]*G(p)^B[i,2]*(1-G(p))^B[i,3]*G(q)^B[i,4]*(1-G(q))^B[i,5]*
      (G(p)*G(q)+(G(p)*(1-G(q))+(1-G(p))*G(q))*TB(p,q))^B[i,6]
  }
  sum(res)
}
M3 <- function(p, q) S(p,q)^2*(1+2*(1-S(p,q)))
DELTA_AB <- function(spwAC, rpwAC, spwBC, rpwBC) (spwAC-(1-rpwAC))-(spwBC-(1-rpwBC))
PR <- function(spwAC, rpwAC, spwBC, rpwBC) {
  (M3(0.6+DELTA_AB(spwAC,rpwAC,spwBC,rpwBC), (1-0.6)) +
   M3(0.6, (1-(0.6-DELTA_AB(spwAC,rpwAC,spwBC,rpwBC)))))/2
}
#COMMON OPPONENTS
MAL <- function(id1, id2) {
  prova <- subset(data_tennis_co, V3 == 1 & V4 == 2)
  previous <- subset(data_tennis_co, V2 < prova$V2)
  s1 <- subset(previous, V3 == 1 | V4 == 1)
  s2 <- subset(previous, V3 == 2 | V4 == 2)
  s1$opp <- ifelse(s1$V3 == 1, s1$V4, s1$V3)
  s2$opp <- ifelse(s2$V3 == 2, s2$V4, s2$V3)
  inn <- intersect(s1$opp, s2$opp)
  common1 <- s1[s1$opp %in% inn, ]
  common2 <- s2[s2$opp %in% inn, ]
  # take the average if the id is not unique
  COM <- merge(common1, common2, by = c("opp"))
  COM$OMALLEY <- unlist(mapply(PR, COM$V5.x, COM$V6.x, COM$V7.y, COM$V8.y))
  COM$OMALLEY[is.nan(COM$OMALLEY)] <- 0.5
  return(tryCatch(sum(COM$OMALLEY)/nrow(COM), error = function(e) NaN))
}
tic()
RESA <- MAL(1, 2)
toc()
The main bottleneck in the code is the use of parallel loops in TB and S for operations that can be done faster using vectorized R functions:
G <- function(p) p^4*(15-4*p-((10*p^2)/(1-2*p*(1-p))))
d <- function(p, q) p*q*(1-(p*(1-q)+(1-p)*q))^-1
TB <- function(p, q) sum(A[,1] * p^A[,2] * (1-p)^A[,3] *
q^A[,4] * (1-q)^A[,5] * d(p,q)^A[,6])
S <- function(p, q) {
Gp <- G(p)
Gq <- G(q)
sum(B[,1] * Gp^B[,2] * (1-Gp)^B[,3] * Gq^B[,4] * (1-Gq)^B[,5] *
(Gp*Gq+(Gp*(1-Gq)+(1-Gp)*Gq)*TB(p,q))^B[,6])
}
M3 <- function(p, q) {
s <- S(p,q)
s^2*(1+2*(1-s))
}
DELTA_AB <- function(spwAC,rpwAC,spwBC,rpwBC) (spwAC-(1-rpwAC)) -
(spwBC-(1-rpwBC))
PR <- function(spwAC,rpwAC,spwBC,rpwBC) {
D <- DELTA_AB(spwAC, rpwAC, spwBC, rpwBC)
(M3(p = 0.6 + D, q = (1 - 0.6)) +
M3(p = 0.6, q = 1 - (0.6 - D))) / 2
}
Solution here:
https://codereview.stackexchange.com/questions/194301/tenis-common-opponents-r
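As a quick sanity check of the vectorized functions, with made-up serve-win probabilities (not taken from the linked data):
# assumes tableA.csv and tableB.csv are loaded into A and B as in the question
p <- 0.65; q <- 0.60                    # hypothetical serve-win probabilities
TB(p, q)                                # tiebreak win probability
S(p, q)                                 # set win probability
M3(p, q)                                # best-of-3 match win probability
system.time(replicate(1000, M3(p, q)))  # should complete in well under a second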

View selected sample for each replication in bootstrap loop

Assume following simple bootstrap procedure:
x <- c(20,54,18,65,87,49,45,94,22,15,16,15,84,55,44,13,16,65,48,98,74,56,97,11,25,43,32,74,45,19,56,874,3,56,89,12,28,71,93)
n <- length(x)
nBoot <- 5; mn <- numeric(nBoot)
for (boots in 1:nBoot) {
  set.seed(830219 + boots)
  repl <- sample(x, n, replace = TRUE)
  mn[boots] <- mean(repl)
}
Is there a way that I can view the resampled dataset 'repl' for each of the 5 replications?
I would appreciate an answer very much. Many thanks in advance.
EDIT
I tried the following:
x <- c(20,54,18,65,87,49,45,94,22,15,16,15,84,55,44,13,16,65,48,98,74,56,97,11,25,43,32,74,45, 19,56,874,3,56,89,12,28,71,93)
n <- length(x)
nBoot <- 5; mn <- numeric(nBoot)
for (boots in 1:nBoot) {
  set.seed(830219 + boots)
  repl <- sample(x, n, replace = TRUE)
  print(repl)
  mn[boots] <- mean(repl)
}
This allows me to view each of the 5 resampled datasets, but does not allow me to work with each dataset separately as repl[1], repl[2], ...
EDIT2
I tried following:
x <- c(20,54,18,65,87,49,45,94,22,15,16,15,84,55,44,13,16,65,48,98,74,56,97,11,25,43,32,74,45,19,56,874,3,56,89,12,28,71,93)
n <- length(x)
nBoot <-3; mn <- numeric(nBoot); repl <- x
for (boots in 1:nBoot) {
  set.seed(830219 + boots)
  repl[boots] <- sample(x, n, replace = TRUE)
  pr <- print(repl)
  mn[boots] <- mean(repl)
}
I then, however, get warning messages: 'In repl[boots] <- sample(x, n, replace = TRUE) : number of items to replace is not a multiple of replacement length', and calling repl[1] only gives me one number.
Based on your comments, I've fixed the code. Here's the version that I tested and it seems to work:
x <- c(20,54,18,65,87,49,45,94,22,15,16,15,84,55,44,13,16,65,48,98,74,56,97,11,25,43,32,74,45,19,56,874,3,56,89,12,28,71,93)
n <- length(x)
nBoot <- 3; mn <- numeric(nBoot)
repl <- matrix(x, nrow = nBoot, ncol = length(x))  # one row per replication
for (boots in 1:nBoot) {
  set.seed(830219 + boots)
  repl[boots, ] <- sample(x, n, replace = TRUE)
  print(repl[boots, ])              # show this replication's resample
  mn[boots] <- mean(repl[boots, ])  # mean of this row, not of the whole matrix
}
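An alternative sketch that avoids the matrix bookkeeping altogether: store each resample as a list element, so every replication stays addressable afterwards as repl[[1]], repl[[2]], and so on.
repl <- vector("list", nBoot)
for (boots in 1:nBoot) {
  set.seed(830219 + boots)
  repl[[boots]] <- sample(x, n, replace = TRUE)
}
mn <- sapply(repl, mean)  # one bootstrap mean per replication
repl[[1]]                 # inspect the first resampled dataset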

Speeding up matrix calculations

I have some matrix calculations in my code that are taking a long time to run. So far the only way I can think of to speed them up is to use foreach instead of a for loop, but I feel like there's more that can be done. Is there some way of vectorizing things, or an alternative to the for loop, that I'm missing?
Thanks!
require(foreach)
require(mvtnorm)
# some dummy input values
omega.input.jP <- matrix(rnorm(3000*5, 0.1, 0.1), 3000, 5)
nsteps.obs <- ncol(omega.input.jP)
sigma.j <- rnorm(3000, 0.02, 0.05)
rho1.j <- rnorm(3000, 0.8, 0.1)
rho2.j <- rnorm(3000, 0.05, 0.1)
y.lastobs <- 0.3
mu.input.jP <- matrix(NA, nrow(omega.input.jP), ncol(omega.input.jP))
# note: j indexes the sample (3000 samples in total) and P the time step (5 time steps here)
mu.input.jP <- foreach(j = 1:nrow(mu.input.jP), .combine = "rbind") %do% {
  omega <- omega.input.jP[j, ]
  Sigma.mu <- GetSigmaMu(nsteps = nsteps.obs, sigma_ar = sigma.j[j],
                         rho1 = rho1.j[j], rho2 = rho2.j[j])
  mu.input.P <- GetConditionalMu(omega = omega, Sigma.mu = Sigma.mu, y = y.lastobs)
  return(mu.input.P)
}
GetSigmaMu <- function( # builds Sigma.mu, an nsteps x nsteps covariance matrix, for an AR(2) process
  nsteps,
  sigma_ar,
  rho1,
  rho2
) {
  rho <- c(rho1, rho2)
  cor <- ARMAacf(ar = rho, pacf = FALSE, lag.max = nsteps) # phi's; first element is phi0 = 1
  var <- sigma_ar^2/(1 - sum(rho*cor[2:3])) # stationary variance; cor[2:3] gives the first two phi's # change JR, 20140304
  cov <- cor*var
  Sigma.mu <- matrix(NA, nsteps, nsteps)
  for (i in 1:nsteps) {
    for (k in 1:nsteps) {
      Sigma.mu[i,k] <- cov[abs(i-k)+1]
    }
  }
  return(Sigma.mu)
}
GetConditionalMu <- function( # get values of mu given y
  omega,
  Sigma.mu,
  y,
  method = "svd" # method used by rmvnorm to factor the covariance matrix; the default did not work here, so "svd" is used instead
) {
  nsteps <- length(omega)
  one <- rep(1, nsteps)
  mean.mu.cond <- c(omega + (1/(sum(Sigma.mu)))*(Sigma.mu %*% one)*c(nsteps*y - t(one) %*% omega))
  Sigma.mu.cond <- Sigma.mu - (1/(sum(Sigma.mu)))*(Sigma.mu %*% one %*% t(one) %*% Sigma.mu)
  mu.cond <- rmvnorm(1, mean.mu.cond, Sigma.mu.cond, method = method)
  return(mu.cond)
}
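One concrete speedup, offered as a sketch rather than a definitive fix: the double loop in GetSigmaMu fills Sigma.mu[i,k] with a value that depends only on |i - k|, so outer() can build the whole matrix in one shot. GetSigmaMu2 below is a hypothetical drop-in replacement:
GetSigmaMu2 <- function(nsteps, sigma_ar, rho1, rho2) {
  rho <- c(rho1, rho2)
  cor <- ARMAacf(ar = rho, pacf = FALSE, lag.max = nsteps)
  var <- sigma_ar^2/(1 - sum(rho*cor[2:3]))
  cov <- cor*var
  # |i - k| + 1 indexes the covariance at each lag; no nested loops needed
  matrix(cov[abs(outer(1:nsteps, 1:nsteps, "-")) + 1], nsteps, nsteps)
}
# sanity check: all.equal(GetSigmaMu(5, 0.02, 0.8, 0.05), GetSigmaMu2(5, 0.02, 0.8, 0.05))
That said, with only 5 time steps each Sigma.mu is tiny, so the bulk of the runtime is more likely the 3000 rmvnorm calls; profiling with Rprof would show where the time actually goes.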
