Basically I want to perform diagonal averaging in R. Below is some code adapted from the simsalabim package to do the diagonal averaging, only it is slow.
Any suggestions for vectorizing this instead of using sapply?
reconSSA <- function(S, v, group = 1){
  ### S : matrix
  ### v : vector
  N <- length(v)
  L <- nrow(S)
  K <- N - L + 1
  XX <- matrix(0, nrow = L, ncol = K)
  IND <- row(XX) + col(XX) - 1
  XX <- matrix(v[row(XX) + col(XX) - 1], nrow = L, ncol = K)
  XX <- S[,group] %*% t(t(XX) %*% S[,group])
  ## Diagonal averaging
  .intFun <- function(i, x, ind) mean(x[ind == i])
  RC <- sapply(1:N, .intFun, x = XX, ind = IND)
  return(RC)
}
For data you could use the following:
data(AirPassengers)
v <- AirPassengers
L <- 30
T <- length(v)
K <- T-L+1
x.b <- matrix(nrow=L,ncol=K)
x.b <- matrix(v[row(x.b)+col(x.b)-1],nrow=L,ncol=K)
S <- eigen(x.b %*% t(x.b))[["vectors"]]
out <- reconSSA(S, v, 1:10)
You can speed up the computation by almost 10 times with the help of a very specialized trick with rowsum:
reconSSA_1 <- function(S, v, group = 1){
  ### S : matrix
  ### v : vector
  N <- length(v)
  L <- nrow(S)
  K <- N - L + 1
  XX <- matrix(0, nrow = L, ncol = K)
  IND <- row(XX) + col(XX) - 1
  XX <- matrix(v[row(XX) + col(XX) - 1], nrow = L, ncol = K)
  XX <- S[,group] %*% t(t(XX) %*% S[,group])
  ## Diagonal averaging
  SUMS <- rowsum.default(c(XX), c(IND))
  counts <- if (L <= K) c(1:L, rep(L, K - L - 1), L:1)  # diagonal lengths (assumes L != K)
            else c(1:K, rep(K, L - K - 1), K:1)
  c(SUMS/counts)
}
all.equal(reconSSA(S, v, 1:10), reconSSA_1(S, v, 1:10))
[1] TRUE
library(rbenchmark)
benchmark(SSA   = reconSSA(S, v, 1:10),
          SSA_1 = reconSSA_1(S, v, 1:10),
          columns = c("test", "elapsed", "relative"),
          order = "relative")
   test elapsed relative
2 SSA_1    0.23   1.0000
1   SSA    2.08   9.0435
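In case the trick looks opaque: rowsum sums the elements of each anti-diagonal in a single pass, and dividing by the number of elements on each diagonal gives the means. A minimal illustration on a 3x4 toy matrix (the names here are not part of the functions above):
A <- matrix(1:12, nrow = 3)
IND <- row(A) + col(A) - 1        # anti-diagonal index of each element
SUMS <- rowsum(c(A), c(IND))      # one sum per anti-diagonal
counts <- tabulate(c(IND))        # 1 2 3 3 2 1: elements per anti-diagonal
cbind(c(SUMS/counts), sapply(1:6, function(i) mean(A[IND == i])))  # identical columns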
[Update: As Joshua suggested, it can be sped up even further by calling the crux of the rowsum code directly:
reconSSA_2 <- function(S, v, group = 1){
  ### S : matrix
  ### v : vector
  N <- length(v)
  L <- nrow(S)
  K <- N - L + 1
  XX <- matrix(0, nrow = L, ncol = K)
  IND <- c(row(XX) + col(XX) - 1L)
  XX <- matrix(v[row(XX) + col(XX) - 1], nrow = L, ncol = K)
  XX <- c(S[,group] %*% t(t(XX) %*% S[,group]))
  ## Diagonal averaging (calls an internal entry point of base's rowsum; may break across R versions)
  SUMS <- .Call("Rrowsum_matrix", XX, 1L, IND, 1:N,
                TRUE, PACKAGE = "base")
  counts <- if (L <= K) c(1:L, rep(L, K - L - 1), L:1)  # diagonal lengths (assumes L != K)
            else c(1:K, rep(K, L - K - 1), K:1)
  c(SUMS/counts)
}
   test elapsed  relative
3 SSA_2   0.156  1.000000
2 SSA_1   0.559  3.583333
1   SSA   5.389 34.544872
A speedup of 34.5x compared to the original code!
]
I can't get your example to produce sensible results. I think there are several errors in your function.
XX is used in sapply, but is not defined in the function
sapply works over 1:N, where N=144 in your example, but x.b only has 115 columns
reconSSA simply returns x
Regardless, I think you want:
data(AirPassengers)
x <- AirPassengers
rowMeans(embed(x,30))
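(embed builds a matrix whose rows are lagged windows of the series, so rowMeans averages each length-30 window; a toy example of what embed returns:)
embed(1:5, 3)
#      [,1] [,2] [,3]
# [1,]    3    2    1
# [2,]    4    3    2
# [3,]    5    4    3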
UPDATE: I've re-worked and profiled the function. Most of the time is spent in mean, so it may be hard to get this much faster using R code. That said, you can get a 20% speedup by using sum instead.
reconSSA <- function(S, v, group = 1){
  N <- length(v)
  L <- nrow(S)
  K <- N - L + 1
  XX <- matrix(0, nrow = L, ncol = K)
  IND <- row(XX) + col(XX) - 1
  XX <- matrix(v[row(XX) + col(XX) - 1], nrow = L, ncol = K)
  XX <- S[,group] %*% t(t(XX) %*% S[,group])
  ## Diagonal averaging
  .intFun <- function(i, x, ind) {
    I <- ind == i
    sum(x[I])/sum(I)
  }
  RC <- sapply(1:N, .intFun, x = XX, ind = IND)
  return(RC)
}
I have two for loops in R over data with around 150,000 observations. I tried apply()-family functions, but they were slower than the for loop in my case. Here is my code:
In the code below, k = 500 and N = 150000; x is the location at each time t (for all observations) and xm is a specific x with specific coordinates that I filtered out. At each time j we observe xm, so we remove it from the data and fit the model with the rest of the dataset. I had an if-else condition here that removed it in order to make the loop faster.
It's very slow; I am thankful for your help!
xs <- 0:200
result <- matrix(0, k, N)
for (j in 1:N) {
  for (i in 1:k) {
    a <- sum(dnorm(xs[i], xm[-j], bx))
    b <- sum(dnorm(xs[i], x[-ind[j]], bx))
    result[i,j] <- a/b
  }
}
Using dummy values ind, x, and xm, here is a solution that runs in about 10 seconds on my machine (>1000 times faster than the original code).
# start with a small N for verification
N <- 15e2L
xm <- runif(N)
x <- runif(N)
ind <- sample(N)
k <- 501L
xs <- 0:500
bx <- 2
system.time({
  # proposed solution: compute all k x N kernel values once with outer,
  # then form each leave-one-out sum as (full row sum) minus (left-out term)
  a <- outer(xs, xm, function(x, y) dnorm(x, y, bx))
  b <- outer(xs, x[ind], function(x, y) dnorm(x, y, bx))
  result1 <- (rowSums(a) - a)/(rowSums(b) - b)
})
#> user system elapsed
#> 0.08 0.02 0.10
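The key identity: sum(dnorm(xs[i], xm[-j], bx)) is the full row sum minus the single left-out term, so rowSums(a) - a yields every leave-one-out sum at once. A quick spot check for one (i, j) pair:
i <- 3; j <- 7
all.equal(sum(dnorm(xs[i], xm[-j], bx)), rowSums(a)[i] - a[i, j])
#> [1] TRUE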
system.time({
  # OP's solution
  result2 <- matrix(0, k, N)
  for (j in 1:N) {
    for (i in 1:k) {
      a <- sum(dnorm(xs[i], xm[-j], bx))
      b <- sum(dnorm(xs[i], x[-ind[j]], bx))
      result2[i,j] <- a/b
    }
  }
})
#> user system elapsed
#> 109.42 0.80 110.90
# check that the results are the same
all.equal(result1, result2)
#> [1] TRUE
# use a large N
N <- 15e4L
xm <- runif(N)
x <- runif(N)
ind <- sample(N)
system.time({
a <- outer(xs, xm, function(x, y) dnorm(x, y, bx))
b <- outer(xs, x[ind], function(x, y) dnorm(x, y, bx))
result1 <- (rowSums(a) - a)/(rowSums(b) - b)
})
#> user system elapsed
#> 8.62 1.10 9.73
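One caveat: a, b, and result1 are fully materialized k x N double matrices, so it is worth checking the memory footprint before scaling up further:
501 * 150000 * 8 / 2^30  # ~0.56 GiB per k x N matrix of doubles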
I don't know how to extract each value of estquant from this loop. What code should I add at the end so that it gives me all the values instead of just one?
p <- 0.5
m <- 2
d1 <- as.matrix(d); d1
for (i in 1:m){
  Xj <- d1[,i]
  nj <- length(Xj)
  Fj <- pbeta(Fx, i, nj+1-i)
  a <- pbeta(p, i, nj+1-i)
  estFj <- knots(ecdf(Xj))
  estquant <- min(estFj[estFj >= a])
}
You want estquant to be a vector of length m.
So:
p <- 0.5
m <- 2
d1 <- as.matrix(d); d1
estquant <- numeric(m)
for (i in 1:m){
  Xj <- d1[,i]
  nj <- length(Xj)
  Fj <- pbeta(Fx, i, nj+1-i)
  a <- pbeta(p, i, nj+1-i)
  estFj <- knots(ecdf(Xj))
  estquant[i] <- min(estFj[estFj >= a])
}
estquant
(It's important to pre-allocate an object when assigning values to it one by one in a loop; otherwise R has to grow, and therefore copy, the object on every iteration, and that is time-consuming.)
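A quick way to see the cost of growing instead of pre-allocating (timings are indicative and machine-dependent):
n <- 5e4
system.time({ v <- NULL;       for (i in 1:n) v <- c(v, i) })  # grows: copies v every iteration
system.time({ v <- numeric(n); for (i in 1:n) v[i] <- i })     # pre-allocated: writes in place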
I have a numeric data.frame df with 134946 rows x 1938 columns.
99.82% of the data are NA.
For each pair of (distinct) columns "P1" and "P2", I need to find which rows have non-NA values for both and then do some operations on those rows (linear model).
I wrote a script that does this, but it seems quite slow.
This post seems to discuss a related task, but I can't immediately see if or how it can be adapted to my case.
Borrowing the example from that post:
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow=nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.9)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
My script is:
tic = proc.time()
out <- do.call(rbind, sapply(1:(N_ps-1), function(i) {
  if (i/10 == floor(i/10)) {
    cat("\ni = ", i, "\n")
    toc = proc.time()
    show(toc - tic)
  }
  do.call(rbind, sapply((i+1):N_ps, function(j) {
    w <- which(complete.cases(df[,i], df[,j]))
    N <- length(w)
    if (N >= 5) {
      xw <- df[w,i]
      yw <- df[w,j]
      if ((diff(range(xw)) != 0) & (diff(range(yw)) != 0)) {
        s <- summary(lm(yw ~ xw))
        o <- c(i, j, N, s$adj.r.squared,
               s$coefficients[2], s$coefficients[4], s$coefficients[8],
               s$coefficients[1], s$coefficients[3], s$coefficients[7])
      } else {
        o <- c(i, j, N, rep(NA,7))
      }
    } else {o <- NULL}
    return(o)
  }, simplify = F))
}, simplify = F))
toc = proc.time()
show(toc - tic)
This takes about 10 minutes on my machine.
You can imagine what happens when I need to handle a much larger (although sparser) data matrix: I never managed to finish the calculation.
Question: do you think this could be done more efficiently?
The thing is, I don't know which operations take the most time (the subsetting of df, in which case I would remove duplication of it? appending the matrix data, in which case I would create a flat vector and then convert it to a matrix at the end? ...).
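One way to find out is to sample-profile a representative slice of the loop with Rprof; a sketch using the df and N_ps defined above (the output file name is arbitrary):
Rprof("pairs_prof.out")
for (i in 1:50) {  # a representative slice of the pair loop
  for (j in (i + 1):N_ps) {
    w <- which(complete.cases(df[, i], df[, j]))
    if (length(w) >= 5) s <- summary(lm(df[w, j] ~ df[w, i]))
  }
}
Rprof(NULL)
head(summaryRprof("pairs_prof.out")$by.self)  # top functions by self time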
Thanks!
EDIT following up from minem's post
As shown by minem, the speed of this calculation depends strongly on the way the linear regression parameters are calculated, so changing that part was the single most important thing to do.
My own further trials showed that: 1) it was essential to use sapply in combination with do.call(rbind, ...), rather than any flat vector, to store the data (I am still not sure why; I might make a separate post about this); 2) on the original matrix I am working on, which is much sparser and has a much larger nrow/ncol ratio than the one in this example, using the information on the x vector available at the start of each i iteration to reduce the y vector at the start of each j iteration increased the speed by several orders of magnitude, even compared with minem's original script, which was already much better than mine above.
I suppose the advantage comes from filtering out many rows a priori, thus avoiding costly xna & yna operations on very long vectors.
The modified script is the following:
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow = nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.90)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
tic = proc.time()
naIds <- lapply(df, function(x) !is.na(x))
dl <- as.list(df)
rl <- sapply(1:(N_ps - 1), function(i) {
  if ((i-1)/10 == floor((i-1)/10)) {
    cat("\ni = ", i, "\n")
    toc = proc.time()
    show(toc - tic)
  }
  x <- dl[[i]]
  xna <- which(naIds[[i]])
  rl2 <- sapply((i + 1):N_ps, function(j) {
    y <- dl[[j]][xna]
    yna <- which(naIds[[j]][xna])
    w <- xna[yna]
    N <- length(w)
    if (N >= 5) {
      xw <- x[w]
      yw <- y[yna]
      if ((min(xw) != max(xw)) && (min(yw) != max(yw))) {
        # extracts from the lm/lm.fit/summary.lm functions
        X <- cbind(1L, xw)
        m <- .lm.fit(X, yw)
        # calculate adj.r.squared
        fitted <- yw - m$residuals
        rss <- sum(m$residuals^2)
        mss <- sum((fitted - mean(fitted))^2)
        n <- length(m$residuals)
        rdf <- n - m$rank
        # rdf <- df.residual
        r.squared <- mss/(mss + rss)
        adj.r.squared <- 1 - (1 - r.squared) * ((n - 1L)/rdf)
        # calculate se & pvals
        p1 <- 1L:m$rank
        Qr <- m$qr
        R <- chol2inv(Qr[p1, p1, drop = FALSE])
        resvar <- rss/rdf
        se <- sqrt(diag(R) * resvar)
        est <- m$coefficients[m$pivot[p1]]
        tval <- est/se
        pvals <- 2 * pt(abs(tval), rdf, lower.tail = FALSE)
        res <- c(m$coefficients[2], se[2], pvals[2],
                 m$coefficients[1], se[1], pvals[1])
        o <- c(i, j, N, adj.r.squared, res)
      } else {
        o <- c(i, j, N, rep(NA,7))
      }
    } else {o <- NULL}
    return(o)
  }, simplify = F)
  do.call(rbind, rl2)
}, simplify = F)
out2 <- do.call(rbind, rl)
toc = proc.time()
show(toc - tic)
E.g. try with nr=100000; nc=100.
I should probably mention that I tried using indices, i.e.:
naIds <- lapply(df, function(x) which(!is.na(x)))
and then obviously generating w by intersection:
w <- intersect(xna,yna)
N <- length(w)
This however is slower than the above.
The largest bottleneck is the lm function, because it performs a lot of checks and additional calculations that you do not necessarily need. So I extracted only the needed parts.
I got this to run in +/- 18 seconds.
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow = nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.9)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
tic = proc.time()
naIds <- lapply(df, function(x) !is.na(x)) # outside loop
dl <- as.list(df) # sub-setting list elements is faster than columns
rl <- sapply(1:(N_ps - 1), function(i) {
  x <- dl[[i]]
  xna <- naIds[[i]] # relevant logical vector if not empty elements
  rl2 <- sapply((i + 1):N_ps, function(j) {
    y <- dl[[j]]
    yna <- naIds[[j]]
    w <- xna & yna
    N <- sum(w)
    if (N >= 5) {
      xw <- x[w]
      yw <- y[w]
      if ((min(xw) != max(xw)) && (min(yw) != max(yw))) { # faster than diff(range())
        # extracts from the lm/lm.fit/summary.lm functions
        X <- cbind(1L, xw)
        m <- .lm.fit(X, yw)
        # calculate adj.r.squared
        fitted <- yw - m$residuals
        rss <- sum(m$residuals^2)
        mss <- sum((fitted - mean(fitted))^2)
        n <- length(m$residuals)
        rdf <- n - m$rank
        # rdf <- df.residual
        r.squared <- mss/(mss + rss)
        adj.r.squared <- 1 - (1 - r.squared) * ((n - 1L)/rdf)
        # calculate se & pvals
        p1 <- 1L:m$rank
        Qr <- m$qr
        R <- chol2inv(Qr[p1, p1, drop = FALSE])
        resvar <- rss/rdf
        se <- sqrt(diag(R) * resvar)
        est <- m$coefficients[m$pivot[p1]]
        tval <- est/se
        pvals <- 2 * pt(abs(tval), rdf, lower.tail = FALSE)
        res <- c(m$coefficients[2], se[2], pvals[2],
                 m$coefficients[1], se[1], pvals[1])
        o <- c(i, j, N, adj.r.squared, res)
      } else {
        o <- c(i, j, N, rep(NA, 7)) # 7 NAs so every row has the same length as the full case
      }
    } else {o <- NULL}
    return(o)
  }, simplify = F)
  do.call(rbind, rl2)
}, simplify = F)
out2 <- do.call(rbind, rl)
toc = proc.time()
show(toc - tic)
# user system elapsed
# 17.94 0.11 18.44
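To see how much of lm's cost is overhead rather than numerics, here is a micro-benchmark of a single fit on dummy data (the comparison is deliberately lopsided: summary(lm(...)) does all the checks and inference, .lm.fit only the least-squares solve; numbers will vary by machine):
set.seed(1)
xw <- runif(100); yw <- 2*xw + rnorm(100)
library(rbenchmark)
benchmark(lm     = summary(lm(yw ~ xw)),
          lm.fit = .lm.fit(cbind(1, xw), yw),
          replications = 1000,
          columns = c("test", "elapsed", "relative"),
          order = "relative")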
I am trying to implement the Regularized Latent Semantic Indexing (RLSI) algorithm in R.
The original paper can be found here:
http://research.microsoft.com/en-us/people/hangli/sigirfp372-wang.pdf
Below is my code.
Here, I generate a matrix D from two matrices U and V. Each column of U corresponds to a topic vector, and it is made to be sparse. After that, I apply RLSI to the D matrix to see if I can factorize it into two matrices, one of which has sparse vectors like U.
However, the resulting U is far from sparse. Actually, every element of it is non-zero.
Is there something wrong with my code?
Thank you very much in advance.
library(magrittr)
# functions
updateU <- function(D, U, V){
  S <- V %*% t(V)
  R <- D %*% t(V)
  for (m in 1:M){
    u_m <- rep(0, K)
    u_previous <- u_m
    diff_u <- 100
    while (diff_u > 0.1){
      for (k in 1:K){
        w_mk <- R[m,k] - S[k,-k] %*% U[m,-k]
        in_hinge <- (abs(w_mk) - 0.5 * lambda_1)
        u_m[k] <- (ifelse(in_hinge > 0, in_hinge, 0) * ifelse(w_mk >= 0, 1, -1)) / S[k,k]
      }
      diff_u <- sum(u_m - u_previous)
      u_previous <- u_m
    }
    U[m,] <- u_m
  }
  return(U)
}
updateV <- function(D, U, V){
  Sigma <- solve(t(U) %*% U + lambda_2 * diag(K))
  Phi <- t(U) %*% D
  V <- Sigma %*% Phi
  return(V)
}
# Set constants
M <- 5000
N <- 1000
K <- 30
lambda_1 <- 1
lambda_2 <- 0.5
# Create D
originalU <- c(rpois(50000, lambda = 10), rep(0, 100000)) %>% sample(., 150000) %>% matrix(., M, K)
originalV <- rpois(30000, lambda = 5) %>% sample(., 30000) %>% matrix(., K, N)
D <- originalU %*% originalV
# Initialize U and V
V <- matrix(rpois(30000, lambda = 5), K, N)
U <- matrix(0, M, K)
# Run RLSI (iterate 100 times for now)
for (t in 1:100){
  cat(t, ":")
  U <- updateU(D, U, V)
  V <- updateV(D, U, V)
  loss <- sum((D - U %*% V)^2)
  cat(loss, "\n")
}
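(A direct way to quantify the sparsity complaint is to track the fraction of exact zeros in U as the iterations proceed, e.g.:)
mean(U == 0)           # fraction of exactly-zero entries in the recovered U
mean(originalU == 0)   # roughly 2/3 by construction, for comparison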
I've got it. Each row of U has to be reset to a zero vector each time the updateU function is run.
I want to set up a model for the interest rate in a binomial tree. The interest rate is path dependent. I want to return the interest rate (plus the discount factor and payoff) at every step in all 2^N scenarios. The reason I want to return every single interest rate is that I use them to compute the discount factors. I know how to do this in a complex way; here I want to use a double loop (or something simpler) to get the results.
w is a 0/1 dummy-variable matrix representing all scenarios.
r is the interest rate: if there is a head (1), then r1 = r0 + u = r0 + 0.005; if there is a tail (0), then r1 = r0 - d.
D is the discount factor: D1 = 1/(1+r0), D2 = D1/(1+r1), ...
P is the payoff.
In this case the number of periods N is 10, so I can compute it step by step. However, if N gets larger and larger I cannot use my method. I want a simple way to compute this. Thank you.
#Real Price
N <- 10
r0 <- 0.06
K <- 0.05
u <- 0.005
d <- 0.004
q <- 0.5
w <- expand.grid(rep(list(0:1),N))
r <- D <- P <- matrix(0, nrow = 2^N, ncol = N)
for (i in 1:dim(w)[1])
{
  r[i,1]  <- r0 + u*w[i,1] - d*(1-w[i,1])
  r[i,2]  <- r[i,1] + u*w[i,2] - d*(1-w[i,2])
  r[i,3]  <- r[i,2] + u*w[i,3] - d*(1-w[i,3])
  r[i,4]  <- r[i,3] + u*w[i,4] - d*(1-w[i,4])
  r[i,5]  <- r[i,4] + u*w[i,5] - d*(1-w[i,5])
  r[i,6]  <- r[i,5] + u*w[i,6] - d*(1-w[i,6])
  r[i,7]  <- r[i,6] + u*w[i,7] - d*(1-w[i,7])
  r[i,8]  <- r[i,7] + u*w[i,8] - d*(1-w[i,8])
  r[i,9]  <- r[i,8] + u*w[i,9] - d*(1-w[i,9])
  r[i,10] <- r[i,9] + u*w[i,10] - d*(1-w[i,10])
  D[i,1]  <- 1/(1+r0)
  D[i,2]  <- D[i,1]/(1+r[i,1])
  D[i,3]  <- D[i,2]/(1+r[i,2])
  D[i,4]  <- D[i,3]/(1+r[i,3])
  D[i,5]  <- D[i,4]/(1+r[i,4])
  D[i,6]  <- D[i,5]/(1+r[i,5])
  D[i,7]  <- D[i,6]/(1+r[i,6])
  D[i,8]  <- D[i,7]/(1+r[i,7])
  D[i,9]  <- D[i,8]/(1+r[i,8])
  D[i,10] <- D[i,9]/(1+r[i,9])
  P[i,1]  <- D[i,1]*pmax(K-r0,0)*(0.5^N)
  P[i,2]  <- D[i,2]*pmax(K-r[i,1],0)*(0.5^N)
  P[i,3]  <- D[i,3]*pmax(K-r[i,2],0)*(0.5^N)
  P[i,4]  <- D[i,4]*pmax(K-r[i,3],0)*(0.5^N)
  P[i,5]  <- D[i,5]*pmax(K-r[i,4],0)*(0.5^N)
  P[i,6]  <- D[i,6]*pmax(K-r[i,5],0)*(0.5^N)
  P[i,7]  <- D[i,7]*pmax(K-r[i,6],0)*(0.5^N)
  P[i,8]  <- D[i,8]*pmax(K-r[i,7],0)*(0.5^N)
  P[i,9]  <- D[i,9]*pmax(K-r[i,8],0)*(0.5^N)
  P[i,10] <- D[i,10]*pmax(K-r[i,9],0)*(0.5^N)
}
true.price <- sum(P)
# > true.price
# [1] 0.00292045
You can just use a nested loop, looping over 2:(ncol(w)) within the i loop:
for (i in 1:nrow(w)) {
  r[i, 1] <- r0 + u*w[i, 1] - d*(1-w[i, 1])
  D[i, 1] <- 1/(1+r0)
  P[i, 1] <- D[i, 1]*pmax(K-r0, 0)*(0.5^N)
  for (j in 2:ncol(w)) {
    r[i,j] <- r[i, j-1] + u*w[i, j] - d*(1-w[i, j])
    D[i,j] <- D[i, j-1]/(1+r[i, j-1])
    P[i,j] <- D[i, j]*pmax(K-r[i, j-1], 0)*(0.5^N)
  }
}
true.price <- sum(P)
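If you want to avoid explicit loops entirely, the same recursions can be vectorized: r is a cumulative sum of the per-period moves and D is a cumulative product of the discount terms. A minimal sketch under the same setup (it should reproduce the loop's r, D, and P):
wm <- as.matrix(w)                      # 2^N x N matrix of 0/1 scenarios
steps <- u*wm - d*(1 - wm)              # per-period rate moves
r <- r0 + t(apply(steps, 1, cumsum))    # r[i, j] = r0 + moves up to period j
Rprev <- cbind(r0, r[, -N])             # rate in force during each period: r0, r1, ..., r_{N-1}
D <- 1/t(apply(1 + Rprev, 1, cumprod))  # D[i, j] = 1/prod(1 + Rprev[i, 1:j])
P <- D * pmax(K - Rprev, 0) * 0.5^N
true.price <- sum(P)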