Errors in R function `maxLik`

I am trying to optimise a likelihood function using package maxLik as follows.
library(fExtremes)
theta0 <- c(0.1, 1)
theta1 <- c(0.1, 2)
set.seed(20150518)
X <- matrix(0, nrow=1000, ncol=1000)
for(i in 1:1000){
  X[i, ] <- c(rgpd(900, xi=-theta0[1], beta=theta0[2]),
              rgpd(100, xi=-theta1[1], beta=theta1[2]))
}
library(maxLik)
loglik <- function(param){
  shape <- param[1]
  scale <- param[2]
  sum(dgpd(x, xi=-shape, beta=scale, log=TRUE))
}
scale.mle <- rep(0, 1000)
for(i in 1:1000){
  x <- X[i, ]
  scale.mle[i] <- as.numeric(maxLik(logLik=loglik,
                                    start=c(theta0[1], theta0[2]),
                                    fixed=1)$estimate[2])
}
However, I keep getting the following error message:
Error in maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, :
  NA in the initial gradient
How do I fix this? Is it even possible? Also, what other R functions could be used for this optimisation?
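One likely cause (a guess from the simulation setup, not a verified diagnosis): with xi = -shape < 0 the GPD has bounded support [0, beta/shape], so at the starting value beta = 1 some observations drawn from the second sample (beta = 2) fall outside it, dgpd(..., log=TRUE) returns non-finite values, and the numeric gradient at the start is NA. A minimal sketch that guards against this and, since the shape is held fixed anyway, profiles the scale with optim():
## A sketch, not a verified fix: penalise parameter values whose support
## does not cover the data, then optimise the scale alone (the shape is
## fixed at theta0[1] in the original maxLik call anyway).
negloglik <- function(scale){
  if (scale <= 0) return(1e10)                      # invalid scale
  ll <- dgpd(x, xi=-theta0[1], beta=scale, log=TRUE)
  if (any(!is.finite(ll))) return(1e10)             # x outside the GPD support
  -sum(ll)
}
scale.mle <- rep(0, 1000)
for(i in 1:1000){
  x <- X[i, ]
  scale.mle[i] <- optim(theta0[2], negloglik, method="Brent",
                        lower=1e-6, upper=100)$par
}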

Related

Cannot make sense of the error while using OptimParallel in R

I'm trying to run the function below using optimParallel in R on a certain data set. The code is as follows:
install.packages("optimParallel")
install.packages('parallel')
library(parallel)
library(optimParallel)
library(doParallel)
library(data.table)
library(Rlab)
library(HDInterval)
library(mvtnorm)
library(matrixStats)
library(dplyr)
library(cold)
## Bolus data:
data("bolus")
d1 <- bolus
d1$group <- ifelse(d1$group == "2mg",1,0)
colnames(d1) <- c("index",'group',"time","y")
d2 <- d1 %>% select(index, y, group, time)
colnames(d2) <- c('index','y','x1','x2') ### Final data
## Modification of the objective function:
## Another approach:
dpd_poi <- function(x, fixed = c(rep(FALSE,5))){
  params <- fixed
  dpd_1 <- function(p){
    params[!fixed] <- p
    alpha  <- params[1]
    beta_0 <- params[2]
    beta_1 <- params[3]
    beta_2 <- params[4]
    rho    <- params[5]
    add_pi <- function(d){
      k  <- beta_0+(d[3]*beta_1)+(d[4]*beta_2)
      k1 <- exp(k) ## for Poisson regression
      d  <- cbind(d,k1)
    }
    dat_split <- split(x, f = x$index)
    result <- lapply(dat_split, add_pi)
    result <- rbindlist(result)
    result <- as.data.frame(result)
    colnames(result) <- c('index','y','x1','x2','lamb')
    result_split <- split(result, f = result$index)
    expression <- function(d){
      bin <- as.data.frame(combn(d$y, 2))
      pr  <- as.data.frame(combn(d$lamb, 2))
      ## Evaluation of the probabilities:
      f_jk <- function(u,v){
        dummy_func <- function(x,y){
          ppois(x, lambda = y)
        }
        dummy_func_1 <- function(x,y){
          ppois(x-1, lambda = y)
        }
        k   <- mapply(dummy_func,u,v)
        k_1 <- mapply(dummy_func_1,u,v)
        inv1 <- inverseCDF(as.matrix(k), pnorm)
        inv2 <- inverseCDF(as.matrix(k_1), pnorm)
        mean  <- rep(0,2)
        lower <- inv2
        upper <- inv1
        corr <- diag(2)
        corr[lower.tri(corr)] <- rho
        corr[upper.tri(corr)] <- rho
        prob <- pmvnorm(lower = lower, upper = upper, mean = mean, corr = corr)
        prob <- (1+(1/alpha))*(prob^alpha)
        ## First expression: (changes for Poisson regression)
        lam <- as.vector(t(v))
        v1 <- rpois(1000, lambda = lam[1])
        v2 <- rpois(1000, lambda = lam[2])
        all_possib <- as.data.frame(rbind(v1,v2))
        new_func <- function(u){
          k   <- mapply(dummy_func,u,v)
          k_1 <- mapply(dummy_func_1,u,v)
          inv1_1 <- inverseCDF(as.matrix(k), pnorm)
          inv2_1 <- inverseCDF(as.matrix(k_1), pnorm)
          mean1  <- rep(0,2)
          lower1 <- inv2_1
          upper1 <- inv1_1
          corr1 <- diag(2)
          corr1[lower.tri(corr1)] <- rho
          corr1[upper.tri(corr1)] <- rho
          prob1 <- pmvnorm(lower = lower1, upper = upper1, mean = mean1, corr = corr1)
          prob1 <- prob1^(alpha)
        }
        val <- apply(all_possib, 2, new_func)
        val_s <- mean(val) ## approximation
        return(val_s - prob)
      }
      final_res <- mapply(f_jk, bin, pr)
      final_value <- sum(final_res)
    }
    u <- sapply(result_split, expression)
    return(sum(u))
  }
}
## run the objective function:
cl <- makeCluster(25)
setDefaultCluster(cl=cl)
clusterExport(cl,c('d2','val'))
clusterEvalQ(cl, c(library(data.table), library(Rlab), library(HDInterval),
                   library(mvtnorm), library(matrixStats), library(dplyr),
                   library(cold)))
val <- dpd_poi(d2, c(0.5,FALSE,FALSE,FALSE,FALSE))
optimParallel(par = c(beta_0 = 1, beta_1 = 0.1, beta_2 = 1, rho = 0.2),
              fn = val, method = "L-BFGS-B",
              lower = c(-10, -10, -10, 0), upper = c(Inf, Inf, Inf, 1))
stopCluster(cl)
After running for some time, it returns the following error:
Error in checkForRemoteErrors(val) :
  9 nodes produced errors; first error: missing value where TRUE/FALSE needed
However, when I make a minor change in the objective function (picking 2 random numbers from rpois instead of 1000) and run the same code using optim, it converges and gives me a proper result. This is a Monte Carlo simulation, and it does not make sense to draw so few Poisson variables. I have to use optimParallel; otherwise it takes far too long to converge. I could also run this code using simulated data.
I'm unable to figure out where the issue truly lies, and I'd appreciate any help in this regard.
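Two hedged observations rather than a definitive fix. First, clusterExport() is called with 'val' before val exists, and optimParallel() evaluates fn on the workers, so every object and package the objective touches must be available there; building the objective first and exporting afterwards avoids evaluating against missing objects, which can surface as "missing value where TRUE/FALSE needed". Second, the rpois() draws make the objective stochastic, and the finite-difference gradients of L-BFGS-B handle a noisy objective badly, so stabilising the draws (or increasing them) may matter as much as the ordering. A minimal reordering sketch:
## A sketch under those assumptions, not a verified fix:
val <- dpd_poi(d2, c(0.5, FALSE, FALSE, FALSE, FALSE))  # build the objective first

cl <- makeCluster(25)
setDefaultCluster(cl = cl)
clusterExport(cl, c("d2", "val", "dpd_poi"))            # export after they exist
clusterEvalQ(cl, c(library(data.table), library(Rlab), library(HDInterval),
                   library(mvtnorm), library(matrixStats), library(dplyr),
                   library(cold)))
out <- optimParallel(par = c(beta_0 = 1, beta_1 = 0.1, beta_2 = 1, rho = 0.2),
                     fn = val, method = "L-BFGS-B",
                     lower = c(-10, -10, -10, 0), upper = c(Inf, Inf, Inf, 1))
stopCluster(cl)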

R function writing - getting errors: NaNs produced / Error in tsort[U + 1] : only 0's may be mixed with negative subscripts

I am creating an R function that calculates a bootstrapped bias-corrected and accelerated (BCa) interval, without using any pre-installed packages. My code mostly seems to work, but I am struggling with the code for the lower and upper limits of the interval. Any suggestions would be helpful.
BCa <- function(stat, X, k, level=0.95, ...){
  if(!is.numeric(k) || k <= 0){
    stop("The number of bootstrap resamples 'k' must be a numeric value greater than 0")
  }
  t.star <- stat(X, ...)
  t.k <- rep(NA, k)
  for(i in 1:k){
    Xi <- sample(X, replace=TRUE)
    t.k[i] <- stat(Xi, ...)
  }
  z0 <- qnorm(mean(t.k < t.star))
  n <- length(X)
  t.minus.j <- rep(NA, n)
  for(j in 1:n){
    Xj <- X[-j]
    t.minus.j[j] <- stat(Xj, ...)
  }
  t.bar.minus <- mean(t.minus.j)
  t.diff <- t.bar.minus - t.minus.j
  a <- ((sum(t.diff^3))/(6*(t.diff^2)^3/2))
  alpha <- 1-level
  tsort <- sort(t.k, decreasing = FALSE)
  L <- pnorm(z0 + ((z0+qnorm(alpha/2))/((1-a)*z0+qnorm(alpha/2))))
  U <- qnorm(z0 + ((z0+qnorm(alpha/2))/((1-a)*z0+qnorm(alpha/2))))
  if(!is.integer(L)){
    L <- floor(L*(k+1))
  }
  if(!is.integer(U)){
    U <- ceiling(U*(k+1))
  }
  lower.limit <- tsort[L]
  upper.limit <- tsort[U+1]
  return(list(t.star=t.star, ci=c(lower.limit, upper.limit)))
}
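For the limits themselves, here is a minimal sketch of the standard Efron BCa endpoints that the last block appears to be aiming for. Note that pnorm (not qnorm) maps back to a probability for both tails, that the acceleration needs sum() and parentheses around the 3/2 power, and that the adjusted percentile divides by 1 - a*(z0 + z); qnorm applied there can return NaN or negative values, which is consistent with the errors in the title:
## Sketch of the standard BCa endpoints (to replace everything after t.diff):
a <- sum(t.diff^3) / (6 * (sum(t.diff^2))^(3/2))      # acceleration
alpha <- 1 - level
z.lo <- qnorm(alpha/2)
z.hi <- qnorm(1 - alpha/2)
a1 <- pnorm(z0 + (z0 + z.lo) / (1 - a*(z0 + z.lo)))   # adjusted lower percentile
a2 <- pnorm(z0 + (z0 + z.hi) / (1 - a*(z0 + z.hi)))   # adjusted upper percentile
tsort <- sort(t.k)
lower.limit <- tsort[max(1, floor(a1 * k))]
upper.limit <- tsort[min(k, ceiling(a2 * k))]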

Why does this optimization algorithm in R stop after a few function evaluations?

I have code that was used for a paper.
After defining the function to be optimized, the author used the Nelder-Mead method to estimate the parameters. When I run the code, it freezes after 493 function evaluations, without showing any error message or other output. I've been trying to find some information, but I haven't been lucky. How can I modify the optim command so that the optimization runs to completion, and/or what is preventing the function from being optimized?
Here's the code. It's relatively long, BUT the second-to-last line (system.time(stcopfit...)) is the ONLY ONE I need to make work / fix / modify. So you can just copy and paste the code (as I said, taken from the author of the mentioned paper) and let it run; you don't have to go through all the code, just the last few lines. The data over which to run the optimization is a matrix of [0,1] uniform variables of dimension 2172x9.
Any help is appreciated, thanks!
Here's a screenshot in RStudio (it took around 2 minutes to arrive at 493, and then it's been stuck like this for the last 30 minutes):
Code:
#download older version of "sn" package
url <- "https://cran.r-project.org/src/contrib/Archive/sn/sn_1.0-0.tar.gz"
install.packages(url, repos=NULL, type="source")
install.packages("signal")
library(sn)
library(signal)
#1. redefine qst function
qst <- function (p, xi = 0, omega = 1, alpha = 0, nu = Inf, tol = 1e-08)
{
  if (length(alpha) > 1)
    stop("'alpha' must be a single value")
  if (length(nu) > 1)
    stop("'nu' must be a single value")
  if (nu <= 0)
    stop("nu must be non-negative")
  if (nu == Inf)
    return(qsn(p, xi, omega, alpha))
  if (nu == 1)
    return(qsc(p, xi, omega, alpha))
  if (alpha == Inf)
    return(xi + omega * sqrt(qf(p, 1, nu)))
  if (alpha == -Inf)
    return(xi - omega * sqrt(qf(1 - p, 1, nu)))
  na <- is.na(p) | (p < 0) | (p > 1)
  abs.alpha <- abs(alpha)
  if (alpha < 0)
    p <- (1 - p)
  zero <- (p == 0)
  one <- (p == 1)
  x <- xa <- xb <- xc <- fa <- fb <- fc <- rep(NA, length(p))
  nc <- rep(TRUE, length(p))
  nc[(na | zero | one)] <- FALSE
  fc[!nc] <- 0
  xa[nc] <- qt(p[nc], nu)
  xb[nc] <- sqrt(qf(p[nc], 1, nu))
  fa[nc] <- pst(xa[nc], 0, 1, abs.alpha, nu) - p[nc]
  fb[nc] <- pst(xb[nc], 0, 1, abs.alpha, nu) - p[nc]
  regula.falsi <- FALSE
  while (sum(nc) > 0) {
    xc[nc] <- if (regula.falsi)
      xb[nc] - fb[nc] * (xb[nc] - xa[nc])/(fb[nc] - fa[nc])
    else (xb[nc] + xa[nc])/2
    fc[nc] <- pst(xc[nc], 0, 1, abs.alpha, nu) - p[nc]
    pos <- (fc[nc] > 0)
    xa[nc][!pos] <- xc[nc][!pos]
    fa[nc][!pos] <- fc[nc][!pos]
    xb[nc][pos] <- xc[nc][pos]
    fb[nc][pos] <- fc[nc][pos]
    x[nc] <- xc[nc]
    nc[(abs(fc) < tol)] <- FALSE
    regula.falsi <- !regula.falsi
  }
  x <- replace(x, zero, -Inf)
  x <- replace(x, one, Inf)
  Sign <- function(x) sign(x) + as.numeric(x == 0)
  q <- as.numeric(xi + omega * Sign(alpha) * x)
  names(q) <- names(p)
  return(q)
}
#2. initial parameter setting
mkParam <- function(Omega, delta, nu){
  ndim <- length(delta)+1;
  R <- diag(ndim);
  for (i in 2:ndim){
    R[i,1] <- R[1,i] <- delta[i-1];
    if (i>=3){for (j in 2:(i-1)){R[i,j] <- R[j,i] <- Omega[i-1,j-1];}}
  }
  LTR <- t(chol(R));
  Mtheta <- matrix(0, nrow=ndim, ncol=ndim);
  for (i in 2:ndim){
    Mtheta[i,1] <- acos(LTR[i,1]);
    cumsin <- sin(Mtheta[i,1]);
    if (i >=3){for (j in 2:(i-1)){
      Mtheta[i,j] <- acos(LTR[i,j]/cumsin);
      cumsin <- cumsin*sin(Mtheta[i,j]);}
    }
  }
  c(Mtheta[lower.tri(Mtheta)], log(nu-2));
}
#3. from internal to original parameters
paramToExtCorr <- function(param){
  ntheta <- dim*(dim+1)/2;
  theta <- param[1:ntheta];
  ndim <- (1+sqrt(1+8*length(theta)))/2;
  LTR <- diag(ndim);
  for (i in 2:ndim){
    LTR[i,1] <- cos(theta[i-1]);
    cumsin <- sin(theta[i-1]);
    if (i >=3){for (j in 2:(i-1)){
      k <- i+ndim*(j-1)-j*(j+1)/2;
      LTR[i,j] <- cumsin*cos(theta[k]);
      cumsin <- cumsin*sin(theta[k]);}
    }
    LTR[i,i] <- cumsin;
  }
  R <- LTR %*% t(LTR);
  R;
}
#4. show estimated parameters and log likelihood
resultVec <- function(fit){
  R <- paramToExtCorr(fit$par);
  logLik <- -fit$value;
  Omega <- R[-1, -1];
  delta <- R[1, -1];
  ntheta <- dim*(dim+1)/2;
  nu <- exp(fit$par[ntheta+1])+2;
  c(Omega[lower.tri(Omega)], delta, nu, logLik);
}
#5. negative log likelihood for multivariate skew-t copula
stcopn11 <- function(param){
  N <- nrow(udat);
  mpoints <- 150;
  npar <- length(param);
  nu <- exp(param[npar])+2;
  R <- paramToExtCorr(param);
  Omega <- R[-1, -1];
  delta <- R[1, -1];
  zeta <- delta/sqrt(1-delta*delta);
  iOmega <- solve(Omega);
  alpha <- iOmega %*% delta / sqrt(1-(t(delta) %*% iOmega %*% delta)[1,1]);
  ix <- matrix(0, nrow=N, ncol=dim);
  lm <- matrix(0, nrow=N, ncol=dim);
  for (j in 1:dim){
    minx <- qst(min(udat[,j]), alpha=zeta[j], nu=nu);
    maxx <- qst(max(udat[,j]), alpha=zeta[j], nu=nu);
    xx <- seq(minx, maxx, length=mpoints);
    px <- sort(pst(xx, alpha=zeta[j], nu=nu));
    ix[,j] <- pchip(px, xx, udat[,j]);
    lm[,j] <- dst(ix[,j], alpha=zeta[j], nu=nu, log=TRUE);
  }
  lc <- dmst(ix, Omega=Omega, alpha=alpha, nu=nu, log=TRUE);
  -sum(lc)+sum(lm)
}
#6. sample setting
dim <- 9;
smdelta <- c(-0.36,-0.33,-0.48,-0.36,-0.33,-0.48,-0.36,-0.33,-0.48);
smdf <- 5;
smOmega <- cor(udat);
smzeta <- smdelta/sqrt(1-smdelta*smdelta);
iOmega <- solve(smOmega);
smalpha <- iOmega %*% smdelta /sqrt(1-(t(smdelta) %*% iOmega %*% smdelta)[1,1]);
#7. estimation
iniPar <- mkParam(diag(dim),numeric(dim),6);
system.time(stcopfit<-optim(iniPar,stcopn11,control=list(reltol=1e-8,trace=6)));
resultVec(stcopfit);
The parameters you arrive at by step 493 lead to an infinite loop in your qst function; not having any idea what this very complex code is actually doing, I'm afraid I can't diagnose further. Here's what I did to get that far:

- I set cur.param <- NULL in the global environment, then put cur.param <<- param within stcopn11; this saves the current set of parameters to the global environment, so that when you break out of the optim() call manually (via Ctrl-C or ESC, depending on your platform) you can inspect the current set of parameters and restart from them easily.
- I put in old-school debugging statements (e.g. cat("entering stcopn11\n") and cat("leaving stcopn11\n") at the beginning and next-to-last line of the objective function, plus a few within stcopn11 to serve as progress markers).
- Once I had the "bad" parameters, I used debug(stcopn11) and stcopn11(cur.param) to step through the function.
- I discovered that it was hanging on dimension 3 (j==3 in the for loop within stcopn11), and particularly on the first qst() call.
- I added a maxit=1e5 argument to qst; initialized it <- 1 before the while loop; set it <- it+1 each time through the loop; changed the stopping criterion to while (sum(nc) > 0 && it < maxit); and added if (it == maxit) stop("hit max number of iterations in qst") right after the loop (see the sketch below).

1e5 iterations in qst took 74 seconds; I have no idea whether it might stop eventually, but I didn't want to wait to find out.
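For reference, the guard from the last bullet, spliced into the root-finding loop of the qst() listing above (maxit is an addition, not part of the original sn code):
it <- 1
while (sum(nc) > 0 && it < maxit) {
  xc[nc] <- if (regula.falsi)
    xb[nc] - fb[nc] * (xb[nc] - xa[nc])/(fb[nc] - fa[nc])
  else (xb[nc] + xa[nc])/2
  fc[nc] <- pst(xc[nc], 0, 1, abs.alpha, nu) - p[nc]
  pos <- (fc[nc] > 0)
  xa[nc][!pos] <- xc[nc][!pos]
  fa[nc][!pos] <- fc[nc][!pos]
  xb[nc][pos] <- xc[nc][pos]
  fb[nc][pos] <- fc[nc][pos]
  x[nc] <- xc[nc]
  nc[(abs(fc) < tol)] <- FALSE
  regula.falsi <- !regula.falsi
  it <- it + 1
}
if (it == maxit) stop("hit max number of iterations in qst")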
This was my modified version of stcopn11:
cur.param <- NULL ## set parameter placeholder
##5. negative log likelihood for multivariate skew-t copula
stcopn11 <- function(param, debug=FALSE) {
  cat("stcopn11\n")
  cur.param <<- param ## record current params outside function
  N <- nrow(udat)
  mpoints <- 150
  npar <- length(param)
  nu <- exp(param[npar])+2
  R <- paramToExtCorr(param)
  Omega <- R[-1, -1]
  delta <- R[1, -1]
  zeta <- delta/sqrt(1-delta*delta)
  cat("... solving iOmega")
  iOmega <- solve(Omega)
  alpha <- iOmega %*% delta /
    sqrt(1-(t(delta) %*% iOmega %*% delta)[1,1])
  ix <- matrix(0, nrow=N, ncol=dim)
  lm <- matrix(0, nrow=N, ncol=dim)
  cat("... entering dim loop\n")
  for (j in 1:dim){
    if (debug) cat(j,"\n")
    minx <- qst(min(udat[,j]), alpha=zeta[j], nu=nu)
    maxx <- qst(max(udat[,j]), alpha=zeta[j], nu=nu)
    xx <- seq(minx, maxx, length=mpoints)
    px <- sort(pst(xx, alpha=zeta[j], nu=nu))
    ix[,j] <- pchip(px, xx, udat[,j])
    lm[,j] <- dst(ix[,j], alpha=zeta[j], nu=nu, log=TRUE)
  }
  lc <- dmst(ix, Omega=Omega, alpha=alpha, nu=nu, log=TRUE)
  cat("leaving stcopn11\n")
  -sum(lc)+sum(lm)
}

Speeding up matrix calculations

I have some matrix calculations in my code that are taking a long time to run. So far, the only way I can think of to speed them up is to use foreach instead of a for loop, but I feel like more can be done. Is there some way of vectorizing things, or an alternative to the for loop, that I'm missing?
Thanks!
require(foreach)
require(mvtnorm)
# some dummy input values
omega.input.jP <- matrix(rnorm(3000*5, 0.1, 0.1), 3000, 5)
nsteps.obs <- ncol(omega.input.jP)
sigma.j <- rnorm(3000, 0.02, 0.05)
rho1.j <- rnorm(3000, 0.8, 0.1)
rho2.j <- rnorm(3000, 0.05, 0.1)
y.lastobs <- 0.3
mu.input.jP <- matrix(NA, nrow(omega.input.jP), ncol(omega.input.jP))
# note: j indexes the sample (3000 samples in total) and P denotes the time step (5 time steps here)
mu.input.jP <- foreach (j = 1:nrow(mu.input.jP), .combine = "rbind") %do% {
  omega <- omega.input.jP[j, ]
  Sigma.mu <- GetSigmaMu(nsteps = nsteps.obs, sigma_ar = sigma.j[j],
                         rho1 = rho1.j[j], rho2 = rho2.j[j])
  mu.input.P <- GetConditionalMu(omega = omega, Sigma.mu = Sigma.mu, y = y.lastobs)
  return(mu.input.P)
}
GetSigmaMu <- function( # Get Sigma.mu, a \code{nsteps} x \code{nsteps} matrix, for AR(2) process
  nsteps,
  sigma_ar,
  rho1,
  rho2
) {
  rho <- c(rho1, rho2)
  cor <- ARMAacf(ar = rho, pacf = FALSE, lag.max = nsteps) # phi's, first element is phi0 = 1
  var <- sigma_ar^2/(1 - sum(rho*cor[2:3])) # stationary variance # cor[2:3] gives first two phi's; cor[1] gives phi0 = 1 # change JR, 20140304
  cov <- cor*var
  Sigma.mu <- matrix(NA, nsteps, nsteps)
  for (i in 1:nsteps) {
    for (k in 1:nsteps) {
      Sigma.mu[i,k] <- cov[abs(i-k)+1]
    }
  }
  return(Sigma.mu)
}
GetConditionalMu <- function( # Get values of mu given y
  omega,
  Sigma.mu,
  y,
  method = "svd" # Method to get eigenvalues in matrix. Default method does not work; "svd" used instead.
) {
  nsteps <- length(omega)
  one <- rep(1, nsteps)
  mean.mu.cond <- c(omega + (1/(sum(Sigma.mu)))*(Sigma.mu %*% one)*c(nsteps*y - t(one) %*% omega))
  Sigma.mu.cond <- Sigma.mu - (1/(sum(Sigma.mu)))*(Sigma.mu %*% one %*% t(one) %*% Sigma.mu)
  mu.cond <- rmvnorm(1, mean.mu.cond, Sigma.mu.cond, method = method)
  return(mu.cond)
}
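One possible simplification, offered as a sketch rather than a benchmarked fix: the double loop in GetSigmaMu fills a symmetric Toeplitz matrix (Sigma.mu[i,k] depends only on abs(i-k)), so stats::toeplitz() builds it in one call:
## Equivalent GetSigmaMu without the double loop:
GetSigmaMu <- function(nsteps, sigma_ar, rho1, rho2) {
  rho <- c(rho1, rho2)
  cor <- ARMAacf(ar = rho, pacf = FALSE, lag.max = nsteps)
  var <- sigma_ar^2 / (1 - sum(rho * cor[2:3]))  # stationary variance, as before
  cov <- cor * var
  toeplitz(cov[1:nsteps])                        # Sigma.mu[i,k] == cov[abs(i-k)+1]
}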

DEoptim parallel options

Hi, I am trying to understand how to get DEoptim to work with parallel processing, but I am struggling to pass the correct parameters into the function. Below is a reproducible example (with a financial context): it creates a random portfolio of 7 assets and optimises for ES. It was inspired by http://mpra.ub.uni-muenchen.de/28187/1/RJwrapper.pdf and also http://files.meetup.com/1772780/20120201_Ulrich_Parallel_DEoptim.pdf,
the second of which does include a parallel option, but I want to use Unix forking rather than SOCK clusters.
require(quantmod)
require(PerformanceAnalytics)
require(DEoptim)
tickers <- c("^GSPC","^IXIC","^TNX", "DIA","USO","GLD","SLV","UNG","^VIX","F","^FTSE","GS","MS","MSFT","MCD","COKE","AAPL","GOOG","T","C","BHP","RIO","CMG")
getSymbols(tickers)
tickers <- gsub("\\^","",tickers)
x <- lapply(tickers, function(x){ClCl(get(x))})
comb <- na.omit(do.call(merge,x))
colnames(comb) <- paste0(tickers,".cc")
obj <- function(w) {
  if (sum(w) == 0) { w <- w + 1e-2 }
  w <- w / sum(w)
  CVaR <- ES(weights = w,
             method = "gaussian",
             portfolio_method = "component",
             mu = mu,
             sigma = sigma)
  tmp1 <- CVaR$ES
  tmp2 <- max(CVaR$pct_contrib_ES - 0.225, 0)
  out <- tmp1 + 1e3 * tmp2
}
comb1 <- comb[,sample(1:ncol(comb),7)]
no.of.assets <- ncol(comb1)
mu <- colMeans(comb1)
sigma <- cov(comb1)
## The non-parallel version
output <- DEoptim(fn = obj,lower = rep(0, no.of.assets), upper = rep(1, no.of.assets))
## The parallel version that doesn't seem to work...
output <- DEoptim(fn = obj, lower = rep(0, no.of.assets), upper = rep(1, no.of.assets),
                  DEoptim.control(itermax=10000, trace=250, parallelType="parallel",
                                  packages=c("PerformanceAnalytics"), parVar=c("mu","sigma")))
I get the following error message:
Error in missing(packages) : 'missing' can only be used for arguments
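Offered as guesses rather than a verified fix: pass the control list through the named control argument, and check that the installed DEoptim is recent enough that DEoptim.control() actually accepts parallelType, packages, and parVar; an error thrown from inside the package's own argument handling, like this missing(packages) one, often points at a version mismatch.
output <- DEoptim(fn = obj,
                  lower = rep(0, no.of.assets),
                  upper = rep(1, no.of.assets),
                  control = DEoptim.control(itermax = 10000, trace = 250,
                                            parallelType = "parallel",
                                            packages = c("PerformanceAnalytics"),
                                            parVar = c("mu", "sigma")))
For Unix forking rather than SOCK clusters, registering a fork-based backend (e.g. doMC) and using parallelType = "foreach" is the usual route, assuming the installed version supports it.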
