I am trying to write a function to calculate a gradient in R. The function must specifically do so using a for loop. I am writing this function in order to illustrate how a for loop is less efficient than vectorized programming when trying to calculate the gradient.
The function takes in a design matrix X and a vector of coefficients beta and calculates the gradient of a cost function (the design matrix is a matrix of covariates with ones in the first column).
The cost function is the MSE (written here without the $1/n$ scaling), $L(\beta) = \sum_{i=1}^{n}\left(y_{i} - x_{i}^{\top}\beta\right)^{2}$. I am calculating the gradient using an analytical solution, taking the partial derivatives of the loss function. This gives the partial derivative
$\frac{\partial L}{\partial \beta_{0}} = -2\sum_{i=1}^{n}\left(y_{i} - x_{i}^{\top}\beta\right)$
for the first coefficient $\beta_{0}$ and then, similarly, for all other coefficients $\beta_{j}$,
$\frac{\partial L}{\partial \beta_{j}} = -2\sum_{i=1}^{n} x_{ij}\left(y_{i} - x_{i}^{\top}\beta\right).$
I managed to calculate the answer (implementing the above) using vectorized programming, via the single line of code below.
-2*t(X)%*%(y-X%*%beta)
My attempt at using a for loop does not work, but it looked like this:
# Initialize the matrix to hold gradient
gradient = matrix(0, nrow = nrow(beta))
for(i in 1:nrow(beta)){
if(i == 1){
gradient[i] = -2*sum(y - X%*%beta) # first value
}else if(i>1){
gradient[i] = -2*sum( t(X)%*%(y - X%*%beta) ) * apply(X[,-1],2,sum)[i-1]
} }
Below is the code to generate the data, together with the two implementations I tried in R; it can be copied straight into R to test fixes to the for loop.
# Values
# Random data Generated
n = 4
p = 3
beta_0 = 2
beta = matrix(c(beta_0,3,1,4), nrow= (p+1) ) # coefficients
X = runif(n=(n*p), min=-5,max= 5) # covariates
X = matrix(X, nrow = n, ncol = p)
X = cbind(1, X) # make a design matrix
y = apply(X[,-1],1,mean) # Response (Some function) #
# Print all to show all initial values
print(list("Initial Values:"="","n:"=n," p:" = p, "beta_0:" = beta_0," beta:"=beta,
" X:"=X," y:" = y))
# Function 1
# Find the gradient (using a for loop)
# The partial derivative of the loss function
df_ols = function(beta,X,y){
begin.time <- proc.time()
# Initialize the matrix to hold gradient
gradient = matrix(0, nrow = nrow(beta))
for(i in 1:nrow(beta)){
if(i == 1){
gradient[i] = -2*sum(y - X%*%beta) # first value
}else if(i>1){
gradient[i] = -2*sum( t(X)%*%(y - X%*%beta) ) * apply(X[,-1],2,sum)[i-1]
} }
end.time <- proc.time()
time <- (end.time-begin.time)[3]
print(list("gradient 1"=gradient,"time"=time))
}
df_ols(beta,X,y)
# Function 2
# Find the gradient Approach 2 using vectorized programming
gradient_3 <- function(X, beta){
begin.time <- proc.time()
# Finding the gradient
grad_3 <- -2*t(X)%*%(y-X%*%beta)
grad_3 <- matrix(grad_3, ncol = 1,nrow = ncol(X)) # Turn into a column matrix
end.time <- proc.time()
time <- (end.time-begin.time)[3]
print(list("gradient 3"= grad_3 ,"time"=time))
}
gradient_3(X, beta) # Assumed Correct
I apologize if I was too wordy. Any help would be appreciated.
I managed to get the for loop to work; the answer is below.
# Find the gradient (using a for loop) explicit, element-wise formulations
# The partial derivative of the loss function
df_ols = function(beta,X,y){
begin.time <- proc.time()
# Initialize the matrix to hold gradient
gradient = matrix(0, nrow = nrow(beta))
for(i in 1:nrow(beta)){
if(i == 1){
gradient[i] = -2*sum(y - X%*%beta) # first value
}else if(i>1){
gradient[i] = -2*t(X[,i])%*%(y-X%*%beta)
#gradient[i] = -2*sum( t(X)%*%(y - X%*%beta) ) * apply(X[,-1],2,sum)[i-1]
} }
end.time <- proc.time()
time <- (end.time-begin.time)[3]
print(list("gradient 1"=gradient,"time"=time))
}
df_ols(beta,X,y)
Running both the vectorized and elementwise formulations, we find the vectorized version is much faster. We implement the full process below.
# Random data Generated
n = 100000*12
p = 3
beta_0 = 2
beta = matrix(c(beta_0,3,1,4), nrow= (p+1) ) # coefficients
X = runif(n=(n*p), min=-5,max= 5) # covariates
X = matrix(X, nrow = n, ncol = p)
X = cbind(1, X) # make a design matrix
y = apply(X[,-1],1,mean) # Response (Some function)
# Print parameters. To show all initial values
print(list("Initial Values:" = '',"n:"=n," p:" = p, "beta_0:" = beta_0," beta:"=beta,
" X:"=round(X,digits=2)," y:" = round(y,digits=2)))
# Function 3
# Find the gradient Approach 3, using vectorized programming
gradient_3 <- function(X, beta,y){
begin.time <- proc.time()
# Find the partial derivative of the rest (j'th)
grad_3 <- -2*t(X)%*%(y-X%*%beta)
grad_3 <- matrix(grad_3, ncol = 1,nrow = ncol(X)) # Turn into a column matrix
end.time <- proc.time()
time <- (end.time-begin.time)[3]
print(list("gradient 3"= grad_3 ,"time"=time))
}
Showing the results
> df_ols(beta,X,y) # Elementwise
$`gradient 1`
[,1]
[1,] 4804829
[2,] 53311879
[3,] 13471077
[4,] 73259191
$time
elapsed
3.59
> gradient_3(X, beta,y) # Vectorized Programming solution
$`gradient 3`
[,1]
[1,] 4804829
[2,] 53311879
[3,] 13471077
[4,] 73259191
$time
elapsed
0.89
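As a side note (my addition, not part of the original post), the same comparison can also be timed from the outside with system.time(), rather than wrapping proc.time() calls inside each function:
system.time(df_ols(beta, X, y))      # for-loop / elementwise version
system.time(gradient_3(X, beta, y))  # vectorized version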
I would like to compute the Area Under the Curve defined by a set of experimental values. I created a function to calculate an approximation of the AUC using Simpson's rule, as I saw in this post. However, the function only works when it receives a vector of odd length. How can I modify the code to add the area of the last trapezoid when the input vector has an even length?
AUC <- function(x, h=1){
# AUC function computes the Area Under the Curve of a time series using
# Simpson's Rule (numerical method).
# https://link.springer.com/chapter/10.1007/978-1-4612-4974-0_26
# Arguments
# x: (vector) time series values
# h: (int) temporal resolution of the time series. default h=1
n = length(x)-1
xValues = seq(from=1, to=n, by=2)
sum <- list()
for(i in 1:length(xValues)){
n_sub <- xValues[[i]]-1
n <- xValues[[i]]
n_add <- xValues[[i]]+1
v1 <- x[[n_sub+1]]
v2 <- x[[n+1]]
v3 <- x[[n_add+1]]
s <- (h/3)*(v1+4*v2+v3)
sum <- append(sum, s)
}
sum <- unlist(sum)
auc <- sum(sum)
return(auc)
}
Here is a data example:
smoothed = c(0.3,0.317,0.379,0.452,0.519,0.573,0.61,0.629,0.628,0.613,0.587,0.556,0.521,
0.485,0.448,0.411,0.363,0.317,0.273,0.227,0.185,0.148,0.12,0.103,0.093,0.086,
0.082,0.079,0.076,0.071,0.066,0.059,0.053,0.051,0.052,0.057,0.067,0.081,0.103,
0.129,0.165,0.209,0.252,0.292,0.328,0.363,0.398,0.431,0.459,0.479,0.491,0.494,
0.488,0.475,0.457,0.43,0.397,0.357,0.316,0.285,0.254,0.227,0.206,0.189,0.181,
0.171,0.157,0.151,0.162,0.192,0.239)
One recommended way to handle an even number of points and still achieve good precision is to combine Simpson's 1/3 rule with Simpson's 3/8 rule; the 3/8 rule covers a segment of three intervals, so the combination can handle an even number of points. Such approaches can be found in engineering textbooks on numerical methods.
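For reference (standard textbook forms, added here for clarity): over evenly spaced points with spacing $h$, Simpson's 1/3 rule integrates two intervals at a time, $\int_{x_0}^{x_2} f(x)\,dx \approx \frac{h}{3}\left(f_0 + 4f_1 + f_2\right)$, while Simpson's 3/8 rule integrates three intervals, $\int_{x_0}^{x_3} f(x)\,dx \approx \frac{3h}{8}\left(f_0 + 3f_1 + 3f_2 + f_3\right)$. Because the 3/8 rule absorbs an odd leftover group of intervals, combining the two handles an even number of data points.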
However, as a practical matter, you can write a code chunk to check the data length and add a single trapezoid at the end, as was suggested in the last comment of the post to which you linked. I wouldn't assume that it is necessarily as precise as combining Simpson's 1/3 and 3/8 rules, but it is probably reasonable for many applications.
I would double-check my code edits below, but this is the basic idea.
AUC <- function(x, h=1){
# AUC function computes the Area Under the Curve of a time series using
# Simpson's Rule (numerical method).
# https://link.springer.com/chapter/10.1007/978-1-4612-4974-0_26
# Arguments
# x: (vector) time series values
# h: (int) temporal resolution of the time series. default h=1
#jh edit: check for even data length
#and chop off last data point if even
nn = length(x)
if(length(x) %% 2 == 0){
xlast = x[length(x)]
x = x[-length(x)]
}
n = length(x)-1
xValues = seq(from=1, to=n, by=2)
sum <- list()
for(i in 1:length(xValues)){
n_sub <- xValues[[i]]-1
n <- xValues[[i]]
n_add <- xValues[[i]]+1
v1 <- x[[n_sub+1]]
v2 <- x[[n+1]]
v3 <- x[[n_add+1]]
s <- (h/3)*(v1+4*v2+v3)
sum <- append(sum, s)
}
sum <- unlist(sum)
auc <- sum(sum)
##jh edit: add trapezoid for last two data points to result
if(nn %% 2 == 0){
auc <- auc + (x[length(x)] + xlast)/2 * h
}
return(auc)
}
sm = smoothed[-length(smoothed)]
length(sm)
[1] 70
#even data as an example
AUC(sm)
[1] 20.17633
#original odd data
AUC(smoothed)
[1] 20.389
There may be a good reason for you to prefer using Simpson's rule, but if you're just looking for a quick and efficient estimate of AUC, the trapezoid rule is far easier to implement, and does not require an even number of breaks:
AUC <- function(x, h = 1) sum((x[-1] + x[-length(x)]) / 2 * h)
AUC(smoothed)
#> [1] 20.3945
Here I show example code that uses Simpson's 1/3 and 3/8 rules in tandem for the numerical integration of data. As always, the usual caveats about possible coding errors or compatibility issues apply.
The output at the end compares the numerical estimates from this algorithm and from the trapezoidal rule against a precise value obtained with R's integrate function.
#Algorithm adapted from:
#Numerical Methods for Engineers, Seventh Edition,
#By Chapra and Canale, page 623
#Modified to accept data instead of functional values
#Modified by: Jeffrey Harkness, M.S.
##Begin Simpson's rule function code
simp13 <- function(dat, h = 1){
ans = 2*h*(dat[1] + 4*dat[2] + dat[3])/6
return(ans)}
simp13m <- function(dat, h = 1){
summ <- dat[1]
n <- length(dat)
nseq <- seq(2,(n-2),2)
for(i in nseq){
summ <- summ + 4*dat[i] + 2*dat[i+1]}
summ <- summ + 4*dat[n-1] + dat[n]
result <- (h*summ)/3
return(result)}
simp38 <- function(dat, h = 1){
ans <- 3*h*(dat[1] + 3*sum(dat[2:3]) + dat[4])/8
return(ans)}
simpson = function(dat, h = 1){
hin = h
len = length(dat)
comp <- len %% 2
##number of segments
if(len == 2){
ans = sum(dat)/2*h} ##n = 2 is the trapezoidal rule
if(len == 3){
ans = simp13(dat, h = hin)}
if(len == 4){
ans = simp38(dat,h = hin)}
if(len == 6){
ans <- simp38(dat[1:4],h = hin) + simp13(dat[4:len],h = hin)}
if(len > 6 & comp == 0){
ans = simp38(dat[1:4],h = hin) + simp13m(dat[4:len],h = hin)}
if(len >= 5 & comp == 1){
ans = simp13m(dat,h = hin)}
return(ans)}
##End Simpson's rule function code
This next section of code shows the performance comparison. This code can easily be altered for different test functions and cases.
The precision difference tends to change with the sample size and test function used; this example is not intended to imply that the difference is always this pronounced.
#other algorithm for comparison purposes, from Allan Cameron above
oa <- function(x, h = 1) sum((x[-1] + x[-length(x)]) / 2 * h)
#Testing and algorithm comparison code
simans = NULL; oaans = NULL; simerr = NULL; oaerr = NULL; mp = NULL
for( j in 1:10){
n = j
#f = function(x) cos(x) + 2 ##Test functions
f = function(x) 0.2 + 25*x - 200*x^2 + 675*x^3 - 900*x^4 + 400*x^5
a = 0;b = 10
h = (b-a)/n
datain = seq(a,b,by = h)
preans = integrate(f,a,b)$value #precise numerical estimate of test function
simans[j] = simpson(f(datain), h = h)
oaans[j] = oa(f(datain), h = h)
(simerr[j] = abs(simans[j] - preans)/preans * 100)
(oaerr[j] = abs(oaans[j] - preans)/preans * 100)
mp[j] = simerr[j] < oaerr[j]
}
(outframe = data.frame("simpsons percent diff" = simerr,"trapezoidal percent diff" = oaerr, "more precise?" = mp, check.names = F))
simpsons percent diff trapezoidal percent diff more precise?
1 214.73489738 214.734897 FALSE
2 15.07958148 64.993410 TRUE
3 6.70203621 29.816799 TRUE
4 0.94247384 16.955208 TRUE
5 0.54830021 10.905620 TRUE
6 0.18616767 7.593825 TRUE
7 0.12051767 5.588209 TRUE
8 0.05890462 4.282980 TRUE
9 0.04087107 3.386525 TRUE
10 0.02412733 2.744500 TRUE
I'm trying to understand how to use Dynamic Linear Modeling for forecasting. I found an example of the DLM functionality of the MARSS package in R being used for forecasting. Below is all the code in the example, starting with loading the data and ending with creating the in-sample forecasts.
What I don't understand is how I would make an out-of-sample forecast? The code below generates "in-sample" forecasts, where it uses already-known information to generate predictions about already-existing data.
Say I want to forecast the Salmon Survival tomorrow rather than throughout the last several weeks. How would I do that?
Any help would be appreciated.
# load the data
data(SalmonSurvCUI, package = "MARSS")
# get time indices
years <- SalmonSurvCUI[, 1]
# number of years of data
TT <- length(years)
# get response variable: logit(survival)
dat <- matrix(SalmonSurvCUI[, 2], nrow = 1)
# get predictor variable
CUI <- SalmonSurvCUI[, 3]
## z-score the CUI
CUI.z <- matrix((CUI - mean(CUI))/sqrt(var(CUI)), nrow = 1)
# number of regr params (slope + intercept)
m <- dim(CUI.z)[1] + 1
# for process eqn
B <- diag(m) ## 2x2; Identity
U <- matrix(0, nrow = m, ncol = 1) ## 2x1; both elements = 0
Q <- matrix(list(0), m, m) ## 2x2; all 0 for now
diag(Q) <- c("q.alpha", "q.beta") ## 2x2; diag = (q1,q2)
# for observation eqn
Z <- array(NA, c(1, m, TT)) ## NxMxT; empty for now
Z[1, 1, ] <- rep(1, TT) ## Nx1; 1's for intercept
Z[1, 2, ] <- CUI.z ## Nx1; predictor variable
A <- matrix(0) ## 1x1; scalar = 0
R <- matrix("r") ## 1x1; scalar = r
# only need starting values for regr parameters
inits.list <- list(x0 = matrix(c(0, 0), nrow = m))
# list of model matrices & vectors
mod.list <- list(B = B, U = U, Q = Q, Z = Z, A = A, R = R)
# fit univariate DLM
dlm1 <- MARSS(dat, inits = inits.list, model = mod.list)
# get list of Kalman filter output
kf.out <- MARSSkfss(dlm1)
## forecasts of regr parameters; 2xT matrix
eta <- kf.out$xtt1
## ts of E(forecasts)
fore.mean <- vector()
for (t in 1:TT) {
fore.mean[t] <- Z[, , t] %*% eta[, t, drop = FALSE]
}
# variance of regr parameters; 1x2xT array
Phi <- kf.out$Vtt1
## obs variance; 1x1 matrix
R.est <- coef(dlm1, type = "matrix")$R
## ts of Var(forecasts)
fore.var <- vector()
for (t in 1:TT) {
tZ <- matrix(Z[, , t], m, 1) ## transpose of Z
fore.var[t] <- Z[, , t] %*% Phi[, , t] %*% tZ + R.est
}
The model for beta and alpha is a random walk without drift, so the predictions of beta(TT+k) and alpha(TT+k) will just be beta(TT) and alpha(TT), where TT is the last time step in the data (in this case, the last time step of CUI.z).
So your prediction is
logit.survival(TT+k) = alpha(TT) + beta(TT)*CUI.z(TT+k)
alpha(TT) and beta(TT) are output via kf.out$xtT[,TT], i.e. the last state estimates. You will need to provide a value of CUI.z at t = TT+k.
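A minimal sketch of that in code (my addition, not from the original answer), assuming a hypothetical future covariate value CUI.z.future and the dlm1, kf.out, and m objects created in the question:
# One-step-ahead out-of-sample forecast at t = TT + 1 (illustrative only)
CUI.z.future <- 0.5                        # placeholder: the future z-scored CUI value you must supply
states.last <- kf.out$xtT[, TT]            # alpha(TT) and beta(TT), the last state estimates
Z.new <- matrix(c(1, CUI.z.future), 1, m)  # design row for the new time step
fore.mean.new <- Z.new %*% states.last     # E[logit(survival)] at TT + 1
# A rough forecast variance: last state covariance pushed through Z.new plus the obs variance;
# for k > 1 steps ahead the state covariance also grows by k * Q under the random walk.
Phi.last <- kf.out$VtT[, , TT]
fore.var.new <- Z.new %*% Phi.last %*% t(Z.new) + coef(dlm1, type = "matrix")$R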
MARSS version 3.11.0 will have a predict function that outputs these predictions along with prediction intervals, but the release date is sometime in late summer 2020. The functionality is in the GitHub development site (under the resids_update branch), but final testing is still being done.
I want to perform a bootstrap simulation 1000 times and compute percentile confidence intervals 1000 times for different sample sizes n = 10, 20, ..., 100. I've solved this problem and I'm just asking: instead of repeating this huge computation 10 times, covering 300 lines of code, is there a way to shorten it, like running the function over and over again 10 times? I tried a for-loop but it did not work. Here is the code that does work:
B = 1000 # number of replicates
kHat = Parameters[1] # approx = 2.06786
gammaHat = Parameters[2] # approx = 0.51144
TheoreticalMean = kHat/gammaHat
TheoreticalVariance = kHat/gammaHat^2
PercCoverage = vector("numeric", 10L)
n = 10 # sample size
getCI = function(B, k, gamma, n) {
getM = function(orgData, idx) {
bsM = mean(orgData[idx])
bsS2M = (((n-1) / n) * var(orgData[idx])) / n
c(bsM, bsS2M)
}
F = rgamma(n, kHat, gammaHat) # simulated data: original sample
M = mean(F) # M from original sample
S2M = (((n-1)/n)*var(F))/n # S^2(M) from original sample
# bootstrap
boots = t(replicate(B, getM(F, sample(seq(along=F), replace=TRUE))))
Mstar = boots[,1] # M* for each replicate
S2Mstar = boots[,2] # S^2*(M) for each replicate
biasM = mean(Mstar)-M # bias of estimator M
# indices for sorted vector of estimates
idx = trunc((B+1)*c(0.05/2,1-0.05/2))
ciPerc = sort(Mstar)[idx] # percentile CI
c(perc=ciPerc)
}
# 1000 bootstraps
Nrep <- 1000 # number of bootstraps
CIs <- t(replicate(Nrep, getCI(B, kHat, gammaHat, n)))
# coverage probabilities
PercCoverage[1] = sum((CIs[,"perc1"]<TheoreticalMean) & (CIs[,"perc2"]>TheoreticalMean)) / Nrep
However, here I need to script this for n=10, n=20 and so on to n=100, and each time I need to change PercCoverage[1] to PercCoverage[2]...PercCoverage[10] in order to store these values in an array for later plotting.
I tried setting n=c(10,20,30,40,50,60,70,80,90,100) and then placing all of the above in a for loop, but the function getCI needed a single numerical value for n.
EDIT: For loop attempt:
n = c(10,20,30,40,50,60,70,80,90,100)
B = 1000 # number of replicates
kHat = Parameters[1] # approx = 2.06786
gammaHat = Parameters[2] # approx = 0.51144
TheoreticalMean = kHat/gammaHat
TheoreticalVariance = kHat/gammaHat^2
PercCoverage = vector("numeric", 10L)
for (i in length(n)){
getCI = function(B, k, gamma, n[i]) {
getM = function(orgData, idx) {
bsM = mean(orgData[idx])
bsS2M = (((n[i]-1) / n[i]) * var(orgData[idx])) / n[i]
c(bsM, bsS2M)
}
F = rgamma(n[i], kHat, gammaHat) # simulated data: original sample
M = mean(F) # M from original sample
S2M = (((n[i]-1)/n[i])*var(F))/n[i] # S^2(M) from original sample
# bootstrap
boots = t(replicate(B, getM(F, sample(seq(along=F), replace=TRUE))))
Mstar = boots[,1] # M* for each replicate
S2Mstar = boots[,2] # S^2*(M) for each replicate
biasM = mean(Mstar)-M # bias of estimator M
# indices for sorted vector of estimates
idx = trunc((B+1)*c(0.05/2,1-0.05/2))
ciPerc = sort(Mstar)[idx] # percentile CI
c(perc=ciPerc)
}
# 1000 bootstraps
Nrep <- 1000 # number of bootstraps
CIs <- t(replicate(Nrep, getCI(B, kHat, gammaHat, n[i])))
# coverage probabilities
PercCoverage[i] = sum((CIs[,"perc1"]<TheoreticalMean) & (CIs[,"perc2"]>TheoreticalMean)) / Nrep
}
Consider defining multiple functions: a master one, bootstrap_proc, plus getCI and getM. Then pass your sequence of sample sizes to lapply (for a list return) or sapply (for a numeric vector), with each call invoking the master function and returning a coverage probability (the last line of the function). Be sure to remove the hard-coded n = 10.
Define Functions
B = 1000 # number of replicates
kHat = Parameters[1] # approx = 2.06786
gammaHat = Parameters[2] # approx = 0.51144
TheoreticalMean = kHat/gammaHat
TheoreticalVariance = kHat/gammaHat^2
bootstrap_proc <- function(n) {
Nrep <- 1000 # 1000 bootstraps
CIs <- t(replicate(Nrep, getCI(B, kHat, gammaHat, n)))
# coverage probabilities
sum((CIs[,"perc1"]<TheoreticalMean) & (CIs[,"perc2"]>TheoreticalMean)) / Nrep
}
getCI <- function(B, k, gamma, n) {
F <- rgamma(n, kHat, gammaHat) # simulated data: original sample
M <- mean(F) # M from original sample
S2M <- (((n-1)/n)*var(F))/n # S^2(M) from original sample
# bootstrap
boots <- t(replicate(B, getM(F, sample(seq(along=F), replace=TRUE),n)))
Mstar <- boots[,1] # M* for each replicate
S2Mstar <- boots[,2] # S^2*(M) for each replicate
biasM <- mean(Mstar)-M # bias of estimator M
# indices for sorted vector of estimates
idx <- trunc((B+1)*c(0.05/2,1-0.05/2))
ciPerc <- sort(Mstar)[idx] # percentile CI
c(perc=ciPerc)
}
getM <- function(orgData, idx, n) {
bsM <- mean(orgData[idx])
bsS2M <- (((n-1) / n) * var(orgData[idx])) / n
c(bsM, bsS2M)
}
Call Function
sample_sizes <- c(10,20,30,40,50,60,70,80,90,100)
# LIST
PercCoverage <- lapply(sample_sizes, bootstrap_proc)
# VECTOR
PercCoverage <- sapply(sample_sizes, bootstrap_proc)
# VECTOR
PercCoverage <- vapply(sample_sizes, bootstrap_proc, numeric(1))
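As a possible follow-up (my addition, not part of the original answer), the resulting coverages can then be plotted against the sample sizes, which is what the question was building toward:
# Assumes PercCoverage is the numeric vector from sapply/vapply above
plot(sample_sizes, PercCoverage, type = "b",
     xlab = "Sample size n", ylab = "Coverage of 95% percentile CI")
abline(h = 0.95, lty = 2)   # nominal coverage, for reference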
Introduction to the problem
I am trying to write code in R to obtain the weights of an Equally-weighted Risk Contribution (ERC) portfolio. As some of you may know, this portfolio construction was presented by Maillard, Roncalli and Teiletche.
Skipping technicalities, in order to find the optimal weights of an ERC portfolio one needs to solve the following Sequential Quadratic Programming problem:
$x^{*} = \arg\min_{x} f(x)$ subject to $\sum_{i=1}^{N} x_{i} = 1$ and $0 \le x_{i} \le 1$,
with:
$f(x) = \sum_{i=1}^{N}\sum_{j=1}^{N}\left( x_{i}(\Sigma x)_{i} - x_{j}(\Sigma x)_{j} \right)^{2}$.
Suppose we are analysing N assets. In the above formulas, x is the (N x 1) vector of portfolio weights and $\Sigma$ is the (N x N) variance-covariance matrix of asset returns.
What I have done so far
Using the function slsqp of the package nloptr which solves SQP problems, I would like to solve the above minimisation problem. Here is my code. Firstly, the objective function to be minimised:
ObjFuncERC <- function (x, Sigma) {
sum <- 0
R <- Sigma %*% x
for (i in 1:N) {
for (j in 1:N) {
sum <- sum + (x[i]*R[i] - x[j]*R[j])^2
}
}
}
Secondly, the starting point (we start by an equally-weighted portfolio):
x0 <- matrix(1/N, nrow = N, ncol = 1)
Then, the equality constraint (weights must sum to one, that is: the sum of the weights minus one equals zero):
heqERC <- function (x) {
h <- numeric(1)
h[1] <- (t(matrix(1, nrow = N, ncol = 1)) %*% x) - 1
return(h)
}
Finally, the lower and upper bounds constraints (weights cannot exceed one and cannot be lower than zero):
lowerERC <- matrix(0, nrow = N, ncol = 1)
upperERC <- matrix(1, nrow = N, ncol = 1)
So that the function which should output optimal weights is:
slsqp(x0 = x0, fn = ObjFuncERC, Sigma = Sigma, lower = lowerERC, upper = upperERC, heq = heqERC)
Unfortunately, I do not know how to share my variance-covariance matrix with you (it is named Sigma and is a (29 x 29) matrix, so that N = 29) so that you can reproduce my result, but you can simulate one.
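For example, a minimal stand-in can be simulated like this (my addition, purely illustrative):
# Simulate a (29 x 29) positive-definite matrix to play the role of Sigma
set.seed(1)
N <- 29
A <- matrix(rnorm(N * N), N, N)
Sigma <- crossprod(A) / N   # t(A) %*% A, symmetric and positive definite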
The output error
Running the above code yields the following error:
Error in nl.grad(x, fn) :
Function 'f' must be a univariate function of 2 variables.
I have no idea what to do guys. Probably, I have misunderstood how things must be written down in order for the function slsqp to understand what to do. Can someone help me understand how to fix the problem and get the result I want?
UPDATE ONE: as pointed out by @jogo in the comments, I have updated the code, but it still produces an error. The code and the error above are now updated.
UPDATE 2: as requested by @jaySf, here is the full code that allows you to reproduce my error.
## ERC Portfolio Test
# Preliminary Operations
rm(list=ls())
require(quantmod)
require(nloptr)
# Load Stock Data in R through Yahoo! Finance
stockData <- new.env()
start <- as.Date('2014-12-31')
end <- as.Date('2017-12-31')
tickers <-c('AAPL','AXP','BA','CAT','CSCO','CVX','DIS','GE','GS','HD','IBM','INTC','JNJ','JPM','KO','MCD','MMM','MRK','MSFT','NKE','PFE','PG','TRV','UNH','UTX','V','VZ','WMT','XOM')
getSymbols.yahoo(tickers, env = stockData, from = start, to = end, periodicity = 'monthly')
# Create a matrix containing the price of all assets
prices <- do.call(cbind,eapply(stockData, Op))
prices <- prices[-1, order(colnames(prices))]
colnames(prices) <- tickers
# Compute Returns
returns <- diff(prices)/lag(prices)[-1,]
# Compute variance-covariance matrix
Sigma <- var(returns)
N <- 29
# Set up the minimization problem
ObjFuncERC <- function (x, Sigma) {
sum <- 0
R <- Sigma %*% x
for (i in 1:N) {
for (j in 1:N) {
sum <- sum + (x[i]*R[i] - x[j]*R[j])^2
}
}
}
x0 <- matrix(1/N, nrow = N, ncol = 1)
heqERC <- function (x) {
h <- numeric(1)
h[1] <- t(matrix(1, nrow = N, ncol = 1)) %*% x - 1
}
lowerERC <- matrix(0, nrow = N, ncol = 1)
upperERC <- matrix(1, nrow = N, ncol = 1)
slsqp(x0 = x0, fn = ObjFuncERC, Sigma = Sigma, lower = lowerERC, upper = upperERC, heq = heqERC)
I spotted several mistakes in your code. For instance, ObjFuncERC is not returning any value. You should use the following instead:
# Set up the minimization problem
ObjFuncERC <- function (x, Sigma) {
sum <- 0
R <- Sigma %*% x
for (i in 1:N) {
for (j in 1:N) {
sum <- sum + (x[i]*R[i] - x[j]*R[j])^2
}
}
sum
}
heqERC doesn't return anything either; I also changed your function a bit:
heqERC <- function (x) {
sum(x) - 1
}
I made those changes and tried slsqp without lower and upper and it worked. Still, another thing to consider is that you set lowerERC and upperERC as matrices. Use the following instead:
lowerERC <- rep(0,N)
upperERC <- rep(1,N)
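Putting it together, a minimal sketch of the corrected call (my addition; it assumes N, a covariance matrix Sigma, and the corrected ObjFuncERC, heqERC, lowerERC, and upperERC above are already defined, for example with a simulated Sigma if you cannot use the real one):
library(nloptr)
x0 <- rep(1/N, N)   # start from the equally-weighted portfolio
res <- slsqp(x0 = x0, fn = ObjFuncERC, Sigma = Sigma,
             lower = lowerERC, upper = upperERC, heq = heqERC)
w <- res$par
round(w, 4)   # candidate ERC weights
sum(w)        # should be approximately 1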
Hope this helps.
I have written a custom likelihood function that fits a multi-data model integrating mark-recapture and telemetry data (sensu Royle et al. 2013, Methods in Ecology and Evolution). The likelihood function is designed to be flexible in terms of whether and how many covariates are specified for the different linear models in the different likelihood components, which is determined by the values supplied as function arguments (i.e., the data matrices "detcovs" and "dencovs" in my code). The likelihood function works when I supply it directly to optimization functions (e.g., optim or nlm), but it does not play nice with the mle2 function in the bbmle package. My problem is that I continually run into the following error: "some named arguments in 'start' are not arguments to the specified log-likelihood function". This is my first attempt at writing custom likelihood functions, so I'm sure there are general coding conventions of which I'm unaware that would make such tasks much more efficient and amenable to the mle2 function. Below are my likelihood function, the code creating the starting-value objects, and the code calling the mle2 function. Any advice on how to solve the error and general comments on writing cleaner functions are welcome. Many thanks in advance.
Edit: As requested, I have simplified the likelihood function and provided code to simulate reproducible data to which the model can be fit. Included in the simulation code are 2 custom functions and use of the raster function from the raster package. Hopefully, I have sufficiently simplified everything to enable others to troubleshoot. Again, many thanks for your help!
Jared
Likelihood function:
CSCR.RSF.intlik2.EXAMPLE <- function(alpha0,sigma,alphas=NULL,betas=NULL,n0,yscr=NULL,K=NULL,X=X,trapcovs=NULL,Gden=NULL,Gdet=NULL,ytel=NULL,stel=NULL,
dencovs=NULL,detcovs=NULL){
#
# this version of the code handles a covariate on log(Density). This is starting value 5
#
# start = vector of starting values
# yscr = nind x ntraps encounter matrix
# K = number of occasions
# X = trap locations
# Gden = matrix with grid cell coordinates for density raster
# Gdet = matrix with gride cell coordinates for RSF raster
# dencovs = all covariate values for all nGden pixels in density raster
# trapcovs = covariate value at trap locations
# detcovs = all covariate values for all nGrsf pixels in RSF raster
# ytel = nguys x nGdet matrix of telemetry fixes in each nGdet pixels
# stel = home range center of telemetered individuals, IF you wish to estimate it. Not necessary
# alphas = starting values for RSF/detfn coefficients excluding sigma and intercept
# alpha0 = starting values for RSF/detfn intercept
# sigma = starting value for RSF/detfn sigma
# betas = starting values for density function coefficients
# n0 = starting value for number of undetected individuals on log scale
#
n0 = exp(n0)
nGden = nrow(Gden)
D = e2dist(X,Gden)
nGdet <- nrow(Gdet)
alphas = alphas
loglam = alpha0 -(1/(2*sigma*sigma))*D*D + as.vector(trapcovs%*%alphas) # ztrap recycled over nG
psi = exp(as.vector(dencovs%*%betas))
psi = psi/sum(psi)
probcap = 1-exp(-exp(loglam))
#probcap = (exp(theta0)/(1+exp(theta0)))*exp(-theta1*D*D)
Pm = matrix(NA,nrow=nrow(probcap),ncol=ncol(probcap))
ymat = yscr
ymat = rbind(yscr,rep(0,ncol(yscr)))
lik.marg = rep(NA,nrow(ymat))
for(i in 1:nrow(ymat)){
Pm[1:length(Pm)] = (dbinom(rep(ymat[i,],nGden),rep(K,nGden),probcap[1:length(Pm)],log=TRUE))
lik.cond = exp(colSums(Pm))
lik.marg[i] = sum( lik.cond*psi )
}
nv = c(rep(1,length(lik.marg)-1),n0)
part1 = lgamma(nrow(yscr)+n0+1) - lgamma(n0+1)
part2 = sum(nv*log(lik.marg))
out = -1*(part1+ part2)
lam = t(exp(a0 - (1/(2*sigma*sigma))*t(D2)+ as.vector(detcovs%*%alphas)))# recycle zall over all ytel guys
# lam is now nGdet x nG!
denom = rowSums(lam)
probs = lam/denom # each column is the probs for a guy at column [j]
tel.loglik = -1*sum( ytel*log(probs) )
out = out + tel.loglik
out
}
Data simulation code:
library(raster)
library(bbmle)
e2dist <- function (x, y){
i <- sort(rep(1:nrow(y), nrow(x)))
dvec <- sqrt((x[, 1] - y[i, 1])^2 + (x[, 2] - y[i, 2])^2)
matrix(dvec, nrow = nrow(x), ncol = nrow(y), byrow = F)
}
spcov <- function(R) {
v <- sqrt(nrow(R))
D <- as.matrix(dist(R))
V <- exp(-D/2)
cov1 <- t(chol(V)) %*% rnorm(nrow(R))
Rd <- as.data.frame(R)
colnames(Rd) <- c("x", "y")
Rd$C <- as.numeric((cov1 - mean(cov1)) / sd(cov1))
return(Rd)
}
set.seed(1234)
co <- seq(0.3, 0.7, length=5)
X <- cbind(rep(co, each=5),
rep(co, times=5))
B <- 10
co <- seq(0, 1, length=B)
Z <- cbind(rep(co, each=B), rep(co, times=B))
dencovs <- cbind(spcov(Z),spcov(Z)[,3]) # ordered as reading raster image from left to right, bottom to top
dimnames(dencovs)[[2]][3:4] <- c("dencov1","dencov2")
denr.list <- vector("list",2)
for(i in 1:2){
denr.list[[i]] <- raster(
list(x=seq(0,1,length=10),
y=seq(0,1,length=10),
z=t(matrix(dencovs[,i+2],10,10,byrow=TRUE)))
)
}
B <- 20
co <- seq(0, 1, length=B)
Z <- cbind(rep(co, each=B), rep(co, times=B))
detcovs <- cbind(spcov(Z),spcov(Z)[,3]) # ordered as reading raster image from left to right, bottom to top
dimnames(detcovs)[[2]][3:4] <- c("detcov1","detcov2")
detr.list <- vector("list",2)
J <- nrow(X) # number of traps; defined here so trapcovs can be sized
trapcovs <- matrix(0,J,2)
for(i in 1:2){
detr.list[[i]] <- raster(
list(x=seq(0,1,length=20),
y=seq(0,1,length=20),
z=t(matrix(detcovs[,i+2],20,20,byrow=TRUE)))
)
trapcovs[,i] <- extract(detr.list[[i]],X)
}
alpha0 <- -3
sigma <- 0.15
alphas <- c(1,-1)
beta0 <- 3
betas <- c(-1,1)
pixelArea <- (dencovs$y[2] - dencovs$y[1])^2
mu <- exp(beta0 + as.matrix(dencovs[,3:4])%*%betas)*pixelArea
EN <- sum(mu)
N <- rpois(1, EN)
pi <- mu/sum(mu)
s <- dencovs[sample(1:nrow(dencovs), size=N, replace=TRUE, prob=pi),1:2]
J <- nrow(X)
K <- 10
yc <- d <- p <- matrix(NA, N, J)
D <- e2dist(s,X)
loglam <- t(alpha0 - t((1/(2*sigma*sigma))*D*D) + as.vector(trapcovs%*%alphas))
p <- 1-exp(-exp(loglam))
for(i in 1:N) {
for(j in 1:J) {
yc[i,j] <- rbinom(1, K, p[i,j])
}
}
detected <- apply(yc>0, 1, any)
yscr <- yc[detected,]
ntel <- 5
nfixes <- 100
poss.tel <- which(s[,1]>0.2 & s[,1]<0.8 & s[,2]>0.2 & s[,2]<0.8)
stel.id <- sample(poss.tel,ntel)
stel <- s[stel.id,]
ytel <- matrix(NA,ntel,nrow(detcovs))
d <- e2dist(stel,detcovs[,1:2])
lam <- t(exp(1 - t((1/(2*sigma*sigma))*d*d) + as.vector(as.matrix(detcovs[,3:4])%*%alphas)))
for(i in 1:ntel){
ytel[i,] <- rmultinom(1,nfixes,lam[i,]/sum(lam[i,]))
}
Specify starting values and call mle2 function:
start1 <- list(alpha0=alpha0,sigma=sigma,alphas=alphas,betas=betas,n0=log(N-nrow(yscr)))
parnames(CSCR.RSF.intlik2.EXAMPLE) <- names(start1)
out1 <- mle2(CSCR.RSF.intlik2.EXAMPLE,start=start1,method="SANN",optimizer="optim",
data=list(yscr=yscr,K=K,X=X,trapcovs=trapcovs,Gden=dencovs[,1:2],Gdet=detcovs[,1:2],
ytel=ytel,stel=stel,dencovs=as.matrix(dencovs[,3:4]),detcovs=as.matrix(detcovs[,3:4]))
)