Non-finite function value from integrate() in R: linear regression with error decomposition

I'm working on a linear regression in which the error term is decomposed into a normal random variable and a beta random variable (for details see, for instance, http://www.eea-esem.com/files/papers/eea-esem/2012/405/Cardak-Johnston-Martin_20111110.pdf). The problem is that when I run MLE with maxLik in R, it returns an error and the process stops. Here is the code:
#######################DGP################
library(maxLik)
set.seed(1357)
n=1000
x1 = matrix(rnorm(n,3,1.5),n,1)
x2 = matrix(rnorm(n,1,2.5),n,1)
x3 = sample(c(0,1), n, replace = TRUE)
X = cbind(1,x1,x2)
b = matrix(c(1,2,-3),1,3)
y = X%*%t(b)+rnorm(n,0,2)
g0 = 2
g1 = 1
g2 = 1.5
g3 = 2.4
#######################
p = exp(g0+g1*x3)
q = exp(g2+g3*x3)
#cbind(p,q)
eta = rep(0,n)
for (i in 1:n){
  eta[i] = rbeta(1, shape1 = p[i], shape2 = q[i])
}
plot(density(eta))
y2 = y+eta*2
summary(lm(y~X-1))
ols = lm(y2~X-1)
summary(ols)
#######################Model specification################
normbeta = function(u, nu, sigma2, p, q, k){
  eta = (u - nu)/k
  ff = dnorm(nu, 0, sqrt(sigma2)) * dbeta(eta, shape1 = p, shape2 = q)/k
  return(ff)
}
obj = function(u, sigma2, p, q, k){
  integrate(normbeta, u = u, lower = u - k, upper = u,
            sigma2 = sigma2, p = p, q = q, k = k)$value
}
objv = Vectorize(obj)
############MLE#################
loglik = function(b){
  a = split(b, rep(1:7, c(3, 1, 1, 1, 1, 1, 1)))
  beta   = a[[1]]
  sigma2 = a[[2]]^2
  k2     = a[[3]]^2
  k      = sqrt(k2)
  g0 = a[[4]]
  g1 = a[[5]]
  g2 = a[[6]]
  g3 = a[[7]]
  p = exp(g0 + g1*x3)
  q = exp(g2 + g3*x3)
  u = y - X %*% beta
  l = objv(u = u, sigma2 = sigma2, p = p, q = q, k = k)
  return(log(l))
}
a = list()
a[[1]] = coef(ols)
a[[2]] = sqrt(deviance(ols)/df.residual(ols))
a[[3]] = c(1,2,1,1.5,2.4)
b = unlist(a)
sum(loglik(b))
MLE = maxLik(logLik=loglik,print.level=2,start=b,method="BHHH")
summary(MLE)
The reported error is:
----- Initial parameters: -----
fcn value: -2331.714
parameter initial gradient free
X1 2.042012 -338.04135 1
X2 1.953121 -1008.58628 1
X3 -2.969475 -310.82131 1
2.008750 212.29245 1
1.000000 -161.51965 1
2.000000 -75.76826 1
1.000000 -29.25573 1
1.500000 75.10844 1
2.400000 29.20714 1
Condition number of the (active) hessian: 1.657308e+14
Error in integrate(normbeta, u = u, lower = u - k, upper = u, sigma2 = sigma2, :
non-finite function value
In addition: Warning message:
In dbeta(eta, shape1 = p, shape2 = q) : NaNs produced
I believe the error might come from dbeta(eta, shape1 = p, shape2 = q), since the beta density parameters p and q must be positive. However, I have already constrained them to be positive with the exp() function. Does anyone know what is going on here? Many thanks.
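One plausible culprit (an assumption, not a confirmed diagnosis) is that the integrand handed to integrate() becomes non-finite: at the endpoints of the beta support dbeta() returns Inf whenever p or q drops below 1, and if exp(g0 + g1*x3) overflows during optimization the shape parameters become Inf and dbeta() returns NaN. A minimal sketch of a guarded integrand that never passes a non-finite value to integrate():
# Guarded integrand (a sketch, not the confirmed fix): return 0 whenever the
# transformed variable leaves the open interval (0, 1) or the density is not finite.
normbeta_safe = function(u, nu, sigma2, p, q, k){
  eta = (u - nu)/k
  ff  = numeric(length(nu))
  ok  = eta > 0 & eta < 1                    # beta density only lives on (0, 1)
  ff[ok] = dnorm(nu[ok], 0, sqrt(sigma2)) *
           dbeta(eta[ok], shape1 = p, shape2 = q) / k
  ff[!is.finite(ff)] = 0                     # clip Inf near the endpoints (p or q < 1)
  ff
}

obj = function(u, sigma2, p, q, k){
  integrate(normbeta_safe, u = u, lower = u - k, upper = u,
            sigma2 = sigma2, p = p, q = q, k = k)$value
}
objv = Vectorize(obj)
It is also worth re-checking sum(loglik(b)) at the start values and watching whether exp(g0 + g1*x3) stays finite as maxLik updates the g parameters.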

Related

Model MLE finite values needed

I am trying to use mle to fit my model to some data, but I am getting the error "L-BFGS-B needs finite values of 'fn'".
I am not really sure how to find where the non-finite values come from.
library(deSolve)   # ode()
library(stats4)    # mle()

sir.model2 = function(t, x, params){
  # extract the state variables
  S = x[1]
  I = x[2]
  R = x[3]
  # extract the parameters
  if (t < params['Time']) {
    beta = params['beta.1']
  } else {
    beta = params['beta.2']
  }
  gamma = params['gamma']
  # compute the derivatives
  dSdt = -beta * S * I
  dIdt = beta * S * I - gamma * I
  dRdt = gamma * I
  # return the computed derivatives
  list(c(dSdt, dIdt, dRdt))
}
# define the negative log likelihood function
NLL2 = function(beta.1, beta.2, time, gamma, I.init){
  # specify the parameter values
  params = c(beta.1 = beta.1, beta.2 = beta.2, Time = time, gamma = gamma)
  # specify the model's initial conditions
  xstart = c(S = 1 - I.init, I = I.init, R = 0)
  # solve the differential equations ('times' is assumed to exist in the workspace)
  out = ode(func = sir.model2, y = xstart, times = times, parms = params)
  out = as.data.frame(out)
  out$beta[out$time < params['Time']]  <- params['beta.1']
  out$beta[out$time >= params['Time']] <- params['beta.2']
  # calculate the negative log likelihood ('data.edu' and 'N.edu' are the asker's data)
  -sum(dpois(data.edu$Cases, out$beta * out$S * out$I * N.edu, log = TRUE))
}
m = mle(minuslogl = NLL2,
        method = "L-BFGS-B",
        start = list(beta.1 = 1.5 * 1/7, beta.2 = 1.5 * 1/7, time = 20,
                     gamma = 1/7, I.init = 0.0001),
        lower = list(beta.1 = .1, beta.2 = .1, time = .1, gamma = 1e-3, I.init = 1e-3),
        upper = list(beta.1 = 5, beta.2 = 5, time = 107, gamma = 5, I.init = 1 - 1e-3)
)
coef(m)
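One way to find where the non-finite values arise (a general debugging sketch, not from the original thread) is to wrap the objective so it reports any parameter combination that produces a non-finite value and returns a large finite penalty instead; in this model the NaNs typically come from dpois() being handed a negative or NA rate (out$beta * out$S * out$I * N.edu) when the ODE solution misbehaves.
# Debugging wrapper (a sketch, not from the original thread): report any parameter
# combination that makes NLL2 non-finite and substitute a large finite penalty so
# L-BFGS-B can continue.
NLL2.debug = function(beta.1, beta.2, time, gamma, I.init){
  val = NLL2(beta.1, beta.2, time, gamma, I.init)
  if (!is.finite(val)) {
    message("non-finite NLL at beta.1=", beta.1, " beta.2=", beta.2,
            " time=", time, " gamma=", gamma, " I.init=", I.init)
    val = 1e10                     # large finite penalty
  }
  val
}

m = mle(minuslogl = NLL2.debug,
        method = "L-BFGS-B",
        start = list(beta.1 = 1.5/7, beta.2 = 1.5/7, time = 20,
                     gamma = 1/7, I.init = 1e-4),
        lower = c(beta.1 = 0.1, beta.2 = 0.1, time = 0.1, gamma = 1e-3, I.init = 1e-3),
        upper = c(beta.1 = 5, beta.2 = 5, time = 107, gamma = 5, I.init = 1 - 1e-3))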

How to simulate PCA Data?

I am trying to simulate PCA Data as follows:
library(mvtnorm)   # provides rmvnorm()

q <- 5 # no. of PCs
p <- 20 # no. of variables
n <- 2000 # no. of individuals
eps <- 0.05 # error standard deviation
# Eigenvalues
Sig <- seq(3, 1, length.out = q)^2
Lambda <- diag(Sig)
# Matrix of Principal Components
H <- rmvnorm(n = n, mean = rep(0, q), sigma = Lambda)
# Add gaussian noise
E <- matrix(rnorm(n*p, sd = sqrt(eps)), ncol = p)
# Data matrix
Y <- H %*% t(Amat) + E
# Perform PCA
summary(m1 <- prcomp(Y, scale = T)) # and so on...
However, I have no idea how to create the loadings matrix Amat in a meaningful way.
Thanks for any help, I appreciate it!
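One way to build Amat with some structure (a sketch assuming orthonormal loadings are wanted; the answer below takes a different, group-based approach) is to orthonormalize a random p x q matrix with a QR decomposition, so each of the q components loads onto the p variables through mutually orthogonal directions:
# Hypothetical loadings matrix (an assumption, not the asker's setup):
# orthonormal columns from the QR decomposition of a random p x q matrix.
set.seed(1)
Amat <- qr.Q(qr(matrix(rnorm(p * q), nrow = p, ncol = q)))
dim(Amat)                    # 20 x 5
round(crossprod(Amat), 10)   # ~ identity matrix: columns are orthonormal
With this Amat, Y <- H %*% t(Amat) + E has q directions whose variances are roughly the eigenvalues in Sig plus the noise variance, and p - q directions carrying only noise, which is the structure prcomp() should recover.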
This does not use the same structure as the OP, but it simulates data for a PCA with 4 different groups (which could be species), each with 3 "traits" (each trait has a different mean and SD, based for example on biological data found in the literature).
set.seed(123) # setting this so the random results will be repeatable
library(MASS)
# Simulating 3 traits for 4 different species
n = 200 # number of "individuals"
# Generate the groups
Amat1 = MASS::mvrnorm(n, mu = c(11.2,11.8,9.91), Sigma = diag(c(1.31,1.01,1.02)))
Amat2 = MASS::mvrnorm(n, mu = c(7.16,8.54,6.82), Sigma = diag(c(0.445,0.546,0.350)))
Amat3 = MASS::mvrnorm(n, mu = c(15.6,14.6,13.5), Sigma = diag(c(1.43,0.885,0.990)))
Amat4 = MASS::mvrnorm(n, mu = c(8.65,14.1,8.24), Sigma = diag(c(0.535,0.844,0.426)))
# Combine the data
Amat = rbind(Amat1,Amat2,Amat3,Amat4)
# Make group data
Amat.gr = cbind(Amat, gl(4,k=n,labels = c(1,2,3,4)))
# Calculate the covariance matrix for each group
by(Amat.gr[,1:3],INDICES = Amat.gr[,4],FUN = cov) # calculate covariance matrix for all groups
# Plot the result
summary(m1 <- prcomp(Amat, scale= T))
# biplot(m1, xlabs=rep(".", nrow(Amat)), cex = 2)
plot(vegan::scores(m1), asp = 1, pch = 19, col = gl(4,k=n,labels = c(1,2,3,4)))
plot(Amat[,1],Amat[,2], pch = 19, col = gl(4,k=n,labels = c(1,2,3,4)))
The plot on the left shows the PCA and on the right the raw data.
I added a toy example with data to show the algorithm for computing a PCA in R, following Legendre and Legendre (2012).
# Generate vectors (example from Legendre and Legendre 2012)
v1 = c(2,3,5,7,9)
v2 = c(1,4,0,6,2)
# If you want to play with sample size
# n = 100
# v1 = rnorm(n = n, mean = mean(v1), sd = sd(v1))
# v2 = rnorm(n = n, mean = mean(v2), sd = sd(v2))
# Get the y matrix
y = cbind(v1,v2)
# Centered y matrix
yc = apply(y, 2, FUN = function(x) x-mean(x))
# Dispersion matrix
s = 1/(nrow(y)-1)*t(yc) %*% yc
# Compute the singular value decomposition to get the eigenvectors
ev = svd(s)$v
# get the principal components
f = yc %*% ev
# This gives the identity matrix
round(t(svd(s)$v) %*% svd(s)$v,2)
# these are the eigenvalues
svd(s)$d
-svd(yc)$v #p. 104
plot(f, pch = 19); abline(h=0,v=0, lty = 3)
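A quick cross-check (not part of the original answer) is that prcomp() on the same data reproduces these scores and eigenvalues, up to the sign of each eigenvector:
# Cross-check against prcomp (signs of individual axes may be flipped)
m2 = prcomp(y, center = TRUE, scale. = FALSE)
round(abs(m2$rotation) - abs(ev), 10)   # same eigenvectors up to sign
round(m2$sdev^2 - svd(s)$d, 10)         # PC variances equal the eigenvalues of s
round(abs(m2$x) - abs(f), 10)           # same principal-component scores up to sign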

My p-values are way lower than I expected and I cannot build a proper power curve

pval.dist.sim = function(n, sigma_x, rho, reps = 2500){
  p = 5; sigma = sqrt(2)
  beta = c(0.5, 0.5, 0, 0.25, 0)
  mu = 10
  # generate vector for p-values
  pval.list = numeric(reps)
  for (r in 1:reps) {
    # generate design matrix
    X = gen_X(n = n, p = 5, rho = rho, sigma_x = sigma_x, mu = mu)
    # generate the (X'X)^{-1} portion of the equation
    XtXinv = qr.solve(crossprod(X))
    sqrtXtXinv55 = sqrt(XtXinv[5, 5])
    y = X %*% beta + rnorm(n = n)
    beta.hat = XtXinv %*% crossprod(X, y)
    sE = sqrt(sum((y - X %*% beta.hat)^2) / (n - p))
    t.val = beta.hat[3] / (sE * sqrtXtXinv55)
    pval.list[r] = 2 * pt(-abs(t.val), df = n - p)
  }
  return(pval.list)
}
Above is the p-value distribution simulation. I need to run this function to generate the p-values for my power curve:
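The helper gen_X() is not shown in the question; a minimal hypothetical generator (purely an assumption so the snippet can be run, not the asker's actual function) draws equicorrelated normal predictors with pairwise correlation rho, standard deviation sigma_x, and mean mu:
# Hypothetical design-matrix generator (NOT the asker's gen_X): p equicorrelated
# normal predictors with pairwise correlation rho, standard deviation sigma_x, mean mu.
gen_X = function(n, p, rho, sigma_x, mu){
  Sigma = sigma_x^2 * (rho * matrix(1, p, p) + (1 - rho) * diag(p))
  Z = matrix(rnorm(n * p), n, p) %*% chol(Sigma)   # rows ~ N(0, Sigma)
  Z + mu
}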
set.seed(3701)
# givens
p = 5; d = 2; mu = 10; sigmasqrd = 2; reps = 2500
n.list = seq(from=10, to=150, by=10)
# create a vector for the estimates of the power
est.power = numeric(length(n.list))
# create a vector for the left endpoints of the 95% CI
LB.list = numeric(length(n.list))
# create a vector for the right endpoints of the 95% CI
UB.list = numeric(length(n.list))
for(j in 1:length(n.list)){
# perform the test reps times
pvals = pval.dist.sim(n = n.list[j], sigma_x = 1.5, rho = 0.2, reps = reps )
# record the simulated estimate of the power
est.power[j] = mean(pvals<0.05)
# compute the 95% conf int
bounds = binom.test(x=sum(pvals < 0.05), n = reps, conf.level = 0.95)$conf.int[1:2]
LB.list[j] = bounds[1]
UB.list[j] = bounds[2]
}
## plot the power curve estimation
plot(n.list, est.power, t = "l", xlab = "n",ylab = "Power")
The issue is that my p-values, when plugged in, are drastically low; the resulting estimates are in the single-digit percentage range. What am I doing wrong?
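One way to narrow this down (a general diagnostic, not from the original thread) is to check that the simulated p-values are roughly uniform when the tested coefficient is truly zero; a strong departure points at the test statistic itself, for instance the mixed indices in the code above, which scales beta.hat[3] by sqrt(XtXinv[5,5]):
# Under a true null the p-values should be approximately Uniform(0, 1);
# a heavily skewed histogram suggests the t statistic is mis-specified.
pvals = pval.dist.sim(n = 100, sigma_x = 1.5, rho = 0.2, reps = 2500)
hist(pvals, breaks = 20, main = "p-values under the null")
ks.test(pvals, "punif")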

Error when running mle2 function (bbmle)

I am receiving the following error when running the mle2() function from the bbmle package in R:
some parameters are on the boundary: variance-covariance calculations based on Hessian may be unreliable
I am trying to understand if this is due to a problem with my data or an issue with calling the function properly. Unfortunately, I cannot post my real data, so I am using a similar working example of the same sample size.
The custom dAction function I am using is a softmax function. There have to be upper and lower bounds on the optimization so I am using the L-BFGS-B method.
library(bbmle)
set.seed(3939)
### Reproducible data
dat1 <- rnorm(30, mean = 3, sd = 1)
dat2 <- rnorm(30, mean = 3, sd = 1)
dat1[c(1:3, 5:14, 19)] <- 0
dat2[c(4, 15:18, 20:22, 24:30)] <- 0
### Data variables
x <- sample(1:12, 30, replace = TRUE)
pe <- dat1
ne <- dat2
### Likelihood
dAction <- function(x, a, b, t, pe, ne, log = FALSE) {
  u <- exp((x - (a * ne) - (b * pe)) / t)
  prob <- u / (1 + u)
  if (log) return(prob) else return(-sum(log(prob)))
}
### Fit
fit <- mle2(dAction,
start = list(a = 0.1, b = 0.1, t = 0.1),
data = list(x = x, pe = pe, ne = ne),
method = "L-BFGS-B",
lower = c(a = 0.1, b = 0.1, t = 0.1),
upper = c(a = 10, b = 1, t = 10))
Warning message:
In mle2(dAction, start = list(a = 0.1, b = 0.1, t = 0.1), data = list(x = x, :
some parameters are on the boundary: variance-covariance calculations based on Hessian may be unreliable
Here are the results for summary():
summary(fit)
Maximum likelihood estimation
Call:
mle2(minuslogl = dAction, start = list(a = 0.1, b = 0.1, t = 0.1),
method = "L-BFGS-B", data = list(x = x, pe = pe, ne = ne),
lower = c(a = 0.1, b = 0.1, t = 0.1), upper = c(a = 10, b = 1,
t = 10))
Coefficients:
Estimate Std. Error z value Pr(z)
a 0.1 NA NA NA
b 0.1 NA NA NA
t 0.1 NA NA NA
-2 log L: 0.002048047
Warning message:
In sqrt(diag(object@vcov)) : NaNs produced
And the results for the confidence intervals
confint(fit)
Profiling...
2.5 % 97.5 %
a NA 1.0465358
b NA 0.5258828
t NA 1.1013322
Warning messages:
1: In sqrt(diag(object@vcov)) : NaNs produced
2: In .local(fitted, ...) :
Non-positive-definite Hessian, attempting initial std err estimate from diagonals
I don't entirely understand the context of your problem, but:
The issue (whether it is a real problem or not depends very much on the aforementioned context that I don't understand) has to do with your constraints. If we do the fit without the constraints:
### Fit
fit <- mle2(dAction,
start = list(a = 0.1, b = 0.1, t = 0.1),
data = list(x = x, pe = pe, ne = ne))
## method = "L-BFGS-B",
## lower = c(a = 0.1, b = 0.1, t = 0.1),
## upper = c(a = 10, b = 1, t = 10))
we get coefficients that are below your bounds.
coef(fit)
a b t
0.09629301 0.07724332 0.02405173
If this is correct, at least one of the constraints is going to be active (i.e. when we fit with lower bounds, at least one of our parameters will hit the bounds - in fact, it's all of them). When fits are on the boundary, the simplest machinery for computing confidence intervals (Wald intervals) doesn't work. However, this doesn't affect the profile confidence interval estimates you report above. These are correct - the lower bounds are reported as NA because the lower confidence limit is at the boundary (you can replace these by 0.1 if you like).
If you didn't expect the optimal fit to be on the boundary, then I don't know what's going on, maybe a data issue.
Your log-likelihood function is not wrong, but it's a little confusing because you have a log argument that returns the negative log-likelihood when log=FALSE (default) and the likelihood when log=TRUE. Before I realized that, I rewrote the function (I also made it a little more numerically stable by doing computations on the log scale wherever possible).
dAction <- function(x, a, b, t, pe, ne) {
  logu <- (x - (a * ne) - (b * pe)) / t
  lprob <- logu - log1p(exp(logu))
  return(-sum(lprob))
}
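A quick numerical sanity check (not part of the original answer) that this log-scale rewrite computes the same negative log-likelihood as the original formulation, here evaluated at the starting values a = b = t = 0.1:
# Compare the rewritten dAction (defined just above) with the original
# formulation -sum(log(u/(1 + u))) at the start values; should return TRUE.
u0 <- exp((x - 0.1 * ne - 0.1 * pe) / 0.1)
all.equal(-sum(log(u0 / (1 + u0))),
          dAction(x, a = 0.1, b = 0.1, t = 0.1, pe = pe, ne = ne))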

Optimization of a function in R ( L-BFGS-B needs finite values of 'fn')

I would like to find the maximum of the following function f1. I wrote the code below using lower and upper bounds, since we know that all of our parameters are greater than or equal to zero and that x4 should always be less than or equal to x6. How can I fix this problem in R? I want to get a finite maximum value of the function f1.
x1 = 0.1
x2 = 0.1
x3 = 2
x4 = 10
x5 = 2
x6 = 30
x7 = 1
par = list(x1=x1, x2=x2, x3=x3, x4=x4,x5=x5, x6=x6, x7=x7)
par1 = c(1, 1, 2, 1.5, 1, 1.5, 1)
f1 = function(x, par){
  sum(log(exp(-(par$x7) * (par$x1*x + par$x2*x^2/2 +
    par$x3*(par$x4 - x)^3/3 + par$x5*(x - par$x6)^3/3))))
}
x = seq(0, 500, length=100)
z = c(par$x1, par$x2, par$x3, par$x4, par$x5, par$x6, par$x7)
f2 = function(z){
  par.new = list(x1 = z[1], x2 = z[2], x3 = z[3], x4 = z[4],
                 x5 = z[5], x6 = z[6], x7 = z[7])
  f1(x, par.new)
}
optim(par1, f2, method = "L-BFGS-B", lower = rep(0, length(z)),
upper = rep(Inf,length(z)),control = list(trace = 5,fnscale=-1))
> optim(par1, f2, method = "L-BFGS-B", lower = rep(0, length(z)),
upper = rep(Inf, length(z)), control = list(trace = 5,fnscale=-1))
N = 7, M = 5 machine precision = 2.22045e-16
L = 0 0 0 0 0 0 0
X0 = 1 1 2 1.5 1 1.5 1
U = inf inf inf inf inf inf inf
At X0, 0 variables are exactly at the bounds
Error in optim(par1, f2, method = "L-BFGS-B", lower = rep(0, length(z)), :
L-BFGS-B needs finite values of 'fn'
At some point in the optimization, your function is returning a value greater than .Machine$double.xmax (which is 1.797693e+308 on my machine).
Since your function f1(...) is defined as sum(log(exp(...))), and since log(exp(z)) = z for any z, why not use this:
par1 = c(1, 1, 2, 1.5, 1, 1.5, 1)
x = seq(0, 500, length=100)
f1 = function(par, x){
sum(-(par[7])*(par[1]*x + par[2]*x^2/2 +
par[3] * (par[4]-x)^3/3+par[6] *(x-par[7])^3/3))
}
result <- optim(par1, f1, x=x,
method = "L-BFGS-B",
lower = rep(0, length(par1)), upper = rep(Inf,length(par1)),
control = list(trace = 5,fnscale=-1))
result$par
# [1] 2.026284e-01 2.026284e-01 8.290126e+08 0.000000e+00 1.000000e+00 9.995598e+35 2.920267e+27
result$value
# [1] 2.423136e+147
Note that the vector of parameters (par) must be the first argument to f1.
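The question also mentions the constraint x4 <= x6, which plain box bounds in L-BFGS-B cannot express. One sketch (an assumption, not part of the original answer, and using the indexing of the question's own f1, where x5 multiplies (x - x6)^3) is to optimize over a non-negative offset delta and reconstruct x6 = x4 + delta inside the objective; note the objective is still unbounded above, so the maximizer will again chase very large parameter values:
# Reparameterize so x4 <= x6 holds by construction: theta = (x1, x2, x3, x4, x5, delta, x7)
# with x6 = x4 + delta and delta >= 0 enforced by the lower bound.
f3 = function(theta, x){
  x6 = theta[4] + theta[6]                       # guarantees x4 <= x6
  sum(-theta[7] * (theta[1]*x + theta[2]*x^2/2 +
      theta[3]*(theta[4] - x)^3/3 + theta[5]*(x - x6)^3/3))
}
theta0 = c(1, 1, 2, 1.5, 1, 0.5, 1)              # delta = x6 - x4 = 0.5 at the start
optim(theta0, f3, x = x, method = "L-BFGS-B",
      lower = rep(0, 7), upper = rep(Inf, 7),
      control = list(trace = 5, fnscale = -1))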
