How to identify starting values for constrOptim()? - r

I'm using constrOptim() in R to estimate the parameters of a model. I get an error saying that my target function returns a result of length 0 instead of 1. The problem may be the starting values I chose. I'm using R for the first time and I'm not familiar with the constrOptim algorithm itself. Can somebody help me with the optimization?
My input:
library(quantmod)
# download DAX data and compute log returns of the closing price
getSymbols('^GDAXI', src='yahoo', return.class='ts',from="2005-01-01", to="2015-01-31")
GDAXI.DE=GDAXI[ , "GDAXI.Close"]
log_r1=diff(log(GDAXI.DE[39:2575]))
# expanding-window sample variance of the log returns
sigma=matrix(nrow=length(log_r1)-1, ncol=1)
for(i in 2:length(log_r1))
{
sigma[i-1]=var(log_r1[1:i])
}
# demeaned log returns
error=matrix(nrow=length(log_r1), ncol=1)
mu=mean(log_r1)
for(i in 1:length(log_r1))
{
error[i]=log_r1[i]-mu
}
The function I want to minimize:
LogLik=function(th)
{
input=garch(th)
sig=input
for(i in 1:length(sigma))
{
LL=(-1/2)*log(2*pi)-(1/2)*log(sig[i])-(1/2)*er[i]/sig[i]
}
}
The function calculating the input above:
sig=var(log_r1)
er=error
Sigma_garch=matrix(nrow=length(sigma), ncol=1)
garch=function(th)
{
omega.0=th[1]
alpha.0=th[2]
beta.0=th[3]
for(i in 1:length(sigma))
{
if(i==1)
{
Sigma_ga=sig
}else
{
Sigma_ga=Sigma_garch[i-1]
}
Sigma_garch[i]=omega.0+alpha.0*er[i]^2+beta.0*Sigma_ga
}
return(Sigma_garch)
}
My constraints and optimization procedure:
ui=matrix(c(diag(1,3),c(0,(-1),(-1))),4,3, byrow = T)
ci=c(0.01,0,0,(-0.99))
theta=c(0.02,0.01,0.95)
est=constrOptim(c(0.02,0.01,0.95),LogLik, grad=NULL, ui,ci,hessian=FALSE)
The error I'm getting (translated from German): Error in optim(theta.old, fun, gradient, control = control, method = method, : objective function returns a result of length 0, not length 1.
Can somebody tell me what the problem might be here?

Your target function, LogLik, doesn't return a value, which is what produces the "target function has a result of length 0 instead of 1" error. Return the accumulated log-likelihood from the function:
LogLik=function(th)
{
  sig=garch(th)
  LL=0
  for(i in 1:length(sigma))
  {
    # accumulate the per-observation contributions; the original loop overwrote LL
    # each time, so only the last term would have been returned
    # (note: a Gaussian log-density would normally use er[i]^2/sig[i] in the last term)
    LL=LL+(-1/2)*log(2*pi)-(1/2)*log(sig[i])-(1/2)*er[i]/sig[i]
  }
  return(LL)
}
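A note on the title question about starting values: constrOptim() also requires the starting point to lie strictly inside the feasible region, i.e. ui %*% theta - ci > 0 for every constraint. A minimal check using the ui, ci and theta defined above:
# verify the starting values are strictly feasible
feasible = ui %*% theta - ci
feasible            # every entry should be positive
all(feasible > 0)   # TRUE for theta = c(0.02, 0.01, 0.95)
If this check fails, constrOptim() stops with an "initial value is not in the interior of the feasible region" error rather than the length-0 error, so the starting values themselves are not what triggers the message above.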

Related

Likelihood for first-order censored autoregressive process with covariate (see Jung Wook Park, Marc G. Genton and Sujit K. Ghosh)

library(mvtnorm)
set.seed(14)
n=10000
sigmatrue<-1
rhotrue<-0.3
b1=0.05
b0=0
y<-arima.sim(model=list(ar=c(0.3)),n=10000 ,sd=sigmatrue) # construct the time series
x=rep(0,n)
for(i in 1:n){
x[i]=i
}
for(t in 1:n)
{
y[t]=y[t]+b0+b1*x[t]
}
est=arima(y,order=c(1,0,0),xreg=x,include.mean=TRUE,method="ML",kappa=1e+06)
cens<-rep(0, n)
c=(9/10)*(n*b1+b0)
for (i in 1:n) {
if(y[i]>c){
y[i]<-c
cens[i]<-1
}
}
ll<-function(p){
sigma=matrix(c(p[2]^2/(1-p[3]^2), p[2]^2*p[3]/(1-p[3]^2),p[2]^2*p[3]/(1-p[3]^2),p[2]^2/(1-p[3]^2)),ncol=2,nrow=2,byrow=TRUE)
likelihood<-rep(0,n)
for(t in 2 :n){
if(cens[t]==0 & cens[t-1]==0){
likelihood[t]<-dnorm(((y[t]-(p[1]+p[4]*t)-p[3]*(y[t-1]-(p[1]+p[4]*(t-1)))/p[2]) )/p[2])
}
else if(cens[t]==0 & cens[t-1]==1){
likelihood[t]<-(1/(1-pnorm((c-(p[1]+p[4]*t)*sqrt(1-p[3]^2)/p[2]))*sqrt(1-p[3]^2)/p[2]*dnorm(((y[t]-(p[1]+p[4]*t)*sqrt(1-p[3]^2))/p[2])*(1-pnorm(((c-(p[1]+p[4]*(t))-p[3]*(y[t]-(p[1]+p[4]*(t-1)))/p[2])))))))
}
else if(cens[t]==1 & cens[t-1]==0){
likelihood[t]<-1-pnorm(((c-(p[1]+p[4]*t)-p[3]*(y[t-1]-(p[1]+p[4]*(t-1)))/p[2])))
}
else
{
likelihood[t]<-(((pmvnorm(lower=c, upper=Inf , mean=c(p[1]+p[4]*(t-1),p[1]+p[4]*t),sigma=sigma))/(1-pnorm((c-(p[1]+p[4]*(t-1))*sqrt(1-p[3]^2)/p[2])))))
}
}
f0=(sqrt(1-p[3])/p[2]*dnorm(((y[1]-p[1]-p[4])*sqrt(1-p[3]^2))/p[2]))
likelihood[1]=f0
# I added these:
if (any(likelihood==0)){
likelihood[likelihood==0] = 0.000001 # a very small number
}
if (any(likelihood==Inf)){
likelihood[likelihood==Inf] = 1 # very large, or 1 if we are talking about a probability
}
if (any(is.nan(likelihood))){
likelihood[is.nan(likelihood)] = 0.000001
}
minusloglike=-sum(log(likelihood))
#l1=list(Minusloglike=minusloglike,Loglikelihood=log(likelihood))
return(minusloglike)
}
fit<-optim(c(0,1,0.3,0.05),ll,method="L-BFGS-B",lower=c(-Inf,0.001,-0.999,-Inf),upper = c(Inf,Inf,0.999,Inf),hessian=TRUE)
fisher.info<-solve(fit$hessian)
fisher.info
prop.sigma<-sqrt(diag(fisher.info))
sigmas<-diag(prop.sigma)
upper<-fit$par+1.96*sigmas
lower<-fit$par-1.96*sigmas
interval<-data.frame(value=fit$par, lower=diag(lower),upper=diag(upper))
interval
I run this code (it is for a censored first-order autoregressive process with a covariate; I have 4 cases, since each of x(t), x(t-1) can be either censored or non-censored, and I don't want the likelihood to go near zero or Inf). I get the error:
Error in if (any(likelihood == Inf)) { :
missing value where TRUE/FALSE needed
Called from: fn(par, ...)
The program works for n=100, but when n is larger than 100 I get this error. I think this error causes bad estimates of the four parameters (b1, rho, sigma, b0). Does anyone know what I can do?
Thank you for your help.
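A likely mechanism for this particular error (a minimal sketch, independent of the model): once likelihood contains an NA or NaN, the comparison likelihood == Inf also yields NA, any() then returns NA when no TRUE is present, and if() cannot handle NA:
x = c(0.5, NA)           # one NA, no Inf
x == Inf                 # FALSE NA
any(x == Inf)            # NA
if (any(x == Inf)) { }   # Error: missing value where TRUE/FALSE needed
# testing with NAs removed (or checking is.na()/is.nan() first) avoids the error
if (any(x == Inf, na.rm = TRUE)) { }   # no error
if (isTRUE(any(x == Inf))) { }         # no error
This would explain why the code runs for small n but fails once n is large enough that some likelihood contribution comes out as NA or NaN.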

R S3 cat() output from function

I've got a problem with the output of an S3 function. I'm trying to overload the "+" operator to act on two vectors as if they were polynomial coefficients. It's a university project. The code is below:
'+.ply' <- function(a,b){
size <- max(length(a$polynomial),length(b$polynomial))
size
aAdd <- a$polynomial
bAdd <- b$polynomial
if (length(aAdd) == size) {
aAdd = aAdd
} else {
length(aAdd) <- size
}
aAdd[is.na(aAdd)] <- 0
if (length(bAdd) == size) {
bAdd = bAdd
} else {
length(bAdd) <- size
}
bAdd[is.na(bAdd)] <- 0
cat("Polynomial of degree ", paste(length(aAdd+bAdd)-1),
" with coefficients ", paste(aAdd+bAdd))
}
The code works fine, but it gives me the output
*Polynomial of degree 3 with coefficients 3 4 6 3NULL*
I need to use cat in order to avoid the [1] index that appears when I use the print/paste combination. I know there are plenty of threads about this problem, but I can't find any solution for it in the context of operator overloading. I would be thankful for help.
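The trailing NULL is the return value of cat() (cat() returns NULL), which '+.ply' passes on and which is then printed, for example when the result is wrapped in print(). A minimal sketch of the usual idiom, assuming a ply object is a list with a polynomial field: let "+" build and return the new object, and move the cat() into a print method.
'+.ply' <- function(a, b) {
size <- max(length(a$polynomial), length(b$polynomial))
aAdd <- c(a$polynomial, rep(0, size - length(a$polynomial)))
bAdd <- c(b$polynomial, rep(0, size - length(b$polynomial)))
structure(list(polynomial = aAdd + bAdd), class = "ply")
}
print.ply <- function(x, ...) {
cat("Polynomial of degree", length(x$polynomial) - 1,
    "with coefficients", x$polynomial, "\n")
invisible(x)   # print methods conventionally return their argument invisibly
}
With this split, evaluating a + b at the console prints the message once (via print.ply) and nothing else, and the result can still be assigned and reused.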

function to recursively remove outliers

I wrote a function to remove outliers recursively (dropping any data point more than 3 standard deviations away from the median).
rm.outlier <- function (var) {
has.3sd =1
while (has.3sd>0) {
for (l in var) {
if ( (l-median(var))> 3*sd(var) & !is.na(l)) {
var[var==l] <- NA
}
}
has.3sd <- sum(var > 3*sd(var))
if (has.3sd==0) {
break
}
}
return (var)
}
However, I always got the error message:
Error in if ((l - median(var)) > 3 * sd(var) & !is.na(l)) { :
missing value where TRUE/FALSE needed
I wonder why I get this error message? I spent a long time trying to figure it out but couldn't. I'd appreciate it if anyone can help me with that. Thanks a lot.
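One likely cause (a minimal sketch, independent of the rest of the function): after the first value has been replaced with NA, median(var) and sd(var) themselves return NA, so the next comparison evaluates to NA and if() fails. The same happens to sum(var > 3*sd(var)) in the while test.
v = c(1, 2, NA)           # what var looks like after a point has been set to NA
median(v)                 # NA: median() propagates NA by default
sd(v)                     # NA
if ((2 - median(v)) > 3 * sd(v)) { }   # Error: missing value where TRUE/FALSE needed
# na.rm = TRUE restores numeric results once NAs are present
median(v, na.rm = TRUE)   # 1.5
sd(v, na.rm = TRUE)       # about 0.71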

R try catch block

I'm trying to evaluate trees for a number of output parameters in a loop, but sometimes the tree function aborts. How can the relevant lines be surrounded by a try/catch block?
I apologize for not having "real" code, but I don't have an example of a non-working tree. Here's pseudo-code to illustrate the current implementation:
for (icol in seq(1,ncol)) {
cName <-colnames(dt)[icol]
tdata <- dt[,unique(c(1,2,icol)),with=F]
nTrues <- sum(rowSums(tdata[,cName,with=F]))
if (nTrues>0 ) {
print(paste('processing column',icol,'of',ncol,': ',cName))
nFac <- table(tdata[,cName,with=F])
print(nFac)
treeData <- merge(tdata, maint_data)
treeData[,c('identifiers'):=NULL]
fmla <- paste(cName,'~ .')
if (TRUE) {
# Recursive Partitioning and Regression Trees
cat('Recursive Partitioning and Regression Trees (rpart)','\n')
rtree <- rpart(fmla,data=treeData) # <-- NEED TRY CATCH HERE...
print(summary(rtree))
cat('Confusion matrix for rpart')
print(table(predict(rtree), treeData[[cName]]))
}
flush.console()
} else {
print(paste('skipping column',icol,'of',ncol(ci_ratio_before_larger),': ',cName))
}
}
Here's a correction that seems to work....
tryCatch({
# Recursive Partitioning and Regression Trees
cat('Recursive Partitioning and Regression Trees (rpart)','\n')
rtree <- rpart(fmla,data=treeData)
print(summary(rtree))
cat('Confusion matrix for rpart')
print(table(predict(rtree,type='vector'), treeData[[cName]]))
},
error = function (condition) {
print("RPART_ERROR:")
print(paste(" Message:",conditionMessage(condition)))
print(paste(" Call: ",conditionCall(condition)))
}
)
I cannot really test it, but can you try replacing your
if (TRUE)
condition with this:
tryCatch({
# Recursive Partitioning and Regression Trees
cat('Recursive Partitioning and Regression Trees (rpart)','\n')
rtree <- rpart(fmla,data=treeData) # <-- NEED TRY CATCH HERE...
print(summary(rtree))
cat('Confusion matrix for rpart')
print(table(predict(rtree), treeData[[cName]]))
},
error = function (condition) {
print("RPART_ERROR:")
print(paste(" Message:",conditionMessage(condition)))
print(paste(" Call: ",conditionCall(condition)))
},
finally = {
# note: 'finally' takes an expression (always evaluated), not a function
}
)
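For reference, a minimal sketch of the same tryCatch() shape, showing how the error handler can also supply a fallback value so the loop can continue (the NULL fallback and the flush.console() in finally are illustrative):
rtree <- tryCatch({
# the value of the last expression becomes the result on success
rpart(fmla, data = treeData)
}, error = function(condition) {
message("rpart failed: ", conditionMessage(condition))
NULL                 # fallback value returned when rpart() errors
}, finally = {
flush.console()      # evaluated whether or not an error occurred
})
if (!is.null(rtree)) {
print(summary(rtree))
}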

Viterbi decoding of matrices

Aim: global decoding of a hidden sequence (Viterbi algorithm) using matrices. The step-by-step code is below:
# equilibrium probs
equil=function(P){
e=eigen(t(P))$vectors[,1]
e/sum(e)
}
# simulate hidden sequence
hssim=function(n,lambda)
{
r=dim(lambda)[1]
states=1:r
s=vector("numeric",n)
pi.lam=equil(lambda)
s[1]=sample(states,1,FALSE,pi.lam)
for (t in 2:n) {
s[t]=sample(states,1,FALSE,prob=lambda[s[t-1],])
}
s
}
# simulate observed sequence
obssim=function(s,P)
{
n=length(s)
r=dim(P)[2]
q=dim(P)[1]
states=1:q
obs=vector("numeric",n)
for (t in 1:n) {
obs[t]=sample(states,1,FALSE,prob=P[,s[t]])
}
obs
}
lambda=rbind(c(0.999,0.0002,0.0003,0.0004,0.0001),c(0.001,0.9930,0.010,0.004,0.002),c(0.0004,0.0020,0.9900,0.0075,0.0001),c(0.0007,0.0030,0.0003,0.9940,0.0020),c(0.0010,0.0005,0.0040,0.0020,0.9925))
s=hssim(10000,lambda)
P=cbind(c(0.6,0.1,0.1,0.2),c(0.25,0.3,0.2,0.25),c(0.1,0.6,0.2,0.1),c(0.25,0.2,0.4,0.1),c(0.5,0.2,0.2,0.1))
obs=obssim(s,P)
# optional - converting to/from another alphabet...
letters=c("a","c","g","t")
numbers=1:4
convert=function(x,frm,to)
{
to[match(x,frm)]
}
obslets=convert(obs,numbers,letters)
# estimate emission probs from observed and hidden sequence
Pest=function(s,obs,r,q)
{
est=matrix(0,nrow=q,ncol=r)
for (i in 1:r) {
est[,i]=table(obs[s==i])
est[,i]=est[,i]/sum(est[,i])
}
est
}
phat=Pest(s,obs,5,4)
# estimate lambda from hidden sequence
lambdaest=function(s,r)
{
n=length(s)
est=matrix(0,ncol=r,nrow=r)
for (t in 2:n) {
est[s[t-1],s[t]]=est[s[t-1],s[t]]+1
}
for (i in 1:r) {
est[i,]=est[i,]/sum(est[i,])
}
est
}
lamhat=lambdaest(s,5)
# global decoding algorithm
global=function(obs,lambda,P)
{
r=dim(lambda)[1]
q=dim(P)[1]
n=length(obs)
s=vector("numeric",n)
f=matrix(0,nrow=r,ncol=n)
# forwards
f0=equil(lambda)
f[,1]=P[obs[1],]*(f0%*%lambda)
for (i in 2:n) {
for (k in 1:r){
f[k,i]=P[obs[i],k]*max(f[,i-1]*lambda[,k])
}
f[,i]=f[,i]/sum(f[,i])
}
# backwards
s[n]=which.max(f[,n])
for (i in (n-1):1) {
s[i]=which.max(lambda[,s[i+1]]*f[,i])
}
s
}
globest=global(obs,lambda,P)
I created a function to do Viterbi decoding of a hidden sequence, but it throws an error, so I couldn't plot the graph to identify the difference. Can anyone fix this for me?
The problem is that the function max() does not work with complex data like your f matrix (f becomes complex because the equilibrium vector from equil(), computed with eigen(), can have complex storage mode).
I suggest taking the modulus (Mod()) before applying max(). Example:
x = c(1 + 1i, 2)
# What you are doing
max(x)
# >> Error in max(x) : invalid 'type' (complex) of argument
# Suggestion
max(Mod(x))
# >> 2
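Applying that suggestion at the point where the complex values first appear, a sketch of a modified equil() (the leading eigenvector of a stochastic matrix is real up to numerical noise, but eigen() stores the whole eigenvector matrix as complex as soon as any eigenvalue is complex):
equil=function(P){
e=eigen(t(P))$vectors[,1]
e=Mod(e)   # or Re(e): drop the numerically zero imaginary parts
e/sum(e)
}
With a real equilibrium vector, f stays numeric and the max() calls inside global() no longer see complex arguments.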
