I was doing maximum likelihood estimation using optim(), and it was quite easy. The model is a generalized logistic distribution with 4 parameters and a couple of restrictions, all checked inside the likelihood function:
genlogis.loglikelihood <- function(param = c(sqrt(2/pi), 0.5, 2, 0), x){
  if(length(param) < 3 || length(param) > 4){
    stop('Incorrect number of parameters: param = c(a, b, p, location)')
  }
  # The location parameter defaults to 0 when only three parameters are given
  location <- if(length(param) == 4) param[4] else 0
  a <- param[1]
  b <- param[2]
  p <- param[3]
  if(a < 0){
    stop('The argument "a" must be positive.')
  }
  if(b < 0){
    stop('The argument "b" must be positive.')
  }
  if(p < 0){
    stop('The argument "p" must be positive.')
  }
  if(p == 0 && b > 0 && a > 0){
    stop('If "p" equals 0, "b" or "a" must be 0, otherwise there is an identifiability problem.')
  }
  if(b == 0 && a == 0){
    stop('The distribution is not defined for "a" and "b" equal to 0 simultaneously.')
  }
  # Log-likelihood of the generalized logistic distribution
  z <- sum(log((a + b*(1 + p)*abs(x - location)^p) * exp(-((x - location)*(a + b*abs(x - location)^p))))) -
    sum(2*log(exp(-((x - location)*(a + b*abs(x - location)^p))) + 1))
  # Penalize non-finite values so the minimizer avoids them
  # (note: -z is returned below, hence the negative sign here)
  if(!is.finite(z)){
    z <- -1e+20
  }
  return(-z)  # negative log-likelihood, since optim() minimizes
}
With this negative log-likelihood, optimization worked flawlessly this way:
opt <- function(parameters, data){
  optim(par = parameters, fn = genlogis.loglikelihood, x = data,
        lower = c(0.00001, 0.00001, 0.00001, -Inf),
        upper = c(Inf, Inf, Inf, Inf), method = 'L-BFGS-B')
}
opt(c(0.3, 1.01, 2.11, 3.5), faithful$eruptions)
Since optim() computes the gradient numerically, I had no problems with this approach.
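For reference, a finite-difference gradient like the one optim() builds internally can be computed with the numDeriv package; a minimal sketch, assuming numDeriv is installed and using the same starting values as below:

# Sketch: finite-difference gradient of the negative log-likelihood
# at the starting values (purely for illustration)
numDeriv::grad(
  func = function(p) genlogis.loglikelihood(p, x = faithful$eruptions),
  x = c(0.3, 1.01, 2.11, 3.5)
)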
Then I wanted to switch to constrOptim(), because the true lower bound on the first three parameters is 0, not a small positive number. The problem I face is that the grad argument has to be specified, and I cannot derive the gradient of this function analytically, so it has to be computed numerically, as in optim(). It does work if I set grad = NULL, but then the Nelder-Mead method is used, and I want BFGS.
I've tried it this way, without much success:
opt2 <- function(initial, data){
  ui <- rbind(c(1, 0, 0, 0), c(0, 1, 0, 0), c(0, 0, 1, 0))
  ci <- c(0, 0, 0)
  constrOptim(theta = initial, f = genlogis.loglikelihood(param, x),
              grad = numDeriv::grad(func = function(x, param) genlogis.loglikelihood(param, x),
                                    param = theta, x = data),
              x = data, ui = ui, ci = ci)
}
Your notation is a bit convoluted; maybe that is what confused you. constrOptim() expects f and grad to be functions, but you are passing the results of calling them. Define wrappers instead:
opt2 <- function(parameters, data){
  fn <- function(p) genlogis.loglikelihood(p, x = data)
  gr <- function(p) numDeriv::grad(fn, p)
  ui <- rbind(c(1, 0, 0, 0), c(0, 1, 0, 0), c(0, 0, 1, 0))
  ci <- c(0, 0, 0)
  constrOptim(theta = parameters, f = fn, grad = gr,
              ui = ui, ci = ci, method = "BFGS")
}
opt2(c(0.3, 1.01, 2.11, 3.5), faithful$eruptions)
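As a quick sanity check (my own sketch, not from the original post), the two solvers should land on essentially the same estimates, assuming the optimum lies away from the boundary:

# Compare the box-constrained and linearly constrained fits;
# the tolerance is illustrative, since the two methods stop at slightly different points
p1 <- opt(c(0.3, 1.01, 2.11, 3.5), faithful$eruptions)$par
p2 <- opt2(c(0.3, 1.01, 2.11, 3.5), faithful$eruptions)$par
all.equal(p1, p2, tolerance = 1e-3)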
Related
w1 <- 1000
w2 <- 600
a <- c(w1, w2)
fun1 <- function(a){
  return(exp(1)^1.5 * a[1]^0.2 * a[2]^(1 - 0.2))
}
# constraint: a[1] + a[2] = 10000
Answer1 <- constrOptim(c(w1, w2), fun1, NULL,
                       ui = c(1, 1), ci = c(10000),
                       control = list(fnscale = -1))
I am trying to maximize the function subject to the constraint above. However, I get an error:
"Error in constrOptim(c(1, 1), fun1, NULL, ui = c(1, 1), ci = c(10000), :
initial value is not in the interior of the feasible region"
What could be the issue?
You can consider the following approach:
library(DEoptim)

# Substitute the equality constraint a2 = 10000 - a1 into the objective,
# and return a huge value when it is undefined (DEoptim minimizes)
fun1 <- function(a1)
{
  a2 <- 10000 - a1
  val <- exp(1)^1.5 * a1^0.2 * a2^0.8
  if(is.na(val) || is.nan(val))
  {
    return(10^30)
  }else
  {
    return(-val)  # negate, since DEoptim minimizes and we want to maximize
  }
}
obj_DEoptim <- DEoptim(fn = fun1, lower = 0, upper = 10000)
obj_DEoptim$optim$bestmem
Note that your "equality" constraint involves only two variables, which can be written as x and 10000 - x respectively. In this case, optim() is sufficient for your purpose, e.g.,
f <- function(x) {
return(exp(1.5) * x^0.2 * (10000 - x)^(1 - 0.2))
}
optim(1000,
f,
method = "Brent",
lower = 0,
upper = 10000,
control = list(fnscale = -1)
)
and you will obtain
$par
[1] 2000
$value
[1] 27171.88
$counts
function gradient
NA NA
$convergence
[1] 0
$message
NULL
By the way, you can visualize the objective function f using curve(f, 0, 10000); the plot shows a single interior maximum, at x = 2000.
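As a side note on the original error (my own illustration): constrOptim() only handles inequality constraints of the form ui %*% theta - ci >= 0, and it additionally requires the starting point to lie strictly inside that region. The starting value c(1000, 600) fails the check, which is exactly the message you saw:

ui <- matrix(c(1, 1), nrow = 1)  # constraint row for a[1] + a[2]
ci <- 10000
theta <- c(1000, 600)
ui %*% theta - ci > 0  # FALSE: 1600 - 10000 < 0, so the start is infeasible

An equality constraint therefore cannot be expressed this way at all, which is why substituting a2 = 10000 - a1, as above, is the cleaner route.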
The code for the Holt-Winters function in R contains the following if clause:
if (!is.null(gamma) && is.logical(gamma) && !gamma)
Obviously the first condition means "if gamma is NOT NULL". I am a bit confused by the meaning of the third: it looks like it reads "if NOT gamma", but there is nothing following it, like an equals sign or an is.null().
Forgive me if this is a somewhat basic question, but I am very new to R.
Full code:
{
x <- as.ts(x)
seasonal <- match.arg(seasonal)
f <- frequency(x)
if (!is.null(alpha) && (alpha == 0))
stop("cannot fit models without level ('alpha' must not be 0 or FALSE)")
if (!all(is.null(c(alpha, beta, gamma))) && any(c(alpha,
beta, gamma) < 0 || c(alpha, beta, gamma) > 1))
stop("'alpha', 'beta' and 'gamma' must be within the unit interval")
if ((is.null(gamma) || gamma > 0)) {
if (seasonal == "multiplicative" && any(x == 0))
stop("data must be non-zero for multiplicative Holt-Winters")
if (start.periods < 2)
stop("need at least 2 periods to compute seasonal start values")
}
if (!is.null(gamma) && is.logical(gamma) && !gamma) {
expsmooth <- !is.null(beta) && is.logical(beta) && !beta
if (is.null(l.start))
l.start <- if (expsmooth)
x[1L]
else x[2L]
if (is.null(b.start))
if (is.null(beta) || !is.logical(beta) || beta)
b.start <- x[2L] - x[1L]
start.time <- 3 - expsmooth
s.start <- 0
}
else {
start.time <- f + 1
wind <- start.periods * f
st <- decompose(ts(x[1L:wind], start = start(x), frequency = f),
seasonal)
if (is.null(l.start) || is.null(b.start)) {
dat <- na.omit(st$trend)
cf <- coef(.lm.fit(x = cbind(1, seq_along(dat)),
y = dat))
if (is.null(l.start))
l.start <- cf[1L]
if (is.null(b.start))
b.start <- cf[2L]
}
if (is.null(s.start))
s.start <- st$figure
}
lenx <- as.integer(length(x))
if (is.na(lenx))
stop("invalid length(x)")
len <- lenx - start.time + 1
hw <- function(alpha, beta, gamma) .C(C_HoltWinters, as.double(x),
lenx, as.double(max(min(alpha, 1), 0)), as.double(max(min(beta,
1), 0)), as.double(max(min(gamma, 1), 0)), as.integer(start.time),
as.integer(!+(seasonal == "multiplicative")), as.integer(f),
as.integer(!is.logical(beta) || beta), as.integer(!is.logical(gamma) ||
gamma), a = as.double(l.start), b = as.double(b.start),
s = as.double(s.start), SSE = as.double(0), level = double(len +
1L), trend = double(len + 1L), seasonal = double(len +
f))
if (is.null(gamma)) {
if (is.null(alpha)) {
if (is.null(beta)) {
error <- function(p) hw(p[1L], p[2L], p[3L])$SSE
sol <- optim(optim.start, error, method = "L-BFGS-B",
lower = c(0, 0, 0), upper = c(1, 1, 1), control = optim.control)
if (sol$convergence || any(sol$par < 0 | sol$par >
1)) {
if (sol$convergence > 50) {
warning(gettextf("optimization difficulties: %s",
sol$message), domain = NA)
}
else stop("optimization failure")
}
alpha <- sol$par[1L]
beta <- sol$par[2L]
gamma <- sol$par[3L]
}
else {
error <- function(p) hw(p[1L], beta, p[2L])$SSE
sol <- optim(c(optim.start["alpha"], optim.start["gamma"]),
error, method = "L-BFGS-B", lower = c(0, 0),
upper = c(1, 1), control = optim.control)
if (sol$convergence || any(sol$par < 0 | sol$par >
1)) {
if (sol$convergence > 50) {
warning(gettextf("optimization difficulties: %s",
sol$message), domain = NA)
}
else stop("optimization failure")
}
alpha <- sol$par[1L]
gamma <- sol$par[2L]
}
}
else {
if (is.null(beta)) {
error <- function(p) hw(alpha, p[1L], p[2L])$SSE
sol <- optim(c(optim.start["beta"], optim.start["gamma"]),
error, method = "L-BFGS-B", lower = c(0, 0),
upper = c(1, 1), control = optim.control)
if (sol$convergence || any(sol$par < 0 | sol$par >
1)) {
if (sol$convergence > 50) {
warning(gettextf("optimization difficulties: %s",
sol$message), domain = NA)
}
else stop("optimization failure")
}
beta <- sol$par[1L]
gamma <- sol$par[2L]
}
else {
error <- function(p) hw(alpha, beta, p)$SSE
gamma <- optimize(error, lower = 0, upper = 1)$minimum
}
}
}
else {
if (is.null(alpha)) {
if (is.null(beta)) {
error <- function(p) hw(p[1L], p[2L], gamma)$SSE
sol <- optim(c(optim.start["alpha"], optim.start["beta"]),
error, method = "L-BFGS-B", lower = c(0, 0),
upper = c(1, 1), control = optim.control)
if (sol$convergence || any(sol$par < 0 | sol$par >
1)) {
if (sol$convergence > 50) {
warning(gettextf("optimization difficulties: %s",
sol$message), domain = NA)
}
else stop("optimization failure")
}
alpha <- sol$par[1L]
beta <- sol$par[2L]
}
else {
error <- function(p) hw(p, beta, gamma)$SSE
alpha <- optimize(error, lower = 0, upper = 1)$minimum
}
}
else {
if (is.null(beta)) {
error <- function(p) hw(alpha, p, gamma)$SSE
beta <- optimize(error, lower = 0, upper = 1)$minimum
}
}
}
final.fit <- hw(alpha, beta, gamma)
fitted <- ts(cbind(xhat = final.fit$level[-len - 1], level = final.fit$level[-len -
1], trend = if (!is.logical(beta) || beta)
final.fit$trend[-len - 1], season = if (!is.logical(gamma) ||
gamma)
final.fit$seasonal[1L:len]), start = start(lag(x, k = 1 -
start.time)), frequency = frequency(x))
if (!is.logical(beta) || beta)
fitted[, 1] <- fitted[, 1] + fitted[, "trend"]
if (!is.logical(gamma) || gamma)
fitted[, 1] <- if (seasonal == "multiplicative")
fitted[, 1] * fitted[, "season"]
else fitted[, 1] + fitted[, "season"]
structure(list(fitted = fitted, x = x, alpha = alpha, beta = beta,
gamma = gamma, coefficients = c(a = final.fit$level[len +
1], b = if (!is.logical(beta) || beta) final.fit$trend[len +
1], s = if (!is.logical(gamma) || gamma) final.fit$seasonal[len +
1L:f]), seasonal = seasonal, SSE = final.fit$SSE,
call = match.call()), class = "HoltWinters")
}
Edit: I was confused about the context at first.
! is the logical-NOT operator in R.
As pointed out in the comments, R often allows users to pass arguments of different types. In this case ?HoltWinters says
gamma: gamma parameter used for the seasonal component. If set to
‘FALSE’, a non-seasonal model is fitted.
So gamma can be either a numeric value or a logical (FALSE) value.
Since this !gamma follows is.logical(gamma) && ..., it will only be evaluated if gamma is a logical (TRUE/FALSE) value. In this case, !gamma is equivalent to gamma==FALSE, but most programmers would shorten this to !gamma (so that FALSE becomes TRUE and TRUE becomes FALSE).
We wouldn't want to test gamma == FALSE without the is.logical() test first, because someone might have specified gamma = 0, in which case R would evaluate 0 == FALSE, which according to its coercion rules is TRUE.
This test could also have been written if (identical(gamma,FALSE)) - which would correctly evaluate both NULL and 0 as different from FALSE.
In contrast, if gamma were to be numeric, !gamma would be shorthand for gamma != 0.
According to R's rules for coercion from floating-point to logical, 0 gets converted to FALSE and any non-zero, non-NA value gets converted to TRUE (see this question for more detail).
Thus !gamma is equivalent to gamma!=0. Some old-school programmers use this for brevity; I don't think the brevity-clarity tradeoff is worth it, but that's just my opinion.
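A minimal sketch of the coercion rules discussed above (base R only):

!FALSE                  # TRUE : ordinary logical negation
!0                      # TRUE : numeric 0 is coerced to FALSE, then negated
!2.5                    # FALSE: any non-zero, non-NA numeric is coerced to TRUE
0 == FALSE              # TRUE : the comparison coerces FALSE to 0
identical(0, FALSE)     # FALSE: identical() does not coerce, types must match
identical(NULL, FALSE)  # FALSE: NULL is likewise distinct from FALSE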
I was wondering how one extracts the estimated parameters stored in an R object of class mle-class.
Here is an example:
x <- matrix(rnorm(300), ncol = 3)
x[x > 1] <- 1
require(tmvtnorm)
fit1 <- mle.tmvnorm(X = x, lower = rep(-Inf, 3), upper = rep(1, 3))
Now, fit1 is an object of class:
class(fit1)
[1] "mle"
attr(,"package")
[1] "stats4"
fit1 itself gives me:
fit1
Call:
mle(minuslogl = function (mu_1 = 0, mu_2 = 0, mu_3 = 0, sigma_1.1 = 1,
sigma_1.2 = 0, sigma_1.3 = 0, sigma_2.2 = 1, sigma_2.3 = 0,
sigma_3.3 = 1)
{
nf <- names(formals())
theta <- sapply(nf, function(x) {
eval(parse(text = x))
})
mean <- theta[1:n]
if (cholesky) {
L <- inv_vech(theta[-(1:n)])
L[lower.tri(L, diag = FALSE)] <- 0
sigma <- t(L) %*% L
}
else {
sigma <- inv_vech(theta[-(1:n)])
}
if (det(sigma) <= 0 || any(diag(sigma) < 0)) {
return(.Machine$integer.max)
}
f <- -(sum(dmvnorm(X, mean, sigma, log = TRUE)) - nrow(X) *
log(pmvnorm(lower = lower, upper = upper, mean = mean,
sigma = sigma)))
if (is.infinite(f) || is.na(f)) {
return(.Machine$integer.max)
}
f
}, start = as.list(c(0, 0, 0, 1, 0, 0, 1, 0, 1)), method = "BFGS",
fixed = list())
Coefficients:
mu_1 mu_2 mu_3 sigma_1.1 sigma_1.2 sigma_1.3
0.64218198 1.51720543 0.97047201 1.73395947 -0.03889188 0.14627774
sigma_2.2 sigma_2.3 sigma_3.3
2.18020597 0.38822509 1.49854600
My question is: how do I extract these coefficients from the object fit1?
Thanks again for your time, and for your help in answering this question!
coef is a generic function which extracts model coefficients from objects returned by modeling functions. coefficients is an alias for it.
Usage
coef(object, ...)
coefficients(object, ...)
So, coef(fit1) should work, and since mle is an S4 class the slot can also be read directly as fit1@coef.
https://stat.ethz.ch/R-manual/R-devel/library/stats/html/coef.html
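For completeness, a quick sketch showing both access routes on the fit above:

coef(fit1)  # the coef method that stats4 defines for "mle" objects
fit1@coef   # direct access to the S4 slot; returns the same named vector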
Sorry for this silly question: I will keep it just in case someone ends up looking.
fit1@coef
mu_1 mu_2 mu_3 sigma_1.1 sigma_1.2 sigma_1.3
0.64218198 1.51720543 0.97047201 1.73395947 -0.03889188 0.14627774
sigma_2.2 sigma_2.3 sigma_3.3
2.18020597 0.38822509 1.49854600
solves the query. Duh!
I'm trying to generate a random sample from the truncated GPD (Generalized Pareto Distribution). In order to do just that, I've begun with writing the CDF and quantile functions of the GPD:
##CDF of the GPD
pGPD <- function(q, xi = 1, mu = 0, beta = 1, lower.tail = TRUE){
  shape = xi
  location = mu
  scale = beta
  # Probability:
  p = .pepd(q, location, scale, shape, lower.tail)
  # Return Value:
  p
}

##Quantile function (inverse of CDF) of GPD
qGPD <- function(p, xi = 1, mu = 0, beta = 1, lower.tail = TRUE){
  shape = xi
  location = mu
  scale = beta
  # Quantiles:
  q = .qepd(p, location, scale, shape, lower.tail)
  # Return Value:
  q
}

##Generate random numbers of GPD-distribution
rGPD <- function(n, xi = 1, mu = 0, beta = 1){
  shape = xi
  location = mu
  scale = beta
  # Random Variates:
  r = .repd(n, location, scale, shape)
  # Return Value:
  r
}

.pepd <- function(q, location = 0, scale = 1, shape = 0, lower.tail = TRUE) {
  # Check:
  stopifnot(min(scale) > 0)
  stopifnot(length(shape) == 1)
  # Probability:
  q <- pmax(q - location, 0)/scale
  if (shape == 0)
    p <- 1 - exp(-q)
  else {
    p <- pmax(1 + shape * q, 0)
    p <- 1 - p^(-1/shape)
  }
  # Lower Tail:
  if (!lower.tail)
    p <- 1 - p
  # Return Value:
  p
}

.qepd <- function(p, location = 0, scale = 1, shape = 0, lower.tail = TRUE){
  # Check:
  stopifnot(min(scale) > 0)
  stopifnot(length(shape) == 1)
  stopifnot(min(p, na.rm = TRUE) >= 0)
  stopifnot(max(p, na.rm = TRUE) <= 1)
  # Lower Tail:
  if (lower.tail)
    p <- 1 - p
  # Quantiles:
  if (shape == 0) {
    q = location - scale * log(p)
  } else {
    q = location + scale * (p^(-shape) - 1)/shape
  }
  # Return Value:
  q
}

.repd <- function(n, location = 0, scale = 1, shape = 0) {
  # Check:
  stopifnot(min(scale) > 0)
  stopifnot(length(shape) == 1)
  # Random Variates:
  if (shape == 0) {
    r = location + scale * rexp(n)
  } else {
    r = location + scale * (runif(n)^(-shape) - 1)/shape
  }
  # Return Value:
  r
}
This all works perfectly. Now, I want to generate numbers from the Truncated GPD and to do that, I've used the following relation:
Q_{tGPD}(p) = Q_{GPD}(p * F_{GPD}(T)),
where Q denotes the quantile function of the distribution in its subscript and F_{GPD}(T) is the CDF of the GPD evaluated at the truncation point T. Using this, I've written the following code:
##Quantiles truncated GPD
qtGPD <- function(p, q, xi = 1, mu = 0, beta = 1, lower.tail = TRUE){
  ans = qGPD(p * pGPD(q, xi, mu, beta, lower.tail),
             xi, mu, beta, lower.tail)
  print(paste0("Generated from the ", 100 * p, "th% quantile"))
  return(ans)
}

rtGPD <- function(n, q, xi = 1, mu = 0, beta = 1, lower.tail = TRUE){
  qtGPD(p = runif(n), q, xi, mu, beta, lower.tail)
}
But now, if I want, for example, to generate numbers from the truncated GPD at the 99th percentile with the function rtGPD, it doesn't work, because my p value keeps changing. So what am I doing wrong, and how can I fix this? All I want is to generate numbers from the truncated GPD at the 99th percentile, for example, or at the 97.5th, or... you get the idea.
Thanks in advance!
EDIT: For example, if you run the following code:
set.seed(10)
A= rGPD(10)
sort(A)
qtGPD(0.99,2)
rtGPD(10,2)
Then you should get a vector A with random values from the GPD, which can be bigger than 1, as expected.
With the command qtGPD(0.99, 2) one obtains
[1] "Generated from the 99th% quantile"
[1] 1.941176
which is also OK. But if you then run rtGPD(10, 2), the function that I want to give me random values from the truncated GPD, you get different values of p from runif(10), all generated from different quantiles. I just want to generate/simulate random numbers from the truncated GPD at a certain quantile, for example the 99th percentile. But this code isn't letting me do that.
binomial <- function(nmax = 100,
                     thr = 0.95,
                     alpha = 1,
                     beta = 1,
                     p_true = 0.5,
                     p_0 = 0.5){
  for(j in seq.int(nmax, 0)){
    if(pbeta(q = p_0, shape1 = alpha + j, shape2 = beta + nmax - j, lower.tail = FALSE) < thr){
      targetatnmax <- j + 1
    } else {
      print(
      break
    }
  }
  result <- list(Success = Success, targeratnmax = targetatnmax)
  return(result)
}
res = binomial(nmax, thr, alpha, beta, p_true, p_0)
res
In my program I am trying to find the number of successes needed to exceed the threshold thr = 0.95. I am trying to use a for loop with if/else statements, but when I run it I don't get the value I need. I know the value should be 59, but I cannot seem to get it. I know the code looks really messy, but that's only because I have been playing around with it for hours. Please, any help is appreciated!
Here is your code after clean-up:
binomial <- function(nmax = 100,
                     thr = 0.95,
                     alpha = 1,
                     beta = 1,
                     p_true = 0.5,
                     p_0 = 0.5){
  targetatnmax <- 0
  for(j in seq.int(0, nmax)){
    if(pbeta(q = p_0, shape1 = alpha + j, shape2 = beta + nmax - j, lower.tail = FALSE) < thr){
      targetatnmax <- j + 1
    } else {
      break
    }
  }
  result <- list(targeratnmax = targetatnmax)
  return(result)
}

res = binomial()
res
#$targeratnmax
#[1] 59
The main problem (other than the syntax errors and nonexistent objects) was that your loop ran from nmax down to 0 instead of the other way around.
There is probably potential for optimization, but my understanding of the statistics is not good enough to really tackle that.
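For what it's worth, one possible vectorized sketch (my own rewrite, only checked against the loop's logic above): compute the posterior tail probability for every j at once and locate the first j at which it reaches the threshold, which is exactly where the loop breaks.

binomial_vec <- function(nmax = 100, thr = 0.95, alpha = 1, beta = 1, p_0 = 0.5){
  # dropping the unused p_true argument; tail_prob[i] corresponds to j = i - 1
  j <- seq.int(0, nmax)
  tail_prob <- pbeta(p_0, alpha + j, beta + nmax - j, lower.tail = FALSE)
  # which() is 1-based while j starts at 0, so subtract 1 to recover j;
  # this matches the loop's final targetatnmax (last j + 1 before the break)
  list(targeratnmax = which(tail_prob >= thr)[1] - 1)
}
binomial_vec()  # $targeratnmax: 59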