I want to determine which variables are best suited for the model. To do this, I use the method of all possible regressions, i.e. I build models with all possible combinations of predictors. For example:
library(fpp2)
# uschange is a dataset from the "fpp2" package (quarterly data)
train <- ts(uschange[1:180, ], start = 1970, frequency = 4)
fit1 <- tslm(Consumption ~ Income, data = train)
fit2 <- tslm(Consumption ~ Production, data = train)
fit3 <- tslm(Consumption ~ Savings, data = train)
fit4 <- tslm(Consumption ~ Unemployment, data = train)
fit5 <- tslm(Consumption ~ Income + Production, data = train)
fit6 <- tslm(Consumption ~ Income + Savings, data = train)
# and so on...
After that, I need to evaluate the models in two ways:
test <- data.frame(
  Income       = uschange[181:187, 2],
  Production   = uschange[181:187, 3],
  Savings      = uschange[181:187, 4],
  Unemployment = uschange[181:187, 5]
)
y <- uschange[181:187, 1]
CV(fit1)
accuracy(forecast(fit1, test), y)
CV(fit2)
accuracy(forecast(fit2, test), y)
CV(fit3)
accuracy(forecast(fit3, test), y)
# and so on...
As a result, I want to get the model with the smallest AICc value from CV() and the smallest error (for example, MAE from accuracy()).
How can I do this automatically?
EDIT:
> dput(head(uschange, 20))
structure(c(0.615986218, 0.46037569, 0.876791423, -0.274245141,
1.897370758, 0.911992909, 0.794538845, 1.648587467, 1.313722178,
1.891474954, 1.530714, 2.318294715, 1.81073916, -0.041739961,
0.354235565, -0.291632155, -0.877027936, 0.351135548, 0.409597702,
-1.475808634, 0.972261043, 1.169084717, 1.55327055, -0.255272381,
1.987153628, 1.447334175, 0.531811929, 1.160125137, 0.457011505,
1.016624409, 1.904101264, 3.890258661, 0.708252663, 0.79430954,
0.433818275, 1.093809792, -1.661684821, -0.938353209, 0.094487794,
-0.122595985, -2.452700312, -0.551525087, -0.358707862, -2.185454855,
1.90973412, 0.901535843, 0.308019416, 2.291304415, 4.149573867,
1.89062398, 1.273352897, 3.436892066, 2.799076357, 0.817688618,
0.868996932, 1.472961869, -0.882483578, 0.074279194, -0.41314971,
-4.064118932, 4.810311502, 7.287992337, 7.289013063, 0.985229644,
3.657770614, 6.051341804, -0.445832214, -1.53087186, -4.35859438,
-5.054525795, 5.809959038, 16.04471706, -5.348868495, 8.426034362,
2.758795652, 11.14642986, -2.533514487, -6.592644641, 0.51717884,
11.3433954, 0.9, 0.5, 0.5, 0.7, -0.1, -0.1, 0.1, 0, -0.2, -0.1,
-0.2, -0.3, -0.3, 0, -0.1, 0.1, 0.2, 0.3, 0.5, 1.3), .Dim = c(20L,
5L), .Dimnames = list(NULL, c("Consumption", "Income", "Production",
"Savings", "Unemployment")), .Tsp = c(1970, 1974.75, 4), class = c("mts",
"ts", "matrix"))
Try this:
# get all names of predictors
cols <- colnames(uschange)[-1]
# create all combinations
out <- unlist(lapply(seq_along(cols), function(n) {
  combn(cols, n, FUN = function(row) paste0("Consumption ~ ", paste0(row, collapse = "+")))
}))
# fit models (coercing each string to a formula):
mods <- lapply(out, function(frml) tslm(as.formula(frml), data = train))
# define helper function:
cv_this <- function(x) {
  return(list("cv" = CV(x), "acc" = accuracy(forecast(x, test), y)))
}
# run helper function over all models to get evaluations out:
lapply(mods, cv_this)
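To rank all candidates automatically, you could then pull the AICc and the test-set MAE out of every fit into one data frame. A sketch, assuming the out and mods objects above and the test data frame and y from the question are in scope:
# collect AICc and test-set MAE for every candidate model
results <- data.frame(
  formula = out,
  AICc = sapply(mods, function(m) CV(m)["AICc"]),
  MAE  = sapply(mods, function(m) accuracy(forecast(m, newdata = test), y)["Test set", "MAE"])
)
results[which.min(results$AICc), ]  # best model by AICc
results[which.min(results$MAE), ]   # best model by test MAE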
I have a training df with 2 columns like:
      a   b
1  1000  20
2  1008  13
...
n   ...  ...
Now, I am required to find a 95% CI for the estimate of 'b' based on a specific 'a' value, with a 'k' value of my choice, and to compare the CI with the results for other specific values of 'k'. My question is: how can I perform a bootstrap with 1000 replicates for this, given that I am required to use a knn model fitted on the training data with kernel = 'gaussian' and k restricted to the range 1-20?
I have found that the best k for this model is k = 5, and I had a go at the bootstrap, but it doesn't work:
library(kknn)
library(boot)
boot.kn = function(formula, data, indices)
{
  # Create a bootstrapped version
  d = data[indices, ]
  # Fit a model for bs
  fit.kn = fitted(train.kknn(formula, data, kernel = "gaussian", ks = 5))
  # Do I even need this complicated block
  target = as.character(fit.kn$terms[[2]])
  rv = my.pred.stats(fit.kn, d[, target])
  return(rv)
}
bs = boot(data=df, statistic=boot.kn, R=1000, formula=b ~ a)
boot.ci(bs,conf=0.95,type="bca")
Please let me know if you need more info or if I'm not being clear enough. Thank you.
Here is a way to regress b on a with the k-nearest neighbors algorithm.
First, a data set. This is a subset of the iris data set, keeping the first two columns. One row is removed to later be the new data.
i <- which(iris$Sepal.Length == 5.3)
df1 <- iris[-i, 1:2]
newdata <- iris[i, 1:2]
names(df1) <- c("a", "b")
names(newdata) <- c("a", "b")
Now load the packages to be used and determine the optimal value for k with package kknn.
library(caret)
library(kknn)
library(boot)
fit <- kknn::train.kknn(
formula = b ~ a,
data = df1,
kmax = 15,
kernel = "gaussian",
distance = 1
)
k <- fit$best.parameters$k
k
#[1] 9
Now bootstrap the prediction for the new point a = 5.3.
boot.kn <- function(data, indices, formula, newdata, k){
  d <- data[indices, ]
  fit <- knnreg(formula, data = d, k = k)  # pass the tuned k on to knnreg
  predict(fit, newdata = newdata)
}
set.seed(2021)
R <- 1e4
bs <- boot(df1, boot.kn, R = R, formula = b ~ a, newdata = newdata, k = k)
ci <- boot.ci(bs, level = 0.95, type = "bca")
ci
#BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
#Based on 10000 bootstrap replicates
#
#CALL :
#boot.ci(boot.out = bs, type = "bca", level = 0.95)
#
#Intervals :
#Level BCa
#95% ( 3.177, 3.740 )
#Calculations and Intervals on Original Scale
Plot the results.
old_par <- par(mfrow = c(2, 1),
oma = c(5, 4, 0, 0) + 0.1,
mar = c(1, 1, 1, 1) + 0.1)
hist(bs$t, main = "Histogram of bootstrap values")
abline(v = 3.7, col = "red")
abline(v = mean(bs$t), col = "blue")
abline(v = ci$bca[4:5], col = "blue", lty = "dashed")
plot(b ~ a, df1)
points(5.3, 3.7, col = "red", pch = 19)
points(5.3, mean(bs$t), col = "blue", pch = 19)
arrows(x0 = 5.3, y0 = ci$bca[4],
x1 = 5.3, y1 = ci$bca[5],
col = "blue", angle = 90, code = 3)
par(old_par)
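Since the original question also asks for comparing the CI across different values of k, here is a rough sketch reusing boot.kn, df1 and newdata from above; the candidate k values below are arbitrary, and boot.kn passes k on to knnreg:
ks <- c(3, 5, 9, 15)
cis <- lapply(ks, function(kk) {
  bs_k <- boot(df1, boot.kn, R = 1000, formula = b ~ a, newdata = newdata, k = kk)
  boot.ci(bs_k, type = "bca")$bca[4:5]  # lower and upper BCa limits
})
names(cis) <- paste0("k = ", ks)
cis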
I am drawing a nomogram based on the dataset PimaIndiansDiabetes using the package rms:
library(mlbench)
data(PimaIndiansDiabetes)
library(rms)
ddist <- datadist(PimaIndiansDiabetes)
options(datadist='ddist')
lrm_model <- lrm(diabetes ~ .,
                 data = PimaIndiansDiabetes,
                 x = TRUE, y = TRUE)
nom <- nomogram(lrm_model,
                fun = plogis,
                fun.at = c(0.01, 0.05, seq(0.1, 0.9, by = 0.2), 0.95, 0.99),
                abbrev = TRUE,
                lp = FALSE,
                vnames = "labels",
                varname.label = TRUE,
                funlabel = "Diabetes")
plot(nom,
     fun.side = c(1, 3, 1, 3, 1, 3, 1, 3, 1),
     label.every = 1,
     lmgp = 0.15,
     xfrac = .4)
Below is the plot:
How do I make the axis names italic? (I would actually prefer it if only the 8 predictors' names were italic.)
I am trying to use a Gaussian Process Regression (GPR) model to predict hourly streamflow discharges in a river. I've got good results applying the caret::train() function with the kernlab method (thanks, Kuhn!).
Since uncertainty quantification is one of the main inherent advantages of GPR, I would like to know if anyone could help me access the prediction intervals for the test dataset.
I'll include an extract of the code I've been working on. Since my real data are huge (and, honestly, I don't know how to post them here), I'll use data(airquality) as an example. The goal in this example is to predict airquality$Ozone, using lagged values of airquality$Temp as predictors.
rm(list = ls())
data(airquality)
airquality = na.omit(as.data.frame(airquality)); str(airquality)
library(tidyverse)
library(magrittr)
airquality$Ozone %>% plot(type = 'l')
lines(airquality$Temp, col = 2)
legend("topleft", legend = c("Ozone", "Temperature"),
col=c(1, 2), lty = 1:1, cex = 0.7, text.font = 4, inset = 0.01,
box.lty=0, lwd = 1)
attach(airquality)
df_lags <- airquality %>%
mutate(Temp_lag1 = lag(n = 1L, Temp)) %>%
na.omit()
ESM_train = data.frame(df_lags[1:81, ]) # Training Observed 75% dataset
ESM_test = data.frame(df_lags[82:nrow(df_lags), ]) # Testing Observed 25% dataset
grid_gaussprRadial = expand.grid(.sigma = c(0.001, 0.01, 0.05, 0.1, 0.5, 1, 2)) # sigma values to search over for GPR
# TRAIN MODEL ############################
# Tuning set
library(caret)
set.seed(111)
cvCtrl <- trainControl(
method ="repeatedcv",
repeats = 1,
number = 20,
allowParallel = TRUE,
verboseIter = TRUE,
savePredictions = "final")
# Train (approx. 4 seconds simulation time)
attach(ESM_train)
set.seed(111)
system.time(Model_train <- caret::train(Ozone ~ Temp + Temp_lag1,
                                        trControl = cvCtrl,
                                        data = ESM_train,
                                        metric = "MAE", # Using MAE since minimum values are my focus
                                        preProcess = c("center", "scale"),
                                        method = "gaussprRadial", # Setting RBF kernel function
                                        tuneGrid = grid_gaussprRadial,
                                        maxit = 1000,
                                        linout = 1)) # Regression type
plot(Model_train)
Model_train
ESM_results_train <- Model_train$resample %>% mutate(Model = "") # K-fold Training measures
# Select the interested TRAIN data and arrange them as dataframe
Ozone_Obs_Tr = Model_train$pred$obs
Ozone_sim = Model_train$pred$pred
Resid = Ozone_Obs_Tr - Ozone_sim
train_results = data.frame(Ozone_Obs_Tr,
Ozone_sim,
Resid)
# Plot Obs x Simulated train results
library(ggplot2)
ggplot(data = train_results, aes(x = Ozone_Obs_Tr, y = Ozone_sim)) +
geom_point() +
geom_abline(intercept = 0, slope = 1, color = "black")
# TEST MODEL ############################
# From "ESM_test" dataframe, we predict ESM Ozone time series, adding it in "ESM_forecasted" dataframe
ESM_forecasted = ESM_test %>%
mutate(Ozone_Pred = predict(Model_train, newdata = ESM_test, variance.model = TRUE))
str(ESM_forecasted)
# Select the interested TEST data and arrange them as a dataframe
Ozone_Obs = ESM_forecasted$Ozone
Ozone_Pred = ESM_forecasted$Ozone_Pred
# Plot Obs x Predicted TEST results
ggplot(data = ESM_forecasted, aes(x = Ozone_Obs, y = Ozone_Pred)) +
geom_point() +
geom_abline(intercept = 0, slope = 1, color = "black")
# Model performance #####
library(hydroGOF)
gof_TR = gof(Ozone_sim, Ozone_Obs_Tr)
gof_TEST = gof(Ozone_Pred,Ozone_Obs)
Performances = data.frame(
Train = gof_TR,
Test = gof_TEST
); Performances
# Plot the TEST prediction
attach(ESM_forecasted)
plot(Ozone_Obs, type = "l", xlab = "", ylab = "", ylim = range(Ozone_Obs, Ozone_Pred))
lines(Ozone_Pred , col = "coral2", lty = 2, lwd = 2)
legend("top", legend = c("Ozone Obs Test", "Ozone Pred Test"),
col=c(1, "coral2"), lty = 1:2, cex = 0.7, text.font = 4, inset = 0.01, box.lty=0, lwd = 2)
These last lines generate the following plot:
The next, and last, step would be to extract the prediction intervals, which are based on a Gaussian distribution around each prediction point, and plot them together with this last plot.
The caret::train() approach returned better predictions than, for instance, just kernlab::gausspr(), or even the tgp::bgp() package. For both of those I could find the prediction interval.
For example, to pick up the prediction intervals via tgp::bgp(), it could be done by typing:
Upper_Bound <- Ozone_Pred$ZZ.q2 # approx. Ozone_Pred + 2 * sigma
Lower_Bound <- Ozone_Pred$ZZ.q1 # approx. Ozone_Pred - 2 * sigma
Therefore, via caret::train(), I hope the required standard deviations can be found by typing something like
Model_train$...
or maybe with
Ozone_Pred$...
Moreover, at https://stats.stackexchange.com/questions/414079/can-mad-median-absolute-deviation-or-mae-mean-absolute-error-be-used-to-calc,
Stephan Kolassa explained that we could estimate prediction intervals from the MAE, or even the RMSE. But I didn't understand whether that applies to my case, since the MAE I got here is just a comparison between the observed and predicted Ozone data.
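If I understood that idea correctly, a very rough version of it (just a sketch on my side, assuming roughly normal residuals) would be to widen each prediction by the spread of the training residuals, something like:
# rough empirical interval from training residuals (my sketch, not the GPR predictive variance)
resid_sd <- sd(Resid)  # "Resid" comes from the training block above
Upper_Naive <- Ozone_Pred + 2 * resid_sd
Lower_Naive <- Ozone_Pred - 2 * resid_sd
But I am not sure this is the proper GPR predictive variance, which is what I am really after.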
Please help, this solution is very important to me! I think I am close to obtaining my main results, but I don't know what else to try.
Thanks a lot, friends!
I don't really know how the caret framework works, but getting a prediction interval for a GP regression with a Gaussian likelihood is easy enough to do manually.
First we just need a function for the squared exponential kernel, also called the radial basis function kernel, which is what you were using. sf here is the scale factor (unused in the kernlab implementation), and ell is the length scale, called sigma in the kernlab implementation:
covSEiso <- function(x1, x2 = x1, sf = 1.0, ell = 1.0) {
  sf <- sf^2
  ell <- -0.5 * (1 / (ell^2))
  n <- nrow(x1)
  m <- nrow(x2)
  d <- ncol(x1)
  result <- matrix(0, nrow = n, ncol = m)
  for ( j in 1:m ) {
    for ( i in 1:n ) {
      result[i, j] <- sf * exp(ell * sum((x1[i, ] - x2[j, ])^2))
    }
  }
  return(result)
}
I'm not sure what your code says about which length scale to use; below I will use a length scale of 25 and scale factor of 50 (obtained via GPML's hyperparameter optimization routines). Then we use the covSEiso() function above to get the relevant covariances, and the rest is application of basic Gaussian identities. I would refer you to Chapter 2 of Rasmussen and Williams (2006) (graciously provided for free online).
data(airquality)
library(tidyverse)
library(magrittr)
df_lags <- airquality %>%
mutate(Temp_lag1 = lag(n = 1L, Temp)) %>%
na.omit()
ESM_train <- data.frame(df_lags[1:81, ]) # Training Data 75% dataset
ESM_test <- data.frame(df_lags[82:nrow(df_lags), ]) # Testing Data 25% dataset
## For convenience I'll define separately the training and test inputs
X <- ESM_train[ , c("Temp", "Temp_lag1")]
Xstar <- ESM_test[ , c("Temp", "Temp_lag1")]
## Get the kernel manually
K <- covSEiso(X, ell = 25, sf = 50)
## We also need covariance between the test cases
Kstar <- covSEiso(Xstar, X, ell = 25, sf = 50)
Ktest <- covSEiso(Xstar, ell = 25, sf = 50)
## Now the 95% credible region for the posterior is
predictive_mean <- Kstar %*% solve(K + diag(nrow(K))) %*% ESM_train$Ozone
predictive_var <- Ktest - (Kstar %*% solve(K + diag(nrow(K))) %*% t(Kstar))
## Then for the prediction interval we only need to add the observation noise
z <- sqrt(diag(predictive_var)) + 25
interval_high <- predictive_mean + 2 * z
interval_low <- predictive_mean - 2 * z
Then we can check out the prediction intervals.
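A minimal plotting sketch for these manual intervals (mirroring the gpmlr plots further below, and assuming the objects just defined) might be:
## sketch: plot the manual prediction interval against Temp
idx <- order(ESM_test$Temp)
plot(ESM_test$Temp, ESM_test$Ozone, type = "n", xlab = "Temp", ylab = "Ozone",
     ylim = range(c(interval_high, interval_low, ESM_test$Ozone)))
polygon(c(ESM_test$Temp[idx], rev(ESM_test$Temp[idx])),
        c(interval_high[idx], rev(interval_low[idx])),
        border = NA, col = "#80808080")
lines(ESM_test$Temp[idx], predictive_mean[idx])
points(ESM_test$Temp, ESM_test$Ozone, pch = 19)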
This is all pretty easy to do via my gpmlr package (available on GitHub), which can call GPML from R if you have Octave installed:
data(airquality)
library(tidyverse)
library(magrittr)
library(gpmlr)
df_lags <- airquality %>%
mutate(Temp_lag1 = lag(n = 1L, Temp)) %>%
na.omit()
ESM_train <- data.frame(df_lags[1:81, ]) # Training Data 75% dataset
ESM_test <- data.frame(df_lags[82:nrow(df_lags), ]) # Testing Data 25% dataset
X <- as.matrix(ESM_train[ , c("Temp", "Temp_lag1")])
y <- ESM_train$Ozone
Xs <- as.matrix(ESM_test[ , c("Temp", "Temp_lag1")])
ys <- ESM_test$Ozone
hyp0 <- list(mean = numeric(), cov = c(0, 0), lik = 0)
hyp <- set_hyperparameters(hyp0, "infExact", "meanZero", "covSEiso","likGauss",
X, y)
gp_res <- gp(hyp, "infExact", "meanZero", "covSEiso", "likGauss", X, y, Xs, ys)
predictive_mean <- gp_res$YMU
interval_high <- gp_res$YMU + 2 * sqrt(gp_res$YS2)
interval_low <- gp_res$YMU - 2 * sqrt(gp_res$YS2)
Then just plot the predictions, as above:
plot(NULL, xlab = "", ylab = "", xaxt = "n", yaxt = "n",
xlim = range(ESM_test$Temp), ylim = range(c(interval_high, interval_low)))
axis(1, tick = FALSE, line = -0.75)
axis(2, tick = FALSE, line = -0.75)
mtext("Temp", 1, 1.5)
mtext("Ozone", 2, 1.5)
idx <- order(ESM_test$Temp)
polygon(c(ESM_test$Temp[idx], rev(ESM_test$Temp[idx])),
c(interval_high[idx], rev(interval_low[idx])),
border = NA, col = "#80808080")
lines(ESM_test$Temp[idx], predictive_mean[idx])
points(ESM_test$Temp, ESM_test$Ozone, pch = 19)
plot(NULL, xlab = "", ylab = "", xaxt = "n", yaxt = "n",
xlim = range(ESM_test$Temp_lag1), ylim = range(c(interval_high, interval_low)))
axis(1, tick = FALSE, line = -0.75)
axis(2, tick = FALSE, line = -0.75)
mtext("Temp_lag1", 1, 1.5)
mtext("Ozone", 2, 1.5)
idx <- order(ESM_test$Temp_lag1)
polygon(c(ESM_test$Temp_lag1[idx], rev(ESM_test$Temp_lag1[idx])),
c(interval_high[idx], rev(interval_low[idx])),
border = NA, col = "#80808080")
lines(ESM_test$Temp_lag1[idx], predictive_mean[idx])
points(ESM_test$Temp_lag1, ESM_test$Ozone, pch = 19)
I want to calculate predictions for my data from multiple models and then classify them at all the different probability thresholds. The result is a list.
df<-iris
df$y<-sample(0:1,nrow(df),replace=TRUE)
set.seed(101)
#Now Selecting 80% of data as sample from total 'n' rows of the data
sample <- sample.int(n = nrow(df), size = floor(.8*nrow(df)), replace = F)
train <- df[sample, ]
test <- df[-sample, ]
Then I create the logistic models:
full <- glm(y~., data = train, family = "binomial")
min <- glm( y~ 1, data = train, family = "binomial")
backward <- step(full,direction = "backward",trace=0)
forward <- step(min,scope=list(lower=min, upper=full),direction = "forward",trace=0)
model2<- glm(y~Sepal.Length+Sepal.Width , data = train, family = "binomial")
models<-list(backward,forward,model2)
prediction<- lapply(models, function(x){predict(x,newdata=test,type="response")})
First of all, I have a list of predictions. Then I created a vector with all the possible probability thresholds.
p <- seq(from = 0.1, to = 0.9, by = 0.5)
The problem is that I want to apply different break points (thresholds). I tried the map2 function from the purrr package, but it doesn't work.
pred = map2(prediction,p, function(x,pi){ifelse(x > pi, 1, 0)})
The error is:
Error: .x (3) and .y (2) are different lengths
Can anyone help?
I think it is best to change lapply to sapply; then I will have a data frame.
prediction<- sapply(models, function(x){predict(x, newdata=test,type="response")},
simplify = T,USE.NAMES = TRUE)
Then could I use the pmap function?
Thanks.
EDIT: I updated the question with all the code.
See if this makes sense:
library(purrr)  # for cross2() and map() used below
df <- iris
df$y<-sample(0:1,nrow(df),replace=TRUE)
set.seed(101)
#Now Selecting 80% of data as sample from total 'n' rows of the data
sample <- sample.int(n = nrow(df), size = floor(.8*nrow(df)), replace = F)
train <- df[sample, ]
test <- df[-sample, ]
full <- glm(y~., data = train, family = "binomial")
min <- glm( y~ 1, data = train, family = "binomial")
backward <- step(full,direction = "backward",trace=0)
forward <- step(min,scope=list(lower=min, upper=full),direction = "forward",trace=0)
model2<- glm(y~Sepal.Length+Sepal.Width , data = train, family = "binomial")
models<-list(backward,forward,model2)
prediction<- lapply(models, function(x){predict(x,newdata=test,type="response")})
p <- seq(from = 0.1, to = 0.9, by = 0.5)
combos <- cross2(prediction, p)  # every model x threshold combination
pred <- map(combos,
            function(combination) {
              x  <- combination[[1]]
              pi <- combination[[2]]
              ifelse(x > pi, 1, 0)
            })
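If you would rather keep the output organised by threshold and model, here is a base R sketch using the same prediction list and p vector (the "cutoff_" names are just for illustration):
# sketch: nested list, one element per threshold, each holding the three models' classifications
pred_by_threshold <- lapply(setNames(p, paste0("cutoff_", p)), function(pi) {
  lapply(prediction, function(x) ifelse(x > pi, 1, 0))
})
str(pred_by_threshold, max.level = 2)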
I'm working on a model for variable y, in which I intend to use time as an explanatory variable. I've chosen a Gompertz and a logistic curve as candidates, but when I try to estimate the coefficients (using both nls and nls2), I end up getting different errors (singularity or step factor reduced below 'minFactor'). I would really appreciate any help. Here is my code and a dput version of the info object.
I chose the initial values according to the criteria in http://www.metla.fi/silvafennica/full/sf33/sf334327.pdf
library(nls2)
> dput(info)
structure(list(y = c(0.308, 0.279, 0.156, 0.214, 0.224, 0.222,
0.19, 0.139, 0.111, 0.17, 0.155, 0.198, 0.811, 0.688, 0.543,
0.536, 0.587, 0.765, 0.667, 0.811, 0.587, 0.617, 0.586, 0.633,
2.231, 2.202, 1.396, 1.442, 1.704, 2.59, 2.304, 3.026, 2.7, 3.275,
3.349, 3.936, 9.212, 8.773, 6.431, 6.983, 7.169, 9.756, 10.951,
13.938, 14.378, 18.406, 24.079, 28.462, 51.461, 46.555, 39.116,
43.982, 41.722), t = 1:53), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -53L))
summary(gomp_nls <- nls2(y ~ alpha*exp(-beta*exp(-gamma*t)),
                         data = info,
                         start = list(alpha = 40, beta = 4.9, gamma = 0.02),
                         algorithm = "default")
)
summary(logist_nls <- nls2(y ~ alpha/(1+beta*exp(-gamma*t)),
                           data = info,
                           start = list(alpha = 40, beta = 128, gamma = 0.02),
                           algorithm = "default")
)
I'd appreciate any help.
The "default" algorithm for nls2 is to use nls. You want to specify "brute-force" or one of the other algorithms for finding an initial value. The starting value should be a data frame of two rows such that it will fill in the hypercube so defined with potential starting values.
It will then evaluate the residual sum of squares at each of those starting values and return the starting values at which the formula gives the least sum of squares.
If you find that the result returned by nls2 is at the boundary of the region you defined, then enlarge the region and try again. (You might not need this step if the starting values returned are good enough anyway.)
Finally run nls with the starting values you found.
library(nls2)
## 1
fo1 <- y ~ alpha*exp(-beta*exp(-gamma*t))
st1 <- data.frame(alpha = c(10, 100), beta = c(1, 100), gamma = c(0.01, 0.20))
fm1.0 <- nls2(fo1, data = info, start = st1, algorithm = "brute-force")
fm1 <- nls(fo1, data = info, start = coef(fm1.0))
## 2
fo2 <- y ~ alpha/(1+beta*exp(-gamma*t))
st2 <- data.frame(alpha = c(10, 1000), beta = c(1, 10000), gamma = c(0.01, 0.20))
fm2.0 <- nls2(fo2, data = info, start = st2, algorithm = "brute-force")
fm2 <- nls(fo2, data = info, start = coef(fm2.0))
# plot both fits
plot(y ~ t, info)
lines(fitted(fm1) ~ t, info, col = "blue")
lines(fitted(fm2) ~ t, info, col = "red")
Note
Note that for the data shown these two 2-parameter exponential models fit reasonably well, so if you are only interested in the range where y rises exponentially then these could be alternatives to consider. (The first one below is better because its coefficients are more similar in magnitude. The second one may have scaling problems.)
fm3 <- nls(y ~ a * exp(b/t), info, start = c(a = 1, b = 1))
fm4 <- nls(y ~ a * t^b, info, start = c(a = .001, b = 6))
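To compare these alternatives visually, a small sketch reusing the plot from above (colors chosen arbitrarily):
# overlay the two alternative fits on the earlier plot
plot(y ~ t, info)
lines(fitted(fm1) ~ t, info, col = "blue")
lines(fitted(fm2) ~ t, info, col = "red")
lines(fitted(fm3) ~ t, info, col = "darkgreen", lty = 2)
lines(fitted(fm4) ~ t, info, col = "purple", lty = 2)
legend("topleft", legend = c("Gompertz", "logistic", "a*exp(b/t)", "a*t^b"),
       col = c("blue", "red", "darkgreen", "purple"), lty = c(1, 1, 2, 2))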