What is RMSE in the results object of the R caret package?

Why is result1 different from result2? Intuitively, I would think that truc$results$RMSE is the root mean squared error of the forecasts, but apparently it is not.
library(caret)
x <- data.frame(x = rnorm(15))
y <- x$x + rnorm(15)
myTimeControl <- trainControl(method = "timeslice", initialWindow = 10, horizon = 1,
                              fixedWindow = FALSE, savePredictions = TRUE)
truc <- train(x, y, method = "lm", metric = "RMSE",
              trControl = myTimeControl, preProc = c("center", "scale"))
result1 <- sqrt(mean((truc$pred$pred-truc$pred$obs)^2))
result2 <- truc$results$RMSE
result1
result2

If you invert the mean and the sqrt, you get the same result. That is because caret computes the RMSE within each resample and then averages those per-resample values. With horizon = 1, every time slice holds out a single observation, so each per-resample RMSE is just the absolute error of that one forecast, and the reported truc$results$RMSE equals the mean of the absolute errors rather than the pooled root mean squared error:
result1 <- mean(sqrt((truc$pred$pred-truc$pred$obs)^2))
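You can confirm the averaging from the saved predictions, since savePredictions = TRUE keeps a Resample column in truc$pred (a small verification sketch using the truc object fitted above):
# recompute the RMSE within each time slice, then average across slices
per_slice_rmse <- tapply((truc$pred$pred - truc$pred$obs)^2,
                         truc$pred$Resample,
                         function(e2) sqrt(mean(e2)))
mean(per_slice_rmse)   # equals truc$results$RMSE
truc$results$RMSE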

How to implement knn based on weights

I would like to implement the weighted kNN algorithm but I don't know how to do it. I know that I can use kknn, and I suppose it can also be done with knn. The train() function in caret has a "weights" option, but I can't find the solution. Any suggestion?
I use the following code in R:
library(caret)
library(corrplot)
glass <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data",
col.names=c("","RI","Na","Mg","Al","Si","K","Ca","Ba","Fe","Type"))
str(glass)
head(glass)
glass_1<- glass[,-7]
glass_2<- glass_1[,-7]
head(glass_2)
glass<- glass_2
standard.features <- scale(glass[,2:8])
data <- cbind(standard.features,glass[9])
anyNA(data)
head(data)
corrplot(cor(data))
data$Type<-factor(data$Type)
inTraining <- createDataPartition(data$Type, p = .7, list = FALSE, times =1 )
training <- data[ inTraining,]
testing <- data[-inTraining,]
prop.table(table(training$Type))
prop.table(table(testing$Type))
dim(training); dim(testing);
summary(data)
fitControl <- trainControl(## 5-fold CV
                           method = "cv",
                           number = 5
                           ## repeated ten times
                           # repeats = 5
                           )
#k_value <- expand.grid(kmax = 3, distance = 2, kernel = "optimal")
k_value <- expand.grid(k = 3)
set.seed(825)
knn_Fit <- train(Type ~ ., data = training, weights = ????,
                 method = "knn", tuneGrid = k_value,
                 trControl = fitControl)
## This last option is actually one
## for gbm() that passes through
#verbose = FALSE)
knn_Fit
knn_Fit$finalModel
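A possible approach (a sketch, not a definitive answer): the weights argument of train() is for case (row) weights, not neighbour weights. To weight neighbours by distance you can use caret's "kknn" method, whose kernel tuning parameter controls the weighting scheme; the grid below reuses the training data and fitControl from above and is only illustrative.
library(kknn)   # weighted k-nearest-neighbour backend used by method = "kknn"
kknn_grid <- expand.grid(kmax = 3, distance = 2,
                         kernel = c("rectangular", "triangular", "optimal"))
set.seed(825)
kknn_Fit <- train(Type ~ ., data = training,
                  method = "kknn",
                  tuneGrid = kknn_grid,
                  trControl = fitControl)
kknn_Fit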

R Caret: seeds and createMultiFolds

I want to make my code reproducible and use the seeds argument as well as createMultiFolds within a loop.
I set up this code:
cv_model <- function(dat, targets){
  library(randomForest)
  library(caret)
  library(MLmetrics)
  library(Metrics)
  results <<- list(weight = NA, vari = NA)
  # set up error measures
  sumfct <- function(data, lev = NULL, model = NULL){
    mape <- MLmetrics::MAPE(y_pred = data$pred, y_true = data$obs)
    RMSE <- sqrt(mean((data$pred - data$obs)^2, na.rm = TRUE))
    c(MAPE = mape, RMSE = RMSE)
  }
  for (i in 1:length(targets)) {
    set.seed(43)
    folds <- caret::createMultiFolds(y = dat$weight,
                                     k = 3,
                                     times = 3)
    set.seed(43)
    myseeds <- vector(mode = "list", length = 3*3+1)
    for (i in 1:9) {
      myseeds[[i]] <- sample.int(n=1000, 1)
    }
    # for the final model
    myseeds[[10]] <- sample.int(n=1000, 1)
    # specify trainControl
    control <- caret::trainControl(method = "repeatedcv", number = 3, repeats = 3,
                                   search = "grid", savePred = TRUE,
                                   summaryFunction = sumfct, index = folds,
                                   seeds = myseeds)
    # fixed mtry
    params <- data.frame(mtry = 2)
    # choose predictor columns by excluding target columns
    preds <- dat[, -c(which(names(dat) == "Time"),
                      which(names(dat) == "Chick"),
                      which(names(dat) == "Diet"))]
    # set target variables
    response <- dat[, which(names(dat) == targets[i])]
    set.seed(42)
    model <- caret::train(x = preds,
                          y = response,
                          data = dat,
                          method = "rf",
                          ntree = 25,
                          metric = "RMSE",
                          tuneGrid = params,
                          trControl = control)
    results[[i]] <<- model
  }
}
targets <- c("weight", "vari")
dat <- as.data.frame(ChickWeight)
# generate random numbers
set.seed(1)
dat$vari <- c(runif(nrow(dat)))
## use 2 of the cores
library(doParallel)
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
# use function
cv_model(dat = dat, targets = targets)
# end parallel computing
stopCluster(cl)
# unregister doParallel by registering DoSeq (do sequential)
registerDoSEQ()
Running the code produces the error message Error: Please make sure 'y' is a factor or numeric value.
If you delete the following lines
set.seed(43)
myseeds <- vector(mode = "list", length = 3*3+1)
for (i in 1:9) {
myseeds[[i]] <- sample.int(n=1000, 1)
}
# for the final model
myseeds[[10]] <- sample.int(n=1000, 1)
and also remove seeds = myseeds from trainControl, then the code runs without an error message.
How can I fix the error and at the same time provide seeds and createMultiFolds within the code?
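A possible explanation (treat this as a hypothesis rather than a confirmed diagnosis): the inner seed loop reuses the counter i from the outer loop over targets, so after it runs i is 9, targets[i] is NA, and response <- dat[, which(names(dat) == targets[i])] selects zero columns, which would trigger the "'y' is a factor or numeric value" error; deleting the seed block also deletes that inner loop, which is consistent with the code then running. Renaming the inner counter should let you keep both the seeds and createMultiFolds:
# inside cv_model(), use a different counter for the seed list
set.seed(43)
myseeds <- vector(mode = "list", length = 3 * 3 + 1)
for (j in 1:9) {
  myseeds[[j]] <- sample.int(n = 1000, 1)
}
# for the final model
myseeds[[10]] <- sample.int(n = 1000, 1)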

Caret: Unable to adjust performance metric when using rfe function

I'm trying to perform recursive feature elimination using the rfe function but I'm having a bit of trouble trying to change the performance measure to output the ROC:
newFunc <- caretFuncs
newFunc$summary <- twoClassSummary
ctrl <- rfeControl(functions = newFunc,
                   method = 'cv',
                   returnResamp = TRUE,
                   number = 2,
                   verbose = TRUE)
profiler <- rfe(predictors, response,
                sizes = c(1),
                method = 'nnet',
                tuneGrid = expand.grid(size = c(4), decay = c(0.1)),
                maxit = 20,
                metric = 'ROC',
                rfeControl = ctrl)
Trying to run this code is giving me the following error:
Error in { : task 1 failed - "undefined columns selected"
If I remove the custom newFunc, set the functions parameter inside rfeControl to use caretFuncs and remove the metric parameter from rfe, the model works fine. This makes me think there's something wrong with the summary.
caretFuncs$summary:
function (data, lev = NULL, model = NULL)
{
    if (is.character(data$obs))
        data$obs <- factor(data$obs, levels = lev)
    postResample(data[, "pred"], data[, "obs"])
}
twoClassSummary:
function (data, lev = NULL, model = NULL)
{
    lvls <- levels(data$obs)
    if (length(lvls) > 2)
        stop(paste("Your outcome has", length(lvls),
                   "levels. The twoClassSummary() function isn't appropriate."))
    requireNamespaceQuietStop("ModelMetrics")
    if (!all(levels(data[, "pred"]) == lvls))
        stop("levels of observed and predicted data do not match")
    data$y = as.numeric(data$obs == lvls[2])
    rocAUC <- ModelMetrics::auc(ifelse(data$obs == lev[2], 0, 1), data[, lvls[1]])
    out <- c(rocAUC,
             sensitivity(data[, "pred"], data[, "obs"], lev[1]),
             specificity(data[, "pred"], data[, "obs"], lev[2]))
    names(out) <- c("ROC", "Sens", "Spec")
    out
}
The outputs of postResample and twoClassSummary have identical structures, so I'm a little lost as to what the problem is. Am I doing something inherently wrong here, or is this a bug that I need to flag to the devs?
I'm actually interested in obtaining the logLoss so I could write my own function:
logLoss = function(data, lev = NULL, model = NULL) {
-1*mean(log(data[, 'pred'][model.matrix(~ as.numeric(data[, 'obs'], levels = lev) + 0) - data[, 'pred'] > 0]))
}
But I'm a little unsure: how do I convert the levels of my [yes, no] factor into the correct [0, 1] values?
First of all, here is a viable log-loss function for use with caret:
LogLoss <- function (data, lev = NULL, model = NULL)
{
  obs <- data[, "obs"]
  cls <- levels(obs)                                        # find class names
  probs <- data[, cls[2]]                                   # use second class name
  probs <- pmax(pmin(as.numeric(probs), 1 - 1e-15), 1e-15)  # bound probability
  logPreds <- log(probs)
  log1Preds <- log(1 - probs)
  real <- (as.numeric(data$obs) - 1)
  out <- c(mean(real * logPreds + (1 - real) * log1Preds)) * -1
  names(out) <- c("LogLoss")
  out
}
To answer the question of how to convert a [yes, no] factor into the correct [0, 1] values:
real <- (as.numeric(data$obs) - 1)
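For example, with a [no, yes] factor where "no" is the first level:
f <- factor(c("no", "yes", "no"), levels = c("no", "yes"))
as.numeric(f) - 1   # 0 1 0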
To get rfe to work, you can use rfFuncs instead of caretFuncs. Example:
library(caret)
library(mlbench)   # provides the Sonar data
data(Sonar)

rfFuncs$summary <- twoClassSummary
ctrl <- rfeControl(functions = rfFuncs,
                   method = 'cv',
                   returnResamp = TRUE,
                   number = 2,
                   verbose = TRUE)
profiler <- rfe(Sonar[, 1:60], Sonar$Class,
                sizes = c(1, 5, 20, 40, 60),
                method = 'nnet',
                tuneGrid = expand.grid(size = c(4), decay = c(0.1)),
                maxit = 20,
                metric = 'ROC',
                rfeControl = ctrl)
profiler$results
Variables ROC Sens Spec ROCSD SensSD SpecSD
1 1 0.6460027 0.6387987 0.5155187 0.08735968 0.132008571 0.007516016
2 5 0.7563971 0.6847403 0.7013180 0.03751483 0.008724045 0.039383924
3 20 0.8633511 0.8462662 0.7017432 0.08460677 0.091143309 0.097708207
4 40 0.8841540 0.8642857 0.7429847 0.08096697 0.090913729 0.098309489
5 60 0.8945351 0.9004870 0.7431973 0.05707867 0.064971175 0.127471631
or with the LogLoss function I provided:
rfFuncs$summary <- LogLoss
ctrl <- rfeControl(functions = rfFuncs,
                   method = 'cv',
                   returnResamp = TRUE,
                   number = 2,
                   verbose = TRUE)
profiler <- rfe(Sonar[, 1:60], Sonar$Class,
                sizes = c(1, 5, 20, 40, 60),
                method = 'nnet',
                tuneGrid = expand.grid(size = c(4), decay = c(0.1)),
                maxit = 20,
                metric = 'LogLoss',
                rfeControl = ctrl,
                maximize = FALSE)  # this was edited after the answer of Дмитрий Пасько
profiler$results
Variables LogLoss LogLossSD
1 1 1.8237372 1.030120134
2 5 0.5548774 0.128704686
3 20 0.4226522 0.021547998
4 40 0.4167819 0.013587892
5 60 0.4328718 0.008000892
EDIT: Дмитрий Пасько raises a valid concern in his answer: LogLoss should be minimized. One way to achieve this is to provide the logical argument maximize, which tells caret whether the metric should be maximized or minimized.
But you should minimize logLoss, so use this code instead (an example with logistic regression: https://www.kaggle.com/demetrypascal/rfe-logreg-with-pca-and-feature-importance):
LogLoss <- function (data, lev = NULL, model = NULL)
{
  obs <- data[, "obs"]
  cls <- levels(obs)                                        # find class names
  probs <- data[, cls[2]]                                   # use second class name
  probs <- pmax(pmin(as.numeric(probs), 1 - 1e-15), 1e-15)  # bound probability
  logPreds <- log(probs)
  log1Preds <- log(1 - probs)
  real <- (as.numeric(data$obs) - 1)
  out <- c(mean(real * logPreds + (1 - real) * log1Preds)) * -1
  names(out) <- c("LogLossNegative")
  -out
}
lrFuncs$summary <- LogLoss
rfec <- rfeControl(method = "cv",
                   number = 2,
                   functions = lrFuncs)
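A usage sketch for that control object (assuming the Sonar data loaded above; the metric name matches the "LogLossNegative" value returned by this summary, and because it is the negative log loss the default maximization already does the right thing):
profiler_lr <- rfe(Sonar[, 1:60], Sonar$Class,
                   sizes = c(1, 5, 20, 40, 60),
                   metric = "LogLossNegative",
                   rfeControl = rfec)
profiler_lr$results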

Error with prediction - ROCR package (using probabilities)

I have used "rfe" function with svm to create a model with reduced features. Then I use "predict" on test data which outputs class labels (binary), 0 class probabilities, 1 class probabilities. I then tried using prediction function, in ROCR package, on predicted probabilities and true class labels but get the following error and am not sure why as the lengths of the 2 arrays are equal:
> pred_svm <- prediction(pred_svm_2class[,2], as.numeric(as.character(y)))
Error in prediction(pred_svm_2class[, 2], as.numeric(as.character(y))) :
Number of predictions in each run must be equal to the number of labels for each run.
I have the code below; the input is the svm_2labels.csv file read in at the top. It is a small dataset with binary classification, so the code runs fast.
library("caret")
library("ROCR")
sensor6data_2class <- read.csv("/home/sensei/clustering/svm_2labels.csv")
sensor6data_2class <- within(sensor6data_2class, Class <- as.factor(Class))
set.seed("1298356")
inTrain_svm_2class <- createDataPartition(y = sensor6data_2class$Class, p = .75, list = FALSE)
training_svm_2class <- sensor6data_2class[inTrain_svm_2class,]
testing_svm_2class <- sensor6data_2class[-inTrain_svm_2class,]
trainX <- training_svm_2class[,1:20]
y <- training_svm_2class[,21]
ctrl_svm_2class <- rfeControl(functions = rfFuncs , method = "repeatedcv", number = 5, repeats = 2, allowParallel = TRUE)
model_train_svm_2class <- rfe(x = trainX, y = y, data = training_svm_2class, sizes = c(1:20), metric = "Accuracy", rfeControl = ctrl_svm_2class, method="svmRadial")
pred_svm_2class = predict(model_train_svm_2class, newdata=testing_svm_2class)
pred_svm <- prediction(pred_svm_2class[,2], y)
Thanks and appreciate your help.
This is because, in the line
pred_svm <- prediction(pred_svm_2class[,2], y)
pred_svm_2class[,2] contains the predictions on the test data while y contains the labels for the training data. Just generate the labels for the test set in a separate variable, like this:
y_test <- testing_svm_2class[,21]
And now if you do
pred_svm <- prediction(pred_svm_2class[,2], y_test)
There will be no error. The full code is below:
# install.packages("caret")
# install.packages("ROCR")
# install.packages("e1071")
# install.packages("randomForest")
library("caret")
library("ROCR")
sensor6data_2class <- read.csv("svm_2labels.csv")
sensor6data_2class <- within(sensor6data_2class, Class <- as.factor(Class))
set.seed("1298356")
inTrain_svm_2class <- createDataPartition(y = sensor6data_2class$Class, p = .75, list = FALSE)
training_svm_2class <- sensor6data_2class[inTrain_svm_2class,]
testing_svm_2class <- sensor6data_2class[-inTrain_svm_2class,]
trainX <- training_svm_2class[,1:20]
y <- training_svm_2class[,21]
y_test <- testing_svm_2class[,21]
ctrl_svm_2class <- rfeControl(functions = rfFuncs , method = "repeatedcv", number = 5, repeats = 2, allowParallel = TRUE)
model_train_svm_2class <- rfe(x = trainX, y = y, data = training_svm_2class, sizes = c(1:20), metric = "Accuracy", rfeControl = ctrl_svm_2class, method="svmRadial")
pred_svm_2class = predict(model_train_svm_2class, newdata=testing_svm_2class)
pred_svm <- prediction(pred_svm_2class[,2], y_test)
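From there, the ROC curve and AUC follow with ROCR's performance() (a short usage sketch based on the objects above; note that pred_svm_2class[,2] holds the probabilities of one specific class, so make sure it is the class you want to treat as positive):
perf_roc <- performance(pred_svm, measure = "tpr", x.measure = "fpr")
plot(perf_roc)                                           # ROC curve
auc_val <- performance(pred_svm, measure = "auc")@y.values[[1]]
auc_val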

Error in confusionMatrix.rfe: resampled confusion matrices are not availible

I ran into the error "resampled confusion matrices are not available" when trying to extract a confusion matrix from an rfe object. Is the confusionMatrix.rfe function of the caret package not working, or am I missing something here?
Below is an example using simulated data from
http://topepo.github.io/caret/rfe.html
Documentation on function confusionMatrix.rfe is here
http://www.inside-r.org/packages/cran/caret/docs/confusionMatrix.train
library(caret)
library(mlbench)
library(Hmisc)
library(randomForest)
n <- 100
p <- 40
sigma <- 1
set.seed(1)
sim <- mlbench.friedman1(n, sd = sigma)
colnames(sim$x) <- c(paste("real", 1:5, sep = ""),
                     paste("bogus", 1:5, sep = ""))
bogus <- matrix(rnorm(n * p), nrow = n)
colnames(bogus) <- paste("bogus", 5+(1:ncol(bogus)), sep = "")
x <- cbind(sim$x, bogus)
y <- sim$y
normalization <- preProcess(x)
x <- predict(normalization, x)
x <- as.data.frame(x)
subsets <- c(1:5, 10, 15, 20, 25)
set.seed(10)
ctrl <- rfeControl(functions = lmFuncs,
                   method = "repeatedcv",
                   repeats = 5,
                   verbose = FALSE)
lmProfile <- rfe(x, y,
                 sizes = subsets,
                 rfeControl = ctrl)
lmProfile
confusionMatrix(lmProfile)
Error in confusionMatrix.rfe(lmProfile) :
  resampled confusion matrices are not availible
Thanks!
mlbench.friedman1 is a regression problem, not a classification problem. If you check the data, you can see that your Y variable is continuous. confusionMatrix has no use in this case.
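For a regression profile like this one, the resampled performance lives in the results element instead (a short sketch using the lmProfile object above):
lmProfile$results[, c("Variables", "RMSE", "Rsquared")]
# confusionMatrix() on an rfe object only applies when the outcome is a factor (classification)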
