I ran into the error "resampled confusion matrices are not available" when trying to extract a confusion matrix from an rfe object. Is the confusionMatrix.rfe function of the caret package not working, or am I missing something here?
Below is an example using simulated data from
http://topepo.github.io/caret/rfe.html
Documentation for the confusionMatrix.rfe function is here:
http://www.inside-r.org/packages/cran/caret/docs/confusionMatrix.train
library(caret)
library(mlbench)
library(Hmisc)
library(randomForest)
n <- 100
p <- 40
sigma <- 1
set.seed(1)
sim <- mlbench.friedman1(n, sd = sigma)
colnames(sim$x) <- c(paste("real", 1:5, sep = ""),
                     paste("bogus", 1:5, sep = ""))
bogus <- matrix(rnorm(n * p), nrow = n)
colnames(bogus) <- paste("bogus", 5+(1:ncol(bogus)), sep = "")
x <- cbind(sim$x, bogus)
y <- sim$y
normalization <- preProcess(x)
x <- predict(normalization, x)
x <- as.data.frame(x)
subsets <- c(1:5, 10, 15, 20, 25)
set.seed(10)
ctrl <- rfeControl(functions = lmFuncs,
                   method = "repeatedcv",
                   repeats = 5,
                   verbose = FALSE)
lmProfile <- rfe(x, y,
                 sizes = subsets,
                 rfeControl = ctrl)
lmProfile
confusionMatrix(lmProfile)
**Error in confusionMatrix.rfe(lmProfile) :
resampled confusion matrices are not available**
Thanks!
mlbench.friedman1 is a regression problem, not a classification problem. If you check the data, you can see that your y variable is continuous. confusionMatrix is of no use in this case.
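A quick check on the outcome from the code above confirms it (a minimal sketch):
str(y)        # num [1:100] ... : a continuous response, i.e. regression
is.factor(y)  # FALSE; confusionMatrix() needs a factor (class) outcome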
I have written cross-validation/grid-search style code in R that tries to find an optimal threshold value for a given value of mtry (using the random forest algorithm). I have posted my code below, using the Sonar data from the mlbench library. However, there seem to be some problems with this code.
library(caret)
library(mlbench)
library(randomForest)
res <- matrix(0, nrow = 10, ncol = 6)
colnames(res) <- c("mtry", "Threshold", "Accuracy", "PositivePred", "NegativePred", "F-value")
out <- matrix(0, nrow = 17, ncol = 6)
colnames(out) <- c("mtry", "Threshold", "Avg.Accuracy", "Avg.PosPred", "Avg.NegPred", "Avg.F_Value")
rep <- matrix(0, nrow = 10, ncol = 6)
colnames(rep) <- c("mtry", "Threshold", "Avg_Accuracy", "Avg_PosPred", "Avg_NegPred", "Avg_F_Value")
data(Sonar)
N=Sonar
### creating 10 folds
folds <- cut(seq(1,nrow(N)),breaks=10,labels=FALSE)
for (mtry in 5:14) {
  K = mtry - 4
  for (thresh in seq(1, 9, 0.5)) {
    J = 2*thresh - 1
    dataset <- N[sample(nrow(N)), ]  #### mix up the dataset N
    for (I in 1:10) {
      # Segment your data by fold using the which() function
      testIndexes <- which(folds == I, arr.ind = TRUE)
      N_test <- dataset[testIndexes, ]    ### select each fold for test
      N_train <- dataset[-testIndexes, ]  ### select rest for training
      rf = randomForest(Class ~ ., data = N_train, mtry = mtry, ntree = 500)
      pred = predict(rf, N_test, type = "prob")
      label = as.factor(ifelse(pred[,2] >= thresh, "M", "R"))
      confusion = confusionMatrix(N_test$Class, label)
      res[I,1] = mtry
      res[I,2] = thresh
      res[I,3] = confusion$overall[1]
      res[I,4] = confusion$byClass[3]
      res[I,5] = confusion$byClass[4]
      res[I,6] = confusion$byClass[7]
    }
    print(res)
    out[J,1] = mtry
    out[J,2] = thresh
    out[J,3] = mean(res[,2])
    out[J,4] = mean(res[,3])
    out[J,5] = mean(res[,4])
    out[J,6] = mean(res[,5])
  }
  print(out)
  rep[K,1] = mtry
  rep[K,2] = thresh
  rep[K,3] = mean(out[,2])
  rep[K,4] = mean(out[,3])
  rep[K,5] = mean(out[,4])
  rep[K,6] = mean(out[,5])
}
print(rep)
Earlier, I wrote similar code with the "iris" dataset, and I did not seem to have any problems:
library(caret)
library(randomForest)
data(iris)
N <- iris
N$Species = ifelse(N$Species == "setosa", "a", "b")
N$Species = as.factor(N$Species)
res <- matrix(0, nrow = 10, ncol = 5)
colnames(res) <- c("Threshhold","Accuracy", "PositivePred", "NegativePred", "F-value")
out <- matrix(0, nrow = 9, ncol = 5)
colnames(out) <- c("Threshhold","Avg.Accuracy", "Avg.PosPred", "Avg.NegPred", "Avg.F_Value")
### creating 10 folds
folds <- cut(seq(1,nrow(N)),breaks=10,labels=FALSE)
for (J in 1:9) {
  thresh = J/10
  dataset <- N[sample(nrow(N)), ]  #### mix up the dataset N
  for (I in 1:10) {
    # Segment your data by fold using the which() function
    testIndexes <- which(folds == I, arr.ind = TRUE)
    N_test <- dataset[testIndexes, ]    ### select each fold for test
    N_train <- dataset[-testIndexes, ]  ### select rest for training
    rf = randomForest(Species ~ ., data = N_train, mtry = 3, ntree = 10)
    pred = predict(rf, N_test, type = "prob")
    label = as.factor(ifelse(pred[,1] >= thresh, "a", "b"))
    confusion = confusionMatrix(N_test$Species, label)
    res[I,1] = thresh
    res[I,2] = confusion$overall[1]
    res[I,3] = confusion$byClass[3]
    res[I,4] = confusion$byClass[4]
    res[I,5] = confusion$byClass[7]
  }
  print(res)
  out[J,1] = thresh
  out[J,2] = mean(res[,2])
  out[J,3] = mean(res[,3])
  out[J,4] = mean(res[,4])
  out[J,5] = mean(res[,5])
}
print(out)
Could someone please assist me in debugging the first code?
Thanks
You are missing a closing parenthesis ) in your for loop.
Replace this
for(thresh in seq(1,9,0.5) {
with
for(thresh in seq(1,9,0.5)) {
Update:
Also, your thresh is always at least 1, so label collapses to the single value R, because pred[,2] is a probability and is never above thresh:
label = as.factor(ifelse(pred[,2]>=thresh,"M","R"))
and that creates a problem in the next statement
confusion = confusionMatrix(N_test$Class, label)
I tested with 0.5, and I get no error.
label = as.factor(ifelse(pred[,2]>=0.5,"M","R"))
If you define a better thresh, one that stays between 0 and 1, you should be fine.
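For example, a minimal sketch of a corrected loop header (the particular seq() values are just an assumption; any grid inside (0, 1) works):
thresholds <- seq(0.1, 0.9, 0.05)  # 17 probability thresholds, matching nrow(out)
for (J in seq_along(thresholds)) {
  thresh <- thresholds[J]
  # ... inner cross-validation loop as before ...
}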
I have the following code to choose a value of lambda based on the lowest resulting mean squared error (MSE) after iterated cross-validation.
library(glmnet)
set.seed(3)
IV1 <- data.frame(IV1 = rnorm(100))
IV2 <- data.frame(IV2 = rnorm(100))
IV3 <- data.frame(IV3 = rnorm(100))
IV4 <- data.frame(IV4 = rnorm(100))
IV5 <- data.frame(IV5 = rnorm(100))
DV <- data.frame(DV = rnorm(100))
data <- data.frame(IV1,IV2,IV3,IV4,IV5,DV)
x <- model.matrix(DV~.-IV5 , data)[ , -1]
y <- data$DV
AB <- glmnet(x=x, y=y, alpha=1)
plot(AB,xvar="lambda")
lambdas <- NULL
for (i in 1:100) {
  fit <- cv.glmnet(x, y)
  errors <- data.frame(fit$lambda, fit$cvm)
  lambdas <- rbind(lambdas, errors)
}
lambdas <- aggregate(lambdas[ , 2], list(lambdas$fit.lambda), mean)
bestindex <- which(lambdas[2]== min(lambdas[2]))
bestlambda <- lambdas[bestindex,1]
How would I modify this to select lambda.1se (i.e., the largest λ at which the MSE is within one standard error of the minimal MSE)?
Edit:
How about this?
library(plotrix)  # std.error() comes from the plotrix package
lambdas <- NULL   # initialize
r2 <- NULL        # initialize
n.fits <- 100
for (i in 1:n.fits) {
  fit <- cv.glmnet(x, y)
  errors <- data.frame(fit$lambda, fit$cvm)
  lambdas <- rbind(lambdas, errors)
  r2[i] <- max(1 - fit$cvm/var(y))
}
# take the mean cvm for each lambda
lambdas <- aggregate(lambdas[, 2], list(lambdas$fit.lambda), mean)
lambdas <- as.data.frame(lambdas)
# find the subset with mean cvm within 1 se of the minimum
onese <- std.error(lambdas$x)
min <- min(lambdas$x)
low <- min - onese
high <- min + onese
lambdas <- subset(lambdas, x > low)
lambdas <- subset(lambdas, x < high)
# choose the highest lambda among those
bestindex <- which(lambdas[1] == max(lambdas[1]))
bestlambda <- lambdas[bestindex, 1]
If you decide to use cv.glmnet, the following might be what you are looking for. (P.S. I also cleaned up your simulation code a bit; note that I didn't make use of the AB object from glmnet, which is obviously not the same as cv.glmnet.)
library(glmnet)
## Simulate data:
set.seed(3)
x <- data.frame(
  IV1 = rnorm(100),
  IV2 = rnorm(100),
  IV3 = rnorm(100),
  IV4 = rnorm(100),
  IV5 = rnorm(100)
)
x <- as.matrix(x)
y <- rnorm(100) #target or response
## Iteratively fit models
lambdas <- NULL #initialize
n.fits <- 100
for (i in 1:n.fits) {
  fit <- cv.glmnet(x, y, family = "gaussian")
  df <- data.frame(fit$lambda.1se, mean(fit$cvm))  # can use median for cvm also
  lambdas <- rbind(lambdas, df)
}
## Select best lambda:
bestindex <- which.min(lambdas[ , 2]) #the way you had it was way too complicated
bestlambda <- lambdas[bestindex, 1]
bestlambda
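If you then want the coefficients at the selected value, a short usage sketch (this refit is an addition, not part of the question): glmnet's coef() accepts any lambda via its s argument.
final <- glmnet(x, y, family = "gaussian")
coef(final, s = bestlambda)  # coefficients at the chosen lambda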
Why is result1 different from result2? Intuitively, I would think that truc$results$RMSE is the root mean squared error of the forecasts, but I guess it is not.
library(caret)
x <- data.frame(x = rnorm(15))
y <- x$x + rnorm(15)
myTimeControl <- trainControl(method = "timeslice",
                              initialWindow = 10,
                              horizon = 1,
                              fixedWindow = FALSE,
                              savePredictions = TRUE)
truc <- train(x, y,
              method = "lm",
              metric = "RMSE",
              trControl = myTimeControl,
              preProc = c("center", "scale"))
result1 <- sqrt(mean((truc$pred$pred-truc$pred$obs)^2))
result2 <- truc$results$RMSE
result1
result2
If you invert mean and sqrt, you get the same result... Actually, you made an interesting observation: caret computes RMSE within each resample and then averages those values across resamples. With horizon = 1, each time slice holds exactly one holdout prediction, so every per-resample RMSE reduces to an absolute error, and truc$results$RMSE is the mean absolute error rather than the pooled RMSE in result1:
result1 <- mean(sqrt((truc$pred$pred-truc$pred$obs)^2))
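You can confirm this from the saved predictions; a quick sketch (savePredictions = TRUE keeps a Resample column in truc$pred):
# RMSE within each resample, then averaged across resamples;
# this reproduces truc$results$RMSE
per_fold <- tapply((truc$pred$pred - truc$pred$obs)^2,
                   truc$pred$Resample,
                   function(e2) sqrt(mean(e2)))
mean(per_fold)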
I have used "rfe" function with svm to create a model with reduced features. Then I use "predict" on test data which outputs class labels (binary), 0 class probabilities, 1 class probabilities. I then tried using prediction function, in ROCR package, on predicted probabilities and true class labels but get the following error and am not sure why as the lengths of the 2 arrays are equal:
> pred_svm <- prediction(pred_svm_2class[,2], as.numeric(as.character(y)))
Error in prediction(pred_svm_2class[, 2], as.numeric(as.character(y))) :
Number of predictions in each run must be equal to the number of labels for each run.
I have the code below, and the input is here: click me. It is a small dataset with binary classification, so the code runs fast.
library("caret")
library("ROCR")
sensor6data_2class <- read.csv("/home/sensei/clustering/svm_2labels.csv")
sensor6data_2class <- within(sensor6data_2class, Class <- as.factor(Class))
set.seed("1298356")
inTrain_svm_2class <- createDataPartition(y = sensor6data_2class$Class, p = .75, list = FALSE)
training_svm_2class <- sensor6data_2class[inTrain_svm_2class,]
testing_svm_2class <- sensor6data_2class[-inTrain_svm_2class,]
trainX <- training_svm_2class[,1:20]
y <- training_svm_2class[,21]
ctrl_svm_2class <- rfeControl(functions = rfFuncs,
                              method = "repeatedcv",
                              number = 5,
                              repeats = 2,
                              allowParallel = TRUE)
model_train_svm_2class <- rfe(x = trainX, y = y,
                              data = training_svm_2class,
                              sizes = c(1:20),
                              metric = "Accuracy",
                              rfeControl = ctrl_svm_2class,
                              method = "svmRadial")
pred_svm_2class = predict(model_train_svm_2class, newdata=testing_svm_2class)
pred_svm <- prediction(pred_svm_2class[,2], y)
Thanks, I appreciate your help.
This is because in the line
pred_svm <- prediction(pred_svm_2class[,2], y)
pred_svm_2class[,2] holds the predictions on the test data, while y holds the labels for the training data, so the two vectors have different lengths. Just generate the labels for the test set in a separate variable, like this:
y_test <- testing_svm_2class[,21]
And now if you do
pred_svm <- prediction(pred_svm_2class[,2], y_test)
there will be no error. Full code below:
# install.packages("caret")
# install.packages("ROCR")
# install.packages("e1071")
# install.packages("randomForest")
library("caret")
library("ROCR")
sensor6data_2class <- read.csv("svm_2labels.csv")
sensor6data_2class <- within(sensor6data_2class, Class <- as.factor(Class))
set.seed("1298356")
inTrain_svm_2class <- createDataPartition(y = sensor6data_2class$Class, p = .75, list = FALSE)
training_svm_2class <- sensor6data_2class[inTrain_svm_2class,]
testing_svm_2class <- sensor6data_2class[-inTrain_svm_2class,]
trainX <- training_svm_2class[,1:20]
y <- training_svm_2class[,21]
y_test <- testing_svm_2class[,21]
ctrl_svm_2class <- rfeControl(functions = rfFuncs,
                              method = "repeatedcv",
                              number = 5,
                              repeats = 2,
                              allowParallel = TRUE)
model_train_svm_2class <- rfe(x = trainX, y = y,
                              data = training_svm_2class,
                              sizes = c(1:20),
                              metric = "Accuracy",
                              rfeControl = ctrl_svm_2class,
                              method = "svmRadial")
pred_svm_2class = predict(model_train_svm_2class, newdata=testing_svm_2class)
pred_svm <- prediction(pred_svm_2class[,2], y_test)
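From here the usual ROCR follow-up applies; a brief sketch (assuming, as above, that column 2 of pred_svm_2class holds the positive-class probabilities):
perf_roc <- performance(pred_svm, measure = "tpr", x.measure = "fpr")  # ROC curve
plot(perf_roc)
auc <- performance(pred_svm, measure = "auc")@y.values[[1]]  # scalar AUC
auc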
I am building a toy dataset based on the linear problem on page 5 of this paper, in order to test feature selection using caret's RFE + SVM with an RBF kernel. However, when RFE finishes, I get a warning per bootstrap iteration with the following message: "In data.row.names(row.names, rowsi, i) : some row.names duplicated:" followed by many row numbers, until the output is truncated.
Is this because the bootstrap may be selecting samples with replacement and therefore duplicating rows in the bootstrapped data, or is there something else wrong here? Any advice is appreciated (please forgive the lazy implementation of the artificial dataset).
library(caret)
################
# 1. Building dataset
################
set.seed(1)
n.samples <- 500
y <- round(runif(n = n.samples, min=0, max=1))
data <- matrix(nrow=n.samples, ncol=202)
for (i in 1:n.samples) {
  toss <- runif(n = 1, min = 0, max = 1)
  if (toss <= 0.7) {
    for (j in 1:3) {
      data[i,j] <- y[i]*rnorm(n = 1, mean = i, sd = 1)
    }
    for (j in 4:6) {
      data[i,j] <- rnorm(n = 1, mean = 0, sd = 1)
    }
  } else {
    for (j in 1:3) {
      data[i,j] <- rnorm(n = 1, mean = 0, sd = 1)
    }
    for (j in 4:6) {
      data[i,j] <- y[i]*rnorm(n = 1, mean = i - 3, sd = 1)
    }
  }
  for (j in 7:202) {
    data[i,j] <- rnorm(n = 1, mean = 0, sd = 20)
  }
}
colnames(data) <- c(paste("s", 1:6, sep = ""), paste('ns', 7:202, sep=''))
rownames(data) <- paste('sample', 1:n.samples, sep='')
################
# 2. Perform SVM - RFE
################
set.seed(1)
rfe.control.settings <- rfeControl(functions = caretFuncs,
                                   method = 'boot',
                                   number = 30,
                                   verbose = TRUE)
svm.fit <- rfe(x = data,
               y = y,
               sizes = c(1, 2, 3, 4),
               rfeControl = rfe.control.settings,
               method = 'svmRadial')  # passing options to train / caretFuncs
I was facing the same problem, and what fixed it for me was changing the data from a matrix to a data.frame.
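A minimal sketch of that change applied to the code above (only the conversion is new; everything else stays the same):
data <- as.data.frame(data)  # a data.frame input avoids the duplicated row.names warning
svm.fit <- rfe(x = data,
               y = y,
               sizes = c(1, 2, 3, 4),
               rfeControl = rfe.control.settings,
               method = 'svmRadial')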