Custom XGBoost hyperparameter tuning in R

I use the following code, adapted from an example found online, to tune parameters for my XGBoost implementation:
searchGridSubCol <- expand.grid(subsample = c(0.5, 0.75, 1),
                                colsample_bytree = c(0.6, 0.8, 1))
ntrees <- 100

# Build a xgb.DMatrix object
# DMMatrixTrain <- xgb.DMatrix(data = yourMatrix, label = yourTarget)

rmseErrorsHyperparameters <- apply(searchGridSubCol, 1, function(parameterList) {
  # Extract parameters to test
  currentSubsampleRate <- parameterList[["subsample"]]
  currentColsampleRate <- parameterList[["colsample_bytree"]]
  xgboostModelCV <- xgb.cv(data = as.matrix(train), nrounds = ntrees, nfold = 5,
                           showsd = TRUE, label = traintarget,
                           metrics = "rmse", verbose = TRUE, "eval_metric" = "rmse",
                           "objective" = "reg:linear", "max.depth" = 15, "eta" = 2/ntrees,
                           "subsample" = currentSubsampleRate,
                           "colsample_bytree" = currentColsampleRate)
  xvalidationScores <- as.data.frame(xgboostModelCV)
  # Save the RMSE of the last iteration
  rmse <- tail(xvalidationScores$test.rmse.mean, 1)
  return(c(rmse, currentSubsampleRate, currentColsampleRate))
})
However, I receive the following error when storing xgboostModelCV:
Error in as.data.frame.default(xgboostModelCV) :
cannot coerce class ""xgb.cv.synchronous"" to a data.frame
Can someone explain to me what is causing this error and how may I fix it?

The above should be fixed by the following: xgb.cv returns a list of class xgb.cv.synchronous rather than something coercible to a data frame, and the per-iteration metrics live in its evaluation_log element.
xvalidationScores <- xgboostModelCV
# Save the RMSE of the last iteration
rmse <- tail(xvalidationScores$evaluation_log$test_rmse_mean, 1)
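With that change, each apply() call returns c(rmse, subsample, colsample_bytree), so the loop yields a 3-row matrix. A minimal sketch of inspecting it afterwards (assuming the grid search above ran to completion):
# Transpose the 3 x n matrix from apply() into one row per grid point
results <- as.data.frame(t(rmseErrorsHyperparameters))
names(results) <- c("rmse", "subsample", "colsample_bytree")
results[which.min(results$rmse), ]  # best hyperparameter combination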

Related

CARET xgbtree warning: `ntree_limit` is deprecated, use `iteration_range` instead

cv <- trainControl(
  method = "cv",
  number = 5,
  classProbs = TRUE,
  summaryFunction = prSummary,
  seeds = set.seed(123))

turn_grid_xgb <- expand.grid(
  eta = c(0.1, 0.3, 0.5),
  max_depth = 5,
  min_child_weight = 1,
  subsample = 0.8,
  colsample_bytree = 0.8,
  nrounds = (1:10) * 200,
  gamma = 0)
set.seed(123)
suppressWarnings({
  xgb_1 <- train(label ~ ., data = baked_train,
                 method = "xgbTree",
                 tuneGrid = turn_grid_xgb,
                 trControl = cv,
                 verbose = FALSE,
                 metric = "F")
})
Hi, when I was trying to run the above code, the following warnings were shown in the R console. Does anyone know how to get rid of them? I have tried suppressWarnings() and warning = FALSE in the chunk settings, and they are still there.
Thanks!!
WARNING: amalgamation/../src/c_api/c_api.cc:718: `ntree_limit` is deprecated, use `iteration_range` instead.
[02:15:13] WARNING: amalgamation/../src/c_api/c_api.cc:718: `ntree_limit` is deprecated, use `iteration_range` instead.
[02:15:13] WARNING: amalgamation/../src/c_api/c_api.cc:718: `ntree_limit` is deprecated, use `iteration_range` instead.
To get rid of the xgboost warnings you can set verbosity = 0, which caret::train passes on to the underlying xgboost call:
library(caret)
library(mlbench)
data(Sonar)

cv <- trainControl(
  method = "cv",
  number = 5,
  classProbs = TRUE,
  summaryFunction = prSummary,
  seeds = set.seed(123))

turn_grid_xgb <- expand.grid(
  eta = 0.1,
  max_depth = 5,
  min_child_weight = 1,
  subsample = 0.8,
  colsample_bytree = 0.8,
  nrounds = c(1, 5) * 200,
  gamma = 0)

set.seed(123)
xgb_1 <- train(Class ~ ., data = Sonar,
               method = "xgbTree",
               tuneGrid = turn_grid_xgb,
               trControl = cv,
               verbose = FALSE,
               metric = "F",
               verbosity = 0)

Where does mlr3 save the final model?

Where does mlr3 save the final model after training a learner with learner$train(data)? By "final model", I mean something like the object produced by the following code:
model <- xgboost::xgb.train(data = data_train,
                            max.depth = 8, nthread = 2, nrounds = 15,
                            verbose = 0)
Is there a way to extract this list/object?
task <- TaskRegr$new("data", data, "y")
learner <- lrn("regr.xgboost")
preprocess <- po("scale", param_vals = list(center = TRUE, scale = TRUE))
pp <- preprocess %>>% learner
gg <- GraphLearner$new(pp)
gg$train(task)
In xgboost the 'model' is stored as:
model <- xgboost::xgb.train(data = data_train,
                            max.depth = 8, nthread = 2, nrounds = 15,
                            verbose = 0)
In mlr3, when trained using:
task <- TaskRegr$new("data", data, "y")
learner <- lrn("regr.xgboost")
preprocess <- po("scale", param_vals = list(center = TRUE, scale = TRUE))
pp <- preprocess %>>% learner
gg <- GraphLearner$new(pp)
gg$train(task)
the equivalent of 'model' is stored as:
gg$model$regr.xgboost$model
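The extracted object is an ordinary xgb.Booster, so the standard xgboost helpers apply to it. A minimal sketch (assuming gg was trained as above):
booster <- gg$model$regr.xgboost$model
class(booster)                            # "xgb.Booster"
xgboost::xgb.importance(model = booster)  # the usual feature-importance table
For a plain learner trained without a preprocessing graph, the fitted model is available directly as learner$model after learner$train(task).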

What are the parameters of Bayesian optimization for parameter tuning?

I am using Bayesian optimization to tune the parameters of an SVM for a regression problem. In the following code, what should the value of init_grid_dt = initial_grid be? I have the upper and lower bounds of the sigma and C parameters of the SVM, but I don't know what the initial grid should be.
In one of the examples on the web, they took random-search results as input for the initial grid. The code is as follows:
ctrl <- trainControl(method = "repeatedcv", repeats = 5)

svm_fit_bayes <- function(logC, logSigma) {
  ## Use the same model code but for a single (C, sigma) pair.
  txt <- capture.output(
    mod <- train(y ~ ., data = train_dat,
                 method = "svmRadial",
                 preProc = c("center", "scale"),
                 metric = "RMSE",
                 trControl = ctrl,
                 tuneGrid = data.frame(C = exp(logC), sigma = exp(logSigma)))
  )
  list(Score = -getTrainPerf(mod)[, "TrainRMSE"], Pred = 0)
}

lower_bounds <- c(logC = -5, logSigma = -9)
upper_bounds <- c(logC = 20, logSigma = -0.75)
bounds <- list(logC = c(lower_bounds[1], upper_bounds[1]),
               logSigma = c(lower_bounds[2], upper_bounds[2]))

## Create a grid of values as the input into the BO code
initial_grid <- rand_search$results[, c("C", "sigma", "RMSE")]
initial_grid$C <- log(initial_grid$C)
initial_grid$sigma <- log(initial_grid$sigma)
initial_grid$RMSE <- -initial_grid$RMSE
names(initial_grid) <- c("logC", "logSigma", "Value")

library(rBayesianOptimization)
ba_search <- BayesianOptimization(svm_fit_bayes,
                                  bounds = bounds,
                                  init_grid_dt = initial_grid,
                                  init_points = 0,
                                  n_iter = 30,
                                  acq = "ucb",
                                  kappa = 1,
                                  eps = 0.0,
                                  verbose = TRUE)
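Note that rand_search is not defined in the snippet above; in the web example it is the earlier random-search fit whose resampled results seed the optimizer. A hedged sketch of how such an object might be produced with caret (the formula, seed, and tuneLength here are assumptions, not the original code):
## Hypothetical random-search fit that initial_grid is built from;
## train_dat is the same training data used in svm_fit_bayes above.
rand_ctrl <- trainControl(method = "repeatedcv", repeats = 5, search = "random")
set.seed(45)
rand_search <- train(y ~ ., data = train_dat,
                     method = "svmRadial",
                     preProc = c("center", "scale"),
                     metric = "RMSE",
                     tuneLength = 20,  # 20 random (C, sigma) candidates
                     trControl = rand_ctrl)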

Combining train + test data and running cross validation in R

I have the following R code that runs a simple xgboost model on a set of training and test data with the intention of predicting a binary outcome.
We start by
1) Reading in the relevant libraries.
library(xgboost)
library(readr)
library(caret)
2) Cleaning up the training and test data
train.raw = read.csv("train_data", header = TRUE, sep = ",")
drop = c('column')
train.df = train.raw[, !(names(train.raw) %in% drop)]
train.df[, 'outcome'] = as.factor(train.df[, 'outcome'])

test.raw = read.csv("test_data", header = TRUE, sep = ",")
drop = c('column')
test.df = test.raw[, !(names(test.raw) %in% drop)]
test.df[, 'outcome'] = as.factor(test.df[, 'outcome'])

train.c1 = subset(train.df, outcome == 1)
train.c0 = subset(train.df, outcome == 0)
3) Converting the properly formatted data into xgb.DMatrix objects.
train_xgb = xgb.DMatrix(data.matrix(train.df[, 1:124]), label = train.raw[, "outcome"])
test_xgb = xgb.DMatrix(data.matrix(test.df[, 1:124]))
4) Running the model
model_xgb = xgboost(data = train_xgb, nrounds = 8, max_depth = 5, eta = .1,
                    eval_metric = "logloss", objective = "binary:logistic", verbose = 5)
5) Making predictions
pred_xgb <- predict(model_xgb, newdata = test_xgb)
My question is: How can I adjust this process so that I'm just pulling in / adjusting a single 'training' data set, and getting predictions on the hold-out sets of the cross-validated file?
To run k-fold CV, call xgb.cv with an integer nfold argument; to save the predictions for each resample, add prediction = TRUE. For instance:
xgboostModelCV <- xgb.cv(data = dtrain,
                         nrounds = 1688,
                         nfold = 5,
                         objective = "binary:logistic",
                         eval_metric = "auc",
                         metrics = "auc",
                         verbose = 1,
                         print_every_n = 50,
                         stratified = TRUE,
                         scale_pos_weight = 2,
                         max_depth = 6,
                         eta = 0.01,
                         gamma = 0,
                         colsample_bytree = 1,
                         min_child_weight = 1,
                         subsample = 0.5,
                         prediction = TRUE)
xgboostModelCV$pred   # contains predictions in the same order as in dtrain
xgboostModelCV$folds  # contains the list of row indices for each fold
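To tie this back to the question's single training set, a minimal sketch (assuming train.df and train.raw as prepared above; the column count is the question's):
# Build one DMatrix from the full training frame; xgb.cv makes the
# hold-out splits internally, so no manual train/test split is needed.
dtrain <- xgb.DMatrix(data.matrix(train.df[, 1:124]),
                      label = train.raw[, "outcome"])
# After running xgb.cv(..., prediction = TRUE) as above:
oof_pred <- xgboostModelCV$pred  # out-of-fold prediction for every row of dtrain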
Here's a decent function to pick hyperparameters (assigned to an arbitrary name, tune_xgb, so it can be called):
tune_xgb <- function(train, seed) {  # function name is arbitrary
  require(xgboost)
  ntrees <- 2000
  searchGridSubCol <- expand.grid(subsample = c(0.5, 0.75, 1),
                                  colsample_bytree = c(0.6, 0.8, 1),
                                  gamma = c(0, 1, 2),
                                  eta = c(0.01, 0.03),
                                  max_depth = c(4, 6, 8, 10))
  aucErrorsHyperparameters <- apply(searchGridSubCol, 1, function(parameterList) {
    # Extract parameters to test
    currentSubsampleRate <- parameterList[["subsample"]]
    currentColsampleRate <- parameterList[["colsample_bytree"]]
    currentGamma <- parameterList[["gamma"]]
    currentEta <- parameterList[["eta"]]
    currentMaxDepth <- parameterList[["max_depth"]]
    set.seed(seed)
    xgboostModelCV <- xgb.cv(data = train,
                             nrounds = ntrees,
                             nfold = 5,
                             objective = "binary:logistic",
                             eval_metric = "auc",
                             metrics = "auc",
                             verbose = 1,
                             print_every_n = 50,
                             early_stopping_rounds = 200,
                             stratified = TRUE,
                             scale_pos_weight = sum(all_data_nobad[index_no_bad, 1] == 0) /
                                                sum(all_data_nobad[index_no_bad, 1] == 1),
                             max_depth = currentMaxDepth,
                             eta = currentEta,
                             gamma = currentGamma,
                             colsample_bytree = currentColsampleRate,
                             min_child_weight = 1,
                             subsample = currentSubsampleRate)
    xvalidationScores <- as.data.frame(xgboostModelCV$evaluation_log)
    # Save the AUC of the best iteration
    auc <- xvalidationScores[xvalidationScores$iter == xgboostModelCV$best_iteration, c(1, 4, 5)]
    auc <- cbind(auc, currentSubsampleRate, currentColsampleRate, currentGamma,
                 currentEta, currentMaxDepth)
    names(auc) <- c("iter", "test.auc.mean", "test.auc.std", "subsample",
                    "colsample", "gamma", "eta", "max.depth")
    print(auc)
    return(auc)
  })
  return(aucErrorsHyperparameters)
}
You can change the grid values and the parameters in the grid, as well as the loss/evaluation metric. It is similar to caret's grid search, but caret does not let you define hyperparameters such as alpha, lambda, colsample_bylevel, num_parallel_tree... in the grid search, short of defining a custom model function, which I found cumbersome. Caret has the advantage of automatic preprocessing, automatic up/down-sampling within CV, etc.
Setting the seed outside the xgb.cv call will pick the same CV folds but not the same trees at each round, so you will end up with a different model. Even if you set the seed inside the xgb.cv call there is no guarantee you will end up with the same model, but the chance is much higher (it depends on threads, type of model, etc.; I for one like the uncertainty and found it to have little impact on the result).
You can use xgb.cv and set prediction = TRUE.
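A usage sketch for the function above (tune_xgb is the arbitrary name given to it here; dtrain is an xgb.DMatrix, and the function as written also expects all_data_nobad/index_no_bad in scope for scale_pos_weight):
# apply() returns a list of one-row data frames here, so bind them
results <- tune_xgb(dtrain, seed = 123)
results_df <- do.call(rbind, results)              # one row per grid point
results_df[which.max(results_df$test.auc.mean), ]  # best CV AUC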

Error in running h2o.ensemble

I am getting an error while running h2o.ensemble in R. This is the error output:
[1] "Cross-validating and training base learner 1: h2o.glm.wrapper"
|======================================================================| 100%
[1] "Cross-validating and training base learner 2: h2o.randomForest.1"
|============== | 19%
Got exception 'class java.lang.AssertionError', with msg 'null'
java.lang.AssertionError
at hex.tree.DHistogram.scoreMSE(DHistogram.java:323)
at hex.tree.DTree$DecidedNode$FindSplits.compute2(DTree.java:441)
at hex.tree.DTree$DecidedNode.bestCol(DTree.java:421)
at hex.tree.DTree$DecidedNode.<init>(DTree.java:449)
at hex.tree.SharedTree.makeDecided(SharedTree.java:489)
at hex.tree.SharedTree$ScoreBuildOneTree.onCompletion(SharedTree.java:436)
at jsr166y.CountedCompleter.__tryComplete(CountedCompleter.java:425)
at jsr166y.CountedCompleter.tryComplete(CountedCompleter.java:383)
at water.MRTask.compute2(MRTask.java:683)
at water.H2O$H2OCountedCompleter.compute(H2O.java:1069)
at jsr166y.CountedCompleter.exec(CountedCompleter.java:468)
at jsr166y.ForkJoinTask.doExec(ForkJoinTask.java:263)
at jsr166y.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:974)
at jsr166y.ForkJoinPool.runWorker(ForkJoinPool.java:1477)
at jsr166y.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:104)
Error: 'null'
This is the code that I am using. I am using this script for a regression problem; the "Sales" column is the prediction target, and the rest of the columns are used for training.
response <- "Sales"
predictors <- setdiff(names(train), response)
h2o.glm.1 <- function(..., alpha = 0.0) h2o.glm.wrapper(..., alpha = alpha)
h2o.glm.2 <- function(..., alpha = 0.5) h2o.glm.wrapper(..., alpha = alpha)
h2o.glm.3 <- function(..., alpha = 1.0) h2o.glm.wrapper(..., alpha = alpha)
h2o.randomForest.1 <- function(..., ntrees = 200, nbins = 50, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, nbins = nbins, seed = seed)
h2o.randomForest.2 <- function(..., ntrees = 200, sample_rate = 0.75, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, sample_rate = sample_rate, seed = seed)
h2o.gbm.1 <- function(..., ntrees = 100, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, seed = seed)
h2o.gbm.6 <- function(..., ntrees = 100, col_sample_rate = 0.6, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, col_sample_rate = col_sample_rate, seed = seed)
h2o.gbm.8 <- function(..., ntrees = 100, max_depth = 3, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, max_depth = max_depth, seed = seed)
h2o.deeplearning.1 <- function(..., hidden = c(500,500), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, seed = seed)
h2o.deeplearning.6 <- function(..., hidden = c(50,50), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, seed = seed)
h2o.deeplearning.7 <- function(..., hidden = c(100,100), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, seed = seed)
print("learning starts ")
#### Customized base learner library
learner <- c("h2o.glm.wrapper",
"h2o.randomForest.1", "h2o.randomForest.2",
"h2o.gbm.1", "h2o.gbm.6", "h2o.gbm.8",
"h2o.deeplearning.1", "h2o.deeplearning.6", "h2o.deeplearning.7")
metalearner <- "h2o.glm.wrapper"
#
#Train with new library:
fit <- h2o.ensemble(
x = predictors,
y= response,
training_frame=train,
family = "gaussian",
learner = learner,
metalearner = metalearner,
cvControl = list(V = 5))
All columns of the train data are numeric. I am using R version 3.2.2.
The updated way to do this is:
h2o.init(nthreads = -1, enable_assertions = FALSE)
As suggested by Spencer Aiello, setting the assertions to FALSE in the h2o initialisation might do the trick:
h2o.init(nthreads = -1, assertion = FALSE)
Make sure that you properly shut down/restart h2o before applying the change:
h2o.shutdown()
h2o.init(nthreads = -1, assertion = FALSE)
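Putting the two together with the updated argument name, a minimal sketch:
# Restart the cluster, then initialise with H2O's internal assertions disabled
h2o.shutdown(prompt = FALSE)
h2o.init(nthreads = -1, enable_assertions = FALSE)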
