I have the following code as a simple example.
library(tidymodels)
library(bonsai)
train_folds <- vfold_cv(data = train, strata = target)
train_rec <- recipe(formula = target ~ ., data = train) %>%
update_role(Id, new_role = "ID")
gb_mod <- boost_tree(engine = "lightgbm",
mtry = 11,
mode = "classification",
trees = 100)
gb_workflow <- workflow(preprocessor = train_rec,
spec = gb_mod)
model_fit <- gb_workflow %>% fit_resamples(train_folds,
metrics = metric_set(kap, roc_auc, accuracy))
model_fit %>% collect_metrics()
The kap() function calculates the Kappa metric, which is unweighted by default. To calculate quadratic weighted Kappa you have to pass weighting = "quadratic" as an argument, but metric_set() doesn't seem to accept it. How can I include QWK in the metrics output?
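For reference, kap() does accept the argument when called directly; I just can't see how to pass it through metric_set():
library(yardstick)
data(hpc_cv, package = "modeldata")
# works directly, but there is no obvious place for `weighting` in metric_set()
kap(hpc_cv, truth = obs, estimate = pred, weighting = "quadratic")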
Apologies if this has been answered already but I couldn't find any examples.
You need to make an alternate function (just by wrapping the original):
library(yardstick)
#> For binary classification, the first factor level is assumed to be the event.
#> Use the argument `event_level = "second"` to alter this as needed.
library(dplyr)
#>
#> Attaching package: 'dplyr'
#> The following objects are masked from 'package:stats':
#>
#> filter, lag
#> The following objects are masked from 'package:base':
#>
#> intersect, setdiff, setequal, union
data(hpc_cv, package = "modeldata")
# See the examples in ?metric_set
kap_quad <- function(data, truth, estimate, na_rm = TRUE, ...) {
kap(
data = data,
truth = !! rlang::enquo(truth),
estimate = !! rlang::enquo(estimate),
# set weighting = "quadratic"
weighting = "quadratic",
na_rm = na_rm,
...
)
}
kap_quad <- new_numeric_metric(kap_quad, "maximize")
met <- metric_set(kap_quad)
hpc_cv %>%
met(obs, estimate = pred)
#> # A tibble: 1 × 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 kap multiclass 0.692
# no weighting
hpc_cv %>%
kap(obs, estimate = pred)
#> # A tibble: 1 × 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 kap multiclass 0.508
Created on 2023-01-31 by the reprex package (v2.0.1)
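To get this into your resampling results, you can drop the wrapped metric straight into the original fit_resamples() call. An untested sketch with the objects from your example; depending on your yardstick version, you may need to register the wrapper with new_class_metric() instead of new_numeric_metric() so it can be combined with class/probability metrics like accuracy and roc_auc:
model_fit <- gb_workflow %>%
  fit_resamples(train_folds,
                metrics = metric_set(kap_quad, roc_auc, accuracy))
model_fit %>% collect_metrics()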
I am trying to add r-squared and the number of observations to my regression tables using gtsummary and multiply imputed data from mice. I can still extract r-squared using glance if I pool the model, but mipo objects are not compatible with tbl_regression since pooling is already part of the tidying process. Any help with this issue is appreciated.
library(tidyverse)
library(gtsummary)
library(mice)
library(broom)
library(broom.helpers)
set.seed(123)
mice::version(pkg = "tidyverse")
#> [1] "tidyverse 1.3.1.9000 /Library/Frameworks/R.framework/Versions/4.1/Resources/library"
mice::version(pkg = "gtsummary")
#> [1] "gtsummary 1.5.2 /Library/Frameworks/R.framework/Versions/4.1/Resources/library"
mice::version(pkg = "mice")
#> [1] "mice 3.14.3 2021-12-06 /Library/Frameworks/R.framework/Versions/4.1/Resources/library"
mice::version(pkg = "broom")
#> [1] "broom 0.7.12.9000 /Library/Frameworks/R.framework/Versions/4.1/Resources/library"
mice::version(pkg = "broom.helpers")
#> [1] "broom.helpers 1.6.0.9000 /Library/Frameworks/R.framework/Versions/4.1/Resources/library"
R.Version()$version.string
#> [1] "R version 4.1.1 (2021-08-10)"
# imputed data
data(nhanes)
imp <- mice(nhanes, m = 3, print = FALSE)
mod <- with(imp, lm(age ~ bmi + chl))
# is it broom ?
broom::glance(mod)
#> Error: No glance method for objects of class mira
broom::glance(pool(mod))
#> nimp nobs r.squared adj.r.squared
#> 1 3 25 0.5245108 0.4799119
# regular tbl_regression
tbl_regression(mod) %>% as_kable() # kable so it prints nicely here
#> pool_and_tidy_mice(): Tidying mice model with
#> `mice::pool(x) %>% mice::tidy(exponentiate = FALSE, conf.int = TRUE, conf.level = 0.95)`
| Characteristic | Beta  | 95% CI       | p-value |
|:---------------|:------|:-------------|:--------|
| bmi            | -0.12 | -0.21, -0.03 | 0.012   |
| chl            | 0.01  | 0.00, 0.02   | 0.008   |
# tbl_regression with add_glance_source_note
tbl_regression(mod) %>% add_glance_source_note(include = c(r.squared, nobs) )
#> pool_and_tidy_mice(): Tidying mice model with
#> `mice::pool(x) %>% mice::tidy(exponentiate = FALSE, conf.int = TRUE, conf.level = 0.95)`
#> Error: No glance method for objects of class mira
# tbl_regression with add_glance_table
tbl_regression(mod) %>% add_glance_table(include = c(r.squared, nobs) )
#> pool_and_tidy_mice(): Tidying mice model with
#> `mice::pool(x) %>% mice::tidy(exponentiate = FALSE, conf.int = TRUE, conf.level = 0.95)`
#> Error: No glance method for objects of class mira
# tbl_regression with pooled data
pool <- mice::pool(mod)
tbl_regression(pool)
#> x Please pass the 'mice' model to `tbl_regression()` before models
#> have been combined with `mice::pool()`. The default tidier, `pool_and_tidy_mice()`, will both pool and tidy the regression model.
#> mice::mice(trial, m = 2) %>%
#> with(lm(age ~ marker + grade)) %>%
#> tbl_regression()
Thank you for the detailed question and reproducible example.
You diagnosed the issue well. tbl_regression() expects the unpooled model, while broom::glance() expects the pooled results. You can get what you need by passing a custom glance() function.
Example below!
library(tidyverse)
library(gtsummary)
library(mice)
#>
#> Attaching package: 'mice'
#> The following object is masked from 'package:stats':
#>
#> filter
#> The following objects are masked from 'package:base':
#>
#> cbind, rbind
set.seed(123)
# imputed data
data(nhanes)
imp <- mice(nhanes, m = 3, print = FALSE)
mod <- with(imp, lm(age ~ bmi + chl))
broom::glance(pool(mod))
#> nimp nobs r.squared adj.r.squared
#> 1 3 25 0.5245108 0.4799119
tbl <-
tbl_regression(mod) %>%
add_glance_source_note(
include = c(r.squared, nobs),
# need to modify the glance function slightly to handle the regression object
glance_fun = function(x) broom::glance(pool(x))
)
#> pool_and_tidy_mice(): Tidying mice model with
#> `mice::pool(x) %>% mice::tidy(exponentiate = FALSE, conf.int = TRUE, conf.level = 0.95)`
Created on 2022-01-29 by the reprex package (v2.0.1)
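The same glance_fun trick should also work with add_glance_table() if you'd rather have the statistics in the table body than in a source note (untested sketch, assuming add_glance_table() takes the same glance_fun argument):
tbl2 <-
  tbl_regression(mod) %>%
  add_glance_table(
    include = c(r.squared, nobs),
    glance_fun = function(x) broom::glance(pool(x))
  )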
I am looking at one of the solutions for the House Prices Kaggle competition. I would like to know how to get the RMSE value from this:
# Subset the train rows and selected features
dt.train <- fulldt %>%
  filter(Set == "Train") %>%
  select("Id", "OverallQual", "TotalArea", "AreaAbvground", "GarageArea",
         "TotalBaths", "YearBuilt", "Neighborhood", "MSSubClass", "FireplaceQu",
         "ExterQual", "KitchenQual", "BsmtQual", "HouseStyle") %>%
  mutate(SalePrice = log(raw.train$SalePrice))
# Same for the test features
dt.test <- fulldt %>%
  filter(Set == "Test") %>%
  select("Id", "OverallQual", "TotalArea", "AreaAbvground", "GarageArea",
         "TotalBaths", "YearBuilt", "Neighborhood", "MSSubClass", "FireplaceQu",
         "ExterQual", "KitchenQual", "BsmtQual", "HouseStyle")
# Random Forest model
fit <- randomForest(SalePrice ~ ., data = dt.train, importance = T)
# Use the model to predict SalePrice values for the test set
pred <- exp(predict(fit, newdata = dt.test))
How do you get the RMSE value from pred?
Let's calculate the RMSE for the training and test rows using the built-in iris data as a minimal example:
library(tibble)
library(randomForest)
#> randomForest 4.6-14
#> Type rfNews() to see new features/changes/bug fixes.
library(yardstick)
#> For binary classification, the first factor level is assumed to be the event.
#> Use the argument `event_level = "second"` to alter this as needed.
train_df <- head(iris, 100)
test_df <- tail(iris, 50)
model <- randomForest(Sepal.Length ~ ., data = train_df, importance = T)
# Test RMSE
tibble(
  truth = test_df$Sepal.Length,
  predicted = predict(model, newdata = test_df)
) %>%
  rmse(truth, predicted)
#> # A tibble: 1 x 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 rmse standard 0.836
# Train RMSE
tibble(
  truth = train_df$Sepal.Length,
  predicted = predict(model, newdata = train_df)
) %>%
  rmse(truth, predicted)
#> # A tibble: 1 x 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 rmse standard 0.265
Created on 2021-12-13 by the reprex package (v2.0.1)
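For your House Prices code specifically: the Kaggle test set has no SalePrice column, so you can't compute RMSE from pred alone; you need rows where the truth is known, such as the training rows or, better, a held-out split of them. A rough sketch using the objects from your question (the model was fit on log(SalePrice), so compare on the same scale):
library(tibble)
library(yardstick)
# RMSE on the log scale, using the training rows since dt.test has no SalePrice
tibble(
  truth = dt.train$SalePrice,
  predicted = predict(fit, newdata = dt.train)
) %>%
  rmse(truth, predicted)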
I am using tidymodels for building a model where false negatives are more costly than false positives. Hence I'd like to use the yardstick::classification_cost metric for hyperparameter tuning, but with a custom classification cost matrix that reflects this fact.
Doing this after fitting a model is simple enough:
library(tidymodels)
# load simulated prediction output
data("two_class_example")
# cost matrix penalizing false negatives
cost_matrix <- tribble(
~truth, ~estimate, ~cost,
"Class1", "Class2", 2,
"Class2", "Class1", 1
)
# use function on simulated prediction output
classification_cost(
data = two_class_example,
truth = truth,
# target class probability
Class1,
# supply the function with the cost matrix
costs = cost_matrix)
#> # A tibble: 1 × 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 classification_cost binary 0.260
Created on 2021-11-01 by the reprex package (v2.0.1)
But using this function during hyperparameter tuning is where I run into problems. The documentation states that to set options, the metric should be wrapped in a custom function. Here's my attempt and the resulting error. Note how this wrapper works fine for evaluating a fitted model, but throws an error when I try to use it for tuning:
library(tidymodels)
# load data
data("two_class_example")
data("two_class_dat")
# create custom metric penalizing false negatives
classification_cost_penalized <- function(
data,
truth,
class_proba,
na_rm = TRUE
) {
# cost matrix penalizing false negatives
cost_matrix <- tribble(
~truth, ~estimate, ~cost,
"Class1", "Class2", 2,
"Class2", "Class1", 1
)
classification_cost(
data = data,
truth = !! rlang::enquo(truth),
# supply the function with the class probabilities
!! rlang::enquo(class_proba),
# supply the function with the cost matrix
costs = cost_matrix,
na_rm = na_rm
)
}
# Use `new_prob_metric()` to formalize this new metric function
classification_cost_penalized <- new_prob_metric(classification_cost_penalized, "minimize")
# test if this works on the simulated estimates
two_class_example %>%
classification_cost_penalized(truth = truth, class_prob = Class1)
#> # A tibble: 1 × 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 classification_cost binary 0.260
# test if this works with hyperparameter tuning
# specify a RF model
my_model <-
rand_forest(mtry = tune(),
min_n = tune(),
trees = 500) %>%
set_engine("ranger") %>%
set_mode("classification")
# specify recipe
my_recipe <- recipe(Class ~ A + B, data = two_class_dat)
# bundle to workflow
my_wf <- workflow() %>%
add_model(my_model) %>%
add_recipe(my_recipe)
# start tuning
tuned_rf <- my_wf %>%
# set up tuning grid
tune_grid(
resamples = vfold_cv(two_class_dat,
v = 5),
grid = 5,
metrics = metric_set(classification_cost_penalized))
#> i Creating pre-processing data to finalize unknown parameter: mtry
#> x Fold1: internal: Error: In metric: `classification_cost_penalized`
#> unused argum...
#> x Fold2: internal: Error: In metric: `classification_cost_penalized`
#> unused argum...
#> x Fold3: internal: Error: In metric: `classification_cost_penalized`
#> unused argum...
#> x Fold4: internal: Error: In metric: `classification_cost_penalized`
#> unused argum...
#> x Fold5: internal: Error: In metric: `classification_cost_penalized`
#> unused argum...
#> Warning: All models failed. See the `.notes` column.
Created on 2021-11-01 by the reprex package (v2.0.1)
Unnesting the notes shows that there are unused arguments: "internal: Error: In metric: classification_cost_penalized\nunused arguments (estimator = ~prob_estimator, event_level = ~event_level)". But apparently the yardstick_event_level() function, which is how event_level should be set according to this documentation, does not exist? No function under that name shows up when I search for it.
I don't know how to proceed here.
Thank you for your time.
When you are tweaking an existing yardstick metric, it is much easier to use the metric_tweak() function, which allows you to hard-code certain optional arguments (like costs) while keeping everything else the same. It is sort of like purrr::partial(), but for yardstick metrics.
library(tidymodels)
# load data
data("two_class_example")
data("two_class_dat")
cost_matrix <- tribble(
~truth, ~estimate, ~cost,
"Class1", "Class2", 2,
"Class2", "Class1", 1
)
classification_cost_penalized <- metric_tweak(
.name = "classification_cost_penalized",
.fn = classification_cost,
costs = cost_matrix
)
# test if this works on the simulated estimates
two_class_example %>%
classification_cost_penalized(truth = truth, class_prob = Class1)
#> # A tibble: 1 × 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 classification_cost_penalized binary 0.260
# specify a RF model
my_model <-
rand_forest(
mtry = tune(),
min_n = tune(),
trees = 500
) %>%
set_engine("ranger") %>%
set_mode("classification")
# specify recipe
my_recipe <- recipe(Class ~ A + B, data = two_class_dat)
# bundle to workflow
my_wf <- workflow() %>%
add_model(my_model) %>%
add_recipe(my_recipe)
# start tuning
tuned_rf <- my_wf %>%
tune_grid(
resamples = vfold_cv(two_class_dat, v = 5),
grid = 5,
metrics = metric_set(classification_cost_penalized)
)
#> i Creating pre-processing data to finalize unknown parameter: mtry
collect_metrics(tuned_rf)
#> # A tibble: 5 × 8
#> mtry min_n .metric .estimator mean n std_err .config
#> <int> <int> <chr> <chr> <dbl> <int> <dbl> <chr>
#> 1 1 35 classification_cost… binary 0.407 5 0.0162 Preprocessor1…
#> 2 1 23 classification_cost… binary 0.403 5 0.0146 Preprocessor1…
#> 3 1 10 classification_cost… binary 0.403 5 0.0137 Preprocessor1…
#> 4 2 27 classification_cost… binary 0.396 5 0.0166 Preprocessor1…
#> 5 2 6 classification_cost… binary 0.401 5 0.0161 Preprocessor1…
Created on 2021-11-03 by the reprex package (v2.0.1)
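From there you can pick the settings that minimize the penalized cost with the usual tune helpers; a sketch (standard tune function names, not run on this grid):
show_best(tuned_rf, metric = "classification_cost_penalized", n = 3)
best_params <- select_best(tuned_rf, metric = "classification_cost_penalized")
final_wf <- finalize_workflow(my_wf, best_params)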
Is there an alternative metric or approach you would recommend for a situation in which costs differ? classification_cost doesn't seem to do much to distinguish which model is best, even when extreme differences between false-positive and false-negative costs are provided. An example based on your code above:
library(tidymodels)
# load data
data("two_class_example")
data("two_class_dat")
cost_matrix_1 <- tribble(
~truth, ~estimate, ~cost,
"Class1", "Class2", 10,
"Class2", "Class1", 1
)
cost_matrix_2 <- tribble(
~truth, ~estimate, ~cost,
"Class1", "Class2", 1,
"Class2", "Class1", 10
)
classification_cost_penalized_1 <- metric_tweak(
.name = "classification_cost_penalized_1",
.fn = classification_cost,
costs = cost_matrix_1
)
classification_cost_penalized_2 <- metric_tweak(
.name = "classification_cost_penalized_2",
.fn = classification_cost,
costs = cost_matrix_2
)
# test if this works on the simulated estimates
two_class_example %>%
classification_cost_penalized_1(truth = truth, class_prob = Class1)
#> # A tibble: 1 × 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 classification_cost_penalized binary 0.260
two_class_example %>%
classification_cost_penalized_2(truth = truth, class_prob = Class1)
# specify a RF model
my_model <-
rand_forest(
mtry = tune(),
min_n = tune(),
trees = 500
) %>%
set_engine("ranger") %>%
set_mode("classification")
# specify recipe
my_recipe <- recipe(Class ~ A + B, data = two_class_dat)
# bundle to workflow
my_wf <- workflow() %>%
add_model(my_model) %>%
add_recipe(my_recipe)
# start tuning
tuned_rf <- my_wf %>%
tune_grid(
resamples = vfold_cv(two_class_dat, v = 5),
grid = 50,
metrics = metric_set(classification_cost_penalized_1,classification_cost_penalized_2)
)
#> i Creating pre-processing data to finalize unknown parameter: mtry
# seems to always be increasing or decreasing, with little difference even when the cost differences are large
collect_metrics(tuned_rf) %>%
ggplot(aes(x = min_n, y = mean, color = .metric)) +
geom_line() +
facet_grid(rows = "mtry")
(plot: mean metric values by min_n, faceted by mtry, for both cost matrices)
I'm struggling with how to obtain the AUC from a logistic regression model using tidymodels.
Here's an example using the built-in mpg dataset.
library(tidymodels)
library(tidyverse)
# Use mpg dataset
df <- mpg
# Create an indicator variable for class="suv"
df$is_suv <- as.factor(df$class == "suv")
# Create the split object
df_split <- initial_split(df, prop=1/2)
# Create the training and testing sets
df_train <- training(df_split)
df_test <- testing(df_split)
# Create workflow
rec <-
recipe(is_suv ~ cty + hwy + cyl, data=df_train)
glm_spec <-
logistic_reg() %>%
set_engine(engine = "glm")
glm_wflow <-
workflow() %>%
add_recipe(rec) %>%
add_model(glm_spec)
# Fit the model
model1 <- fit(glm_wflow, df_train)
# Attach predictions to training dataset
training_results <- bind_cols(df_train, predict(model1, df_train))
# Calculate accuracy
accuracy(training_results, truth = is_suv, estimate = .pred_class)
# Calculate AUC??
roc_auc(training_results, truth = is_suv, estimate = .pred_class)
The last line returns this error:
> roc_auc(training_results, truth = is_suv, estimate = .pred_class)
Error in metric_summarizer(metric_nm = "roc_auc", metric_fn = roc_auc_vec, :
formal argument "estimate" matched by multiple actual arguments
Since you are doing binary classification, roc_auc() is expecting a vector of class probabilities corresponding to the "relevant" class, not the predicted class.
You can get this using predict(model1, df_train, type = "prob"). Alternatively, if you are using workflows version 0.2.2 or newer, you can use augment() to get class predictions and probabilities without using bind_cols().
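For completeness, the predict() route would look something like this (a sketch; the probability columns are named after your factor levels, so .pred_FALSE and .pred_TRUE here):
training_results <- bind_cols(
  df_train,
  predict(model1, df_train, type = "prob"),  # adds .pred_FALSE / .pred_TRUE
  predict(model1, df_train)                  # adds .pred_class
)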
library(tidymodels)
library(tidyverse)
# Use mpg dataset
df <- mpg
# Create an indicator variable for class="suv"
df$is_suv <- as.factor(df$class == "suv")
# Create the split object
df_split <- initial_split(df, prop=1/2)
# Create the training and testing sets
df_train <- training(df_split)
df_test <- testing(df_split)
# Create workflow
rec <-
recipe(is_suv ~ cty + hwy + cyl, data=df_train)
glm_spec <-
logistic_reg() %>%
set_engine(engine = "glm")
glm_wflow <-
workflow() %>%
add_recipe(rec) %>%
add_model(glm_spec)
# Fit the model
model1 <- fit(glm_wflow, df_train)
# Attach predictions to training dataset
training_results <- augment(model1, df_train)
# Calculate accuracy
accuracy(training_results, truth = is_suv, estimate = .pred_class)
#> # A tibble: 1 x 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 accuracy binary 0.795
# Calculate AUC
roc_auc(training_results, truth = is_suv, estimate = .pred_FALSE)
#> # A tibble: 1 x 3
#> .metric .estimator .estimate
#> <chr> <chr> <dbl>
#> 1 roc_auc binary 0.879
Created on 2021-04-12 by the reprex package (v1.0.0)
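One more note: yardstick treats the first factor level (FALSE here) as the event by default, which is why .pred_FALSE is passed above. If you'd rather supply the probability of the TRUE class, you should be able to flip that with event_level (untested here):
roc_auc(training_results, truth = is_suv, .pred_TRUE, event_level = "second")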
If you use the ranger() function directly, you can obtain the out-of-bag error from the resulting ranger object.
If instead you proceed by setting up a recipe, model specification/engine, tuning parameters, etc., how can you extract that same error? The tidymodels approach doesn't seem to hold on to that data.
If you want to access the ranger object inside of the parsnip object, it is there as $fit:
library(tidymodels)
data("ad_data", package = "modeldata")
rf_spec <-
rand_forest() %>%
set_engine("ranger", oob.error = TRUE) %>%
set_mode("classification")
rf_fit <- rf_spec %>%
fit(Class ~ ., data = ad_data)
rf_fit
#> parsnip model object
#>
#> Fit time: 158ms
#> Ranger result
#>
#> Call:
#> ranger::ranger(x = maybe_data_frame(x), y = y, oob.error = ~TRUE, num.threads = 1, verbose = FALSE, seed = sample.int(10^5, 1), probability = TRUE)
#>
#> Type: Probability estimation
#> Number of trees: 500
#> Sample size: 333
#> Number of independent variables: 130
#> Mtry: 11
#> Target node size: 10
#> Variable importance mode: none
#> Splitrule: gini
#> OOB prediction error (Brier s.): 0.1340793
class(rf_fit)
#> [1] "_ranger" "model_fit"
class(rf_fit$fit)
#> [1] "ranger"
rf_fit$fit$prediction.error
#> [1] 0.1340793
Created on 2021-03-11 by the reprex package (v1.0.0)
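If the model is fitted inside a workflow rather than a bare parsnip spec, newer versions of workflows provide extractors that get you to the same place. A sketch (not run here):
wf_fit <-
  workflow() %>%
  add_formula(Class ~ .) %>%
  add_model(rf_spec) %>%
  fit(data = ad_data)
# extract_fit_engine() returns the underlying ranger object
extract_fit_engine(wf_fit)$prediction.error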