step_rose() fails in tune_grid()

I noticed that when training with certain engines (e.g. keras and xgboost), the recipe returns more y values than X values.
Here you'll find a minimal reproducible example:
library(themis)
library(recipes)
library(tune)
library(parsnip)
library(workflows)
library(dials)
library(rsample)
xg_mod <- parsnip::boost_tree(mode = "classification",
                              trees = tune(),
                              tree_depth = tune(),
                              min_n = tune(),
                              loss_reduction = tune(),
                              learn_rate = tune()) %>%
  set_engine("xgboost")

xg_grid <- grid_latin_hypercube(over_ratio(range = c(0, 1)),
                                trees(),
                                tree_depth(),
                                min_n(),
                                loss_reduction(),
                                learn_rate(),
                                size = 5)

my_recipe <- recipe(class ~ ., data = circle_example) %>%
  step_rose(class, over_ratio = tune())

workflow() %>%
  add_model(xg_mod) %>%
  add_recipe(my_recipe) %>%
  tune_grid(resamples = mc_cv(circle_example, strata = class),
            grid = xg_grid)
The resulting error is Error in data.frame(ynew, Xnew): arguments imply differing number of rows: 385, 386

The problem is related to tuning over_ratio: if you skip tuning it, the example works with no errors, as in the reprex below (a sketch of a possible workaround follows it).
library(tidymodels)
#> ── Attaching packages ────────────────────────────────────── tidymodels 0.1.1
library(themis)
data(iris)
iris_imbalance <- iris %>%
  filter(Species != "setosa") %>%
  slice_sample(n = 60, weight_by = case_when(
    Species == "virginica" ~ 60,
    TRUE ~ 1)) %>%
  mutate(Species = factor(Species))

xg_mod <- parsnip::boost_tree(mode = "classification",
                              trees = tune(),
                              tree_depth = tune(),
                              min_n = tune(),
                              loss_reduction = tune(),
                              learn_rate = tune()) %>%
  set_engine("xgboost")

xg_grid <- grid_latin_hypercube(#over_ratio(range = c(0,1)),
                                trees(),
                                tree_depth(),
                                min_n(),
                                loss_reduction(),
                                learn_rate(),
                                size = 5)

my_recipe <- recipe(Species ~ ., data = iris_imbalance) %>%
  step_rose(Species) #, over_ratio = tune())

workflow() %>%
  add_model(xg_mod) %>%
  add_recipe(my_recipe) %>%
  tune_grid(resamples = mc_cv(iris_imbalance, strata = Species),
            grid = xg_grid)
#> # Tuning results
#> # Monte Carlo cross-validation (0.75/0.25) with 25 resamples using stratification
#> # A tibble: 25 x 4
#> splits id .metrics .notes
#> <list> <chr> <list> <list>
#> 1 <split [46/14]> Resample01 <tibble [10 × 9]> <tibble [0 × 1]>
#> 2 <split [46/14]> Resample02 <tibble [10 × 9]> <tibble [0 × 1]>
#> 3 <split [46/14]> Resample03 <tibble [10 × 9]> <tibble [0 × 1]>
#> 4 <split [46/14]> Resample04 <tibble [10 × 9]> <tibble [0 × 1]>
#> 5 <split [46/14]> Resample05 <tibble [10 × 9]> <tibble [0 × 1]>
#> 6 <split [46/14]> Resample06 <tibble [10 × 9]> <tibble [0 × 1]>
#> 7 <split [46/14]> Resample07 <tibble [10 × 9]> <tibble [0 × 1]>
#> 8 <split [46/14]> Resample08 <tibble [10 × 9]> <tibble [0 × 1]>
#> 9 <split [46/14]> Resample09 <tibble [10 × 9]> <tibble [0 × 1]>
#> 10 <split [46/14]> Resample10 <tibble [10 × 9]> <tibble [0 × 1]>
#> # … with 15 more rows
Created on 2020-11-13 by the reprex package (v0.3.0)
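While over_ratio cannot be tuned directly, one possible workaround (a hedged sketch, not an official fix; the fit_for_ratio helper and the candidate ratio values are illustrative) is to fix over_ratio at a few values and run tune_grid() once per value, reusing xg_mod and xg_grid from the reprex above:

# Hedged workaround sketch: fix over_ratio at a few candidate values and tune
# only the model hyperparameters for each, then compare the results.
fit_for_ratio <- function(ratio) {
  rec <- recipe(Species ~ ., data = iris_imbalance) %>%
    step_rose(Species, over_ratio = ratio)

  workflow() %>%
    add_model(xg_mod) %>%
    add_recipe(rec) %>%
    tune_grid(resamples = mc_cv(iris_imbalance, strata = Species),
              grid = xg_grid)
}

ratios <- c(0.5, 0.75, 1)                    # illustrative candidate values
results_by_ratio <- purrr::map(ratios, fit_for_ratio)
purrr::map(results_by_ratio, collect_metrics)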

Related

Tidymodels: How to extract importance from training data

I have the following code, where I do a grid search over different mtry and min_n values. I know how to extract the parameters that give the highest accuracy (see the second code box). How can I extract the importance of each feature in the training dataset? The guides I found online only show how to do it on the test dataset using last_fit(), e.g. https://www.tidymodels.org/start/case-study/#data-split
set.seed(seed_number)
data_split <- initial_split(node_strength, prop = 0.8, strata = Group)
train <- training(data_split)
test <- testing(data_split)
train_folds <- vfold_cv(train, v = 10)

rfc <- rand_forest(mode = "classification", mtry = tune(),
                   min_n = tune(), trees = 1500) %>%
  set_engine("ranger", num.threads = 48, importance = "impurity")

rfc_recipe <- recipe(data = train, Group ~ .)

rfc_workflow <- workflow() %>%
  add_model(rfc) %>%
  add_recipe(rfc_recipe)

rfc_result <- rfc_workflow %>%
  tune_grid(train_folds, grid = 40, control = control_grid(save_pred = TRUE),
            metrics = metric_set(accuracy))
best <- rfc_result %>%
  select_best(metric = "accuracy")
To do this, you will want to create a custom extract function, as outlined in this documentation.
For random forest variable importance, your function will look something like this:
get_rf_imp <- function(x) {
  x %>%
    extract_fit_parsnip() %>%
    vip::vi()
}
And then you can apply it to your resamples like so (notice that you get a new .extracts column):
library(tidymodels)
data(cells, package = "modeldata")

set.seed(123)
cell_split <- cells %>%
  select(-case) %>%
  initial_split(strata = class)
cell_train <- training(cell_split)
cell_test <- testing(cell_split)
folds <- vfold_cv(cell_train)

rf_spec <- rand_forest(mode = "classification") %>%
  set_engine("ranger", importance = "impurity")

ctrl_imp <- control_grid(extract = get_rf_imp)

cells_res <-
  workflow(class ~ ., rf_spec) %>%
  fit_resamples(folds, control = ctrl_imp)

cells_res
#> # Resampling results
#> # 10-fold cross-validation
#> # A tibble: 10 × 5
#> splits id .metrics .notes .extracts
#> <list> <chr> <list> <list> <list>
#> 1 <split [1362/152]> Fold01 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 2 <split [1362/152]> Fold02 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 3 <split [1362/152]> Fold03 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 4 <split [1362/152]> Fold04 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 5 <split [1363/151]> Fold05 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 6 <split [1363/151]> Fold06 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 7 <split [1363/151]> Fold07 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 8 <split [1363/151]> Fold08 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 9 <split [1363/151]> Fold09 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
#> 10 <split [1363/151]> Fold10 <tibble [2 × 4]> <tibble [0 × 3]> <tibble [1 × 2]>
Created on 2022-06-19 by the reprex package (v2.0.1)
Once you have those variable importance score extracts, you can unnest() them (right now, you have to do this twice because it is deeply nested) and then you can summarize and visualize as you prefer:
cells_res %>%
  select(id, .extracts) %>%
  unnest(.extracts) %>%
  unnest(.extracts) %>%
  group_by(Variable) %>%
  summarise(Mean = mean(Importance),
            Variance = sd(Importance)) %>%
  slice_max(Mean, n = 15) %>%
  ggplot(aes(Mean, reorder(Variable, Mean))) +
  geom_crossbar(aes(xmin = Mean - Variance, xmax = Mean + Variance)) +
  labs(x = "Variable importance", y = NULL)
Created on 2022-06-19 by the reprex package (v2.0.1)
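As a hedged follow-up (reusing best, rfc_workflow, and train from the question, which are assumed to exist in your session): once you have selected hyperparameters, you can also finalize the workflow, fit it once on the full training set, and pull importance from that single fit:

# Hedged sketch: one final fit on the training data, then variable importance.
final_rf_fit <- rfc_workflow %>%
  finalize_workflow(best) %>%
  fit(data = train) %>%
  extract_fit_parsnip()

vip::vi(final_rf_fit)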

LASSO regression - Force variables in glmnet with tidymodels

I am doing feature selection using LASSO regression with tidymodels and glmnet.
It is possible to force variables into a glmnet model by using its penalty.factor argument (see here and here, for example).
Is it possible to do the same using tidymodels?
library(tidymodels)
library(vip)
library(forcats)
library(dplyr)
library(ggplot2)
library(data.table)
# Define data split
datasplit = rsample::initial_split(mtcars, prop = 0.8)
data_training = rsample::training(datasplit)
data_testing = rsample::testing(datasplit)

# Model specification - should penalty.factor go here?
model_spec = parsnip::linear_reg(penalty = tune::tune(),
                                 mixture = 1) %>%
  parsnip::set_engine("glmnet")

# Model recipe
rec = recipe(mpg ~ ., mtcars)

# Model workflow
wf = workflows::workflow() %>%
  workflows::add_recipe(rec) %>%
  workflows::add_model(model_spec)

# Resampling
data_resample = rsample::vfold_cv(data_training,
                                  repeats = 3,
                                  v = 2)

hyperparam_grid = dials::grid_regular(dials::penalty(),
                                      levels = 100)

# Define metrics
metrics = yardstick::metric_set(yardstick::rsq,
                                yardstick::mape,
                                yardstick::mpe)

# Tune the model
tune_grid_results = tune::tune_grid(
  wf,
  resamples = data_resample,
  grid = hyperparam_grid,
  metrics = metrics
)

# Collect and finalise best model
selected_model = tune_grid_results %>%
  tune::select_best("mape")

final_model = tune::finalize_workflow(wf, selected_model)

final_model_fit = final_model %>%
  parsnip::fit(data_training) %>%
  workflows::extract_fit_parsnip()

# Plot variable importance
t_importance = final_model_fit %>%
  vip::vi(lambda = selected_model$penalty) %>%
  dplyr::mutate(
    Importance = Importance,
    Variable = forcats::fct_reorder(Variable, Importance)
  ) %>%
  data.table() %>%
  setorder(-Importance)

t_importance %>%
  ggplot(aes(x = Importance, y = Variable, fill = Sign)) +
  geom_col() +
  scale_x_continuous(expand = c(0, 0)) +
  labs(y = NULL) +
  theme_minimal()
Created on 2022-03-14 by the reprex package (v2.0.1)
As mentioned in the comment above, you can pass engine-specific arguments like penalty.factor in set_engine():
library(tidyverse)
library(tidymodels)
library(vip)
#>
#> Attaching package: 'vip'
#> The following object is masked from 'package:utils':
#>
#> vi
datasplit <- initial_split(mtcars, prop = 0.8)
car_train <- training(datasplit)
car_test <- testing(datasplit)
car_folds <- vfold_cv(car_train, repeats = 3, v = 2)
You can pass penalty.factor here to the model specification as an engine-specific argument:
glmnet_spec <- linear_reg(penalty = tune(), mixture = 1) %>%
  set_engine("glmnet", penalty.factor = c(0, rep(1, 7), 0, 0))
car_wf <- workflow(mpg ~ ., glmnet_spec)
glmnet_res <- tune_grid(car_wf, resamples = car_folds, grid = 5)
glmnet_res
#> # Tuning results
#> # 2-fold cross-validation repeated 3 times
#> # A tibble: 6 × 5
#> splits id id2 .metrics .notes
#> <list> <chr> <chr> <list> <list>
#> 1 <split [12/13]> Repeat1 Fold1 <tibble [10 × 5]> <tibble [0 × 3]>
#> 2 <split [13/12]> Repeat1 Fold2 <tibble [10 × 5]> <tibble [0 × 3]>
#> 3 <split [12/13]> Repeat2 Fold1 <tibble [10 × 5]> <tibble [0 × 3]>
#> 4 <split [13/12]> Repeat2 Fold2 <tibble [10 × 5]> <tibble [0 × 3]>
#> 5 <split [12/13]> Repeat3 Fold1 <tibble [10 × 5]> <tibble [0 × 3]>
#> 6 <split [13/12]> Repeat3 Fold2 <tibble [10 × 5]> <tibble [0 × 3]>
best_penalty <- select_best(glmnet_res, "rmse")
final_fit <- car_wf %>%
  finalize_workflow(best_penalty) %>%
  fit(data = car_train) %>%
  extract_fit_parsnip()

final_fit %>%
  vi(lambda = best_penalty$penalty) %>%
  mutate(Variable = fct_reorder(Variable, Importance)) %>%
  ggplot(aes(x = Importance, y = Variable, fill = Sign)) +
  geom_col() +
  scale_x_continuous(expand = c(0, 0)) +
  labs(y = NULL) +
  theme_minimal()
Created on 2022-03-14 by the reprex package (v2.0.1)
This does require that you know the number of predictors when you create the model specification, which can become challenging for a complex recipe including many feature engineering steps.
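If you do use a more involved recipe, one approach (a hedged sketch, not part of the answer above; the recipe steps and the "forced" variable names are illustrative) is to prep() and bake() the recipe once to learn the engineered predictor names, and build the penalty.factor vector from those names so it stays aligned with the columns glmnet will see:

library(tidymodels)

# Hedged sketch: derive penalty.factor from a prepped recipe.
rec <- recipe(mpg ~ ., data = mtcars) %>%
  step_dummy(all_nominal_predictors()) %>%
  step_normalize(all_numeric_predictors())

predictors <- rec %>%
  prep() %>%
  bake(new_data = NULL) %>%
  select(-mpg) %>%
  names()

forced_in <- c("cyl", "carb")                 # illustrative: variables to leave unpenalized
pf <- ifelse(predictors %in% forced_in, 0, 1)

glmnet_spec <- linear_reg(penalty = tune(), mixture = 1) %>%
  set_engine("glmnet", penalty.factor = pf)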

How to make svm_linear work with tune_grid/tune_race_anova

When I try to tune cost for svm_linear() with the tidymodels approach, it fails every time, but the same setup works just fine with svm_rbf(), so I cannot understand where the problem comes from.
rcpsvm <- recipe(Species ~ ., data = iris)

svmlin <- svm_linear(cost = tune()) %>%
  set_engine("LiblineaR") %>%
  set_mode("classification")

svmlinwrkfl <- workflow() %>%
  add_recipe(rcpsvm) %>%
  add_model(svmlin)

gridwals <- expand_grid(cost = c(0.01, 0.1, 1, 10, 100))
folds <- vfold_cv(iris, strata = Species, 5)

tunelin <- tune_grid(svmlinwrkfl, grid = gridwals, folds)
It then reports that all models failed because of "No data available in table".
What am I doing wrong?
The specific model you are using cannot generate class probabilities, only hard class predictions, so you need to tune using a metric for classes (not a metric for probabilities). An example of this is sensitivity:
library(tidymodels)
#> Registered S3 method overwritten by 'tune':
#> method from
#> required_pkgs.model_spec parsnip
data(two_class_dat)
data_train <- two_class_dat[-(1:10), ]
data_test <- two_class_dat[ 1:10 , ]
folds <- bootstraps(data_train, times = 5)
svm_cls_spec <-
  svm_linear(cost = tune()) %>%
  set_mode("classification") %>%
  set_engine("LiblineaR")

workflow(Class ~ ., svm_cls_spec) %>%
  tune_grid(folds, grid = 5, metrics = metric_set(sensitivity))
#> # Tuning results
#> # Bootstrap sampling
#> # A tibble: 5 × 4
#> splits id .metrics .notes
#> <list> <chr> <list> <list>
#> 1 <split [781/296]> Bootstrap1 <tibble [5 × 5]> <tibble [0 × 1]>
#> 2 <split [781/286]> Bootstrap2 <tibble [5 × 5]> <tibble [0 × 1]>
#> 3 <split [781/296]> Bootstrap3 <tibble [5 × 5]> <tibble [0 × 1]>
#> 4 <split [781/291]> Bootstrap4 <tibble [5 × 5]> <tibble [0 × 1]>
#> 5 <split [781/304]> Bootstrap5 <tibble [5 × 5]> <tibble [0 × 1]>
Created on 2022-01-28 by the reprex package (v2.0.1)
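As a hedged follow-up (not part of the original answer, reusing folds and svm_cls_spec from the reprex above): you can tune on several class metrics at once and then pick the best cost as usual:

# Hedged sketch: tune with class metrics only, then select the best candidate.
svm_res <- workflow(Class ~ ., svm_cls_spec) %>%
  tune_grid(folds, grid = 5, metrics = metric_set(accuracy, sensitivity))

select_best(svm_res, metric = "accuracy")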

Combine nesting and rolling_origin from Tidymodels in R

I am trying to train a random forest using rolling_origin() from the tidymodels suite. I would like the folds to be exactly the months of the year. Nesting looks like it could do the trick, but tune_grid() is not able to find the variables when the data is nested. How can I make this work? A reproducible example is below.
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(tidymodels))
suppressPackageStartupMessages(library(yardstick))
# Create dummy data ====================================================================================================
dates <- seq(from = as.Date("2019-01-01"), to = as.Date("2019-12-31"), by = 'day' )
l <- length(dates)
set.seed(1)
data_set <- data.frame(
  date = dates,
  v1 = rnorm(l),
  v2 = rnorm(l),
  v3 = rnorm(l),
  y = rnorm(l)
)
# Random Forest Model =================================================================================================
model <-
  parsnip::rand_forest(
    mode = "regression",
    trees = tune()) %>%
  set_engine("ranger")

# grid specification
params <-
  dials::parameters(
    trees()
  )

# Set up grid and model workflow =======================================================================================
grid <-
  dials::grid_max_entropy(
    params,
    size = 2
  )

form <- as.formula(paste("y ~ v1 + v2 + v3"))

model_workflow <-
  workflows::workflow() %>%
  add_model(model) %>%
  add_formula(form)
# Tuning on the normal data set works ====================================================================================================
data_ro_day <- data_set %>%
  rolling_origin(
    initial = 304,
    assess = 30,
    cumulative = TRUE,
    skip = 30
  )

results <- tune_grid(
  model_workflow,
  grid = grid,
  resamples = data_ro_day,
  param_info = params,
  metrics = metric_set(mae, mape, rmse, rsq),
  control = control_grid(verbose = TRUE))

results %>% show_best("mape", n = 2)
# Tuning on the nested data set doesn't work =========================================================================================
data_ro_month <- data_set %>%
  mutate(year_month = format(date, "%Y-%m")) %>%
  nest(-year_month) %>%
  rolling_origin(
    initial = 10,
    assess = 1,
    cumulative = TRUE
  )

results <- tune_grid(
  model_workflow,
  grid = grid,
  resamples = data_ro_month,
  param_info = params,
  metrics = metric_set(mae, mape, rmse, rsq),
  control = control_grid(verbose = TRUE))

results$.notes
I'm not entirely clear on how you want to divide up your data for tuning, but I would recommend looking into some of the other rsample functions like sliding_window() and especially sliding_period(). They let you create experimental designs for tuning where you can fit on certain months of data and then assess on another month, sliding along all the months you have available:
library(tidymodels)
dates <- seq(from = as.Date("2019-01-01"), to = as.Date("2019-12-31"), by = 'day' )
l <- length(dates)
set.seed(1)
data_set <- tibble(
  date = dates,
  v1 = rnorm(l),
  v2 = rnorm(l),
  v3 = rnorm(l),
  y = rnorm(l)
)

month_folds <- data_set %>%
  sliding_period(
    date,
    "month",
    lookback = Inf,
    skip = 4
  )
month_folds
#> # Sliding period resampling
#> # A tibble: 7 x 2
#> splits id
#> <list> <chr>
#> 1 <split [151/30]> Slice1
#> 2 <split [181/31]> Slice2
#> 3 <split [212/31]> Slice3
#> 4 <split [243/30]> Slice4
#> 5 <split [273/31]> Slice5
#> 6 <split [304/30]> Slice6
#> 7 <split [334/31]> Slice7
I used skip = 4 here to only keep slices where you will have more data for training. Each of these slices will train on several months of data and assess on a new, final month. The resamples slide forward through your dataset. Since I used lookback = Inf it always includes all past data, but you can change that.
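As a hedged variation (illustrative values, not from the original answer), you could instead use a fixed-width rolling window rather than an expanding one:

rolling_months <- data_set %>%
  sliding_period(
    date,
    "month",
    lookback = 2    # each analysis set spans the current month plus the two before it
  )

rolling_months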
When you have your resampling approach set up in whatever way is appropriate for your domain problem, you can then make a model specification and tune it:
rf_spec <-
  rand_forest(
    mode = "regression",
    trees = tune()) %>%
  set_engine("ranger")

rf_wf <-
  workflow() %>%
  add_model(rf_spec) %>%
  add_formula(y ~ v1 + v2 + v3)

tune_grid(rf_wf, resamples = month_folds)
#> # Tuning results
#> # Sliding period resampling
#> # A tibble: 7 x 4
#> splits id .metrics .notes
#> <list> <chr> <list> <list>
#> 1 <split [151/30]> Slice1 <tibble [20 × 5]> <tibble [0 × 1]>
#> 2 <split [181/31]> Slice2 <tibble [20 × 5]> <tibble [0 × 1]>
#> 3 <split [212/31]> Slice3 <tibble [20 × 5]> <tibble [0 × 1]>
#> 4 <split [243/30]> Slice4 <tibble [20 × 5]> <tibble [0 × 1]>
#> 5 <split [273/31]> Slice5 <tibble [20 × 5]> <tibble [0 × 1]>
#> 6 <split [304/30]> Slice6 <tibble [20 × 5]> <tibble [0 × 1]>
#> 7 <split [334/31]> Slice7 <tibble [20 × 5]> <tibble [0 × 1]>
Created on 2020-11-15 by the reprex package (v0.3.0.9001)
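A hedged usage note: if you assign the tuning result to an object, you can rank candidates with the usual helpers (rmse and rsq are the default regression metrics here):

month_res <- tune_grid(rf_wf, resamples = month_folds)
show_best(month_res, metric = "rmse", n = 3)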

How do I specify a PLS model in tidymodels

I'm interested in learning tidymodels and have tried to apply it to some exercises in Applied Predictive Modeling. This is Exercise 6.2: I would like to fit a partial least squares (PLS) model to the permeability data set.
I have the following code that works all the way up to the tune_grid() call. I've modeled my analysis on Julia Silge's "LASSO regression with tidymodels and The Office" post, found here.
You can see my script and the tune_grid error message below.
library(tidymodels)
library(tidyverse)
library(skimr)
library(plsmod)
library(caret)
library(AppliedPredictiveModeling)
data(permeability)
dim(fingerprints)
fingerprints <- fingerprints[, -nearZeroVar(fingerprints)]
dim(fingerprints)
df <- cbind(fingerprints, permeability)
df <- as_tibble(df)
perm_split <- initial_split(df)
perm_train <- training(perm_split)
perm_test <- testing(perm_split)
perm_rec <- recipe(permeability ~ ., data = perm_train) %>%
  step_center(all_numeric(), -all_outcomes()) %>%
  step_scale(all_numeric(), -all_outcomes())

perm_prep <- perm_rec %>%
  prep()
perm_prep

pls_spec <- pls(num_comp = 4) %>%
  set_mode("regression") %>%
  set_engine("mixOmics")

wf <- workflow() %>%
  add_recipe(perm_prep)

pls_fit <- wf %>%
  add_model(pls_spec) %>%
  fit(data = perm_train)

pls_fit %>%
  pull_workflow_fit() %>%
  tidy()
set.seed(123)
perm_folds <- vfold_cv(perm_train, v=10)
pls_tune_spec <- pls(num_comp = tune()) %>%
  set_mode("regression") %>%
  set_engine("mixOmics")
comp_grid <- expand.grid(num_comp = seq(from = 1, to = 20, by = 1))
doParallel::registerDoParallel()
set.seed(4763)
pls_grid <- tune_grid(
  wf %>% add_model(pls_tune_spec),
  resamples = perm_folds,
  grid = comp_grid
)
At this point I'm getting the following error:
All models failed in tune_grid(). See the .notes column.
Two questions:
Why is my tune_grid() call failing, and how can I fix it?
How does one see the .notes column?
I am guessing that you may be using a Windows computer, because we currently have a bug in the CRAN version of tune for parallel processing on Windows. Try either:
training sequentially without parallel processing, or
installing the development version of tune where we have fixed this bug, via devtools::install_github("tidymodels/tune")
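If a parallel backend has already been registered in your session, one way to fall back to sequential execution (a hedged sketch, not from the original answer) is to reset the foreach backend before tuning:

foreach::registerDoSEQ()   # reset to sequential processing before calling tune_grid()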
You should see results like this:
library(tidymodels)
library(plsmod)
library(AppliedPredictiveModeling)
data(permeability)
df <- cbind(fingerprints, permeability)
df <- as_tibble(df)
set.seed(123)
perm_split <- initial_split(df)
perm_train <- training(perm_split)
perm_test <- testing(perm_split)
set.seed(234)
perm_folds <- vfold_cv(perm_train, v=10)
perm_rec <- recipe(permeability ~ ., data = perm_train) %>%
  step_nzv(all_predictors()) %>%
  step_center(all_numeric(), -all_outcomes()) %>%
  step_scale(all_numeric(), -all_outcomes())

pls_spec <- pls(num_comp = tune()) %>%
  set_mode("regression") %>%
  set_engine("mixOmics")
comp_grid <- tibble(num_comp = seq(from = 1, to = 20, by = 5))
doParallel::registerDoParallel()
workflow() %>%
  add_recipe(perm_rec) %>%
  add_model(pls_spec) %>%
  tune_grid(
    resamples = perm_folds,
    grid = comp_grid
  )
#>
#> Attaching package: 'rlang'
#> The following objects are masked from 'package:purrr':
#>
#> %@%, as_function, flatten, flatten_chr, flatten_dbl, flatten_int,
#> flatten_lgl, flatten_raw, invoke, list_along, modify, prepend,
#> splice
#>
#> Attaching package: 'vctrs'
#> The following object is masked from 'package:tibble':
#>
#> data_frame
#> The following object is masked from 'package:dplyr':
#>
#> data_frame
#> Loading required package: MASS
#>
#> Attaching package: 'MASS'
#> The following object is masked from 'package:dplyr':
#>
#> select
#> Loading required package: lattice
#>
#> Loaded mixOmics 6.12.2
#> Thank you for using mixOmics!
#> Tutorials: http://mixomics.org
#> Bookdown vignette: https://mixomicsteam.github.io/Bookdown
#> Questions, issues: Follow the prompts at http://mixomics.org/contact-us
#> Cite us: citation('mixOmics')
#>
#> Attaching package: 'mixOmics'
#> The following object is masked from 'package:plsmod':
#>
#> pls
#> The following object is masked from 'package:tune':
#>
#> tune
#> The following object is masked from 'package:purrr':
#>
#> map
#> # Tuning results
#> # 10-fold cross-validation
#> # A tibble: 10 x 4
#> splits id .metrics .notes
#> <list> <chr> <list> <list>
#> 1 <split [111/13]> Fold01 <tibble [8 × 5]> <tibble [0 × 1]>
#> 2 <split [111/13]> Fold02 <tibble [8 × 5]> <tibble [0 × 1]>
#> 3 <split [111/13]> Fold03 <tibble [8 × 5]> <tibble [0 × 1]>
#> 4 <split [111/13]> Fold04 <tibble [8 × 5]> <tibble [0 × 1]>
#> 5 <split [112/12]> Fold05 <tibble [8 × 5]> <tibble [0 × 1]>
#> 6 <split [112/12]> Fold06 <tibble [8 × 5]> <tibble [0 × 1]>
#> 7 <split [112/12]> Fold07 <tibble [8 × 5]> <tibble [0 × 1]>
#> 8 <split [112/12]> Fold08 <tibble [8 × 5]> <tibble [0 × 1]>
#> 9 <split [112/12]> Fold09 <tibble [8 × 5]> <tibble [0 × 1]>
#> 10 <split [112/12]> Fold10 <tibble [8 × 5]> <tibble [0 × 1]>
Created on 2020-11-12 by the reprex package (v0.3.0.9001)
If you have an object like pls_grid with notes, you should be able to get to the column via pls_grid$.notes, or to see the first example via pls_grid$.notes[[1]].
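A hedged sketch for reading all notes at once: the .notes column is a list of tibbles, so with tidymodels attached you can unnest it (newer versions of tune also provide collect_notes()):

# Flatten the per-resample notes into one tibble of warnings/errors.
pls_grid %>%
  dplyr::select(id, .notes) %>%
  tidyr::unnest(.notes)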

Resources