I am trying to figure out the correct way to apply a keras model to each of my lists. I have used the iris dataset and created 4 lists; the goal is to correctly predict versicolor or virginica (I omit setosa because I want a binary classification model).
data(iris)
iris <- iris %>%
  mutate(
    splt = sample(4, size = nrow(.), replace = TRUE),
    binary = case_when(
      Species == "versicolor" ~ 0,
      Species == "virginica" ~ 1
    )
  ) %>%
  filter(Species != "setosa") %>%
  split(., .$splt)
iris_x_train <- iris %>%
  map(., ~select(., Sepal.Length, Sepal.Width, Petal.Length, Petal.Width) %>%
        as.matrix())
iris_y_train <- iris %>%
  map(., ~select(., binary) %>%
        to_categorical(2))
NN_model <- keras_model_sequential() %>%
  layer_dense(units = 4, activation = 'relu', input_shape = 4) %>%
  layer_dense(units = 2, activation = 'softmax')
NN_model %>%
  summary()
NN_model %>%
  compile(
    loss = 'binary_crossentropy',
    optimizer = optimizer_sgd(lr = 0.01, momentum = 0.9),
    metrics = c('accuracy')
  )
My problem occurs here. When I apply the below code:
NN_model %>%
  future_map(., ~future_map2(
    .x = iris_x_train,
    .y = iris_y_train,
    ~fit(
      x = .x,
      y = .y,
      epochs = 5,
      batch_size = 20,
      validation_split = 0
    )
  ))
I get this error:
Error in py_get_item_impl(x, key, FALSE) :
  TypeError: 'Sequential' object does not support indexing
When I apply this code:
NN_model %>%
  future_map2(
    .x = iris_x_train,
    .y = iris_y_train,
    ~fit(
      x = .x,
      y = .y,
      epochs = 5,
      batch_size = 20,
      validation_split = 0
    )
  )
I get this error:
~fit(x = .x, y = .y, epochs = 5, batch_size = 20, validation_split = 0)
Error in py_call_impl(callable, dots$args, dots$keywords) :
  Evaluation error: Unable to convert R object to Python type.
How can I map a keras model to each of the 4 datasets?
library(keras)
library(tensorflow)
library(furrr)
library(purrr)
library(dplyr) # needed for mutate()/case_when()/select()
The following works for the first list:
NN_model %>%
  fit(
    x = iris_x_train[[1]],
    y = iris_y_train[[1]],
    epochs = 50,
    batch_size = 20,
    validation_split = 0
  )
EDIT: I seem to have solved it.
Putting the NN_model inside the fit() function appears to work.
future_map2(
  .x = iris_x_train,
  .y = iris_y_train,
  ~fit(NN_model,
       .x,
       .y,
       epochs = 5,
       batch_size = 20,
       validation_split = 0
  )
)
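A note on why this works (my reading, not verified): both earlier errors are consistent with the model being piped into the mapping call as its first argument, so furrr either treats it as data to iterate over (purrr then tries to index the Sequential object, hence the first error) or passes it where something convertible to Python is expected (hence the second). Also, fit() modifies a keras model in place, so the workaround above trains the same NN_model sequentially on all four datasets rather than four separate models. If the goal is one independent model per dataset, a minimal sketch along these lines should work (fit_one and NN_models are names introduced here; plain map2() is used because Python objects generally do not transfer to parallel furrr workers):
fit_one <- function(x, y) {
  # build and compile a fresh model for each dataset
  model <- keras_model_sequential() %>%
    layer_dense(units = 4, activation = 'relu', input_shape = 4) %>%
    layer_dense(units = 2, activation = 'softmax')
  model %>%
    compile(
      loss = 'binary_crossentropy',
      optimizer = optimizer_sgd(lr = 0.01, momentum = 0.9),
      metrics = c('accuracy')
    )
  fit(model, x, y, epochs = 5, batch_size = 20, validation_split = 0)
  model
}
NN_models <- map2(iris_x_train, iris_y_train, fit_one)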
After following the keras tutorial on the functional API, I'm trying to merge 2 neural networks; however, I keep getting this error:
Error in dim(x) <- length(x) :
invalid first argument, must be vector (list or atomic)
This is the code (note: x_train is a tibble and train_gen is a flow_images_from_directory() object):
input_img <- layer_input(shape = c(140, 140, 3), name = "image")
input_tab <- layer_input(shape = c(ncol(x_train)), name = "tabular")
cnn <- input_img %>%
  layer_conv_2d(32, kernel_size = c(3, 3), padding = "same",
                activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(64, kernel_size = c(3, 3), activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_flatten() %>%
  layer_dropout(0.5)
nn <- input_tab %>%
  layer_dense(128) %>%
  layer_activation("relu") %>%
  layer_dense(64) %>%
  layer_activation("relu") %>%
  layer_dense(32) %>%
  layer_activation("relu") %>%
  layer_dense(16) %>%
  layer_activation("relu")
output <- layer_concatenate(c(cnn, nn)) %>%
  layer_dense(16) %>%
  layer_activation("relu") %>%
  layer_dense(6) %>%
  layer_activation("sigmoid")
model <- keras_model(inputs = c(input_img, input_tab),
                     outputs = output) %>%
  compile(loss = "categorical_crossentropy",
          optimizer = optimizer_adam(),
          metrics = "accuracy")
callbacks <- callback_early_stopping(monitor = "val_loss", patience = 5)
final_history <- model %>% fit(
  x = list(train_gen, x_train),
  y = list(train_gen$classes, x_train),
  epochs = 50,
  callbacks = callbacks,
  validation_split = 0.2)
Error in dim(x) <- length(x) :
invalid first argument, must be vector (list or atomic)
After searching online for a long time, I could not find any help on this. Why is this error produced?
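A hedged guess at the cause (my own reading, not confirmed): fit() has to convert x and y to Python arrays, and neither a tibble nor a generator survives that conversion inside list(...). A multi-input model fed in-memory data expects a named list of plain R matrices/arrays, one per input, and a single y matrix for the single output; a generator cannot be mixed with in-memory data in the same fit() call. A sketch, where img_array and y_matrix are hypothetical stand-ins for the images and labels as arrays:
final_history <- model %>% fit(
  x = list(image = img_array,             # images as an array, not a generator
           tabular = as.matrix(x_train)), # tibble converted to a matrix
  y = y_matrix,                           # one label matrix for the one output
  epochs = 50,
  callbacks = callbacks,
  validation_split = 0.2
)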
Feeding several multivariate time series into a neural network in R seems to be a hard problem for me.
As far as I can tell there are three ways to do so, and they all have their pitfalls:
A nested tibble would be the most intuitive solution for R, but it does not seem to work at all.
With a 3D array you have to be careful about the ordering.
Converting a 3D array into a dataset and then creating a tfdatasets::feature_spec(.) results in an error.
Here is a small example, which does not work at the moment:
I get the data as a nested tibble, so I start from there.
library(magrittr)
reticulate::use_python("/usr/local/bin/python")
data <- tibble::tibble(
  Timeseires = list(
    A = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    ),
    C = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    ),
    B = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    ),
    D = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    )
  )
)
measurement.data <- data %>%
  dplyr::mutate(Timeseires = purrr::map(
    .x = Timeseires,
    .f = simplify2array
  )) %>%
  .$Timeseires %>%
  simplify2array() %>%
  tfdatasets::tensor_slices_dataset()
feature.spec.measurment <- measurement.data %>%
  tfdatasets::feature_spec(.) %>%
  tfdatasets::step_numeric_column(
    tfdatasets::all_numeric(),
    normalizer_fn = tfdatasets::scaler_standard()
  ) %>%
  tfdatasets::fit()
It is crashing with the following error:
Error: Unable to resolve features for dataset that does not have named outputs
I've tried adding some names to it, but without success.
Do you have any idea how to feed several multivariate time series into a neural network in R with keras, and thus get the example to work?
The remainder of the example looks like this:
##### Data Prep for meta.data #####
meta.data <- data %>%
  dplyr::select(-Timeseires) %>%
  tfdatasets::tensor_slices_dataset()
feature.spec.meta <- meta.data %>%
  tfdatasets::feature_spec(Result ~ .) %>%
  tfdatasets::step_numeric_column(
    tfdatasets::all_numeric(),
    normalizer_fn = tfdatasets::scaler_standard()
  ) %>%
  tfdatasets::step_categorical_column_with_vocabulary_list(
    tfdatasets::all_nominal()
  ) %>%
  tfdatasets::step_indicator_column(
    tfdatasets::all_nominal()
  ) %>%
  tfdatasets::fit()
input.measurement <- keras::layer_input(shape = c(3, 4))
lstm.out <- input.measurement %>%
  keras::layer_lstm(units = 32)
#####
input.meta <- data %>%
  dplyr::select(-Timeseires, -Result) %>%
  tfdatasets::layer_input_from_dataset()
dense.out <- input.meta %>%
  keras::layer_dense_features(feature_columns = tfdatasets::dense_features(feature.spec.meta))
#####
output <- keras::layer_concatenate(c(dense.out, lstm.out)) %>%
  keras::layer_dense(units = 64, activation = "relu") %>%
  keras::layer_dense(units = 64, activation = "relu") %>%
  keras::layer_dense(units = 64, activation = "relu") %>%
  keras::layer_dense(units = 1, activation = "sigmoid")
output.auxiliary <- input.measurement %>%
  keras::layer_dense(units = 1, activation = "sigmoid", name = "aux_output")
model <- keras::keras_model(
  inputs = c(input.meta, input.measurement),
  outputs = c(output, output.auxiliary)
)
model %>% keras::compile(
  loss = keras::loss_binary_crossentropy,
  optimizer = "adam",
  metrics = "binary_accuracy"
)
summary(model)
history <- model %>%
  keras::fit(
    x = list(
      # tfdatasets::dataset_use_spec(meta.data, spec = feature.spec.meta),
      # tfdatasets::dataset_use_spec(measurement.data, feature.spec.measurment)
      data %>% dplyr::select(-Timeseires, -Result),
      data %>%
        dplyr::mutate(Timeseires = purrr::map(
          .x = Timeseires,
          .f = simplify2array
        )) %>%
        .$Timeseires %>%
        simplify2array()
    ),
    y = list(
      data$Result,
      data$Result
    ),
    epochs = 10,
    validation_split = 0.3
  )
At the moment I've chosen this input shape because it at least gets the example to work, but it is wrong: dim(measurement.data) is 50 x 3 x 4, and I should actually use keras::layer_input(shape = c(50, 3)), since the number of time series (matrices) will vary at evaluation time, while each series keeps the same shape as in training.
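For the ordering pitfall mentioned above, a minimal sketch of what I believe the fix looks like (measurement.array is a name introduced here; this assumes keras LSTMs expect input of shape (samples, timesteps, features)):
measurement.array <- data$Timeseires %>%
  purrr::map(as.matrix) %>%
  simplify2array() %>%  # 50 x 3 x 4 (rows x columns x series)
  aperm(c(3, 1, 2))     # reorder to 4 samples x 50 timesteps x 3 features
input.measurement <- keras::layer_input(shape = c(50, 3))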
Hello, I am trying to build a neural network in R, and so far it works fine, as shown in the following code:
library(keras)
library(tensorflow)
library(tidyverse)
nn_dat = iris %>% as_tibble %>%
  mutate(sepal_l_feat = scale(Sepal.Length),
         sepal_w_feat = scale(Sepal.Width),
         petal_l_feat = scale(Petal.Length),
         petal_w_feat = scale(Petal.Width),
         class_num = as.numeric(Species) - 1, # factor, so = 0, 1, 2
         class_label = Species) %>%
  select(contains("feat"), class_num, class_label)
nn_dat %>% head(3)
set.seed(2022)
test_f = 0.40
nn_dat = nn_dat %>%
  mutate(partition = sample(c('train', 'test'), nrow(.), replace = TRUE, prob = c(1 - test_f, test_f)))
x_train = nn_dat %>% filter(partition == 'train') %>% select(contains("feat")) %>% as.matrix
y_train = nn_dat %>% filter(partition == 'train') %>% pull(class_num) %>% to_categorical(3)
x_test = nn_dat %>% filter(partition == 'test') %>% select(contains("feat")) %>% as.matrix
y_test = nn_dat %>% filter(partition == 'test') %>% pull(class_num) %>% to_categorical(3)
model = keras_model_sequential()
model %>%
  ## Number of columns
  layer_dense(units = 4, activation = 'relu', input_shape = 4) %>%
  layer_dense(units = 3, activation = 'softmax')
model %>% summary
model %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = optimizer_rmsprop(),
  metrics = c('accuracy')
)
history = model %>% fit(
  x = x_train, y = y_train,
  epochs = 200,
  batch_size = 20,
  validation_split = 0
)
plot(history)
perf = model %>% evaluate(x_test, y_test)
print(perf)
However, I would like to see whether there is a better architecture for my model (I am using iris just because it is public, but I am working with other data). So, in particular, here:
model %>%
  ## Number of predictors
  layer_dense(units = 4, activation = 'relu', input_shape = 4) %>%
  layer_dense(units = 3, activation = 'softmax')
model %>% summary
I am using 4 units because I have 4 columns, and 3 at the end for the result. However, if I wanted to add more layers in between, how should I go about deciding on the architecture? Any recommendation is welcome.
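Not an authoritative answer, just a common heuristic: start with one or two hidden layers whose width is a small multiple of the input size, then grow or shrink them based on validation performance (e.g. by setting validation_split above 0). A minimal sketch with one extra hidden layer; the unit counts are arbitrary starting points, not recommendations:
model = keras_model_sequential()
model %>%
  layer_dense(units = 8, activation = 'relu', input_shape = 4) %>% # wider first hidden layer
  layer_dense(units = 8, activation = 'relu') %>%                  # extra hidden layer
  layer_dense(units = 3, activation = 'softmax')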
I tried to follow the instructions to generate the engine, workflow, then the recipe. The part which I can't seem to get right is tuning the cubist model; I'd appreciate your guidance. These untuned models work:
# untuned rf
rf_model <-
  rand_forest(trees = 1000
              # ,mtry = 30
              # ,min_n = 3
  ) %>%
  set_engine("ranger",
             num.threads = parallel::detectCores(),
             importance = "permutation") %>%
  set_mode("regression")
rf_wflow <-
  workflow() %>%
  add_recipe(df_recipe) %>%
  add_model(rf_model)
system.time(rf_fit <- rf_wflow %>% fit(data = train))
# build untuned cubist model
cubist_mod <-
  cubist_rules(
    committees = 100,
    neighbors = 9
    # max_rules = integer(1)
  ) %>%
  set_engine("Cubist") %>%
  set_mode("regression")
cubist_wflow <-
  workflow() %>%
  add_recipe(cube_recipe) %>%
  add_model(cubist_mod)
system.time(final_cb_mod <- cubist_wflow %>% fit(data = train))
summary(final_cb_mod$fit)
And the tuned version of the rf works too:
# tune ranger
tune_spec <- rand_forest(
  mtry = tune(),
  trees = 1000,
  min_n = tune()) %>%
  set_mode("regression") %>%
  set_engine("ranger")
tune_wf <-
  workflow() %>%
  add_model(tune_spec) %>%
  add_recipe(df_recipe)
set.seed(234)
trees_folds <- vfold_cv(train)
rf_grid <- grid_regular(
  mtry(range = c(10, 30)),
  min_n(range = c(2, 8)),
  levels = 5
)
set.seed(345)
system.time(
  tune_res <- tune_grid(
    tune_wf,
    resamples = trees_folds,
    grid = rf_grid,
    control =
      control_grid(#save_pred = T,
        pkgs = c('tm', 'stringr'))
  )
)
But when I tried to tune cubist, it generated this error (my cubist tuning code follows below):
Warning message:
All models failed. See the `.notes` column.
> car_tune_res$.notes[1]
[[1]]
# A tibble: 1 x 1
.notes
<chr>
1 preprocessor 1/1: Error in `contrasts<-`(`*tmp*`, value = contr.funs[1 + isOF[nn]]): contrasts can be applied only to factors with 2 or more levels
The tuning of cubist:
# tune cubist
cb_grid <- expand.grid(committees = c(1, 10, 50, 100), neighbors = c(1, 5, 7, 9))
set.seed(8226)
cubist_mod <-
  cubist_rules(neighbors = tune(), committees = tune()) %>%
  set_engine("Cubist") %>%
  set_mode("regression")
tuned_cubist_wf <- workflow() %>%
  add_model(cubist_mod) %>%
  add_recipe(cube_recipe)
system.time(
  car_tune_res <-
    cubist_mod %>%
    tune_grid(
      price ~ .,
      resamples = trees_folds,
      grid = cb_grid,
      control =
        control_grid(#save_pred = T,
          pkgs = c('tm', 'stringr'))
    )
)
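One thing I notice while writing this up (a guess, not a confirmed fix): tuned_cubist_wf is created but never used; tune_grid() is called on cubist_mod with the bare formula price ~ ., which bypasses cube_recipe, so the raw factor columns hit the formula interface directly, and that would explain the contrasts error. A sketch of tuning the workflow instead:
system.time(
  car_tune_res <-
    tuned_cubist_wf %>% # the workflow carries cube_recipe, so no formula is needed
    tune_grid(
      resamples = trees_folds,
      grid = cb_grid,
      control = control_grid(pkgs = c('tm', 'stringr'))
    )
)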
I added the recipe for reference:
int_var <- train %>% select(where(is.integer)) %>% colnames()
int_var <- c(int_var, 'geo_dist')
# excl_var <- c('url')
add_words <-
  str_extract(train$url, '(?<=-).*(?=-)') %>%
  str_extract_all(., '[[:alpha:]]+') %>%
  unlist() %>%
  unique() %>%
  str_to_lower()
df_recipe <-
  recipe(price ~ ., data = train) %>%
  step_geodist(lat = lat, lon = long, log = FALSE,
               ref_lat = -37.840935, ref_lon = 144.946457, # Melb CBD
               is_lat_lon = FALSE) %>%
  step_rm('suburb') %>%
  step_rm('prop_type') %>%
  step_rm('url') %>%
  step_zv(all_predictors()) %>%
  # step_rm('desc') %>%
  step_mutate(desc_raw = desc) %>%
  step_textfeature(desc_raw) %>%
  step_rename_at(
    starts_with("textfeature_"),
    fn = ~ gsub("textfeature_desc_raw_", "", .)) %>%
  step_mutate(desc = str_to_lower(desc)) %>%
  step_mutate(desc = removeNumbers(desc)) %>%
  step_mutate(desc = removePunctuation(desc)) %>%
  step_tokenize(desc) %>% # engine = "spacyr"
  step_stopwords(desc, stopword_source = 'snowball') %>%
  step_stopwords(desc, custom_stopword_source = add_words) %>%
  step_tokenfilter(desc, max_tokens = 1e3) %>% # , max_tokens = tune()
  step_tfidf(desc) %>% # lda_models = lda_model
  step_novel(all_nominal(), -all_outcomes()) %>%
  step_YeoJohnson(all_of(!!int_var), -all_outcomes()) %>%
  step_dummy(all_nominal(), -all_outcomes(), one_hot = TRUE) %>%
  step_normalize(all_of(!!int_var))
# dimension reduction due to the sparse matrix
cube_recipe <-
  df_recipe %>%
  step_pca(matches("school|tfidf_desc"), threshold = .8) %>% # |lda_desc
  step_rm(starts_with("school")) %>%
  step_rm(starts_with("lda_desc"))
I have the following results, and I am trying to apply a function within a pipe command. The code I am using, which gives me the error, is the following:
sample_rmse_tbl <- dataset %>%
  mutate(rmse = map_dbl(predict, calc_rmse)) %>%
  select(id, rmse)
Which gives the following error:
Error in mutate_impl(.data, dots) :
Evaluation error: Result 1 is not a length 1 atomic vector.
The data uses the sunspots dataset, and my code is the following (the error I run into is on the last line). I have followed the tutorial carefully, and everything works for me up until that line. The code is a cut-down version of this tutorial: https://www.business-science.io/timeseries-analysis/2018/04/18/keras-lstm-sunspots-time-series-prediction.html
# Core Tidyverse
library(tidyverse)
library(glue)
library(forcats)
# Time Series
library(timetk)
library(tidyquant)
library(tibbletime)
# Visualization
library(cowplot)
# Preprocessing
library(recipes)
# Sampling / Accuracy
library(rsample)
library(yardstick)
# Modeling
library(keras)
sun_spots <- datasets::sunspot.month %>%
  tk_tbl() %>%
  mutate(index = as_date(index)) %>%
  as_tbl_time(index = index)
sun_spots
############################################
periods_train <- 12 * 50
periods_test <- 12 * 10
skip_span <- 12 * 20
rolling_origin_resamples <- rolling_origin(
  sun_spots,
  initial = periods_train,
  assess = periods_test,
  cumulative = FALSE,
  skip = skip_span
)
rolling_origin_resamples
############################################
calc_rmse <- function(prediction_tbl) {
  rmse_calculation <- function(data) {
    data %>%
      spread(key = key, value = value) %>%
      select(-index) %>%
      filter(!is.na(predict)) %>%
      rename(
        truth = actual,
        estimate = predict
      ) %>%
      rmse(truth, estimate)
  }
  safe_rmse <- possibly(rmse_calculation, otherwise = NA)
  safe_rmse(prediction_tbl)
}
#############################################
predict_keras_lstm <- function(split, epochs = 300, ...) {

  lstm_prediction <- function(split, epochs, ...) {

    # 5.1.2 Data Setup
    df_trn <- training(split)
    df_tst <- testing(split)
    df <- bind_rows(
      df_trn %>% add_column(key = "training"),
      df_tst %>% add_column(key = "testing")
    ) %>%
      as_tbl_time(index = index)

    # 5.1.3 Preprocessing
    rec_obj <- recipe(value ~ ., df) %>%
      step_sqrt(value) %>%
      step_center(value) %>%
      step_scale(value) %>%
      prep()
    df_processed_tbl <- bake(rec_obj, df)
    center_history <- rec_obj$steps[[2]]$means["value"]
    scale_history <- rec_obj$steps[[3]]$sds["value"]

    # 5.1.4 LSTM Plan
    lag_setting <- 120 # = nrow(df_tst)
    batch_size <- 40
    train_length <- 440
    tsteps <- 1
    epochs <- epochs

    # 5.1.5 Train/Test Setup
    lag_train_tbl <- df_processed_tbl %>%
      mutate(value_lag = lag(value, n = lag_setting)) %>%
      filter(!is.na(value_lag)) %>%
      filter(key == "training") %>%
      tail(train_length)
    x_train_vec <- lag_train_tbl$value_lag
    x_train_arr <- array(data = x_train_vec, dim = c(length(x_train_vec), 1, 1))
    y_train_vec <- lag_train_tbl$value
    y_train_arr <- array(data = y_train_vec, dim = c(length(y_train_vec), 1))
    lag_test_tbl <- df_processed_tbl %>%
      mutate(
        value_lag = lag(value, n = lag_setting)
      ) %>%
      filter(!is.na(value_lag)) %>%
      filter(key == "testing")
    x_test_vec <- lag_test_tbl$value_lag
    x_test_arr <- array(data = x_test_vec, dim = c(length(x_test_vec), 1, 1))
    y_test_vec <- lag_test_tbl$value
    y_test_arr <- array(data = y_test_vec, dim = c(length(y_test_vec), 1))

    # 5.1.6 LSTM Model
    model <- keras_model_sequential()
    model %>%
      layer_lstm(units = 50,
                 input_shape = c(tsteps, 1),
                 batch_size = batch_size,
                 return_sequences = TRUE,
                 stateful = TRUE) %>%
      layer_lstm(units = 50,
                 return_sequences = FALSE,
                 stateful = TRUE) %>%
      layer_dense(units = 1)
    model %>%
      compile(loss = 'mae', optimizer = 'adam')

    # 5.1.7 Fitting LSTM
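    # (the LSTM layers are stateful, so state carries across batches; the loop
    # below runs one epoch at a time and resets state between epochs)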
    for (i in 1:epochs) {
      model %>% fit(x = x_train_arr,
                    y = y_train_arr,
                    batch_size = batch_size,
                    epochs = 1,
                    verbose = 1,
                    shuffle = FALSE)
      model %>% reset_states()
      cat("Epoch: ", i)
    }

    # 5.1.8 Predict and Return Tidy Data
    # Make Predictions
    pred_out <- model %>%
      predict(x_test_arr, batch_size = batch_size) %>%
      .[, 1]
    # Retransform values
    pred_tbl <- tibble(
      index = lag_test_tbl$index,
      value = (pred_out * scale_history + center_history)^2
    )
    # Combine actual data with predictions
    tbl_1 <- df_trn %>%
      add_column(key = "actual")
    tbl_2 <- df_tst %>%
      add_column(key = "actual")
    tbl_3 <- pred_tbl %>%
      add_column(key = "predict")
    # Create time_bind_rows() to solve dplyr issue
    time_bind_rows <- function(data_1, data_2, index) {
      index_expr <- enquo(index)
      bind_rows(data_1, data_2) %>%
        as_tbl_time(index = !! index_expr)
    }
    ret <- list(tbl_1, tbl_2, tbl_3) %>%
      reduce(time_bind_rows, index = index) %>%
      arrange(key, index) %>%
      mutate(key = as_factor(key))
    return(ret)
  }

  safe_lstm <- possibly(lstm_prediction, otherwise = NA)
  safe_lstm(split, epochs, ...)
}
#################################################
sample_predictions_lstm_tbl <- rolling_origin_resamples %>%
  mutate(predict = map(splits, predict_keras_lstm, epochs = 10))
sample_predictions_lstm_tbl
sample_predictions_lstm_tbl$predict
map_dbl(sample_predictions_lstm_tbl$predict, calc_rmse)
sample_rmse_tbl <- sample_predictions_lstm_tbl %>%
  mutate(rmse = map_dbl(predict, calc_rmse)) %>%
  select(id, rmse)
EDIT1: an element of sample_predictions_lstm_tbl$predict looks like this:
[[11]]
# A time tibble: 840 x 3
# Index: index
index value key
<date> <dbl> <fct>
1 1949-11-01 144. actual
2 1949-12-01 118. actual
3 1950-01-01 102. actual
4 1950-02-01 94.8 actual
5 1950-03-01 110. actual
6 1950-04-01 113. actual
7 1950-05-01 106. actual
8 1950-06-01 83.6 actual
9 1950-07-01 91 actual
10 1950-08-01 85.2 actual
# ... with 830 more rows
EDIT2: I came up with a "workaround", but I am getting different results from the article.
temp <- NULL
sample_rmse_tbl <- NULL
for (i in 1:length(sample_predictions_lstm_tbl$predict)) {
  temp <- calc_rmse(sample_predictions_lstm_tbl$predict[[i]])
  sample_rmse_tbl[[i]] <- temp
}
sample_rmse_tbl <- do.call(rbind.data.frame, sample_rmse_tbl)
sample_rmse_tbl %>%
  setNames(., c("metric", "estimator", "rmse")) %>%
  mutate(id = row_number()) %>%
  select(id, rmse)
The problem is that the function rmse() returns a tibble (with columns .metric, .estimator, and .estimate) rather than a single double value, which is why map_dbl() complains that the result is not a length 1 atomic vector. You need to pull the estimate out of that tibble with .$.estimate. However, I had to remove the possibly() call to make my solution work.
So the new function calc_rmse() looks like this:
calc_rmse <- function(prediction_tbl) {
  rmse_calculation <- function(data) {
    data %>%
      spread(key = key, value = value) %>%
      select(-index) %>%
      filter(!is.na(predict)) %>%
      rename(
        truth = actual,
        estimate = predict
      ) %>%
      rmse(truth, estimate) %>%
      .$.estimate
  }
  rmse_calculation(prediction_tbl)
}
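With this version of calc_rmse(), the original pipeline from the question runs as intended:
sample_rmse_tbl <- sample_predictions_lstm_tbl %>%
  mutate(rmse = map_dbl(predict, calc_rmse)) %>%
  select(id, rmse)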