Feeding several multivariate time series into a neural network in R seems to be a hard problem for me.
As far as I can tell there are three ways to do so, and they all have their pitfalls:
A nested tibble would be the most intuitive solution for R, but it does not seem to work at all.
With a 3D array you have to be careful about the ordering, as shown in the sketch below.
Converting a 3D array into a dataset and then creating a tfdatasets::feature_spec() results in an error.
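To make the ordering pitfall concrete, here is a minimal sketch (my own illustration): simplify2array() stacks a list of equal-sized matrices along the last dimension, so the series index ends up last and usually needs an aperm() before keras sees it.
m <- matrix(1:6, nrow = 3)                  # 3 time steps x 2 features
arr <- simplify2array(list(a = m, b = m, c = m, d = m))
dim(arr)                                    # 3 2 4: (steps, features, series)
arr2 <- aperm(arr, c(3, 1, 2))              # reorder to (series, steps, features)
dim(arr2)                                   # 4 3 2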
Here is a small example, which does not work at the moment:
I get the data as a nested tibble, so I start from there.
library(magrittr)
reticulate::use_python("/usr/local/bin/python")
data <- tibble::tibble(
  Timeseires = list(
    A = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    ),
    C = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    ),
    B = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    ),
    D = tibble::tibble(
      Period = rep(0:4, each = 10),
      MeasuremntA = runif(50, 20, 30),
      MeasuremntB = runif(50, 0.001, 0.01)
    )
  )
)

measurement.data <- data %>%
  dplyr::mutate(Timeseires = purrr::map(
    .x = Timeseires,
    .f = simplify2array
  )) %>%
  .$Timeseires %>%
  simplify2array() %>%
  tfdatasets::tensor_slices_dataset()

feature.spec.measurment <- measurement.data %>%
  tfdatasets::feature_spec(.) %>%
  tfdatasets::step_numeric_column(
    tfdatasets::all_numeric(),
    normalizer_fn = tfdatasets::scaler_standard()
  ) %>%
  tfdatasets::fit()
It is crashing with the following error:
Error: Unable to resolve features for dataset that does not have named outputs
I've tried adding some names to it, but without success.
Do you have any idea how to feed several multivariate time series into a neural network in R with keras, and thus get the example to work?
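For reference, the naming attempt looked roughly like this (a sketch from memory; it assumes tfdatasets::tensor_slices_dataset() accepts a named list, and it still produced the same error for me):
arr <- data$Timeseires %>%
  purrr::map(simplify2array) %>%
  simplify2array()
# wrap the array in a named list so the dataset has a named output
measurement.named <- tfdatasets::tensor_slices_dataset(list(measurements = arr))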
The remainder of the example looks like this:
##### Data Prep for meta.data #####
meta.data <- data %>%
  dplyr::select(-Timeseires) %>%
  tfdatasets::tensor_slices_dataset()

feature.spec.meta <- meta.data %>%
  tfdatasets::feature_spec(Result ~ .) %>%
  tfdatasets::step_numeric_column(
    tfdatasets::all_numeric(),
    normalizer_fn = tfdatasets::scaler_standard()
  ) %>%
  tfdatasets::step_categorical_column_with_vocabulary_list(
    tfdatasets::all_nominal()
  ) %>%
  tfdatasets::step_indicator_column(
    tfdatasets::all_nominal()
  ) %>%
  tfdatasets::fit()
input.measurement <- keras::layer_input(shape = c(3, 4))
lstm.out <- input.measurement %>%
  keras::layer_lstm(units = 32)

#####
input.meta <- data %>%
  dplyr::select(-Timeseires, -Result) %>%
  tfdatasets::layer_input_from_dataset()
dense.out <- input.meta %>%
  keras::layer_dense_features(feature_columns = tfdatasets::dense_features(feature.spec.meta))

#####
output <- keras::layer_concatenate(c(dense.out, lstm.out)) %>%
  keras::layer_dense(units = 64, activation = "relu") %>%
  keras::layer_dense(units = 64, activation = "relu") %>%
  keras::layer_dense(units = 64, activation = "relu") %>%
  keras::layer_dense(units = 1, activation = "sigmoid")

output.auxiliary <- input.measurement %>%
  keras::layer_dense(units = 1, activation = "sigmoid", name = "aux_output")

model <- keras::keras_model(
  inputs = c(input.meta, input.measurement),
  outputs = c(output, output.auxiliary)
)

model %>% keras::compile(
  loss = keras::loss_binary_crossentropy,
  optimizer = "adam",
  metrics = "binary_accuracy"
)

summary(model)
history <- model %>%
  keras::fit(
    x = list(
      # tfdatasets::dataset_use_spec(meta.data, spec = feature.spec.meta),
      # tfdatasets::dataset_use_spec(measurement.data, feature.spec.measurment)
      data %>% dplyr::select(-Timeseires, -Result),
      data %>%
        dplyr::mutate(Timeseires = purrr::map(
          .x = Timeseires,
          .f = simplify2array
        )) %>%
        .$Timeseires %>%
        simplify2array()
    ),
    y = list(
      data$Result,
      data$Result
    ),
    epochs = 10,
    validation_split = 0.3
  )
At the moment I've chosen this input shape because it at least gets the example to work, but it is wrong: dim(measurement.data) is 50 3 4, so I should actually use keras::layer_input(shape = c(50, 3)), since the number of time series (matrices) will vary at evaluation time while the shape of each series stays the same as in training.
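For completeness, reordering the array so the series index comes first gives the (samples, timesteps, features) layout that I believe keras expects; a sketch:
measurement.array <- data$Timeseires %>%
  purrr::map(simplify2array) %>%
  simplify2array() %>%
  aperm(c(3, 1, 2)) # from (50, 3, 4) to (4, 50, 3): (series, timesteps, features)
dim(measurement.array) # 4 50 3
input.measurement <- keras::layer_input(shape = c(50, 3))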
Related
After following the keras tutorial on the functional API, I'm trying to merge two neural networks; however, I keep getting this error:
Error in dim(x) <- length(x) :
invalid first argument, must be vector (list or atomic)
This is the code (note: x_train is a tibble and train_gen is a flow_images_from_directory() object):
input_img <- layer_input(shape = c(140, 140, 3), name = "image")
input_tab <- layer_input(shape = c(ncol(x_train)), name = "tabular")

cnn <- input_img %>%
  layer_conv_2d(32, kernel_size = c(3, 3), padding = "same",
                activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(64, kernel_size = c(3, 3), activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_flatten() %>%
  layer_dropout(0.5)

nn <- input_tab %>%
  layer_dense(128) %>%
  layer_activation("relu") %>%
  layer_dense(64) %>%
  layer_activation("relu") %>%
  layer_dense(32) %>%
  layer_activation("relu") %>%
  layer_dense(16) %>%
  layer_activation("relu")

output <- layer_concatenate(c(cnn, nn)) %>%
  layer_dense(16) %>%
  layer_activation("relu") %>%
  layer_dense(6) %>%
  layer_activation("sigmoid")

model <- keras_model(inputs = c(input_img, input_tab),
                     outputs = output) %>%
  compile(loss = "categorical_crossentropy", optimizer = optimizer_adam(),
          metrics = "accuracy")

callbacks <- callback_early_stopping(monitor = "val_loss", patience = 5)

final_history <- model %>% fit(
  x = list(train_gen, x_train),
  y = list(train_gen$classes, x_train),
  epochs = 50,
  callbacks = callbacks,
  validation_split = 0.2)
Error in dim(x) <- length(x) :
  invalid first argument, must be vector (list or atomic)
After long searching online, I could not find any help on this.
Why is this error produced?
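For reference, my current understanding (an assumption on my part, not confirmed by the docs) is that fit() on a multi-input model wants a named list of arrays whose names match the layer_input() names, and that a generator cannot be mixed into that list. A sketch, where x_img is a hypothetical array of images with dim (n, 140, 140, 3) and y_labels a hypothetical one-hot matrix of the six classes:
# hypothetical arrays standing in for the generator's output
final_history <- model %>% fit(
  x = list(image = x_img, tabular = as.matrix(x_train)),
  y = y_labels,
  epochs = 50,
  callbacks = callbacks,
  validation_split = 0.2
)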
I'm looking to run a function on each group of a dataset and bind the output to the existing set, staying within the tidyverse. After the example set, I've added how I do it right now, which requires splitting the set and running lapply (I want to move everything towards the tidyverse).
library(TTR)

test = data.frame('high' = rnorm(100, 10, 0.1), 'low' = rnorm(100, 0, 0.1), 'close' = rnorm(100, 5, 0.1))

stoch(test,
      nFastK = 14, nFastD = 3, nSlowD = 3,
      maType = list(list(SMA), list(SMA), list(SMA)),
      bounded = TRUE,
      smooth = 1)
Here is how I do it right now with lists:
library(dplyr) # for %>%, select() and bind_rows() used below

get_stoch = function(dat_) {
  stochs = stoch(dat_ %>% select(-ticker), nFastK = 14, nFastD = 3, nSlowD = 3,
                 maType = list(list(SMA), list(SMA), list(SMA)),
                 bounded = TRUE, smooth = 1)
  dat_ = cbind(dat_, stochs)
}

test = data.frame('ticker' = c(rep('A', 50), rep('B', 50)),
                  'high' = rnorm(100, 10, 0.1), 'low' = rnorm(100, 0, 0.1), 'close' = rnorm(100, 5, 0.1)) %>%
  split(., .$ticker) %>%
  lapply(., get_stoch) %>%
  bind_rows()
If you want to translate your code to the tidyverse, you can use:
library(dplyr)
library(purrr)
df %>% group_split(ticker) %>% map_dfr(get_stoch)
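An equivalent nest/unnest formulation (a sketch; it assumes tidyr >= 1.0 and that stoch()'s matrix output cbind()s cleanly onto each group) would be:
library(tidyr)
df %>%
  nest(data = -ticker) %>%
  mutate(data = map(data, ~ cbind(.x, stoch(.x,
                                            nFastK = 14, nFastD = 3, nSlowD = 3,
                                            maType = list(list(SMA), list(SMA), list(SMA)),
                                            bounded = TRUE, smooth = 1)))) %>%
  unnest(data)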
You can use plyr::ddply to run a split-apply-bind method in tidyverse-like language:
df <- data.frame(ticker = c(rep('A', 50), rep('B', 50)),
                 high = rnorm(100, 10, 0.1),
                 low = rnorm(100, 0, 0.1),
                 close = rnorm(100, 5, 0.1))

test1 <- df %>%
  split(., .$ticker) %>%
  lapply(., get_stoch) %>%
  bind_rows()

test2 <- df %>%
  plyr::ddply("ticker", get_stoch)

identical(test1, test2)
#> [1] TRUE
I have an array of data, around 1000+ cases (rows) and about 650+ columns of information (V1-V5 in the example below), which I want to use for a keras 1D-CNN that has 3 classifications (class in the example below).
I'm having trouble trying to format the data in a way that will fit the model that I have written.
The data looks similar to this:
set.seed(999)
case <- c('A','B','C','D','E','F','G','H')
class <- c('N','N','T','R','T','R','N','T')
V1 <- runif(8, min = 0, max = 1)
V2 <- runif(8, min = 0, max = 1)
V3 <- runif(8, min = 0, max = 1)
V4 <- runif(8, min = 0, max = 1)
V5 <- runif(8, min = 0, max = 1)
df <- data.frame(case, class, V1, V2, V3, V4, V5)
df
The keras 1D-CNN I have written is this:
build.model <- function() {
  model <- keras_model_sequential()
  model %>%
    layer_conv_1d(
      filter = 32, kernel_size = c(5), padding = "same",
      input_shape = c(100, 3)
    ) %>%
    layer_activation("relu") %>%
    # Use max pooling
    layer_max_pooling_1d(pool_size = c(2)) %>%
    # Flatten max filtered output into feature vector
    # and feed into dense layer
    layer_flatten() %>%
    layer_dense(128) %>%
    layer_activation('relu') %>%
    layer_dense(3) %>%
    layer_activation('softmax')

  model %>% compile(
    loss = 'categorical_crossentropy',
    optimizer = 'rmsprop',
    metrics = 'accuracy'
  )
  return(model)
}
model <- build.model()
print(model)
When I try to pass the df through the model it doesn't work.
I know it's to do with the format of the data, but I'm at a loss as to how to rectify this.
Any tips/pointers would be greatly appreciated.
TIA
Miki
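EDIT: for reference, here is the kind of reshaping I think is needed (a sketch on my part, assuming each case is one sample whose V1-V5 values form a single-channel sequence, so input_shape would be c(5, 1) for this toy data rather than c(100, 3)):
library(keras)
# predictor matrix: 8 cases x 5 measurements
x <- as.matrix(df[, c("V1", "V2", "V3", "V4", "V5")])
# reshape to the (samples, steps, channels) layout layer_conv_1d() expects
x_arr <- array(x, dim = c(nrow(x), ncol(x), 1))
# one-hot encode the three classes (N/R/T -> columns 1/2/3)
y <- to_categorical(as.integer(factor(df$class)) - 1, num_classes = 3)
dim(x_arr) # 8 5 1
dim(y)     # 8 3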
I am trying to figure out the correct method of applying a keras model to each of my lists. I have used the iris dataset and created 4 lists, and the goal is to correctly predict versicolor or virginica (I omit setosa because I want a binary classification model).
data(iris)

iris <- iris %>%
  mutate(
    splt = sample(4, size = nrow(.), replace = TRUE),
    binary = case_when(
      Species == "versicolor" ~ 0,
      Species == "virginica" ~ 1
    )
  ) %>%
  filter(Species != "setosa") %>%
  split(., .$splt)

iris_x_train <- iris %>%
  map(., ~select(., Sepal.Length, Sepal.Width, Petal.Length, Petal.Width) %>%
        as.matrix())

iris_y_train <- iris %>%
  map(., ~select(., binary) %>%
        to_categorical(2))

NN_model <- keras_model_sequential() %>%
  layer_dense(units = 4, activation = 'relu', input_shape = 4) %>%
  layer_dense(units = 2, activation = 'softmax')

NN_model %>%
  summary()

NN_model %>%
  compile(
    loss = 'binary_crossentropy',
    optimizer_sgd(lr = 0.01, momentum = 0.9),
    metrics = c('accuracy')
  )
My problem occurs here. When I apply the below code:
NN_model %>%
  future_map(., ~future_map2(
    .x = iris_x_train,
    .y = iris_y_train,
    ~fit(
      x = .x,
      y = .y,
      epochs = 5,
      batch_size = 20,
      validation_split = 0
    )
  ))
I get this error:
Error in py_get_item_impl(x, key, FALSE) :
  TypeError: 'Sequential' object does not support indexing
When I apply this code:
NN_model %>%
  future_map2(
    .x = iris_x_train,
    .y = iris_y_train,
    ~fit(
      x = .x,
      y = .y,
      epochs = 5,
      batch_size = 20,
      validation_split = 0
    )
  )
I get this error:
~fit(x = .x, y = .y, epochs = 5, batch_size = 20, validation_split = 0)
Error in py_call_impl(callable, dots$args, dots$keywords) :
  Evaluation error: Unable to convert R object to Python type.
How can I map a keras model to each of the 4 datasets?
library(keras)
library(tensorflow)
library(furrr)
library(purrr)
The following works for the first list:
NN_model %>%
  fit(
    x = iris_x_train[[1]],
    y = iris_y_train[[1]],
    epochs = 50,
    batch_size = 20,
    validation_split = 0
  )
EDIT: I seem to have solved it.
Putting the NN_model inside the fit() function appears to work.
future_map2(
  .x = iris_x_train,
  .y = iris_y_train,
  ~fit(NN_model,
       .x,
       .y,
       epochs = 5,
       batch_size = 20,
       validation_split = 0
  )
)
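One caveat (my assumption, not verified in depth): because NN_model is a single object, the four fits above all keep updating the same weights, so they are not independent models. If independent fits are wanted, keras::clone_model() could give each list its own copy; a sketch using plain map2(), since Python objects may not transfer across future workers:
fits <- purrr::map2(
  iris_x_train,
  iris_y_train,
  ~ {
    m <- keras::clone_model(NN_model) # fresh, re-initialised copy of the architecture
    m %>% keras::compile(
      loss = 'binary_crossentropy',
      optimizer = keras::optimizer_sgd(lr = 0.01, momentum = 0.9),
      metrics = c('accuracy')
    )
    keras::fit(m, .x, .y, epochs = 5, batch_size = 20, validation_split = 0)
  }
)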
By forecasting errors, I mean the differences between predicted and actual values.
I am doing a time series analysis using a deep learning model called long short-term memory (LSTM), based on this great article. The author distributed the data set into 11 samples to train the model and then make future predictions. The keras package is required to run this model; it uses the TensorFlow backend.
What I am trying to do is to get a confidence level for any predicted value. For example, let's say the model predicts that there will be 56 sunspots on Friday. I'd like to find out the probability that the number of sunspots is more than the average of 50 (this is just an arbitrary number).
A possible solution I can think of for this question (please let me know if there is a better way to solve it) is to get the distribution of the errors (the differences between predicted and actual values) and then calculate the Z-score and look up the probability, assuming a normal distribution. In my example, the error is 6 (56 - 50).
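In code, the Z-score lookup I have in mind would be something like this (a sketch; errors is a placeholder for the real vector of predicted minus actual values):
# placeholder error vector; the real one would come from the backtest samples
errors <- rnorm(100, mean = 0, sd = 4)
# model predicts 56; actual > 50 exactly when (predicted - actual) < 6
z <- (6 - mean(errors)) / sd(errors)
pnorm(z) # P(actual > 50), assuming normally distributed errors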
In the above-mentioned article, the 11 sample predictions (sample_predictions_lstm_tbl) are in a tibble with classes "rolling_origin" "rset" "tbl_df" "tbl" "data.frame". I'd like to know if there is a way to extract the errors (predicted values - actual values) from all of the samples and transform them into a single data frame so that I can plot a histogram of errors.
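For the extraction itself, here is a sketch of what I have in mind (an assumption: each element of the predict column is the tibble returned by predict_keras_lstm(), with key values "actual" and "predict" and matching testing-period indices):
errors_tbl <- sample_predictions_lstm_tbl %>%
  mutate(error = map(predict, function(tbl) {
    pred <- tbl %>% filter(key == "predict")
    act  <- tbl %>% filter(key == "actual", index %in% pred$index)
    tibble(index = pred$index, error = pred$value - act$value)
  })) %>%
  select(id, error) %>%
  tidyr::unnest(error)

# histogram of all errors across the 11 samples
ggplot(errors_tbl, aes(error)) +
  geom_histogram(bins = 30)
The full code from the article follows: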
# Core Tidyverse
library(tidyverse)
library(glue)
library(forcats)
# Time Series
library(timetk)
library(tidyquant)
library(tibbletime)
# Visualization
library(cowplot)
# Preprocessing
library(recipes)
# Sampling / Accuracy
library(rsample)
library(yardstick)
# Modeling
library(keras)
# Install Keras if you have not installed before
install_keras()
sun_spots <- datasets::sunspot.month %>%
  tk_tbl() %>%
  mutate(index = as_date(index)) %>%
  as_tbl_time(index = index)

# Distribute the samples into 11 sets
periods_train <- 12 * 50
periods_test <- 12 * 10
skip_span <- 12 * 20

rolling_origin_resamples <- rolling_origin(
  sun_spots,
  initial = periods_train,
  assess = periods_test,
  cumulative = FALSE,
  skip = skip_span
)

split <- rolling_origin_resamples$splits[[1]] # a single split, for testing the function below
# Backtesting on all samples
predict_keras_lstm <- function(split, epochs = 300, ...) {

  lstm_prediction <- function(split, epochs, ...) {

    # 5.1.2 Data Setup
    df_trn <- training(split)
    df_tst <- testing(split)
    df <- bind_rows(
      df_trn %>% add_column(key = "training"),
      df_tst %>% add_column(key = "testing")
    ) %>%
      as_tbl_time(index = index)

    # 5.1.3 Preprocessing
    rec_obj <- recipe(value ~ ., df) %>%
      step_sqrt(value) %>%
      step_center(value) %>%
      step_scale(value) %>%
      prep()
    df_processed_tbl <- bake(rec_obj, df)
    center_history <- rec_obj$steps[[2]]$means["value"]
    scale_history <- rec_obj$steps[[3]]$sds["value"]

    # 5.1.4 LSTM Plan (epochs is taken from the function argument)
    lag_setting <- 120 # = nrow(df_tst)
    batch_size <- 40
    train_length <- 440
    tsteps <- 1
    # 5.1.5 Train/Test Setup
    lag_train_tbl <- df_processed_tbl %>%
      mutate(value_lag = lag(value, n = lag_setting)) %>%
      filter(!is.na(value_lag)) %>%
      filter(key == "training") %>%
      tail(train_length)

    x_train_vec <- lag_train_tbl$value_lag
    x_train_arr <- array(data = x_train_vec, dim = c(length(x_train_vec), 1, 1))
    y_train_vec <- lag_train_tbl$value
    y_train_arr <- array(data = y_train_vec, dim = c(length(y_train_vec), 1))

    lag_test_tbl <- df_processed_tbl %>%
      mutate(
        value_lag = lag(value, n = lag_setting)
      ) %>%
      filter(!is.na(value_lag)) %>%
      filter(key == "testing")

    x_test_vec <- lag_test_tbl$value_lag
    x_test_arr <- array(data = x_test_vec, dim = c(length(x_test_vec), 1, 1))
    y_test_vec <- lag_test_tbl$value
    y_test_arr <- array(data = y_test_vec, dim = c(length(y_test_vec), 1))
    # 5.1.6 LSTM Model
    model <- keras_model_sequential()
    model %>%
      layer_lstm(units = 50,
                 input_shape = c(tsteps, 1),
                 batch_size = batch_size,
                 return_sequences = TRUE,
                 stateful = TRUE) %>%
      layer_lstm(units = 50,
                 return_sequences = FALSE,
                 stateful = TRUE) %>%
      layer_dense(units = 1)

    model %>%
      compile(loss = 'mae', optimizer = 'adam')

    # 5.1.7 Fitting LSTM
    for (i in 1:epochs) {
      model %>% fit(x = x_train_arr,
                    y = y_train_arr,
                    batch_size = batch_size,
                    epochs = 1,
                    verbose = 1,
                    shuffle = FALSE)
      model %>% reset_states()
      cat("Epoch: ", i)
    }
    # 5.1.8 Predict and Return Tidy Data
    # Make Predictions
    pred_out <- model %>%
      predict(x_test_arr, batch_size = batch_size) %>%
      .[, 1]

    # Retransform values
    pred_tbl <- tibble(
      index = lag_test_tbl$index,
      value = (pred_out * scale_history + center_history)^2
    )

    # Combine actual data with predictions
    tbl_1 <- df_trn %>%
      add_column(key = "actual")
    tbl_2 <- df_tst %>%
      add_column(key = "actual")
    tbl_3 <- pred_tbl %>%
      add_column(key = "predict")

    # Create time_bind_rows() to solve dplyr issue
    time_bind_rows <- function(data_1, data_2, index) {
      index_expr <- enquo(index)
      bind_rows(data_1, data_2) %>%
        as_tbl_time(index = !!index_expr)
    }

    ret <- list(tbl_1, tbl_2, tbl_3) %>%
      reduce(time_bind_rows, index = index) %>%
      arrange(key, index) %>%
      mutate(key = as_factor(key))

    return(ret)
  }

  safe_lstm <- possibly(lstm_prediction, otherwise = NA)

  safe_lstm(split, epochs, ...)
}
# Modified epochs to 10 to reduce processing time
predict_keras_lstm(split, epochs = 10)
# Map to all samples
sample_predictions_lstm_tbl <- rolling_origin_resamples %>%
  mutate(predict = map(splits, predict_keras_lstm, epochs = 5))