Check whether the R package keras has compiled a model - r

When I run the following R script I get summary information about a keras model and its added layers, but no confirmation that the model has been compiled. How do I check whether the compile step has been completed?
library(keras)

model <- keras_model_sequential()
model %>%
  layer_dense(units = 64, activation = 'relu', input_shape = c(20)) %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 64, activation = 'relu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 10, activation = 'softmax') %>%
  compile(
    loss = 'categorical_crossentropy',
    optimizer = optimizer_sgd(lr = 0.01, decay = 1e-6,
                              momentum = 0.9, nesterov = TRUE),
    metrics = c('accuracy')
  )

summary(model)

You can check the model's built flag; it flips from FALSE to TRUE once compile() has run:
library(keras)

model <- keras_model_sequential()
model$built  # FALSE

model %>%
  layer_dense(units = 64, activation = 'relu', input_shape = c(20)) %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 64, activation = 'relu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_activation(activation = 'relu') %>%
  layer_dense(units = 10) %>%
  layer_activation(activation = 'softmax')
model$built  # FALSE

model %>%
  compile(
    loss = 'categorical_crossentropy',
    optimizer = optimizer_sgd(lr = 0.01, decay = 1e-6,
                              momentum = 0.9, nesterov = TRUE),
    metrics = c('accuracy')
  )
model$built  # TRUE
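If you want a check that is tied specifically to the compile step, another option (an assumption on my part about the underlying TensorFlow/Keras model object, not something shown in the original answer) is that compile() attaches an optimizer to the model, so the optimizer slot should be empty beforehand:

# Hedged alternative check: on a TensorFlow-backed keras model the optimizer
# is only attached by compile(), so it should be NULL (Python None) before then.
is.null(model$optimizer)   # TRUE before compile(), FALSE afterwards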

Related

How to implement Bayesian optimization with Keras tuneR

I am hoping to run Bayesian optimization for my neural network via keras tuner.
I have the following code so far:
build_model <- function(hp) {
  model <- keras_model_sequential()
  model %>%
    layer_dense(units = hp$Int('units', min_value = 10, max_value = 50, step = 10),
                activation = "relu",
                input_shape = dim(X_pca_scores_scaled)[[2]]) %>%
    layer_dropout(rate = hp$Float('rate', min_value = 0, max_value = 0.5, step = 0.1)) %>%
    layer_dense(units = hp$Int('units', min_value = 0, max_value = 50, step = 10),
                activation = "relu") %>%
    layer_dropout(rate = hp$Float('rate', min_value = 0, max_value = 0.5, step = 0.1)) %>%
    layer_dense(units = 1) %>%
    compile(
      optimizer = "adam",
      loss = "mse",
      metrics = c("mae"))
  return(model)
}

tuner <- kerastuneR::BayesianOptimization(
  objective = 'mae',
  max_trials = 30)

stop_early <- callback_early_stopping(monitor = "mae",
                                      patience = 5,
                                      min_delta = 0.25,
                                      mode = "min")

tuner %>% fit_tuner(np_array(X_pca_scores_scaled),
                    np_array(train_targets),
                    epochs = 30,
                    callbacks = c(stop_early))
The above code leads to the following error:
Error in py_get_attr_impl(x, name, silent) :
  AttributeError: 'BayesianOptimizationOracle' object has no attribute 'search'
I'm not sure what an oracle is, so I assume the problem lies somewhere in that part of my implementation.
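One thing worth checking, as an assumption on my part based on the Python keras-tuner API that kerastuneR wraps rather than anything stated in the question: when BayesianOptimization() is not given a model-building function it can hand back a bare oracle object, which has no search method, and fit_tuner() then fails exactly as above. A minimal sketch with the hypermodel passed in:

# Sketch only: assumes kerastuneR::BayesianOptimization() accepts a hypermodel
# argument, mirroring Python's kerastuner.BayesianOptimization.
library(kerastuneR)

tuner <- BayesianOptimization(
  hypermodel = build_model,   # the build function defined above
  objective  = 'mae',
  max_trials = 30)

tuner %>% fit_tuner(np_array(X_pca_scores_scaled),
                    np_array(train_targets),
                    epochs = 30,
                    callbacks = list(stop_early))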

How to control learning rate in KerasR in R

To fit a classification model in R, I have been using library(kerasR). To control the learning rate, kerasR says to use
compile(optimizer = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08,
                         decay = 0, clipnorm = -1, clipvalue = -1),
        loss = 'binary_crossentropy',
        metrics = c('categorical_accuracy'))
But it gives me an error like this:
Error in modules$keras.optimizers$Adam(lr = lr, beta_1 = beta_2,
  beta_2 = beta_2, : attempt to apply non-function
I also used keras_compile but still get the same error.
I can change the optimizer in compile, but the largest learning rate is 0.01 and I want to try 0.2.
model <- keras_model_sequential()
model %>%
  layer_dense(units = 512, activation = 'relu', input_shape = ncol(X_train)) %>%
  layer_dropout(rate = 0.2) %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.1) %>%
  layer_dense(units = 2, activation = 'sigmoid') %>%
  compile(
    optimizer = 'Adam',
    loss = 'binary_crossentropy',
    metrics = c('categorical_accuracy')
  )
I think the issue is that you are using two different libraries, kerasR and keras, together. You should use only one of them: you are using the keras_model_sequential function, which is from keras, and then you try to use the Adam function, which is from the kerasR library. You can find the differences between these two libraries here: https://www.datacamp.com/community/tutorials/keras-r-deep-learning#differences
The following code works for me, using only the keras library.
library(keras)

model <- keras_model_sequential()
model %>%
  layer_dense(units = 512, activation = 'relu', input_shape = ncol(X_train)) %>%
  layer_dropout(rate = 0.2) %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.1) %>%
  layer_dense(units = 2, activation = 'sigmoid') %>%
  compile(optimizer = optimizer_adam(lr = 0.2),
          loss = 'binary_crossentropy',
          metrics = c('accuracy'))
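One version-dependent caveat, added here as an assumption about newer installations rather than something from the original answer: recent keras/TensorFlow releases renamed the lr argument to learning_rate, so if lr is rejected, try:

# In newer keras/TensorFlow versions the argument is learning_rate rather than lr
model %>% compile(optimizer = optimizer_adam(learning_rate = 0.2),
                  loss = 'binary_crossentropy',
                  metrics = c('accuracy'))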

Keras neural network not fitting in R

I made a neural network in R using the keras package. I basically built the same model I had created in Python, using the same data in the same order. However, when I run it in R, the model doesn't seem to fit at all.
When I call predict on the model, it returns the same value regardless of the input.
I'm guessing the weights are zeroing out and it's just returning the bias.
Here's how I built the model:
model <- keras_model_sequential()
model %>%
  layer_dense(units = 256, activation = 'relu', input = c(18)) %>%
  layer_dense(units = 64, activation = 'relu') %>%
  layer_dropout(rate = 0.25) %>%
  layer_dense(units = 32, activation = 'relu') %>%
  layer_dropout(rate = 0.25) %>%
  layer_dense(units = 16, activation = 'relu') %>%
  layer_dropout(rate = 0.25) %>%
  layer_dense(units = 8, activation = 'relu') %>%
  layer_dense(units = 2, activation = 'softmax')
Here's the output when I call predict:
model %>%
  predict(nbainput_test_x)
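For what it's worth, here is a hedged sketch of the compile and fit steps that are not shown in the snippet; nbainput_train_x and nbainput_train_y are placeholder names, not from the original post, and note that the R keras argument for the first layer is input_shape rather than input:

# Sketch only: placeholder data names, assuming one-hot targets with 2 columns
model %>% compile(
  optimizer = 'rmsprop',
  loss = 'categorical_crossentropy',   # matches the 2-unit softmax output layer
  metrics = c('accuracy')
)
model %>% fit(
  nbainput_train_x,   # numeric matrix of 18 (ideally scaled) predictors
  nbainput_train_y,   # to_categorical() one-hot targets
  epochs = 30,
  batch_size = 32
)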

L1 and L2 regularization using the keras package in R?

library(keras)

build_model <- function() {
  model <- keras_model_sequential() %>%
    layer_dense(units = 64, activation = "relu",
                input_shape = dim(train_data)[[2]]) %>%
    regularizer_l1_l2(l1 = 0.01, l2 = 0.01) %>%
    layer_dense(units = 64, activation = "relu") %>%
    regularizer_l1_l2(l1 = 0.01, l2 = 0.01) %>%
    layer_dense(units = 1)
  model %>% compile(
    optimizer = "rmsprop",
    loss = "mse",
    metrics = c("mae")
  )
}

model <- build_model()
I am trying to apply L1 and L2 regularization using keras in R. However, I am getting an error:
Error in regularizer_l1_l2(., l1 = 0.01, l2 = 0.01) : unused argument (.)
The syntax I have used for the regularization is the same as in the link:
https://keras.rstudio.com/reference/regularizer_l1.html
Can anyone tell me what I am doing wrong?
This would be the correct syntax for L1 and L2 regularization using keras in R:
library(keras)

build_model <- function() {
  model <- keras_model_sequential() %>%
    layer_dense(units = 64,
                activation = "relu",
                kernel_regularizer = regularizer_l1_l2(l1 = 0.01, l2 = 0.01),
                input_shape = dim(train_data)[[2]]) %>%
    layer_dense(units = 64,
                activation = "relu",
                kernel_regularizer = regularizer_l1_l2(l1 = 0.01, l2 = 0.01)) %>%
    layer_dense(units = 1)
  model %>% compile(
    optimizer = "rmsprop",
    loss = "mse",
    metrics = c("mae")
  )
}
A reproducible example:
library(keras)

mnist <- dataset_mnist()
train_images <- mnist$train$x
train_labels <- mnist$train$y
test_images <- mnist$test$x
test_labels <- mnist$test$y

train_images <- array_reshape(train_images, c(60000, 28 * 28))
train_images <- train_images / 255
test_images <- array_reshape(test_images, c(10000, 28 * 28))
test_images <- test_images / 255

train_labels <- to_categorical(train_labels)
test_labels <- to_categorical(test_labels)

network <- keras_model_sequential() %>%
  layer_dense(units = 512,
              activation = "relu",
              kernel_regularizer = regularizer_l1_l2(l1 = 0.001, l2 = 0.001),
              input_shape = c(28 * 28)) %>%
  layer_dense(units = 10, activation = "softmax")

network %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)

network %>% fit(train_images,
                train_labels,
                epochs = 5,
                batch_size = 128)

metrics <- network %>% evaluate(test_images, test_labels)

> metrics
# output
$`loss`
[1] 0.6863746

$acc
[1] 0.921
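As a small follow-up, using only standard keras arguments: the same regularizer objects can also be applied to a layer's bias and to its output through the bias_regularizer and activity_regularizer arguments of layer_dense(), for example:

library(keras)

# Same pattern, penalizing the weights, the bias, and the layer's activations
model <- keras_model_sequential() %>%
  layer_dense(units = 64,
              activation = "relu",
              kernel_regularizer   = regularizer_l1_l2(l1 = 0.01, l2 = 0.01),
              bias_regularizer     = regularizer_l2(l = 0.01),
              activity_regularizer = regularizer_l1(l = 0.001),
              input_shape = c(28 * 28)) %>%
  layer_dense(units = 10, activation = "softmax")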

R-Keras - How come val_loss is not plotting

This is my first time playing with Keras. I ran the model to look at the loss and accuracy, but for some reason the plot is not showing val_loss.
My code:
model <- keras_model_sequential() %>%
  layer_dense(units = 256, activation = "relu", input_shape = dim(train.X)[[2]]) %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 128, activation = "relu") %>%
  layer_dropout(rate = 0.3) %>%
  layer_dense(units = 1, activation = "sigmoid")

model %>% compile(
  optimizer = "rmsprop",          # or configure it: optimizer_rmsprop(lr = 0.001)
  loss = "binary_crossentropy",   # custom loss -> loss_binary_crossentropy
  metrics = c("accuracy")         # metric_binary_accuracy
)

history <- model %>% fit(
  train.X,
  train.Y,
  epochs = 100,
  batch_size = 64,
  validation_data = list(x_val, y_val)
)
My results:
I would really appreciate it if someone could explain why val_loss is not being plotted.
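As a general debugging step, not specific to this data: the history object returned by fit() keeps every metric it recorded in history$metrics, so you can first check whether val_loss was recorded at all before worrying about the plot.

# If validation_data was accepted, val_loss / val_accuracy appear here
names(history$metrics)
str(history$metrics)

# plot() on the history object draws validation curves whenever they are present
plot(history)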
