Now I am trying to build a deep learning model as a classifier. My input is a 2D data frame, and I was wondering if it is possible to use a CNN layer in the model.
My data looks like this:
x_train <- data.frame(pain = c(rep(c(0,1), c(4,5))),
gender = c(rep(c(0,1), c(3,6))),
age = c(runif(9,0,1)))
y_train <- data.frame(illness = c(rep(c(0,1), c(2,2)), rep(c(0,1), c(3,2))))
Now I construct the model like this:
model <- keras_model_sequential() %>%
layer_conv_1d(filters = 1, kernel_size = 2, activation = "elu",
input_shape = c(3,1)) %>%
# layer_max_pooling_2d(pool_size = c(2,2)) %>%
layer_conv_1d(filters = 1, kernel_size = 2, activation = "elu") %>%
layer_max_pooling_1d(pool_size = 2) %>%
layer_dropout(0.5) %>%
layer_dense(
units = 5,
activation = "elu"
) %>%
layer_dropout(0.3) %>%
layer_dense(
units = 3,
activation = "elu"
) %>%
layer_dropout(0.1) %>%
layer_dense(units = 1, activation = "elu")
model %>% compile(
loss = "mean_squared_error",
optimizer = "SGD",
metrics = c('mae', 'acc') # "accuracy"
)
But it doesn't work; it tells me it needs a 3-dimensional input:
Error in py_call_impl(callable, dots$args, dots$keywords) :
ValueError: Error when checking input: expected conv1d_33_input to have 3 dimensions, but got array with shape (9, 3)
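For context, layer_conv_1d() seems to expect input of shape (samples, steps, channels), so my 9 x 3 data frame would need an extra axis. A minimal sketch of the reshape I think it is asking for (untested, treating the three columns as steps with a single channel):
x_train_arr <- array_reshape(as.matrix(x_train), dim = c(nrow(x_train), 3, 1))
dim(x_train_arr)  # 9 3 1
and then I would pass x_train_arr to fit() instead of the data frame.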
Could anyone give me some advice about that? Thanks a lot for your help : )
I have been struggling to get printed output in my R console for each epoch when using Keras, even when verbose is set to 1 or 2.
Please see the following code:
cifar <- dataset_cifar10()
x_train <- cifar$train$x/255
x_test <- cifar$test$x/255
y_train <- to_categorical(cifar$train$y, num_classes = 10)
y_test <- to_categorical(cifar$test$y, num_classes = 10)
model <- keras_model_sequential() %>%
layer_conv_2d(filters = 16, kernel_size = 2, padding = 'same', activation = 'relu', input_shape = c(32,32,3)) %>%
layer_max_pooling_2d(pool_size = 2) %>%
layer_conv_2d(filters = 32, kernel_size = 2, padding = 'same', activation = 'relu') %>%
layer_max_pooling_2d(pool_size = 2) %>%
layer_conv_2d(filters = 64, kernel_size = 2, padding = 'same', activation = 'relu') %>%
layer_max_pooling_2d(pool_size = 2) %>%
layer_dropout(0.3) %>%
layer_flatten() %>%
layer_dense(500, activation = 'relu') %>%
layer_dropout(0.4) %>%
layer_dense(10, activation = 'softmax')
model %>% compile(loss = 'categorical_crossentropy', optimizer = 'rmsprop', metrics= 'accuracy')
checkpointer <- callback_model_checkpoint(filepath = 'model.weights.best.hdf5', verbose = 1, save_best_only = TRUE)
k <- model %>% fit(x_train, y_train, batch_size = 32, epochs = 5, validation_data = list(x_test, y_test), callbacks = checkpointer, verbose = 1, shuffle = TRUE)
Whenever I run my last line of code, I see no printed output for each epoch.
I am looking for an output like the following each time an epoch is completed:
Epoch 1/5
Epoch 00000: val_loss improved from inf to 1.35820, saving model to model.weights.best.hdf5
46s - loss: 1.6192 - acc: 0.4140 - val_loss: 1.3582 - val_acc: 0.5166
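In case it helps show what I'm after, the closest I have come is printing the metrics myself with a lambda callback. This is only a sketch (it assumes the logs list carries loss/acc/val_loss/val_acc, as in the output above), not the built-in progress output I want:
print_epoch <- callback_lambda(
  on_epoch_end = function(epoch, logs) {
    cat(sprintf("Epoch %d - loss: %.4f - acc: %.4f - val_loss: %.4f - val_acc: %.4f\n",
                epoch + 1, logs$loss, logs$acc, logs$val_loss, logs$val_acc))
  }
)
# passed alongside the checkpointer: callbacks = list(checkpointer, print_epoch)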
Any help at all would be greatly appreciated!
What is the cause of the error in the following code?
library(magrittr)
x_data <- matrix(data = runif(500,0,1),nrow = 50,ncol = 5)
y_data <- ifelse(runif(50,0,1) > 0.6, 1L,0L) %>% as.matrix()
x_data2 <- matrix(data = runif(500,0,1),nrow = 50,ncol = 5)
y_data2 <- ifelse(runif(50,0,1) > 0.6, 1L,0L) %>% as.matrix()
library(keras)
library(tensorflow)
library(kerastuneR)
build_model = function(hp) {
model = keras_model_sequential()
model %>% layer_dense(units = hp$Int('units',
min_value = 32,
max_value = 512,
step= 32),input_shape = ncol(x_data),
activation = 'relu') %>%
layer_dense(units = 1, activation = 'softmax') %>%
compile(
optimizer = tf$keras$optimizers$Adam(
hp$Choice('learning_rate',
values=c(1e-2, 1e-3, 1e-4))),
loss = 'binary_crossentropy',
metrics = 'accuracy')
return(model)
}
tuner = RandomSearch(
build_model,
objective = 'val_accuracy',
max_trials = 5,
executions_per_trial = 3,
directory = 'my_dir',
project_name = 'helloworld')
tuner %>% search_summary()
tuner %>% fit_tuner(x_data,y_data,
epochs = 5,
validation_data = list(x_data2,y_data2))
result = kerastuneR::plot_tuner(tuner)
best_5_models = tuner %>% get_best_models(5)
best_5_models[[1]] %>% plot_keras_model()
Error in py_call_impl(callable, dots$args, dots$keywords) :
ValueError: Objective value missing in metrics reported to the Oracle, expected: ['val_accuracy'], found: dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])
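From the error message it looks like the tuner reports the metric as 'val_acc' while the objective asks for 'val_accuracy'. A sketch of the change I would try, simply aligning the two names (untested):
tuner = RandomSearch(
  build_model,
  objective = 'val_acc',   # match the key listed in the error
  max_trials = 5,
  executions_per_trial = 3,
  directory = 'my_dir',
  project_name = 'helloworld')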
I am hoping to run Bayesian optimization for my neural network via keras tuner.
I have the following code so far:
build_model <- function(hp) {
model <- keras_model_sequential()
model %>% layer_dense(units = hp$Int('units', min_value = 10, max_value = 50, step = 10),
activation = "relu",
input_shape = dim(X_pca_scores_scaled)[[2]]) %>%
layer_dropout(rate = hp$Float('rate', min_value = 0, max_value = 0.5, step = 0.1)) %>%
layer_dense(units = hp$Int('units', min_value = 0, max_value = 50, step = 10),
activation = "relu") %>%
layer_dropout(rate = hp$Float('rate', min_value = 0, max_value = 0.5, step = 0.1)) %>%
layer_dense(units = 1) %>%
compile(
optimizer = "adam",
loss = "mse",
metrics = c("mae"))
return(model)
}
tuner <- kerastuneR::BayesianOptimization(
objective = 'mae',
max_trials = 30)
stop_early <- callback_early_stopping(monitor = "mae",
patience = 5,
min_delta = 0.25,
mode = "min")
tuner %>% fit_tuner(np_array(X_pca_scores_scaled),
np_array(train_targets),
epochs = 30,
callbacks = c(stop_early))
The above code will lead to the following error:
Error in py_get_attr_impl(x, name, silent) :
AttributeError: 'BayesianOptimizationOracle' object has no attribute 'search'
I'm not sure what an oracle is, so I suspect the problem is somewhere in my implementation around that.
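My current guess, since the error mentions the oracle rather than a tuner, is that BayesianOptimization() also needs the model-building function. A sketch of what I would try (assuming kerastuneR::BayesianOptimization() accepts the hypermodel as its first argument, analogous to RandomSearch(); untested):
tuner <- kerastuneR::BayesianOptimization(
  build_model,          # hypothetical fix: pass the model-building function
  objective = 'mae',
  max_trials = 30)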
I have been trying to build a multi-input model in Keras. One input branch would be images and the second one some metadata for the corresponding images.
For the images I need a generator function that feeds in batches of images. The metadata is in tabular form.
Now I am wondering how I should pass the data to the model so that the right image is processed together with its respective metadata. For your information, this will be a regression task.
The input data I have:
Images in dir1/
A data frame with the paths and features:
path feature1 feature2 target
image1.jpg 23.5 100 16
image2.jpg 25.0 88 33
The code I have so far:
The generator function for the images:
train_datagen <- image_data_generator(rescale = 1/255)
train_generator <- flow_images_from_dataframe(
dataframe = joined_path_with_metadata,
directory = 'data_dir',
x_col = "path",
y_col = "train",
generator = train_datagen,
target_size = c(150, 150),
batch_size = 20,
color_mode = 'rgb',
class_mode = "sparse"
)
model definition:
vision_model <- keras_model_sequential()
vision_model %>%
layer_conv_2d(filters = 64,
kernel_size = c(3, 3),
activation = 'relu',
padding = 'same',
input_shape = c(150, 150, 3)) %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_flatten()
# Now let's get a tensor with the output of our vision model:
image_input <- layer_input(shape = c(150, 150, 3))
encoded_image <- image_input %>% vision_model
# ANN for tabular data
tabular_input <- layer_input(shape = ncol(dataframe), dtype = 'float32')
mlp_model <- tabular_input %>%
layer_dense(
units = 16,
kernel_initializer = "uniform",
activation = "relu") # Dropout to prevent overfitting
layer_dropout(rate = 0.1) %>%
layer_dense(
units = 32,
kernel_initializer = "uniform",
activation = "relu") %>%
# concatenate the metadata and the image vector then
# train a linear regression on it
output <- layer_concatenate(c(mlp_model, encoded_image)) %>%
layer_dense(units = 1, activation='linear')
# This is the final model:
vqa_model <- keras_model(inputs = c(image_input, tabular_input), outputs = output)
compile:
vqa_model %>% compile(
optimizer = 'adam',
loss = 'mean_squared_error',
metrics = c('mean_squared_error')
)
The last step would be to fit the model. I am not sure how to do this in a way that guarantees each row of features is matched with the corresponding image as the images are read in batches.
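What I have sketched so far for this step is a small wrapper generator that pulls an image batch and slices the matching rows of metadata and targets; it assumes the image generator is created with shuffle = FALSE so the batch order follows the data frame rows (hypothetical helper, untested):
combined_gen <- function(img_gen, meta, targets, batch_size = 20) {
  i <- 0L
  n <- nrow(meta)
  function() {
    batch <- generator_next(img_gen)                           # list(images, labels)
    idx <- (i * batch_size + 1):min((i + 1L) * batch_size, n)  # rows matching this batch
    i <<- (i + 1L) %% ceiling(n / batch_size)
    list(list(batch[[1]], as.matrix(meta[idx, ])), targets[idx])
  }
}
vqa_model %>% fit_generator(
  combined_gen(train_generator,
               joined_path_with_metadata[, c("feature1", "feature2")],
               joined_path_with_metadata$target),
  steps_per_epoch = ceiling(nrow(joined_path_with_metadata) / 20),
  epochs = 10)
Is this the right direction, or is there a built-in way to keep the images and metadata aligned?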
I have an error using fit_generator in R...
Here's my code:
model <- keras_model_sequential()
model %>%
layer_conv_2d(32, c(3,3), input_shape = c(64, 64, 3)) %>%
layer_activation("relu") %>%
layer_max_pooling_2d(pool_size = c(2,2)) %>%
layer_conv_2d(32, c(3, 3)) %>%
layer_activation("relu") %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_flatten() %>%
layer_dense(128) %>%
layer_activation("relu") %>%
layer_dense(128) %>%
layer_activation("relu") %>%
layer_dense(2) %>%
layer_activation("softmax")
opt <- optimizer_adam(lr = 0.001, decay = 1e-6)
model %>%
compile(loss = "categorical_crossentropy", optimizer = opt, metrics = "accuracy")
train_gen <- image_data_generator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = T)
test_gen <- image_data_generator(rescale = 1./255)
train_set = train_gen$flow_from_directory('dataset/training_set',
target_size = c(64, 64),
class_mode = "categorical")
test_set = test_gen$flow_from_directory('dataset/test_set',
target_size = c(64, 64),
batch_size = 32,
class_mode = 'categorical')
model$fit_generator(train_set,
steps_per_epoch = 50,
epochs = 10)
Error:
Error in py_call_impl(callable, dots$args, dots$keywords) :
StopIteration: 'float' object cannot be interpreted as an integer
If I add a validation set, I get another error as well, about bool(validation_data) and a float.
It is difficult to help you without a minimal reproducible example.
I am guessing you get this error when you are trying to run
train_set = train_gen$flow_from_directory('dataset/training_set',
target_size = c(64, 64),
class_mode = "categorical")
Here you are calling the Python function directly via reticulate rather than through a wrapper from keras (the R package). That might work, but you have to be explicit about the type and use target_size = as.integer(c(64, 64)), since Python expects integers here.
Alternatively, I would suggest looking into the flow_images_from_directory() function included in the keras package.
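For example, a sketch of the R wrapper version (same paths and arguments as in the question):
train_set <- flow_images_from_directory(
  'dataset/training_set',
  generator = train_gen,
  target_size = c(64, 64),
  class_mode = "categorical")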
The same goes for
model$fit_generator(train_set,
steps_per_epoch = 50,
epochs = 10)
I'd suggest looking into
model %>%
fit_generator()
instead, which is part of the keras package.
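Something along these lines, as a sketch (steps_per_epoch and epochs carried over from the question; validation_steps is a hypothetical value):
model %>% fit_generator(
  train_set,
  steps_per_epoch = 50,
  epochs = 10,
  validation_data = test_set,
  validation_steps = 10)  # hypothetical number of validation batches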