R/Keras Image Classification Training Yields No Differentiation

I am learning image classification and am attempting to train a model to classify two different types of R plots. The reproducible code below resembles my real-world problem.
Basically, I create two types of plots in R: one is a bar plot and the other a scatter plot. The goal is to train a model so that each type can be uniquely identified.
The problem I am encountering is that, during training, all images are classified as scatter with essentially the same probability.
The code below generates 20 plots in total; the first 10 are bar plots and the second 10 are scatter plots. I am uncertain whether there is a user/code error in my work, or whether Keras is doing the right thing and I am simply expecting a different result.
My real-world problem does something similar, but trains on many hundreds of images, and I get the same result.
Is there any obvious code/logic/user error that might explain this?
Code
library(reticulate)
library(tidyverse)
library(tensorflow)
library(keras)
library(magick)
### Create 20 images: the first 10 are bar plots, the second 10 are scatter plots.
### Put these in the training folder.
K <- 20
for (i in 1:K) {
  fname <- paste('some\\path\\Train\\', i, '.jpg', sep = '')
  jpeg(fname)
  if (i < 11) {
    barplot(rnorm(10) ~ letters[1:10])
  } else {
    plot(rnorm(10), rnorm(10))
  }
  dev.off()
}
loadpics <- function(filenames) {
  a <- lapply(filenames, image_load, grayscale = FALSE)          # load in colour (grayscale = FALSE)
  b <- lapply(a, image_to_array)                                 # turn each image into an array
  c <- lapply(b, image_array_resize, height = 100, width = 100)  # resize
  d <- normalize(c, axis = 1)                                    # normalize to keep values small
  return(d)
}
### Load training data
trainpics <- list.files("some\\path\\Train", full.names = TRUE)
trainx <- loadpics(trainpics)
### Position of bar plots in the filename vector
pos <- numeric(10)
for (i in 1:10) {
  nm <- paste('/', i, '.jpg', sep = '')
  pos[i] <- grep(nm, trainpics)
}
### Create labels: 0 = bar, 1 = scatter
trainy <- rep(1, length(trainpics))
trainy[pos] <- 0
trainlabel <- to_categorical(trainy)
###### Build Keras Model
model1 <- keras_model_sequential()
model1 %>%
  layer_flatten() %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dense(units = 2, activation = 'sigmoid')
model1 %>% compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = c('accuracy'))
fit1 <- model1 %>% fit(x = trainx, y = trainlabel, epochs = 10, batch_size = 32,
                       validation_split = 0.4, callbacks = callback_tensorboard("logs/run_a"))
plot(fit1)
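For what it's worth, one thing I am not sure about (an assumption, not a confirmed diagnosis) is whether fit() accepts the list that loadpics() produces; fit() expects a single numeric array of shape (n, height, width, channels). A minimal sketch of stacking the resized images into one array first:
# sketch, not a confirmed fix: stack the resized 100x100x3 images into
# one (n, 100, 100, 3) array before normalizing, instead of passing a list
stackpics <- function(imglist) {
  arr <- array(0, dim = c(length(imglist), 100, 100, 3))
  for (i in seq_along(imglist)) arr[i, , , ] <- imglist[[i]]
  arr
}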

Related

Output order in Keras predict_generator

I have followed an online tutorial about image recognition using Keras in R, ending up with the following code:
library(keras)
view_list <- c("Inside", "Outside")
output_n <- length(view_list)
# image size to scale down to (original images are 100 x 100 px)
img_width <- 20
img_height <- 20
target_size <- c(img_width, img_height)
# RGB = 3 channels
channels <- 3
train_image_files_path <- "C:/Users/Tomek/Desktop/Photos"
valid_image_files_path <- "C:/Users/Tomek/Desktop/Photos valid"
test_image_files_path <- "C:/Users/Tomek/Desktop/Photos test"
# optional data augmentation
train_data_gen <- image_data_generator(rescale = 1/255)
# Validation data shouldn't be augmented! But it should also be scaled.
valid_data_gen <- image_data_generator(rescale = 1/255)
test_data_gen <- image_data_generator(rescale = 1/255)
# training images
train_image_array_gen <- flow_images_from_directory(train_image_files_path,
train_data_gen,
target_size = target_size,
class_mode = "categorical",
classes = view_list,
seed = 42)
# validation images
valid_image_array_gen <- flow_images_from_directory(valid_image_files_path,
valid_data_gen,
target_size = target_size,
class_mode = "categorical",
classes = view_list,
seed = 42)
# test images
test_image_array_gen <- flow_images_from_directory(test_image_files_path,
test_data_gen,
target_size = target_size,
class_mode = "categorical",
classes = view_list,
seed = 42)
cat("Number of images per class:")
table(factor(train_image_array_gen$classes))
train_image_array_gen$class_indices
views_classes_indices <- train_image_array_gen$class_indices
save(views_classes_indices, file = "C:/Users/Tomek/Desktop/views_classes_indices.RData")
# number of training samples
train_samples <- train_image_array_gen$n
# number of validation samples
valid_samples <- valid_image_array_gen$n
# number of test samples
test_samples <- test_image_array_gen$n
# define batch size and number of epochs
batch_size <- 1
epochs <- 10
# initialise model
model <- keras_model_sequential()
# add layers
model %>%
  layer_conv_2d(filter = 32, kernel_size = c(3,3), padding = "same",
                input_shape = c(img_width, img_height, channels)) %>%
  layer_activation("relu") %>%
  # Second hidden layer
  layer_conv_2d(filter = 16, kernel_size = c(3,3), padding = "same") %>%
  layer_activation_leaky_relu(0.5) %>%
  layer_batch_normalization() %>%
  # Use max pooling
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_dropout(0.25) %>%
  # Flatten max filtered output into feature vector
  # and feed into dense layer
  layer_flatten() %>%
  layer_dense(100) %>%
  layer_activation("relu") %>%
  layer_dropout(0.5) %>%
  # Outputs from dense layer are projected onto output layer
  layer_dense(output_n) %>%
  layer_activation("softmax")
# compile
model %>% compile(
  loss = "categorical_crossentropy",
  optimizer = optimizer_rmsprop(lr = 0.0001, decay = 1e-6),
  metrics = "accuracy"
)
summary(model)
# fit
hist <- model %>% fit_generator(
  # training data
  train_image_array_gen,
  # epochs
  steps_per_epoch = as.integer(train_samples / batch_size),
  epochs = epochs,
  # validation data
  validation_data = valid_image_array_gen,
  validation_steps = as.integer(valid_samples / batch_size),
  # print progress
  verbose = 2,
  callbacks = list(
    # save best model after every epoch
    callback_model_checkpoint("C:/Users/Tomek/Desktop/views_checkpoints.h5", save_best_only = TRUE),
    # only needed for visualising with TensorBoard
    callback_tensorboard(log_dir = "C:/Users/Tomek/Desktop/keras/logs")
  )
)
plot(hist)
#prediction
a <- model %>% predict_generator(test_image_array_gen, steps = 5, verbose = 1, workers = 1)
a <- round(a, digits = 4)
The classification model (with two output classes) seems to work quite nicely: accuracy on the training and validation sets is ~99% and ~95% respectively. However, I am not sure about the results of the predictions on the test set. It looks like the predictions for the observations are shuffled, and I am not able to find a way to check which prediction refers to which image (observation). I have seen some threads on this issue (on GitHub and Medium).
Nevertheless, I am really new to Keras and Python, and I have a hard time applying the suggested solutions in R. What is the easiest way to track which prediction refers to which image from the test set in the predict_generator output?
I figured it out, and the answer is simple. The shuffling is caused by the shuffle argument, which is set to TRUE by default. After changing it, the predictions correspond to the order of test_image_array_gen$filenames. However, bear in mind that this (lexicographic) order is different from the one shown in Windows, which may be a bit confusing.
Order in Windows: Photo 1, Photo 2, ..., Photo 10, Photo 11
Order in R: Photo 1, Photo 10, Photo 11, ..., Photo 2
# test images
test_image_array_gen <- flow_images_from_directory(test_image_files_path,
                                                   test_data_gen,
                                                   target_size = target_size,
                                                   class_mode = "categorical",
                                                   classes = view_list,
                                                   seed = 42,
                                                   shuffle = FALSE)
# prediction
a <- model %>% predict_generator(test_image_array_gen, steps = ceiling(test_samples / 32), verbose = 1, workers = 1)
# bind predictions with photo names
b <- cbind.data.frame(a, test_image_array_gen$filenames)
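If the lexicographic order is a nuisance, one option (a sketch, assuming the gtools package is installed) is to reorder the bound results naturally by filename:
# sketch: mixedorder() sorts "Photo 2" before "Photo 10" (natural numeric order)
library(gtools)
names(b)[ncol(b)] <- "file"
b_sorted <- b[mixedorder(b$file), ]
head(b_sorted)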

Text classification with own word embeddings using Neural Networks in R

This is a rather lengthy one, so please bear with me; unfortunately, the error occurs right at the very end... I cannot predict on the unseen test set!
I would like to perform text classification with word embeddings (which I have trained on my data set) that are fed into a neural network.
I simply have a column with textual descriptions (the input) and four different price classes (the target).
For a reproducible example, here are the necessary data set and the word embedding:
DF: https://www.dropbox.com/s/it0jsbv8e7nkryt/DF.csv?dl=0
WordEmb: https://www.dropbox.com/s/ia5fmio2e0plwkr/WordEmb.txt?dl=0
And here my code:
set.seed(2077)
DF <- read.delim("DF.csv", header = TRUE, sep = ",",
                 dec = ".", stringsAsFactors = FALSE)
DF <- DF[, -1]
# parameters
max_num_words = 9000 # simply see number of observations
validation_split = 0.3
embedding_dim = 300
##### Data Preparation #####
# split into training and test set
set.seed(2077)
n <- nrow(DF)
shuffled <- DF[sample(n),]
# Split the data in train and test
train <- shuffled[1:round(0.7 * n),]
test <- shuffled[(round(0.7 * n) + 1):n,]
rm(n, shuffled)
# predictor/target variable
x_train <- train$Description
x_test <- test$Description
y_train <- train$Price_class
y_test <- test$Price_class
### encode target variable ###
# One hot encode training target values
trainLabels <- to_categorical(y_train)
trainLabels <- trainLabels[, 2:5]
# One hot encode test target values
testLabels <- keras::to_categorical(y_test)
testLabels <- testLabels[, 2:5]
### encode predictor variable ###
# pad sequences
tokenizer <- text_tokenizer(num_words = max_num_words)
# finally, vectorize the text samples into a 2D integer tensor
set.seed(2077)
tokenizer %>% fit_text_tokenizer(x_train)
train_data <- texts_to_sequences(tokenizer, x_train)
tokenizer %>% fit_text_tokenizer(x_test)
test_data <- texts_to_sequences(tokenizer, x_test)
# determine the average document length -> use it to set the maximal sequence length
library(stringi)  # provides stri_count()
seq_mean <- stri_count(train_data, regex = "\\S+")
mean(seq_mean)
max_sequence_length <- 70
# This turns our lists of integers into a 2D integer tensor of shape`(samples, maxlen)`
x_train <- keras::pad_sequences(train_data, maxlen = max_sequence_length)
x_test <- keras::pad_sequences(test_data, maxlen = max_sequence_length)
word_index <- tokenizer$word_index
Encoding(names(word_index)) <- "UTF-8"
#### PREPARE EMBEDDING MATRIX ####
embeddings_index <- new.env(parent = emptyenv())
lines <- readLines("WordEmb.txt")
for (line in lines) {
  values <- strsplit(line, ' ', fixed = TRUE)[[1]]
  word <- values[[1]]
  coefs <- as.numeric(values[-1])
  embeddings_index[[word]] <- coefs
}
embedding_dim <- 300
embedding_matrix <- array(0, c(max_num_words, embedding_dim))
for (word in names(word_index)) {
  index <- word_index[[word]]
  if (index < max_num_words) {
    embedding_vector <- embeddings_index[[word]]
    if (!is.null(embedding_vector)) {
      embedding_matrix[index + 1, ] <- embedding_vector
    }
  }
}
##### Convolutional Neural Network #####
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
num_words <- min(max_num_words, length(word_index) + 1)
embedding_layer <- keras::layer_embedding(
  input_dim = num_words,
  output_dim = embedding_dim,
  weights = list(embedding_matrix),
  input_length = max_sequence_length,
  trainable = FALSE
)
# train a 1D convnet with global max pooling
sequence_input <- layer_input(shape = list(max_sequence_length), dtype = 'int32')
preds <- sequence_input %>%
  embedding_layer %>%
  layer_conv_1d(filters = 128, kernel_size = 1, activation = 'relu') %>%
  layer_max_pooling_1d(pool_size = 5) %>%
  layer_conv_1d(filters = 128, kernel_size = 1, activation = 'relu') %>%
  layer_max_pooling_1d(pool_size = 5) %>%
  layer_conv_1d(filters = 128, kernel_size = 1, activation = 'relu') %>%
  layer_max_pooling_1d(pool_size = 2) %>%
  layer_flatten() %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dense(units = 4, activation = 'softmax')
model <- keras_model(sequence_input, preds)
model %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = 'adam',
  metrics = c('acc')
)
model %>% keras::fit(
  x_train,
  trainLabels,
  batch_size = 1024,
  epochs = 20,
  validation_split = 0.3
)
Now here is where I get stuck:
I cannot use the results of the NN to predict on the unseen test data set:
# Predict the classes for the test data
classes <- model %>% predict_classes(x_test, batch_size = 128)
I get this error:
Error in py_get_attr_impl(x, name, silent) :
AttributeError: 'Model' object has no attribute 'predict_classes'
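As far as I know, predict_classes() is only defined for sequential models; with a model built through the functional keras_model() API, one workaround (a sketch, not the only option) is to take the argmax of the predicted probabilities:
# sketch: derive class labels from predicted probabilities, since
# predict_classes() is not available on functional models
probs <- model %>% predict(x_test, batch_size = 128)
classes <- apply(probs, 1, which.max)  # 1-based index of the most probable class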
Afterwards, I'd proceed like this:
# Confusion matrix
table(y_test, classes)
# Evaluate on test data and labels
score <- model %>% evaluate(x_test, testLabels, batch_size = 128)
# Print the score
print(score)
For now, the actual accuracy does not really matter, since this is only a small example from my data set.
I know this is a long one, but any help would be very much appreciated.

Unscale predicted value for Neural Network (Keras package)

Partition of data:
set.seed(1234)
ind <- sample(2, nrow(bronx_data), replace = T, prob = c(.7, .3))
train <- bronx_data[ind == 1, 2:11]
test <- bronx_data[ind == 2, 2:11]
train_target <- bronx_data[ind == 1, 1]
test_target <- bronx_data[ind == 2, 1]
Normalize my data:
m <- colMeans(train)
s <- apply(train, 2, sd)
train <- scale(train, center = m, scale = s)
test <- scale(test, center = m, scale = s)  # use the same mean and sd obtained from the train data
Model
This is my model:
library(keras)
model <- keras_model_sequential()
model %>%
  layer_dense(units = 5, activation = 'relu', input_shape = c(10)) %>%
  layer_dense(units = 1)
I get good output, but the problem I am having is un-scaling the predicted values. Someone please help; I am a new coder.
I've tried
unscale(vals, norm.data, col.ids)
and got the following error:
Error in scale.default(data, center = FALSE, scale = 1/scale) : length of 'scale' must equal the number of columns of 'x'
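For reference, scaling can be reversed by hand with the mean and standard deviation that were used for scaling in the first place. A minimal sketch, assuming the target was centered and scaled with its own mean and sd before training:
# sketch: undo scaling manually, assuming train_target was scaled
# with its own mean and sd before fitting the model
m_target <- mean(train_target)
s_target <- sd(train_target)
pred_scaled <- model %>% predict(test)
pred <- pred_scaled * s_target + m_target  # back to the original units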

Shape error in image classification model in Keras R

I am having trouble with one area of code and it prevents me from finishing my research paper. I am new to Machine Learning and R, but I have learned a lot so far. Here is my code:
# Install packages and libraries
install.packages("keras")
source("http://bioconductor.org/biocLite.R")
library(keras)
library(EBImage)
# Read images
setwd('C:/Users/ebarn/Desktop/DataSet')
pics <- c('p1.jpg', 'p2.jpg', 'p3.jpg', 'p4.jpg', 'p5.jpg', 'p6.jpg',
          'c1.jpg', 'c2.jpg', 'c3.jpg', 'c4.jpg', 'c5.jpg', 'c6.jpg')
mypic <- list()
for (i in 1:12) {mypic[[i]] <- readImage(pics[i])}
# Explore
print(mypic[[1]])
display(mypic[[1]])
display(mypic[[8]])
summary(mypic[[1]])
hist(mypic[[12]])
str(mypic)
# Resize
for (i in 1:12) { mypic[[i]] <- resize(mypic[[i]], 28, 28) }
str(mypic)
# Reshape
28 * 28 * 3  # = 2352, the flattened image size
for (i in 1:12) { mypic[[i]] <- array_reshape(mypic[[i]], c(28, 28, 3)) }
str(mypic)
# Row Bind
trainx <- NULL
for(i in 1:5) {trainx <- rbind(trainx, mypic[[i]])}
str(trainx)
for(i in 7:11) {trainx <- rbind(trainx, mypic[[i]])}
str(trainx)
testx <- rbind(mypic[[6]], mypic[[12]])
trainy <- c(0,0,0,0,0,1,1,1,1,1)
testy <- c(0, 1)
# One Hot Encoding
trainLabels <- to_categorical(trainy)
testLabels <- to_categorical(testy)
trainLabels
# Model
model <- keras_model_sequential()
model %>%
  layer_dense(units = 256, activation = 'relu', input_shape = c(2352)) %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dense(units = 2, activation = 'softmax')
summary(model)
# Compile
model %>%
  compile(loss = 'sparse_categorical_crossentropy',
          optimizer = optimizer_rmsprop(),
          metrics = c('accuracy'))
# model.add(Dense(10, activation = 'softmax'))
# Fit Model
history <- model %>%
  fit(trainx, trainLabels, epochs = 30, batch_size = 32,
      validation_split = 0.2)
plot(history)
# Evaluation & Prediction - train data
model %>% evaluate(trainx, trainLabels)
The Fit Model step will not run and print out my graph. Here is the error it gives me:
ValueError: Error when checking target: expected dense_1 to have shape (1,) but got array with shape (2,)
You are one-hot encoding the labels:
# One Hot Encoding
trainLabels <- to_categorical(trainy)
testLabels <- to_categorical(testy)
Therefore, they are no longer sparse labels, and you need to use categorical_crossentropy as the loss function instead of sparse_categorical_crossentropy. Alternatively, you can comment out the one-hot encoding lines and keep the sparse loss; the first option is sketched below.
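A minimal sketch of the first option, with only the loss changed:
# keep the one-hot encoded labels and switch to the matching loss
model %>%
  compile(loss = 'categorical_crossentropy',
          optimizer = optimizer_rmsprop(),
          metrics = c('accuracy'))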

How to get the labels of clusters after using autoencoder in R

I'm a beginner in machine learning. I'm doing clustering using an autoencoder in R (the h2o package). For now, I have the following code:
mydata <- h2o.importFile(path = mfile)
NN_model <- h2o.deeplearning(
  x = 2:9,
  training_frame = mydata,
  hidden = c(2),
  epochs = 100,
  activation = "Tanh",
  autoencoder = TRUE
)
train_supervised_features <- h2o.deepfeatures(NN_model, mydata, layer = 1)
My data does not have many columns (as you can see in the code, only 8 variables), but it has lots of rows.
I then plotted the two nodes in train_supervised_features obtained above, and the resulting plot clearly shows 8 clusters in my data (right?).
My question is: how can I extract the labels from the autoencoder results? I want to add the labels to the original data and plot it in colors using these labels.
Since the autoencoder doesn't have any idea about "clusters", you would have to call h2o.kmeans() on the 2D dataset first to get the cluster assignments. Then you can score the dataset using that k-means model, via h2o.predict(model, 2Ddata), and you'll get a cluster label for each row. A minimal sketch follows.
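A sketch of that flow, assuming the 8 clusters suggested by the plot:
# sketch: cluster the 2-D latent features, then score to get one label per row
km <- h2o.kmeans(training_frame = train_supervised_features, k = 8)
labels <- h2o.predict(km, train_supervised_features)  # one cluster label per row
mydata_labeled <- h2o.cbind(mydata, labels)           # attach labels to the original data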
Basically, an AE can be a good DNN for reconstructing the input; beyond that, you can use its latent layer for clustering.
The important steps:
1- To initialize the cluster assignment, you may apply simple k-means to your data and label the points;
2- Train your AE to learn features;
3- Extract the output of the layer before the latent layer, which contains the trained representation of your data;
4- Cluster that representation with k-means.
I hope it helps.
Here is an example I have provided in Keras:
library(keras)
library(caret)
library(tidyverse)
c(c(xtrain, ytrain), c(xtest, ytest)) %<-% dataset_mnist()
xtrain <- xtrain / 255
xtest <- xtest / 255
input_size <- dim(xtrain)[2] * dim(xtrain)[3]
latent_size <- 10
print(input_size)
x_train <- array_reshape(xtrain, dim = c(dim(xtrain)[1], input_size))
x_test <- array_reshape(xtest, dim = c(dim(xtest)[1], input_size))
x <- rbind(x_test, x_train)  # already scaled to [0, 1] above, so no second division by 255
# Encoder
encoder_input <- layer_input(shape = input_size)
encoder_output <- encoder_input %>%
  layer_dense(units = 256, activation = "relu") %>%
  layer_activation_leaky_relu() %>%
  layer_dense(units = latent_size) %>%
  layer_activation_leaky_relu()
encoder <- keras_model(encoder_input, encoder_output)
summary(encoder)
# Decoder
decoder_input <- layer_input(shape = latent_size)
decoder_output <- decoder_input %>%
  layer_dense(units = 256, activation = "relu") %>%
  layer_activation_leaky_relu() %>%
  layer_dense(units = input_size, activation = "relu") %>%
  layer_activation_leaky_relu()
decoder <- keras_model(decoder_input, decoder_output)
summary(decoder)
# Autoencoder
autoencoder_input <- layer_input(shape = input_size)
autoencoder_output <- autoencoder_input %>%
  encoder() %>%
  decoder()
autoencoder <- keras_model(autoencoder_input, autoencoder_output)
summary(autoencoder)
autoencoder %>% compile(optimizer = "rmsprop", loss = "binary_crossentropy")
autoencoder %>% fit(x_train, x_train, epochs = 20, batch_size = 256)
encoded_imgs <- encoder %>% predict(x_test)
decoded_imgs <- decoder %>% predict(encoded_imgs)
# Images plot
pred_images <- array_reshape(decoded_imgs, dim = c(dim(decoded_imgs)[1], 28, 28))
n <- 10
op <- par(mfrow = c(12, 2), mar = c(1, 0, 0, 0))
for (i in 1:n) {
  plot(as.raster(pred_images[i, , ]))  # reconstruction
  plot(as.raster(xtest[i, , ]))        # original
}
par(op)  # restore plotting parameters
# Saving the trained net
autoencoder_weights <- autoencoder %>%
  keras::get_weights()
keras::save_model_weights_hdf5(object = autoencoder, filepath = '..../autoencoder_weights.hdf5', overwrite = TRUE)
encoder_model <- keras_model(inputs = encoder_input, outputs = encoder$output)
encoder_model %>% keras::load_model_weights_hdf5(filepath = "..../autoencoder_weights.hdf5", skip_mismatch = TRUE, by_name = TRUE)
encoder_model %>% compile(
  loss = 'mean_squared_error',
  optimizer = 'adam',
  metrics = c('accuracy')
)
embedded_points <-
  encoder_model %>%
  keras::predict_on_batch(x = x_train)
summary(encoder_model)
# Getting the latent layer
layer_name <- "dense_1"
intermediate_layer_model <- keras_model(inputs = encoder_model$input,
                                        outputs = get_layer(encoder_model, layer_name)$output)
intermediate_output <- predict(intermediate_layer_model, x)
# Clustering the latent space
km <- stats::kmeans(intermediate_output, centers = 10L, nstart = 20L)
labPrediction <- km$cluster
plot(labPrediction)
# The End
The labels are available in the labPrediction vector.
For reference:
https://www.datatechnotes.com/2020/02/how-to-build-simple-autoencoder-with-keras-in-r.html
