I am having trouble with one area of my code, and it is preventing me from finishing my research paper. I am new to machine learning and R, but I have learned a lot so far. Here is my code:
# Install packages and libraries
install.packages("keras")
source("http://bioconductor.org/biocLite.R")
library(keras)
library(EBImage)
# Read images
setwd('C:/Users/ebarn/Desktop/DataSet')
pics <- c('p1.jpg', 'p2.jpg', 'p3.jpg', 'p4.jpg', 'p5.jpg',
'p6.jpg','c1.jpg', 'c2.jpg', 'c3.jpg', 'c4.jpg', 'c5.jpg',
'c6.jpg')
mypic <- list()
for (i in 1:12) {mypic[[i]] <- readImage(pics[i])}
# Explore
print(mypic[[1]])
display(mypic[[1]])
display(mypic[[8]])
summary(mypic[[1]])
hist(mypic[[12]])
str(mypic)
# Resize
for (i in 1:12) {mypic[[i]] <- resize(mypic[[i]], 28, 28)}
str(mypic)
# Reshape
28*28*3
for (i in 1:12) {mypic[[i]] <- array_reshape(mypic[[i]], c(28,
28, 3))}
str(mypic)
# Row Bind
trainx <- NULL
for(i in 1:5) {trainx <- rbind(trainx, mypic[[i]])}
str(trainx)
for(i in 7:11) {trainx <- rbind(trainx, mypic[[i]])}
str(trainx)
testx <- rbind(mypic[[6]], mypic[[12]])
trainy <- c(0,0,0,0,0,1,1,1,1,1)
testy <- c(0, 1)
# One Hot Encoding
trainLabels <- to_categorical(trainy)
testLabels <- to_categorical(testy)
trainLabels
# Model
model <- keras_model_sequential()
model %>%
layer_dense(units = 256, activation = 'relu', input_shape = c(2352)) %>%
layer_dense(units = 128, activation = 'relu') %>%
layer_dense(units = 2, activation = 'softmax')
summary(model)
# Compile
model %>%
compile(loss = 'sparse_categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics = c('accuracy'))
# model.add(Dense(10, activation = 'softmax'))
# Fit Model
history <- model %>%
fit(trainx, trainLabels, epochs = 30, batch_size = 32,
validation_split = 0.2)
plot(history)
# Evaluation & Prediction - train data
model %>% evaluate(trainx, trainLabels)
The Fit Model step fails and will not print out my graph. Here is the error it gives me:
ValueError: Error when checking target: expected dense_1 to have shape (1,) but got array with shape (2,)
You are one-hot encoding the labels:
# One Hot Encoding
trainLabels <- to_categorical(trainy)
testLabels <- to_categorical(testy)
Therefore, they are no longer sparse labels, and you need to use categorical_crossentropy as the loss function instead of sparse_categorical_crossentropy. Alternatively, you can comment out the one-hot-encoding lines and keep the integer labels.
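A minimal sketch of the fix, keeping the one-hot labels (everything else in the question's model stays the same):
# Compile with a loss that matches the 2-column one-hot labels
model %>%
compile(loss = 'categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics = c('accuracy'))
# The fit call can then stay exactly as before
history <- model %>%
fit(trainx, trainLabels, epochs = 30, batch_size = 32,
validation_split = 0.2)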
I am getting an error with the following R code. The dataset is https://archive.ics.uci.edu/ml/datasets/online+news+popularity. I am trying to build a dense neural network to solve a regression problem: predicting the value of the variable "shares" using all the other variables.
Thank you in advance
#Loading libraries
library(keras)
library(mlbench)
library(ggplot2)
library(neuralnet)
library(tidymodels)
library(tidyverse)
library(magrittr)
#Loading the dataset
df <- read.csv("/Users/prathamdave/Desktop/OnlineNewsPopularity.csv")
#Inspecting the dataset
str(df)
glimpse(df)
#Preliminary data cleaning
data <- df[2:61]
str(df)
data %<>% mutate_if(is.integer, as.numeric)
#Preparing data
#Matrix
data <- as.matrix(data)
dimnames(data) <- NULL
#Partition
set.seed(1234)
ind <- sample(2, nrow(data), replace = T, prob = c(0.7, 0.3))
training <- data[ind==1, 1:59]
test <- data[ind==2, 1:59]
trainingtarget <- data[ind==1, 60]
testtarget <- data[ind==2, 60]
#Normalizing data
m <- colMeans(training)
s <- apply(training, 2, sd)
training <- scale(training, center = m, scale = s)
test <- scale(test, center = m, scale = s)
#Creating the model specification
model <- keras_model_sequential()
model %>%
layer_dense(units = 41, activation = 'relu', input_shape = c(60)) %>%
layer_dense(units = 1)
#Compile
model %>% compile(loss = 'mse',
optimizer = 'rmsprop',
metrics = 'mae')
#Fit Model
mymodel <- model %>%
fit(
training,
trainingtarget,
epochs = 180,
batch_size = 32,
validation_split = 0.2
)
Output:
Epoch 1/180
Error in py_call_impl(callable, dots$args, dots$keywords) :
ValueError: in user code:
<... omitted ...>te/lib/python3.8/site-packages/keras/engine/training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "/Users/prathamdave/Library/r-miniconda/envs/r-reticulate/lib/python3.8/site-packages/keras/engine/training.py", line 889, in train_step
y_pred = self(x, training=True)
File "/Users/prathamdave/Library/r-miniconda/envs/r-reticulate/lib/python3.8/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/Users/prathamdave/Library/r-miniconda/envs/r-reticulate/lib/python3.8/site-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" is '
ValueError: Input 0 of layer "sequential_3" is incompatible with the layer: expected shape=(None, 60), found shape=(None, 59)
See `reticulate::py_last_error()` for details
Your data has 59 features and 1 target, so you need an input_shape of 59 instead of 60. You can use the following reproducible code:
#Loading libraries
library(keras)
library(mlbench)
library(ggplot2)
library(neuralnet)
library(tidymodels)
library(tidyverse)
library(magrittr)
#Loading the dataset
df <- read.csv("OnlineNewsPopularity/OnlineNewsPopularity.csv")
#Inspecting the dataset
str(df)
glimpse(df)
#Preliminary data cleaning
data <- df[2:61]
str(df)
data %<>% mutate_if(is.integer, as.numeric)
#Preparing data
#Matrix
data <- as.matrix(data)
dimnames(data) <- NULL
#Partition
set.seed(1234)
ind <- sample(2, nrow(data), replace = T, prob = c(0.7, 0.3))
training <- data[ind==1, 1:59]
test <- data[ind==2, 1:59]
trainingtarget <- data[ind==1, 60]
testtarget <- data[ind==2, 60]
#Normalizing data
m <- colMeans(training)
s <- apply(training, 2, sd)
training <- scale(training, center = m, scale = s)
test <- scale(test, center = m, scale = s)
#Creating the model specification
model <- keras_model_sequential()
model %>%
layer_dense(units = 41, activation = 'relu', input_shape = c(59)) %>%
layer_dense(units = 1)
#Compile
model %>% compile(loss = 'mse',
optimizer = 'rmsprop',
metrics = 'mae')
#Fit Model
mymodel <- model %>%
fit(
training,
trainingtarget,
epochs = 180,
batch_size = 32,
validation_split = 0.2
)
Output: (training log omitted)
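To check the fit on the held-out split afterwards, a short sketch reusing the test and testtarget objects defined above:
# Test-set MSE and MAE
model %>% evaluate(test, testtarget)
# Predicted vs. actual shares
pred <- model %>% predict(test)
plot(testtarget, pred)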
I am trying to write a program based on deep networks to read handwritten digits. I found code on YouTube (https://www.youtube.com/watch?v=5bso_5X7Zu4) that works there, but it does not work for me. The problem is that I get an error when I try to predict my handwritten number (namely the digit 5), which I drew in Windows Paint.
My digit, drawn in Paint, is saved as number5.jpg.
My complete code is:
library("pkgdown")
library(keras)
# devtools::install_github("rstudio/reticulate")
mnist <- dataset_mnist()
# str(mnist)
trainx <- mnist$train$x
trainy <- mnist$train$y
testx <- mnist$test$x
testy <- mnist$test$y
table(mnist$train$y, mnist$train$y)
table(mnist$test$y, mnist$test$y)
# plot images
windows()
par(mfrow = c(3,3))
for (i in 1:9) plot(as.raster(trainx[i,,], max=255))
trainx[2,,]
windows()
hist(trainx[1,,])
# Reshape & rescale
trainx <- array_reshape(trainx, c(nrow(trainx), 784))
testx <- array_reshape(testx, c(nrow(testx), 784))
trainx <- trainx / 255
testx <- testx / 255
#windows()
#hist(trainx[1,])
# One hot encoding
trainy <- to_categorical(trainy, 10)
testy <- to_categorical(testy, 10)
# trainx <- as.matrix(trainx)
# trainy <- as.matrix(trainy)
# Model
model <- keras_model_sequential()
model %>%
layer_dense(units = 128, activation = 'relu', input_shape = c(784)) %>%
layer_dropout(rate = 0.3) %>%
layer_dense(units = 64, activation = 'relu') %>%
layer_dropout(rate = 0.2) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
# Compile
model %>%
compile(loss ='categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics = 'accuracy')
# Fit model
history <- model %>%
fit(trainx,
trainy,
epochs = 30,
batch_size = 32,
validation_split = 0.2)
plot(history)
# Evaluation and Prediction - Test data
model %>% evaluate(testx, testy)
pred <- model %>% predict(testx) %>% k_argmax() %>% as.integer() %>% .[1:7840000]
prob <- model %>% predict(testx)
cbind(Predicted_class = pred , Actual = mnist$test$y)[1:150,]
# New data
#install.packages("BiocManager")
#BiocManager::install("EBImage")
library(EBImage)
setwd("C:/Users/hofo/Arbeidsmapper/Documents/NLP/NLP_BOSTOTTE/JPG_filer")
mypic <- readImage("number5.jpg")
mypic <- resize(mypic, 28, 28)
mypic <- array_reshape(mypic, c(28, 28, 3))
new <- NULL
new <- rbind(new, mypic)
str(new)
newx <- new[1:1,1:784]
newy <- c(5)
pred <- model %>% predict(newx) %>% k_argmax() %>% as.integer() %>% .[1:9]
The error when I run the last line is:
Error in py_call_impl(callable, dots$args, dots$keywords) :
ValueError: in user code:
C:\Users\hofo\AppData\Local\R-MINI~1\envs\R-RETI~1\lib\site-packages\keras\engine\training.py:1586 predict_function *
return step_function(self, iterator)
C:\Users\hofo\AppData\Local\R-MINI~1\envs\R-RETI~1\lib\site-packages\keras\engine\training.py:1576 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\hofo\AppData\Local\R-MINI~1\envs\R-RETI~1\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\hofo\AppData\Local\R-MINI~1\envs\R-RETI~1\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\hofo\AppData\Local\R-MINI~1\envs\R-RETI~1\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
Can you also please help me with this?
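A preprocessing sketch that may help, assuming number5.jpg is a dark digit on a light background and is read in as RGB (the model was trained on flat 784-value grayscale rows with light digits on dark, so the new image should match that layout):
library(EBImage)
mypic <- readImage("number5.jpg")
mypic <- channel(mypic, "gray") # collapse RGB to a single channel
mypic <- 1 - mypic # invert: MNIST digits are light-on-dark
mypic <- resize(mypic, 28, 28)
newx <- array_reshape(mypic, c(1, 784)) # one row of 784 features, like trainx
pred <- model %>% predict(newx) %>% k_argmax() %>% as.integer()
pred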
Hi, I'm trying to develop a neural net that is capable of reading handwritten numbers.
I've copied this model from the internet (ref: https://www.youtube.com/watch?v=5bso_5X7Zu4&t=643s).
My problem comes with new data: it has a high rate of misclassification (see the end of the post).
#Youtube link https://www.youtube.com/watch?v=5bso_5X7Zu4&t=785s
# MNIST data
library(keras)
mnist <- dataset_mnist()
trainx <- mnist$train$x
trainy <- mnist$train$y
testx <- mnist$test$x
testy <- mnist$test$y
# Plot images
par(mfrow = c(3,3))
for (i in 1:9) plot(as.raster(trainx[i,,], max = 255))
par(mfrow= c(1,1))
# Five
a <- c(1, 12, 36, 48, 66, 101, 133, 139, 146)
par(mfrow = c(3,3))
for (i in a) plot(as.raster(trainx[i,,], max = 255))
par(mfrow= c(1,1))
# Reshape & rescale
trainx <- array_reshape(trainx, c(nrow(trainx), 784))
testx <- array_reshape(testx, c(nrow(testx), 784))
trainx <- trainx / 255
testx <- testx /255
# One hot encoding
trainy <- to_categorical(trainy, 10)
testy <- to_categorical(testy, 10)
# Model
model <- keras_model_sequential()
model %>%
layer_dense(units = 512, activation = 'relu', input_shape = c(784)) %>%
layer_dropout(rate = 0.4) %>%
layer_dense(units= 256, activation = 'relu') %>%
layer_dropout(rate = 0.3) %>%
layer_dense(units = 10, activation = 'softmax')
# Compile
model %>%
compile(loss = 'categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics = 'accuracy')
# Fit model
history <- model %>%
fit(trainx,
trainy,
epochs = 30,
batch_size = 32,
validation_split = 0.2)
# Evaluation and Prediction - Test data
model %>% evaluate(testx, testy)
pred <- model %>% predict_classes(testx)
table(Predicted = pred, Actual = mnist$test$y)
prob <- model %>% predict_proba(testx)
cbind(prob, Predicted_class = pred, Actual = mnist$test$y)[1:5,]
# New data
library(EBImage)
temp = list.files(pattern = "*.jpg")
mypic <- list()
for (i in 1:length(temp)) {mypic[[i]] <- readImage(temp[[i]])}
par(mfrow = c(4,4))
for (i in 1:length(temp)) plot(mypic[[i]])
for (i in 1:length(temp)) {colorMode(mypic[[i]]) <- Grayscale}
for (i in 1:length(temp)) {mypic[[i]] <- 1-mypic[[i]]}
for (i in 1:length(temp)) {mypic[[i]] <- resize(mypic[[i]], 28, 28)}
str(mypic)
par(mfrow = c(4,5))
for (i in 1:length(temp)) plot(mypic[[i]])
for (i in 1:length(temp)) {mypic[[i]] <- array_reshape(mypic[[i]], c(28,28,3))}
new <- NULL
for (i in 1:length(temp)) {new <- rbind(new, mypic[[i]])}
newx <- new[,1:784]
newy <- c(7,5,2,0,5,3,4,3,2,7,5,6,8,5,6)
# Prediction
pred <- model %>% predict_classes(newx)
pred
table(Predicted = pred, Actual = newy)
prob <- model %>% predict_proba(newx)
cbind(prob, Predicted = pred, Actual = newy)
The problem is the classification of new data.
I obtained the following predictions (you can download the images from here):
7 5 2 0 5 3 8 2 2 6 6 6 3 5 5
The first six numbers, taken from the video, are classified correctly (7 5 2 0 5 3), but the next nine, written by me, show very poor results.
I also tried to test my nnet with the training-data numbers, but it keeps failing and I don't understand why :(
Any idea? Why is this happening?
Thanks
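One diagnostic worth running (a sketch, assuming the JPGs are read in as RGB): after array_reshape(mypic[[i]], c(28, 28, 3)), taking new[, 1:784] does not give one clean grayscale image per row, so the network may be receiving scrambled pixels. Plotting exactly what goes into predict_classes makes this visible:
# Rebuild the first row of newx as the 28x28 image the network actually sees
# (array_reshape fills row-major, hence byrow = TRUE; a scrambled or striped
# plot signals a preprocessing mismatch)
row1 <- matrix(newx[1, ], nrow = 28, byrow = TRUE)
image(row1, col = gray.colors(256))
If the plot looks scrambled, collapsing each image to one channel with channel(mypic[[i]], "gray") before flattening to 784 values would be one way to match the MNIST layout.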
I have a question regarding applying a neural network to categorical data.
1- I have one output which is numeric (Connection.Duration)
2- I have 5 inputs, 4 of them (EVSE.ID, User.ID, Fee, Day) are categorical and 1 (Time) is numeric.
I want to apply a neural network to predict Connection.Duration. I do not know the correct commands to use for categorical data. I used model.matrix, but I did not know how to continue with the new data frame (m) that contains the encoded categorical data.
I would like to ask for help please.
data$Fee <- as.factor(data$Fee)
data$EVSE.ID <- as.factor(data$EVSE.ID)
data$User.ID <- as.factor(data$User.ID)
data$Day <- as.factor(data$Day)
data$Time <- as.factor(data$Time)
data$Connection.Duration <- as.factor(data$Connection.Duration)
m <- model.matrix(Connection.Duration ~ EVSE.ID+Time+Day+Fee+User.ID,
data= data)
# Neural Networks
n <- neuralnet(Connection.Duration ~ EVSE.ID+Time+Day+Fee+User.ID,
data = m,
hidden=c(100,60))
# Data partition
set.seed(1234)
ind <- sample(2, nrow(m), replace = TRUE, prob = c(0.7, 0.3))
training <- m[ind==1,1:5]
testing <- m[ind==2,1:5]
trainingtarget <- m[ind==1, 6]
testingtarget <- m[ind==2, 6]
# Normalize
m <- colMeans(training)
s <- apply(training, 2, sd)
training <- scale(training, center = m, scale = s)
testing <- scale(testing, center = m, scale = s)
# Create Model
model <- keras_model_sequential()
model %>%
layer_dense(units = 5, activation = 'relu', input_shape = c(5)) %>%
layer_dense(units = 1)
# Compile
model %>% compile(loss= 'mse',
optimizer= 'rmsprop',
metrics='mae')
# Fit model
mymodel <- model %>%
fit(training,
trainingtarget,
epochs= 100,
batch_size = 32,
validation_split = 0.2)
# Evaluate
model %>% evaluate(testing, testingtarget)
pred <- model %>% predict(testing)
mean((testingtarget - pred)^2)
plot(testingtarget, pred)
# Fine-tune Model
model <- keras_model_sequential()
model %>%
layer_dense(units = 100, activation = 'relu', input_shape = c(5)) %>%
layer_dropout(rate = 0.4) %>%
layer_dense(units = 60, activation = 'relu', input_shape = c(5)) %>%
layer_dropout(rate = 0.2) %>%
layer_dense(units = 1)
# Compile
model %>% compile(loss= 'mse',
optimizer= optimizer_rmsprop(lr=0.0001),
metrics='mae')
# Fit model
mymodel <- model %>%
fit(training,
trainingtarget,
epochs= 100,
batch_size = 32,
validation_split = 0.2)
# Evaluate
model %>% evaluate(testing, testingtarget)
pred <- model %>% predict(testing)
mean((testingtarget - pred)^2)
plot(testingtarget, pred)
What you're looking for is called "one-hot encoding". There are functions in tensorflow/keras to help with the encoding.
But otherwise, I would do it up front. I would not rely on model.matrix, as it doesn't give you quite what you want.
You can easily write your own function, but here's an example using the mltools package:
library(data.table)
library(mltools)
one_hot(data.table(x = factor(letters), n = 1:26))
Note: it requires data.table rather than data.frame but you can convert your data back and forth.
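Applied to the columns from the question (a sketch, assuming data is the data frame described above, with the factor conversions already applied):
library(data.table)
library(mltools)
dt <- as.data.table(data)
# one_hot() encodes every unordered factor column, so keep the numeric
# fields (Time, and the Connection.Duration target) numeric beforehand
dt[, Time := as.numeric(as.character(Time))]
dt[, Connection.Duration := as.numeric(as.character(Connection.Duration))]
encoded <- one_hot(dt) # EVSE.ID, User.ID, Fee and Day become dummy columns
head(encoded)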
This is a rather lengthy one, so please bear with me; unfortunately, the error occurs right at the very end... I cannot predict on the unseen test set!
I would like to perform text classification with word embeddings (trained on my own data set) that feed into a neural network.
I simply have a column with textual descriptions as the input and four price classes as the target.
For a reproducible example, here are the necessary data set and the word embedding:
DF: https://www.dropbox.com/s/it0jsbv8e7nkryt/DF.csv?dl=0
WordEmb: https://www.dropbox.com/s/ia5fmio2e0plwkr/WordEmb.txt?dl=0
And here my code:
library(keras)
library(stringi)  # for stri_count() below
set.seed(2077)
DF = read.delim("DF.csv", header = TRUE, sep = ",",
dec = ".", stringsAsFactors = FALSE)
DF <- DF[,-1]
# parameters
max_num_words = 9000 # simply see number of observations
validation_split = 0.3
embedding_dim = 300
##### Data Preparation #####
# split into training and test set
set.seed(2077)
n <- nrow(DF)
shuffled <- DF[sample(n),]
# Split the data in train and test
train <- shuffled[1:round(0.7 * n),]
test <- shuffled[(round(0.7 * n) + 1):n,]
rm(n, shuffled)
# predictor/target variable
x_train <- train$Description
x_test <- test$Description
y_train <- train$Price_class
y_test <- test$Price_class
### encode target variable ###
# One hot encode training target values
trainLabels <- to_categorical(y_train)
trainLabels <- trainLabels[, 2:5]
# One hot encode test target values
testLabels <- keras::to_categorical(y_test)
testLabels <- testLabels[, 2:5]
### encode predictor variable ###
# pad sequences
tokenizer <- text_tokenizer(num_words = max_num_words)
# finally, vectorize the text samples into a 2D integer tensor
set.seed(2077)
tokenizer %>% fit_text_tokenizer(x_train)
train_data <- texts_to_sequences(tokenizer, x_train)
tokenizer %>% fit_text_tokenizer(x_test)
test_data <- texts_to_sequences(tokenizer, x_test)
# determine average length of document -> set as maximal sequence length
seq_mean <- stri_count(train_data, regex="\\S+")
mean((seq_mean))
max_sequence_length = 70
# This turns our lists of integers into a 2D integer tensor of shape (samples, maxlen)
x_train <- keras::pad_sequences(train_data, maxlen = max_sequence_length)
x_test <- keras::pad_sequences(test_data, maxlen = max_sequence_length)
word_index <- tokenizer$word_index
Encoding(names(word_index)) <- "UTF-8"
#### PREPARE EMBEDDING MATRIX ####
embeddings_index <- new.env(parent = emptyenv())
lines <- readLines("WordEmb.txt")
for (line in lines) {
values <- strsplit(line, ' ', fixed = TRUE)[[1]]
word <- values[[1]]
coefs <- as.numeric(values[-1])
embeddings_index[[word]] <- coefs
}
embedding_dim <- 300
embedding_matrix <- array(0,c(max_num_words, embedding_dim))
for(word in names(word_index)){
index <- word_index[[word]]
if(index < max_num_words){
embedding_vector <- embeddings_index[[word]]
if(!is.null(embedding_vector)){
embedding_matrix[index+1,] <- embedding_vector
}
}
}
##### Convolutional Neural Network #####
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
num_words <- min(max_num_words, length(word_index) + 1)
embedding_layer <- keras::layer_embedding(
input_dim = num_words,
output_dim = embedding_dim,
weights = list(embedding_matrix),
input_length = max_sequence_length,
trainable = FALSE
)
# train a 1D convnet with global maxpooling
sequence_input <- layer_input(shape = list(max_sequence_length), dtype='int32')
preds <- sequence_input %>%
embedding_layer %>%
layer_conv_1d(filters = 128, kernel_size = 1, activation = 'relu') %>%
layer_max_pooling_1d(pool_size = 5) %>%
layer_conv_1d(filters = 128, kernel_size = 1, activation = 'relu') %>%
layer_max_pooling_1d(pool_size = 5) %>%
layer_conv_1d(filters = 128, kernel_size = 1, activation = 'relu') %>%
layer_max_pooling_1d(pool_size = 2) %>%
layer_flatten() %>%
layer_dense(units = 128, activation = 'relu') %>%
layer_dense(units = 4, activation = 'softmax')
model <- keras_model(sequence_input, preds)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = c('acc')
)
model %>% keras::fit(
x_train,
trainLabels,
batch_size = 1024,
epochs = 20,
validation_split = 0.3
)
Now here is where I get stuck:
I cannot use the results of the NN to predict on the unseen test data set:
# Predict the classes for the test data
classes <- model %>% predict_classes(x_test, batch_size = 128)
I get this error:
Error in py_get_attr_impl(x, name, silent) :
AttributeError: 'Model' object has no attribute 'predict_classes'
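A workaround sketch: derive the classes from predict() instead (assuming the four columns of trainLabels correspond to Price_class 1 to 4, as set up above):
# Predict class probabilities, then take the most probable column per row
probs <- model %>% predict(x_test, batch_size = 128)
classes <- max.col(probs) # 1-based index of the most probable column
# trainLabels kept columns 2:5, so column i here corresponds to Price_class i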
Afterwards, I'd proceed like this:
# Confusion matrix
table(y_test, classes)
# Evaluate on test data and labels
score <- model %>% evaluate(x_test, testLabels, batch_size = 128)
# Print the score
print(score)
For now the actual accuracy does not really matter since this is only a small example of my data set.
I know this is a long one but AAANNY help would be very muuuch appreciated.