Layer ModuleWrapper has arguments ['module', 'method_name'] in `__init__` and therefore must override `get_config()`

I have created a simple ANN like this:
from keras.models import Sequential
import tensorflow as tf
from tensorflow.python.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
model = Sequential()
model.add(Dense(20, input_dim=2, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam', metrics=[tf.keras.metrics.RootMeanSquaredError()])
n_epochs = 1000
n_batch = 6
model.fit(x_train_norm, y_train, epochs=n_epochs, batch_size=n_batch, verbose=1)
And when I try to save the model (h5py is already installed):
model.save("model.h5")
I get the following error message:
Layer ModuleWrapper has arguments ['module', 'method_name']
in `__init__` and therefore must override `get_config()`.
Example:
class CustomLayer(keras.layers.Layer):
    def __init__(self, arg1, arg2):
        super().__init__()
        self.arg1 = arg1
        self.arg2 = arg2

    def get_config(self):
        config = super().get_config()
        config.update({
            "arg1": self.arg1,
            "arg2": self.arg2,
        })
        return config
I've read NotImplementedError: Layers with arguments in `__init__` must override `get_config`,
but I don't know how to apply it to my problem, because it's a very simple ANN and I don't use an encoder like in the example above.
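For reference, the usual cause of this error is the import mix above: a layer imported from `tensorflow.python.keras` is not an instance of the `Layer` class that the standalone `keras` `Sequential` expects, so Keras wraps it in a `ModuleWrapper(module, method_name)` layer, and `ModuleWrapper` cannot be serialized. A minimal sketch of the common fix, assuming TensorFlow 2.x, is to build the same model with imports from `tensorflow.keras` only:

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Same model as above, but Sequential and Dense now come from the same
# package, so no ModuleWrapper layer is created.
model = Sequential()
model.add(Dense(20, input_dim=2, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam',
              metrics=[tf.keras.metrics.RootMeanSquaredError()])

# model.fit(x_train_norm, y_train, epochs=1000, batch_size=6, verbose=1)
model.save("model.h5")  # should no longer raise the get_config() error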

Related

not able to load the image and pass it to preprocessing for the model prediction

I am trying to upload an image from the local system within the same directory. After uploading, when I pass it through OpenCV split and merge for the b, g, and r channels, I get the error ValueError: not enough values to unpack (expected 3, got 0)
Error:
This is the error that is shown. Is there any way to debug in Streamlit so that I can track changes at different lines of code (such as the image path)? When executed in Google Colab as individual ipynb files, the same code runs properly and I get the required classification.
ValueError: not enough values to unpack (expected 3, got 0)
Traceback:
File "C:\Users\ADARSH\anaconda3\lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 564, in _run_script
    exec(code, module.__dict__)
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 76, in <module>
    main()
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 68, in main
    mask = imageToTensor('image')
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 44, in imageToTensor
    b,g,r = cv2.split(bgr_img)
My entire Streamlit app code:
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
from sklearn.utils import shuffle
from tqdm import tqdm_notebook
import streamlit as st
from PIL import Image as impo
from fastai import *
from fastai.vision import *
from torchvision.models import *

class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

def read_image(name):
    image = st.file_uploader("Upload an "+ name, type=["png", "jpg", "jpeg",'tif'])
    if image is not None:
        im = impo.open(image)
        im.filename = image.name
    return image

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b,g,r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r,g,b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H,W,C = rgb_img.shape
    rgb_img = rgb_img[(H-sz)//2:(sz +(H-sz)//2),(H-sz)//2:(sz +(H-sz)//2),:] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_infernce():
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        pred, pred_idx, probs = learn_inference.predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write(f'Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    image = read_image('image')
    mask = imageToTensor('image')
    if mask is not None:
        get_prediction('mask')

if __name__ == "__main__":
    main()
In your main function you are passing the string 'image' instead of your variables, and I also think your read_image is not well structured.
What you should do is first save the uploaded file in a directory, then fetch the file from that directory and pass it to imageToTensor() as a parameter. That's one workaround which gives you total control over the file. Otherwise you will get other error messages after the first error is fixed.
You can also automate a few lines of code in a separate Python file to delete uploaded files from the directory after a given duration (a sketch of such a script is given at the end of this answer).
Note: keep an eye on the imports, because I skipped them to keep the code short.
class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

# Refactored read_image()
def get_uploaded_image():
    upload = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg",'tif'])
    if upload is not None:
        st.write(upload.name)
        # Create the directory and save the image file before proceeding.
        os.makedirs("data/uploadedImages/", exist_ok=True)
        file_path = os.path.join("data/uploadedImages/", upload.name)
        with open(file_path, "wb") as user_file:
            user_file.write(upload.getbuffer())
        return file_path  # fixed indentation: return inside the if-block

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b,g,r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r,g,b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H,W,C = rgb_img.shape
    # note: the horizontal slice should use W, not H, or non-square images crop incorrectly
    rgb_img = rgb_img[(H-sz)//2:(sz +(H-sz)//2),(W-sz)//2:(sz +(W-sz)//2),:] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_inference():
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        learner = learn_inference()  # load the exported learner, then predict
        pred, pred_idx, probs = learner.predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write('Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    # Holds the saved file path
    user_image = get_uploaded_image()
    if user_image is not None:
        # Pass the path to imageToTensor() as a parameter.
        mask = imageToTensor(user_image)
        get_prediction(mask)

if __name__ == "__main__":
    main()
output: (screenshot of the working app omitted)
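As for the cleanup suggestion above, a minimal sketch of a separate script could look like this. The directory name matches the one used in the answer; the one-hour age threshold and the script name are assumptions, not part of the original answer:

# cleanup_uploads.py -- hypothetical helper; run it periodically (cron, Task
# Scheduler, etc.) to delete uploads older than the threshold below.
import os
import time

UPLOAD_DIR = "data/uploadedImages/"  # the directory the Streamlit app saves to
MAX_AGE_SECONDS = 60 * 60            # assumed threshold: one hour

def delete_old_uploads():
    if not os.path.isdir(UPLOAD_DIR):
        return
    for name in os.listdir(UPLOAD_DIR):
        path = os.path.join(UPLOAD_DIR, name)
        # remove regular files whose last modification is older than the threshold
        if os.path.isfile(path) and time.time() - os.path.getmtime(path) > MAX_AGE_SECONDS:
            os.remove(path)

if __name__ == "__main__":
    delete_old_uploads()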

Type error when fine-tuning a bert-large-uncased-whole-word-masking model by Huggingface

I am trying to fine-tune a Hugging Face bert-large-uncased-whole-word-masking model and I get a TypeError like this when training:
"TypeError: only integer tensors of a single element can be converted to an index"
Here is the code:
train_inputs = tokenizer(text_list[0:457], return_tensors='pt', max_length=512, truncation=True, padding='max_length')
train_inputs['labels']= train_inputs.input_ids.detach().clone()
Then I randomly mask about 15% of the tokens in the input IDs and define a class for the dataset; the error happens in the training loop:
class MeditationsDataset(torch.utils.data.Dataset):
    def __init__(self, encodings):
        self.encodings = encodings
    def __getitem__(self, idx):
        return {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
    def __len__(self):
        return self.encodings.input_ids

train_dataset = MeditationsDataset(train_inputs)
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=8, shuffle=False)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

from transformers import BertModel, AdamW
model = BertModel.from_pretrained("bert-large-uncased-whole-word-masking")
model.to(device)
model.train()
optim = AdamW(model.parameters(), lr=1e-5)
num_epochs = 2

from tqdm.auto import tqdm
for epoch in range(num_epochs):
    loop = tqdm(train_dataloader, leave=True)
    for batch in loop:
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optim.step()
        optim.zero_grad()
The error happens at `for batch in loop`.
Does anybody understand it and know how to solve it? Thanks in advance for your help.
In the class MeditationsDataset, in the function __getitem__, torch.tensor(val[idx]) is deprecated by PyTorch; you should use val[idx].clone().detach() instead.
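A sketch of the dataset class with that change applied. Two further observations, which follow from the posted code rather than from the original answer: __len__ returns the whole input_ids tensor, and calling len() on a multi-element tensor is exactly what raises "only integer tensors of a single element can be converted to an index", so it should return an integer; and for masked-language-model training you would normally load BertForMaskedLM rather than BertModel, since BertModel's outputs carry no loss attribute.

import torch

class MeditationsDataset(torch.utils.data.Dataset):
    def __init__(self, encodings):
        self.encodings = encodings

    def __getitem__(self, idx):
        # clone/detach an existing tensor instead of calling torch.tensor() on it
        return {key: val[idx].clone().detach() for key, val in self.encodings.items()}

    def __len__(self):
        # must return an int; returning the tensor itself makes the DataLoader fail
        return len(self.encodings.input_ids)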

External code incompatibility with metadata?

If I use the same setup with metadata for an external code component and an explicit component, it seems to fail with the ExternalCode comp. The code below has both components. As is, there is no error; if I comment out the explicit component part, I get an error:
comp = GFWrapper(DVLO=np.array(DVLIST))
TypeError: __init__() got an unexpected keyword argument 'DVLO'
import numpy as np
import json as js
import re,sys,subprocess,os
from openmdao.api import Problem, Group, IndepVarComp
from openmdao.api import ScipyOptimizeDriver
from openmdao.api import ExternalCode

class GFWrapper(ExternalCode):
    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']
        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')
        self.input_file = 'GFWrapper_input.dat'
        self.output_file = 'GFWrapper_output.dat'
        self.options['external_input_files'] = [self.input_file,]
        self.options['external_output_files'] = [self.output_file,]
        self.options['command'] = [
            'python', 'run.py', self.input_file, self.output_file
        ]
        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']
        # generate the input file for the external code
        outF = open(self.input_file, "w")
        for dv in DVLO:
            outF.write(inputs[dv])
            print(dv, inputs[dv])
            outF.write("\n")
        outF.close()
        # the parent compute function actually runs the external code
        super(GFWrapper, self).compute(inputs, outputs)
        # parse the output file from the external code
        file_contents = np.loadtxt(self.output_file)
        outputs['OBJECTIVE'] = file_contents[0]

from openmdao.api import ExplicitComponent
#
class GFWrapper(ExplicitComponent):
    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']
        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')
        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']
        powetemp = 0
        for dv in DVLO:
            powetemp += inputs[dv]
        outputs['OBJECTIVE'] = powetemp
#
DVLIST = ['DV1','DV2']
DVMIN = [2,11]
DVMAX = [3,12]
InitDVVal = [3,5]
nr_of_desvar = len(DVLIST)

top = Problem()
top.model = model = Group()
# Introduce independent variables; later these will be design variables (optimization parameters)
inputs_comp = IndepVarComp()
for i in range(nr_of_desvar):
    inputs_comp.add_output(DVLIST[i], InitDVVal[i])
# Add components/subsystems to the Group model
model.add_subsystem('inputs_comp', inputs_comp)
comp = GFWrapper(DVLO=np.array(DVLIST))
model.add_subsystem('GFWrapper', comp)
This is a bug in OpenMDAO version 2.2. The error occurs because we didn't include **kwargs in the __init__ statement for ExternalCode. We intend to fix this for V2.3, but in the meantime you can work around it for your GFWrapper component by adding an __init__ method like this:
class GFWrapper(ExternalCode):
    def __init__(self, **kwargs):
        super(GFWrapper, self).__init__()
        self.metadata.update(kwargs)
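With that workaround in place, the instantiation from the question should work unchanged; a brief usage sketch, assuming the patched GFWrapper subclass and the DVLIST/model setup from the question above:

# instantiation no longer raises TypeError for the DVLO keyword
comp = GFWrapper(DVLO=np.array(DVLIST))
model.add_subsystem('GFWrapper', comp)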

Specify Input Argument with KerasRegressor

I use a Keras neural network and I would like the input dimension to be automatically set, not hardcoded like in every tutorial I have seen so far. How could I accomplish this?
My code:
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor

seed = 1
X = df_input
Y = df_res

def baseline_model(x):
    # create model
    model = Sequential()
    model.add(Dense(20, input_dim=x, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mean_absolute_error', optimizer='adam')
    return model

inpt = len(X.columns)
estimator = KerasRegressor(build_fn=baseline_model(inpt), epochs=2, batch_size=1000, verbose=2)
estimator.fit(X,Y)
And the error I get:
Traceback (most recent call last):
  File "<ipython-input-2-49d765e85d15>", line 20, in <module>
    estimator.fit(X,Y)
TypeError: __call__() missing 1 required positional argument: 'inputs'
I would wrap your baseline_model as follows:
def baseline_model(x):
    def bm():
        # create model
        model = Sequential()
        model.add(Dense(20, input_dim=x, kernel_initializer='normal', activation='relu'))
        model.add(Dense(1, kernel_initializer='normal'))
        # Compile model
        model.compile(loss='mean_absolute_error', optimizer='adam')
        return model
    return bm
And then define and fit the KerasRegressor as:
estimator = KerasRegressor(build_fn=baseline_model(inpt), epochs=2, batch_size=1000, verbose=2)
estimator.fit(X, Y)
This avoids having to hardcode the input dimension in baseline_model.
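As an aside, the scikit-learn wrappers can also forward extra keyword arguments to build_fn themselves, which avoids the closure entirely. This variant is an assumption based on the wrapper's sk_params filtering, not part of the original answer; note the build_fn parameter is renamed (input_dim_param here is an arbitrary name) so it cannot clash with the x argument of Sequential.fit:

# Hypothetical variant: KerasRegressor filters its extra keyword arguments
# against build_fn's signature and passes the matches when building the model.
def baseline_model(input_dim_param):
    model = Sequential()
    model.add(Dense(20, input_dim=input_dim_param, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_absolute_error', optimizer='adam')
    return model

estimator = KerasRegressor(build_fn=baseline_model, input_dim_param=inpt,
                           epochs=2, batch_size=1000, verbose=2)
estimator.fit(X, Y)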
I tried that and it works:
def create_model(max_features, num_class):
    def bm():
        model = Sequential()
        model.add(Dense(512, input_shape=(max_features,)))
        model.add(Activation('relu'))
        model.add(Dropout(0.3))
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.3))
        model.add(Dense(num_class, activation='softmax'))
        model.summary()
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam', metrics=['accuracy'])
        return model
    return bm
and then:
model_clf = KerasClassifier(
    build_fn=create_model(max_features, num_class), epochs=10,
    batch_size=32, verbose=2)
history = model_clf.fit(
    X_train, y_train,
    batch_size=32,
    epochs=10,
    verbose=2,
    validation_data=(X_test, y_test))

openmdao v1.4 optimization with metamodel

I wish to perform an optimization with OpenMDAO 1.4 on a metamodel. Using the tutorials, I have built up a problem that I do not manage to solve: I think the problem comes from a misuse of setup() and run(). I do not manage to train my metamodel and to optimize on it at the same time (perhaps I should use two different "groups" to do this...).
Here is my code:
from __future__ import print_function
from openmdao.api import Component, Group, MetaModel, IndepVarComp, ExecComp, NLGaussSeidel, KrigingSurrogate, FloatKrigingSurrogate
import numpy as np

class KrigMM(Group):
    ''' FloatKriging gives responses as floats '''
    def __init__(self):
        super(KrigMM, self).__init__()
        # Create meta_model for f_x as the response
        pmm = self.add("pmm", MetaModel())
        pmm.add_param('x', val=0.)
        pmm.add_output('f_x:float', val=0., surrogate=FloatKrigingSurrogate())
        self.add('p1', IndepVarComp('x', 0.0))
        self.connect('p1.x', 'pmm.x')
        # mm.add_output('f_xy:norm_dist', val=(0.,0.), surrogate=KrigingSurrogate())

if __name__ == '__main__':
    # Setup and run the model.
    from openmdao.core.problem import Problem
    from openmdao.drivers.scipy_optimizer import ScipyOptimizer
    from openmdao.core.driver import Driver
    import numpy as np
    import doe_lhs

    #prob = Problem(root=ParaboloidProblem())
    ###########################################################
    prob = Problem(root=Group())
    prob.root.add('meta', KrigMM(), promotes=['*'])
    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.add_desvar('p1.x', lower=0, upper=10)
    prob.driver.add_objective('pmm.f_x:float')
    prob.setup()
    prob['pmm.train:x'] = np.linspace(0,10,20)
    prob['pmm.train:f_x:float'] = np.sin(prob['pmm.train:x'])
    prob.run()
    print('\n')
    print('Minimum of %f found for meta at %f' % (prob['pmm.f_x:float'], prob['pmm.x'])) # predicted value
I believe your problem is actually working fine. It's just that the sinusoid you've picked has a local optimum at 0.0, which happens to be your initial condition.
If I change the initial condition as follows:
prob.setup()
prob['p1.x'] = 5
prob['pmm.train:x'] = np.linspace(0,10,20)
prob['pmm.train:f_x:float'] = np.sin(prob['pmm.train:x'])
prob.run()
I get:
Optimization terminated successfully. (Exit mode 0)
Current function value: [-1.00004544]
Iterations: 3
Function evaluations: 3
Gradient evaluations: 3
Optimization Complete
-----------------------------------
Minimum of -1.000045 found for meta at 4.710483
