SparseCategoricalCrossentropy.__init__() got an unexpected keyword argument 'ignore_class' using Streamlit / Python

I am classifying images using Streamlit and Python, and I am getting the following error when my app loads the saved Keras model:
TypeError: SparseCategoricalCrossentropy.__init__() got an unexpected keyword argument 'ignore_class'
Traceback:
File "C:\Users\DELL\AppData\Local\Programs\Python\Python310\lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 565, in _run_script
exec(code, module.__dict__)
File "C:\Users\DELL\Desktop\Gradio\flask and htm\project-folder\backend\org.py", line 18, in <module>
model = load_model()
File "C:\Users\DELL\AppData\Local\Programs\Python\Python310\lib\site-packages\streamlit\runtime\legacy_caching\caching.py", line 625, in wrapped_func
return get_or_create_cached_value()
File "C:\Users\DELL\AppData\Local\Programs\Python\Python310\lib\site-packages\streamlit\runtime\legacy_caching\caching.py", line 609, in get_or_create_cached_value
return_value = non_optional_func(*args, **kwargs)
File "C:\Users\DELL\Desktop\Gradio\flask and htm\project-folder\backend\org.py", line 8, in load_model
model = tf.keras.models.load_model('model/my_model.h5')
File "C:\Users\DELL\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\DELL\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\losses.py", line 153, in from_config
return cls(**config)
This is my code.
import streamlit as st
import tensorflow as tf

st.set_option('deprecation.showfileUploaderEncoding', False)

@st.cache(allow_output_mutation=True)
def load_model():
    model = tf.keras.models.load_model('model/my_model.h5')
    return model

model = load_model()

st.write("""
# Image Classification App
""")

file = st.file_uploader("Please upload an image", type=["jpg", "png"])

import cv2
from PIL import Image, ImageOps
import numpy as np

def import_and_predict(image_data, model):
    size = (180, 180)
    image = ImageOps.fit(image_data, size, Image.LANCZOS)
    img = np.asarray(image)
    img_reshape = img[np.newaxis, ...]
    prediction = model.predict(img_reshape)
    return prediction

if file is None:
    st.text("Please upload an image file")
else:
    image = Image.open(file)
    st.image(image, use_column_width=True)
    predictions = import_and_predict(image, model)
    class_names = ['dog', 'cat', 'horse']
    string = "This image most likely is a: " + class_names[np.argmax(predictions)]
    st.success(string)

Try loading the model with compile=False. This error typically means a version mismatch: the model was saved with a newer TensorFlow/Keras whose SparseCategoricalCrossentropy accepts an ignore_class argument, while the version you are running does not.
Try it out:

def load_model():
    model = tf.keras.models.load_model('model/my_model.h5', compile=False)
    return model

This loads the model for inference only, skipping deserialization of the saved loss and optimizer. If you still face issues, it is most likely that version conflict: the model was saved with a different version of TensorFlow than the one installed.
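If you do need to train or evaluate after loading, a minimal sketch (assuming the model was originally compiled with sparse categorical cross-entropy and the 'adam' optimizer; swap in whatever you actually used) is to load with compile=False and re-compile with a loss the installed Keras version supports:

import tensorflow as tf

model = tf.keras.models.load_model('model/my_model.h5', compile=False)

# Re-attach a loss/optimizer that the installed Keras version understands,
# without the 'ignore_class' argument that older versions do not accept.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'],
)

Alternatively, upgrading TensorFlow to match the version that saved the model removes the mismatch altogether.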

Related

BS4: AttributeError: 'NoneType' object stops the parser from working

The project: fetch meta-data for the popular WordPress plugins. Gathering the first 50 URLs gives 50 plugins of interest, but the challenge is that I want to fetch the meta-data of all existing plugins and afterwards filter out the ones with the newest timestamp, i.e. those that were updated most recently. It is all about actuality...
https://wordpress.org/plugins/wp-job-manager
https://wordpress.org/plugins/ninja-forms
import requests
from bs4 import BeautifulSoup
from concurrent.futures.thread import ThreadPoolExecutor

url = "https://wordpress.org/plugins/browse/popular/{}"


def main(url, num):
    with requests.Session() as req:
        print(f"Collecting Page# {num}")
        r = req.get(url.format(num))
        soup = BeautifulSoup(r.content, 'html.parser')
        link = [item.get("href")
                for item in soup.findAll("a", rel="bookmark")]
        return set(link)


with ThreadPoolExecutor(max_workers=20) as executor:
    futures = [executor.submit(main, url, num)
               for num in [""] + [f"page/{x}/" for x in range(2, 50)]]

allin = []
for future in futures:
    allin.extend(future.result())


def parser(url):
    with requests.Session() as req:
        print(f"Extracting {url}")
        r = req.get(url)
        soup = BeautifulSoup(r.content, 'html.parser')
        target = [item.get_text(strip=True, separator=" ") for item in soup.find(
            "h3", class_="screen-reader-text").find_next("ul").findAll("li")[:8]]
        head = [soup.find("h1", class_="plugin-title").text]
        new = [x for x in target if x.startswith(
            ("V", "Las", "Ac", "W", "T", "P"))]
        return head + new


with ThreadPoolExecutor(max_workers=50) as executor1:
    futures1 = [executor1.submit(parser, url) for url in allin]

for future in futures1:
    print(future.result())
see the results:
Extracting https://wordpress.org/plugins/tuxedo-big-file-uploads/Extracting https://wordpress.org/plugins/cherry-sidebars/
Extracting https://wordpress.org/plugins/meks-smart-author-widget/
Extracting https://wordpress.org/plugins/wp-limit-login-attempts/
Extracting https://wordpress.org/plugins/automatic-translator-addon-for-loco-translate/
Extracting https://wordpress.org/plugins/event-organiser/
Traceback (most recent call last):
File "/home/martin/unbenannt0.py", line 45, in <module>
print(future.result())
File "/home/martin/anaconda3/lib/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/home/martin/anaconda3/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/home/martin/anaconda3/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/martin/unbenannt0.py", line 34, in parser
"h3", class_="screen-reader-text").find_next("ul").findAll("li")[:8]]
AttributeError: 'NoneType' object has no attribute 'find_next'
Well, I have a severe error:
AttributeError: 'NoneType' object has no attribute 'find_next'
It looks like soup.find("h3", class_="screen-reader-text") has not found anything. We could either break that line up and only call find_next if there was a result, or use a try/except that catches the AttributeError.
At the moment I do not know how to fix the whole thing properly; I only know that we can surround the offending code with something like:
try:
    code that causes error
except AttributeError:
    print(f"AttributeError on {some data here}, {whatever else would be of value}, {...}")
    # ... whatever action is thinkable to take here
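A minimal sketch of both options, applied to the offending lookup inside parser (this reuses the requests/BeautifulSoup imports from the code above and assumes url is the plugin page being parsed; pages with unexpected markup are skipped instead of crashing the future):

def parser(url):
    with requests.Session() as req:
        print(f"Extracting {url}")
        r = req.get(url)
        soup = BeautifulSoup(r.content, 'html.parser')
        heading = soup.find("h3", class_="screen-reader-text")
        if heading is None:
            # Guard clause: this plugin page lacks the expected heading.
            print(f"No 'screen-reader-text' heading on {url}, skipping")
            return []
        try:
            target = [item.get_text(strip=True, separator=" ")
                      for item in heading.find_next("ul").findAll("li")[:8]]
            head = [soup.find("h1", class_="plugin-title").text]
        except AttributeError:
            # The <ul> or the plugin title is missing on this page.
            print(f"Unexpected markup on {url}, skipping")
            return []
        new = [x for x in target if x.startswith(("V", "Las", "Ac", "W", "T", "P"))]
        return head + new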
Btw., besides this error I also want to add an option that still gives the results back. See the complete and unaltered error traceback below; it contains valuable call-stack information:
Extracting https://wordpress.org/plugins/automatic-translator-addon-for-loco-translate/
Extracting https://wordpress.org/plugins/wpforo/Extracting https://wordpress.org/plugins/accesspress-social-share/
Extracting https://wordpress.org/plugins/mailoptin/
Extracting https://wordpress.org/plugins/tuxedo-big-file-uploads/
Extracting https://wordpress.org/plugins/post-snippets/
Extracting https://wordpress.org/plugins/woocommerce-payfast-gateway/Extracting https://wordpress.org/plugins/woocommerce-grid-list-toggle/
Extracting https://wordpress.org/plugins/goodbye-captcha/
Extracting https://wordpress.org/plugins/gravity-forms-google-analytics-event-tracking/
Traceback (most recent call last):
File "/home/martin/dev/wordpress_plugin.py", line 44, in <module>
print(future.result())
File "/home/martin/anaconda3/lib/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/home/martin/anaconda3/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/home/martin/anaconda3/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/martin/dev/wordpress_plugin.py", line 33, in parser
"h3", class_="screen-reader-text").find_next("ul").findAll("li")[:8]]
AttributeError: 'NoneType' object has no attribute 'find_next'
I hope this was not too long and complex. Thank you for the help!

Loading saved NER transformers model causes AttributeError?

I have trained and saved some NER models using
torch.save(model)
I need to load these model files (extension .pt) for evaluation using
torch.load('PATH_TO_MODEL.pt')
And I get the following error: 'BertConfig' object has no attribute 'return_dict'
To fix this, I updated my transformers package to the latest version, but the error persists.
This is the stack trace:
Traceback (most recent call last):
File "/home/systematicReviews/train_mtl_3.py", line 523, in <module>
test_loss, test_cr, test_cr_fine = evaluate_i(test_model, optimizer, scheduler, validation_dataloader, args, device)
File "/home/systematicReviews/train_mtl_3.py", line 180, in evaluate_i
e_loss_coarse, e_output, e_labels, e_loss_fine, e_f_output, e_f_labels, mask, e_cumulative_loss = defModel(args, e_input_ids, attention_mask=e_input_mask, P_labels=e_labels, P_f_labels=e_f_labels)
File "/home/anaconda3/envs/systreviewclassifi/lib/python3.6/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/home/anaconda3/envs/systreviewclassifi/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 150, in forward
return self.module(*inputs[0], **kwargs[0])
File "/home/anaconda3/envs/systreviewclassifi/lib/python3.6/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/home/systematicReviews/models/mtl/model.py", line 122, in forward
attention_mask = attention_mask
File "/home/anaconda3/envs/systreviewclassifi/lib/python3.6/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/home/anaconda3/envs/systreviewclassifi/lib/python3.6/site-packages/transformers/modeling_bert.py", line 784, in forward
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
File "/home/anaconda3/envs/systreviewclassifi/lib/python3.6/site-packages/transformers/configuration_utils.py", line 219, in use_return_dict
return self.return_dict and not self.torchscript
AttributeError: 'BertConfig' object has no attribute 'return_dict'
Here is some more information about my system:
- `transformers` version: 3.1.0
- Platform: Linux-4.4.0-186-generic-x86_64-with-debian-stretch-sid
- Python version: 3.6.9
- PyTorch version (GPU?): 1.3.1 (True)
- Tensorflow version (GPU?): not installed (NA)
- Using GPU in script?: Yes
- Using distributed or parallel set-up in script?: No
It worked fine until now, but suddenly this bug appeared. Any help or hint is appreciated.
Pickling the entire model with torch.save(model) ties the checkpoint to the exact transformers version it was created with; the pickled BertConfig from the old version has no return_dict attribute, which the newer library code expects. Try to save your model with model.save_pretrained(output_dir) instead. Then you can load it with model = *.from_pretrained(output_dir), where * is the model class (e.g. BertForTokenClassification).
Saving the state dictionary rather than the entire model is slightly different: instead of torch.save(model), use torch.save(model.state_dict(), 'path_to_the_model/model.pth'), and load it with model.load_state_dict(torch.load('path_to_the_model/model.pth')) after instantiating the model class.
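A minimal sketch of both approaches (assuming `model` is your fine-tuned BertForTokenClassification; the directory, checkpoint name, base model, and num_labels are illustrative):

import torch
from transformers import BertForTokenClassification

# Option 1: the transformers-native way, robust across library versions.
model.save_pretrained('saved_ner_model/')
reloaded = BertForTokenClassification.from_pretrained('saved_ner_model/')

# Option 2: save only the weights and rebuild the architecture in code.
torch.save(model.state_dict(), 'ner_state_dict.pth')
reloaded = BertForTokenClassification.from_pretrained('bert-base-cased', num_labels=9)
reloaded.load_state_dict(torch.load('ner_state_dict.pth'))
reloaded.eval()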

Python3 - coinbase API authentication

I am trying to access my coinbase account using their API.
I am using the following code, which is from https://developers.coinbase.com/docs/wallet/api-key-authentication:
import json, hmac, hashlib, time, requests
from requests.auth import AuthBase

# Before implementation, set environmental variables with the names API_KEY and API_SECRET
API_KEY = 'API_KEY'
API_SECRET = 'API_SECRET'

# Create custom authentication for Coinbase API
class CoinbaseWalletAuth(AuthBase):
    def __init__(self, api_key, secret_key):
        self.api_key = api_key
        self.secret_key = secret_key

    def __call__(self, request):
        timestamp = str(int(time.time()))
        message = timestamp + request.method + request.path_url + (request.body or '')
        signature = hmac.new(self.secret_key, message, hashlib.sha256).hexdigest()
        request.headers.update({
            'CB-ACCESS-SIGN': signature,
            'CB-ACCESS-TIMESTAMP': timestamp,
            'CB-ACCESS-KEY': self.api_key,
        })
        return request

api_url = 'https://api.coinbase.com/v2/'
auth = CoinbaseWalletAuth(API_KEY, API_SECRET)

# Get current user
r = requests.get(api_url + 'user', auth=auth)
print(r.json())
# {u'data': {u'username': None, u'resource': u'user', u'name': u'User'...
However I am getting the following error:
Traceback (most recent call last):
File "test1.py", line 44, in <module>
r = requests.get(api_url + 'user', auth=auth)
File "C:\Users\lclar\virtualenv-env\lib\site-packages\requests\api.py", line 72, in get
return request('get', url, params=params, **kwargs)
File "C:\Users\lclar\virtualenv-env\lib\site-packages\requests\api.py", line 58, in request
return session.request(method=method, url=url, **kwargs)
File "C:\Users\lclar\virtualenv-env\lib\site-packages\requests\sessions.py", line 494, in request
prep = self.prepare_request(req)
File "C:\Users\lclar\virtualenv-env\lib\site-packages\requests\sessions.py", line 437, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "C:\Users\lclar\virtualenv-env\lib\site-packages\requests\models.py", line 309, in prepare
self.prepare_auth(auth, url)
File "C:\Users\lclar\virtualenv-env\lib\site-packages\requests\models.py", line 540, in prepare_auth
r = auth(self)
File "test1.py", line 29, in __call__
signature = hmac.new(self.secret_key, message, hashlib.sha256).encode("utf-8").digest()
File "C:\Users\lclar\AppData\Local\Programs\Python\Python36-32\lib\hmac.py", line 144, in new
return HMAC(key, msg, digestmod)
File "C:\Users\lclar\AppData\Local\Programs\Python\Python36-32\lib\hmac.py", line 42, in __init__
raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
TypeError: key: expected bytes or bytearray, but got 'str'
Can someone please help me?
Thanks in advance.
I am assuming self.secret_key is a str. For Python >= 3.4, in hmac.new(key, msg=None, digestmod=''), the key must be of type bytes or bytearray per the docs: https://docs.python.org/3/library/hmac.html
Likewise, to avoid the follow-up error TypeError: Unicode-objects must be encoded before hashing, encode the message variable as well, as shown below:
signature = hmac.new(self.secret_key.encode(), message.encode(), hashlib.sha256).hexdigest()
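Applied to the __call__ method from the question, a minimal sketch of the fix (only the signature line changes; the rest of the class stays as in the original):

def __call__(self, request):
    timestamp = str(int(time.time()))
    message = timestamp + request.method + request.path_url + (request.body or '')
    # hmac.new() needs bytes for both the key and the message in Python 3.
    signature = hmac.new(self.secret_key.encode('utf-8'),
                         message.encode('utf-8'),
                         hashlib.sha256).hexdigest()
    request.headers.update({
        'CB-ACCESS-SIGN': signature,
        'CB-ACCESS-TIMESTAMP': timestamp,
        'CB-ACCESS-KEY': self.api_key,
    })
    return request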

multivariable non-linear curve_fit with scipy

I have been trying to use scipy.optimize.curve_fit with multiple independent variables. It works fine with the test code I created, but when I try to apply it to my actual data I keep getting the following error:
TypeError: only length-1 arrays can be converted to Python scalars
The shapes of the arrays and the data types of their elements are exactly the same in my test code and my actual code, so I am confused as to why I get this error.
Test code:
import numpy as np
import scipy
from scipy.optimize import curve_fit

def func(x, a, b, c):
    return a + b*x[0]**2 + c*x[1]

x_0 = np.array([1, 2, 3, 4])
x_1 = np.array([5, 6, 7, 8])
X = scipy.array([x_0, x_1])
Y = func(X, 3.1, 2.2, 2.1)
popt, pcov = curve_fit(func, X, Y)
Actual code:
f=open("Exp_Fresnal.csv", 'rb')
reader=csv.reader(f)
for row in reader:
Qz.append(row[0])
Ref.append(row[1])
Ref_F.append(row[2])
Qz_arr,Ref_Farr=scipy.array((Qz)),scipy.array((Ref_F))
x=scipy.array([Qz_arr,Ref_Farr]
def func(x,d,sig_int,sig_cp):
return x[1]*(x[0]*d*(math.exp((-sig_int**2)*(x[0]**2)/2)/(1-cmath.exp(complex(0,1)*x[0]*d)*math.exp((-sig_cp**2)*(x[0]**2)/2))))**2
Y=scipy.array((Ref))
popt, pcov=curve_fit(func,x,Y)
EDIT
Here is the full error message
Traceback (most recent call last):
File "DCM_03.py", line 46, in <module>
popt, pcov=curve_fit(func,x,Y)
File "//anaconda/lib/python2.7/site-packages/scipy/optimize/minpack.py", line 651, in curve_fit
res = leastsq(func, p0, args=args, full_output=1, **kwargs)
File "//anaconda/lib/python2.7/site-packages/scipy/optimize/minpack.py", line 377, in leastsq
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
File "//anaconda/lib/python2.7/site-packages/scipy/optimize/minpack.py", line 26, in _check_func
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
File "//anaconda/lib/python2.7/site-packages/scipy/optimize/minpack.py", line 453, in _general_function
return function(xdata, *params) - ydata
File "DCM_03.py", line 40, in func
return (0.062/(2*x))**4*(x*d*(math.exp((-sig_int**2)*(x**2)/2)/(1-cmath.exp(complex(0,1)*x*d)*math.exp((-sig_cp**2)*(x**2)/2))))**2
TypeError: only length-1 arrays can be converted to Python scalars
I figured out the issue. The problem was the use of math.exp and cmath.exp in the fitting function func; replacing them with np.exp() fixed it. The reason is that math.exp and cmath.exp only accept scalars, whereas curve_fit passes the whole x array into func at once, which is exactly what the "only length-1 arrays can be converted to Python scalars" error complains about. np.exp operates element-wise on arrays.
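A minimal sketch of the vectorized fitting function under that fix (np.exp handles both the real and the complex exponential, so math/cmath are no longer needed; the formula itself is copied unchanged from the question):

import numpy as np

def func(x, d, sig_int, sig_cp):
    # x[0] = Qz values, x[1] = Fresnel reference; every operation is element-wise.
    return x[1] * (x[0] * d * (np.exp((-sig_int**2) * (x[0]**2) / 2)
                   / (1 - np.exp(1j * x[0] * d) * np.exp((-sig_cp**2) * (x[0]**2) / 2))))**2

# Quick check that whole arrays go through, which is what curve_fit passes in:
x_demo = np.array([[0.1, 0.2, 0.3], [1.0, 1.0, 1.0]])
print(func(x_demo, 1.0, 0.1, 0.1))

Note that the complex exponential makes the return value complex; if the model is meant to produce a real reflectivity, the squared magnitude np.abs(...)**2 of the inner term may be what is actually wanted rather than a plain square.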

Google Maps API - Sign Geocode Requests with Python 3

I'm trying to create a script to generate signed urls using my Maps API license to geocode addresses, but I'm having trouble with my sign_url function.
import urllib
import urllib.parse
import base64
import hashlib
import hmac
import re

google_apis_url_http = 'http://maps.googleapis.com'
geocoding_endpoint_json = '/maps/api/geocode/json'

def encode_params(address, client, sensor):
    return urllib.parse.urlencode({'address': address, 'client': client, 'sensor': sensor})

def sign_url(url, private_key):
    decoded_key = base64.urlsafe_b64decode(private_key)
    signature = hmac.new(decoded_key, url, hashlib.sha1)
    encoded_signature = base64.urlsafe_b64encode(signature.digest())
    return encoded_signature

with open("addresses.txt", "r") as inputFile:
    inputList = inputFile.read().splitlines()

for line_address in inputList[0:1]:
    comma_stripped_address = re.sub(",", "", line_address)
    formatted_address = re.sub(" ", "+", comma_stripped_address)
    encoded_params = encode_params(formatted_address, 'gme-********', 'false')
    url_to_sign = geocoding_endpoint_json + '?' + encoded_params
    print(sign_url(url_to_sign, '********'))
Here's the error I'm getting:
Traceback (most recent call last):
File "/Users/[me]/PycharmProjects/Google APIs/geocode_auth.py", line 31, in <module>
print(sign_url(url_to_sign, '********'))
File "/Users/[me]/PycharmProjects/Google APIs/geocode_auth.py", line 18, in sign_url
signature = hmac.new(decoded_key, url, hashlib.sha1)
File "/Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/hmac.py", line 131, in new
return HMAC(key, msg, digestmod)
File "/Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/hmac.py", line 73, in __init__
self.update(msg)
File "/Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/hmac.py", line 79, in update
raise TypeError("expected bytes, but got %r" % type(msg).__name__)
TypeError: expected bytes, but got 'str'
Does anyone know why this TypeError is being raised? I'm modeling the script off of a demo written for Python 2, so it's possible something broke in Python 3...
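For what it's worth, this looks like the same root cause as the Coinbase question above: in Python 3, hmac.new() requires bytes for the message (and the key), whereas in Python 2 a str was already a byte string. A minimal sketch of sign_url with the URL encoded to bytes (assuming, as in the original, that private_key is the URL-safe base64 string from the Maps console):

import base64
import hashlib
import hmac

def sign_url(url, private_key):
    # urlsafe_b64decode already yields bytes; the URL must be encoded explicitly.
    decoded_key = base64.urlsafe_b64decode(private_key)
    signature = hmac.new(decoded_key, url.encode('utf-8'), hashlib.sha1)
    # Decode so the signature can be appended to the URL as a normal str.
    return base64.urlsafe_b64encode(signature.digest()).decode('utf-8')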
