Not able to load the image and pass it to preprocessing for the model prediction - Streamlit

I am trying to upload an image from the local system (in the same directory as the app). After uploading, when I pass the image through OpenCV's split and merge for the b, g, and r channels, I get the error ValueError: not enough values to unpack (expected 3, got 0).
Is there any way to debug in Streamlit so that I can track values at different lines of code (such as the image path)? When executed in Google Colab as individual .ipynb files, the same steps run properly and I get the required classification.
Error:
ValueError: not enough values to unpack (expected 3, got 0)
Traceback:
File "C:\Users\ADARSH\anaconda3\lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 564, in _run_script
exec(code, module.__dict__)
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 76, in <module>
main()
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 68, in main
mask = imageToTensor('image')
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 44, in imageToTensor
b,g,r = cv2.split(bgr_img)
My entire Streamlit app code:
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
from sklearn.utils import shuffle
from tqdm import tqdm_notebook
import streamlit as st
from PIL import Image as impo
from fastai import *
from fastai.vision import *
from torchvision.models import *

class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

def read_image(name):
    image = st.file_uploader("Upload an " + name, type=["png", "jpg", "jpeg", 'tif'])
    if image is not None:
        im = impo.open(image)
        im.filename = image.name
    return image

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b, g, r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r, g, b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H, W, C = rgb_img.shape
    rgb_img = rgb_img[(H-sz)//2:(sz + (H-sz)//2), (H-sz)//2:(sz + (H-sz)//2), :] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_infernce():
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        pred, pred_idx, probs = learn_inference.predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write(f'Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    image = read_image('image')
    mask = imageToTensor('image')
    if mask is not None:
        get_prediction('mask')

if __name__ == "__main__":
    main()

In your main function you are passing the string literals 'image' and 'mask' instead of the variables, so cv2.imread('image') looks for a file literally named image, finds nothing, and returns None, which is what makes the cv2.split line fail with the unpacking error. Your read_image function is also not well structured.
What you should do is first save the uploaded file in a directory, then fetch the file from that directory and pass its path to imageToTensor() as a parameter. That is one workaround which gives you total control over the file; otherwise you will run into further errors after the first one is fixed.
You can also automate a few lines of code in a separate Python file to delete uploaded files from that directory after a given duration (see the sketch after the refactored code below).
Note: keep an eye on the imports, because I skipped them to keep the code short.
class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

# Refactored read_image()
def get_uploaded_image():
    upload = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg", 'tif'])
    if upload is not None:
        st.write(upload.name)
        # Create the directory and save the image file before proceeding.
        os.makedirs("data/uploadedImages/", exist_ok=True)
        file_path = os.path.join("data/uploadedImages/", upload.name)
        with open(file_path, "wb") as user_file:
            user_file.write(upload.getbuffer())
        return file_path  # fixed indentation

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b, g, r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r, g, b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H, W, C = rgb_img.shape
    rgb_img = rgb_img[(H-sz)//2:(sz + (H-sz)//2), (H-sz)//2:(sz + (H-sz)//2), :] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_inference():  # typo fixed: was learn_infernce
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        pred, pred_idx, probs = learn_inference().predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write(f'Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    # Holds the saved file path
    user_image = get_uploaded_image()
    if user_image is not None:
        # Pass the path to imageToTensor() as a parameter.
        mask = imageToTensor(user_image)
        get_prediction(mask)

if __name__ == "__main__":
    main()
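A minimal sketch of the cleanup idea mentioned above, run as a separate Python file: it deletes uploaded files older than a given duration. The directory name matches the one used above; the one-hour threshold is an arbitrary choice.
import os
import time

UPLOAD_DIR = "data/uploadedImages/"  # directory created by the app above
MAX_AGE_SECONDS = 60 * 60            # delete anything older than one hour

for name in os.listdir(UPLOAD_DIR):
    path = os.path.join(UPLOAD_DIR, name)
    # compare the file's last-modified time against the cutoff
    if os.path.isfile(path) and time.time() - os.path.getmtime(path) > MAX_AGE_SECONDS:
        os.remove(path)
You could run it on a schedule (cron, Task Scheduler) or wrap it in a loop with time.sleep.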

Related

bokeh selected.on_change not working for my current setup

Basically, this is an interactive heatmap, but the twist is that the source is updated by reading values from a file that gets updated regularly.
Don't bother about the class generator; it just holds data and runs regularly in a thread.
Make sure a file named Server_dump.txt exists in the same directory as the script, with a single number greater than 0 inside, before you execute the bokeh script.
What basically happens is I change the number inside Server_dump.txt by running echo 4 > Server_dump.txt in bash (you can put any number other than 4), and the script automatically checks the file and plots the new point. If you don't use bash, you can open the file in a text editor, replace the number, and save; the result is the same.
The run function inside the generator class is the one that checks whether the file was modified, reads the number, transforms it into x and y coords, increments the number of taps associated with those coords, and gives the source x, y, taps values based on that number.
That function works fine, and each time I echo a number the correct rectangle is plotted. Now I want to add the functionality that clicking on a certain rectangle triggers a callback to plot a second graph based on the coords of the clicked rectangle, but I can't even get the callback to trigger, even though I have tried other examples with selected.on_change in them and they worked fine.
* If I increase self.taps for a certain rect by writing the number to the file multiple times, the color gets updated, but if I hover over the rect it shows me the past values and not only the latest value.
My bokeh version is 1.0.4.
from functools import partial
from random import random, randint
import threading
import time
from tornado import gen
from os.path import getmtime
from math import pi
import pandas as pd
from bokeh.io import show
from bokeh.models import LinearColorMapper, BasicTicker, widgets, PrintfTickFormatter, ColorBar, ColumnDataSource, FactorRange
from bokeh.plotting import figure, curdoc
from bokeh.layouts import row, column, gridplot

source = ColumnDataSource(data=dict(x=[], y=[], taps=[]))
doc = curdoc()

# sloppy data receiving function to change data to a plottable shape
class generator(threading.Thread):
    def __init__(self):
        super(generator, self).__init__()
        self.chart_coords = {'x': [], 'y': [], 'taps': []}
        self.Pi_coords = {}
        self.coord = 0
        self.pos = 0
        self.col = 0
        self.row = 0
        self.s = 0
        self.t = 0

    def chart_dict_gen(self, row, col):
        self.col = col
        self.row = row + 1
        self.chart_coords['x'] = [i for i in range(1, cla.row)]
        self.chart_coords['y'] = [i for i in range(cla.col, 0, -1)]  # reversed list because chart requires that
        self.chart_coords['taps'] = [0] * (row * col)
        self.taps = [[0 for y in range(col)] for x in range(row)]

    def Pi_dict_gen(self, row, col):
        key = 1
        for x in range(1, row):
            for y in range(1, col):
                self.Pi_coords[key] = (x, y)
                key = key + 1

    def Pi_to_chart(self, N):
        x, y = self.Pi_coords[N][0], self.Pi_coords[N][1]
        return x, y

    def run(self):
        while True:
            if self.t == 0:
                self.t = 1
                continue
            time.sleep(0.1)
            h = getmtime("Server_dump.txt")
            if self.s != h:
                self.s = h
                with open('Server_dump.txt') as f:
                    m = next(f)
                y, x = self.Pi_to_chart(int(m))
                self.taps[x][y] += 1
                # but update the document from callback
                doc.add_next_tick_callback(partial(update, x=x, y=y, taps=self.taps[x][y]))

cla = generator()
cla.chart_dict_gen(15, 15)
cla.Pi_dict_gen(15, 15)
x = cla.chart_coords['x']
y = cla.chart_coords['y']
taps = cla.chart_coords['taps']

@gen.coroutine
def update(x, y, taps):
    taps += taps
    print(x, y, taps)
    source.stream(dict(x=[x], y=[y], taps=[taps]))

colors = ["#CCEBFF","#B2E0FF","#99D6FF","#80CCFF","#66c2FF","#4DB8FF","#33ADFF","#19A3FF", "#0099FF", "#008AE6", "#007ACC","#006BB2", "#005C99", "#004C80", "#003D66", "#002E4C", "#001F33", "#000F1A", "#000000"]
mapper = LinearColorMapper(palette=colors, low=0, high=15)  # low = min(cla.chart_coords['taps']) high = max(cla.chart_coords['taps'])

TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"

p = figure(title="Tou",
           x_range=list(map(str, x)),
           y_range=list(map(str, reversed(y))),
           x_axis_location="above",
           plot_width=900, plot_height=400,
           tools=TOOLS, toolbar_location='below',
           tooltips=[('coords', '@y @x'), ('taps', '@taps%')])

p.grid.grid_line_color = "#ffffff"
p.axis.axis_line_color = "#ef4723"
p.axis.major_tick_line_color = "#af0a36"
p.axis.major_label_text_font_size = "7pt"
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None

p.rect(x="x", y="y",
       width=0.9, height=0.9,
       source=source,
       fill_color={'field': 'taps', 'transform': mapper},
       line_color="#ffffff",
       )

color_bar = ColorBar(color_mapper=mapper,
                     major_label_text_font_size="7pt",
                     ticker=BasicTicker(desired_num_ticks=len(colors)),
                     formatter=PrintfTickFormatter(format="%d%%"),
                     label_standoff=6, border_line_color=None, location=(0, 0))

curdoc().theme = 'dark_minimal'

def ck(attr, old, new):
    print('here')  # doesn't even print in the terminal if I click anywhere

source.selected.on_change('indices', ck)
p.add_layout(color_bar, 'right')
doc.add_root(p)

thread = cla
thread.start()
I wanted at least to get something printed in the terminal, but nothing happens.
You have not actually added any selection tool at all to your plot, so no selection is ever made. You have specified:
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
Those are the only tools that will be added, and none of them makes selections; therefore nothing will ever cause source.selected.indices to be updated. If you are looking for selections based on tap, you must add a TapTool, e.g. with
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom,tap"
Note that there will not be repeated callbacks if you tap the same rect multiple times. The callback only fires when the selection changes, and clicking the same glyph twice in a row results in an identical selection.
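For reference, a minimal, self-contained sketch of the tap-selection wiring (placeholder data, run with bokeh serve; everything except the extra "tap" tool mirrors the structure of the code above):
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure

source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[1, 2, 3]))

# "tap" adds a TapTool, which is the tool that actually makes selections
p = figure(tools="hover,save,pan,box_zoom,reset,wheel_zoom,tap")
p.rect(x="x", y="y", width=0.9, height=0.9, source=source)

def on_select(attr, old, new):
    # new holds the indices of the tapped glyphs; fires only when the selection changes
    print("selected indices:", new)

source.selected.on_change('indices', on_select)
curdoc().add_root(p)
Tapping a rect should then print its index in the server terminal.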

Patch glyph not updated when using multiple ColumnDataSources in bokeh app

I am trying to use the bokeh server to plot a time series together with a shaded percentile band around it. Since bokeh does not support matplotlib's fill_between, this requires constructing a patch of double dimension: the x coordinates followed by their reverse, and the lower band followed by the reversed upper band. Hence, I need two ColumnDataSources to hold the data. However, only the first curve is rendered correctly when the data changes: although the data_source of the GlyphRenderer is updated, the figure does not change. I use bokeh 0.12.3 and have tried several servers and browsers. A complete, and reasonably minimal, example:
import numpy as np
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.layouts import column
from bokeh.io import curdoc
from bokeh.models.widgets import Select

class AppData:
    def __init__(self, n):
        self.p_source = None
        self.c_source = None
        self.x = np.linspace(0, 10, 20)
        self.n = n
        self.ys = [np.sin(self.x) - i for i in range(self.n)]
        self.line = None
        self.patch = None

    def update_module(self, a, b):
        assert b - a == 5
        p_data = dict() if self.p_source is None else self.p_source.data
        c_data = dict() if self.c_source is None else self.c_source.data
        ys = [self.ys[j] for j in range(a, b)]
        if "x" not in c_data:
            c_data["x"] = self.x
            p_data["x"] = c_data["x"].tolist() + c_data["x"][::-1].tolist()
        n_r = len(ys[0])
        n_p = 2 * n_r
        if "ys" not in p_data:
            p_data["ys"] = np.empty((n_p))
        p_data["ys"][:n_r] = ys[0]
        p_data["ys"][n_r:] = np.flipud(ys[-1])
        c_data["y"] = ys[2]
        if self.p_source is None:
            self.p_source = ColumnDataSource(data=p_data)
        else:
            self.p_source.data.update(p_data)
        if self.c_source is None:
            self.c_source = ColumnDataSource(data=c_data)
        else:
            self.c_source.data.update(c_data)
        if self.line is not None:
            print(max(self.line.data_source.data["y"]))
            print(max(self.patch.data_source.data["ys"]))  # The value changes, but the figure does not!

# initialize
app_data = AppData(10)
app_data.update_module(4, 4 + 5)

s1 = figure(width=500, plot_height=125, title=None, toolbar_location="above")
app_data.line = s1.line("x", "y", source=app_data.c_source)
app_data.patch = s1.patch("x", "ys", source=app_data.p_source, alpha=0.3, line_width=0)

select = Select(title="Case", options=[str(i) for i in range(5)], value="4")

def select_case(attrname, old, new):
    a = int(select.value)
    app_data.update_module(a, a + 5)

select.on_change('value', select_case)

layout = column(select, s1)
curdoc().add_root(layout)
curdoc().title = "Example of patches not being updated"
I am certainly not very experienced in using bokeh, so I could very well be using the system wrong. Any help on this matter would be greatly appreciated!
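For reference, the double-dimension patch construction described above, in isolation (a sketch with made-up band data, independent of the app code; plot_width/plot_height match the bokeh 0.12-era API used above):
import numpy as np
from bokeh.plotting import figure, show

x = np.linspace(0, 10, 20)
lower = np.sin(x) - 1.0   # lower percentile curve
upper = np.sin(x) + 1.0   # upper percentile curve

# walk along the lower edge, then back along the upper edge in reverse,
# so the polygon outline encloses the band
band_x = np.concatenate([x, x[::-1]])
band_y = np.concatenate([lower, upper[::-1]])

p = figure(plot_width=500, plot_height=200)
p.patch(band_x, band_y, alpha=0.3, line_width=0)
show(p)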

OS X - How to create QLabel which just fits its text with exotic character?

In the following code, characterLabel is a bit too small, but characterLabelEntire is too big.
How do I create a QLabel that just fits its text?
from PySide.QtCore import *
from PySide.QtGui import *

targetChar = unichr(0x0e0e)

# Setting
font = QFont()
font.setPointSize(50)
sizePolicy = QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)

# The following code will create a character label,
# but it seems the size of the label is a bit small,
# because some region of the character is lost.
characterLabel = QLabel(targetChar)
characterLabel.setFont(font)
characterLabel.setSizePolicy(sizePolicy)

# The following code will create a character label, but it's too big.
characterLabelEntire = QLabel(targetChar)
characterLabelEntire.setFont(font)
characterLabelEntire.setFixedSize(100, 100)
characterLabel.setSizePolicy(sizePolicy)

# Show and Raise
characterLabel.show()
characterLabelEntire.show()
characterLabel.raise_()
characterLabelEntire.raise_()

try:
    app = QApplication([])
    app.exec_()
except:
    pass
It seems the designer of this exotic character breaks the usual font rules. I found the following article:
http://doc.qt.io/qt-4.8/qfontmetrics.html#descent
Anyway, in my environment (OS X 10.11), a larger descent is needed to show the entire character image.
from PySide.QtCore import *
from PySide.QtGui import *

try:
    app = QApplication([])
except:
    pass

targetChar = unichr(0x0e0e)

# Setting
font = QFont()
font.setPointSize(100)

# The following code will create a character label.
characterLabel = QLabel(targetChar)
characterLabel.setFont(font)
fm = characterLabel.fontMetrics()
pixelsWide = fm.width(targetChar)
pixelsHigh = fm.ascent() + fm.descent() * 3
characterLabel.setFixedSize(pixelsWide, pixelsHigh)
characterLabel.show()
characterLabel.raise_()

try:
    app.exec_()
except:
    pass
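For comparison, the standard way to get a tight rectangle from the font metrics is QFontMetrics.boundingRect; a sketch reusing the fm and characterLabel from the code above. On an exotic glyph like this one it may still clip, which is why the answer code triples the descent instead:
# tight rect straight from the font metrics; may still clip glyphs
# whose ink extends beyond the reported ascent/descent
rect = fm.boundingRect(targetChar)
characterLabel.setFixedSize(rect.width(), rect.height())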

Plotting with date times and matplotlib

So, I'm using a function from this website to (try to) make stick plots of some netCDF4 data. There is an excerpt of my code below; I got my data from here.
The stick_plot(time, u, v) function is EXACTLY as it appears on the website I linked, which is why I did not include a copy of it below.
When I run my code I get the following error. Any idea how to get around this?
AttributeError: 'numpy.float64' object has no attribute 'toordinal'
The description of time from the netCDF4 file:
<type 'netCDF4._netCDF4.Variable'>
float64 time(time)
long_name: time
standard_name: time
units: days since 1900-01-01 00:00:00Z
axis: T
ancillary_variables: time_quality_flag
data_min: 2447443.375
data_max: 2448005.16667
unlimited dimensions:
current shape = (13484,)
filling off
Here is an excerpt of my code:
Imports:
import matplotlib.pyplot as plot
import numpy as np
from netCDF4 import Dataset
import os
from matplotlib.dates import date2num
from datetime import datetime
Trying to generate the plots:
path = '/Users/Kyle/Documents/Summer_Research/east_coast_currents/'
currents = [x for x in os.listdir('%s' % (path)) if '.DS' not in x]
for datum in currents:
    working_data = Dataset('%s' % (path + datum), 'r', format='NETCDF4')
    u = working_data.variables['u'][:][:100]
    v = working_data.variables['v'][:][:100]
    time = working_data.variables['time'][:][:100]
    q = stick_plot(time, u, v)
    ref = 1
    qk = plot.quiverkey(q, 0.1, 0.85, ref,
                        "%s N m$^{-2}$" % ref,
                        labelpos='N', coordinates='axes')
    _ = plot.xticks(rotation=70)
Joe Kington answered my question. The netCDF4 file read the times in as datetime objects already, so all I had to do was replace date2num(time) with time, which fixed everything.
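Conversely, if the times come back as raw floats (as the numpy.float64 error above suggests they can), netCDF4's num2date will turn them into datetime objects using the units string from the file header. A sketch, assuming the same working_data, u, and v as the excerpt above:
from netCDF4 import num2date

time_var = working_data.variables['time']
# "days since 1900-01-01 00:00:00Z" floats -> datetime objects
dates = num2date(time_var[:100], units=time_var.units)
q = stick_plot(dates, u, v)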

sign recognition like the handwritten digits example in scikit-learn (python)

I'm looking at this example: http://scikit-learn.org/stable/auto_examples/plot_digits_classification.html#example-plot-digits-classification-py
on handwritten digits in the scikit-learn Python library.
I would like to prepare a 3D array (N * a * b) where N is the number of my images (75) and a * b is the matrix of an image (like the 8x8 shape in the example).
My problem is: my signs have a different shape in every image: (202, 230), (250, 322), and so on, which gives me the error ValueError: array dimensions must agree except for d_0 in this code:
#here there is the error:
grigiume = np.dstack(listagrigie)
print(grigiume.shape)
grigiume=np.rollaxis(grigiume,-1)
print(grigiume.shape)
Is there a way to resize all images to a standard size (e.g. 200x200), or a way to build a 3D array with matrices (a, b) where a != b, that does not give me an error in this code:
data = digits.images.reshape((n_samples, -1))
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
My code:
import os
import glob
import cv2  # needed for cv2.imread / cv2.cvtColor below
import numpy as np
from numpy import array
from sklearn import svm, metrics
import pylab as pl

listagrigie = []
path = 'resize2/'
for infile in glob.glob(os.path.join(path, '*.jpg')):
    print("current file is: " + infile)
    colorato = cv2.imread(infile)
    grigiscala = cv2.cvtColor(colorato, cv2.COLOR_BGR2GRAY)
    listagrigie.append(grigiscala)

print(len(listagrigie))
#here there is the error:
grigiume = np.dstack(listagrigie)
print(grigiume.shape)
grigiume = np.rollaxis(grigiume, -1)
print(grigiume.shape)

#last step (pasted from the scikit-learn example; digits is the example's dataset)
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)

# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])

# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])

print "Classification report for classifier %s:\n%s\n" % (
    classifier, metrics.classification_report(expected, predicted))
print "Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)

for index, (image, prediction) in enumerate(
        zip(digits.images[n_samples / 2:], predicted)[:4]):
    pl.subplot(2, 4, index + 5)
    pl.axis('off')
    pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
    pl.title('Prediction: %i' % prediction)
pl.show()
You have to resize all your images to a fixed size. For instance, using the Image class of PIL or Pillow:
from PIL import Image
image = Image.open("/path/to/input_image.jpeg")
image.thumbnail((200, 200), Image.ANTIALIAS)
image.save("/path/to/output_image.jpeg")
Edit: the above won't work; try resize instead:
from PIL import Image
image = Image.open("/path/to/input_image.jpeg")
image = image.resize((200, 200), Image.ANTIALIAS)
image.save("/path/to/output_image.jpeg")
Edit 2: there might be a way to preserve the aspect ratio and pad the rest with black pixels, but I don't know how to do it in a few PIL calls. You could use PIL.Image.thumbnail and use numpy to do the padding, though (a sketch follows).
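A minimal sketch of that thumbnail-plus-padding idea, assuming grayscale input (as in the question) and an arbitrary 200x200 target:
import numpy as np
from PIL import Image

def pad_to_square(path, size=200):
    # thumbnail shrinks in place and preserves the aspect ratio
    image = Image.open(path).convert("L")
    image.thumbnail((size, size), Image.ANTIALIAS)
    arr = np.asarray(image)
    # black canvas, then paste the shrunken image in the center
    padded = np.zeros((size, size), dtype=arr.dtype)
    top = (size - arr.shape[0]) // 2
    left = (size - arr.shape[1]) // 2
    padded[top:top + arr.shape[0], left:left + arr.shape[1]] = arr
    return padded
Every returned array then has the same (200, 200) shape, so the np.dstack call from the question will no longer complain.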
