Plotting with date times and matplotlib - datetime

So, I'm using a function from this website to (try to) make stick plots of some netCDF4 data. There is an excerpt of my code below. I got my data from here.
The stick_plot(time,u,v) function is EXACTLY as it appears in the website I linked, which is why I did not include a copy of it below.
When I run my code I get the following error. Any idea how to get around this?
AttributeError: 'numpy.float64' object has no attribute 'toordinal'
The description of time from the netCDF4 file:
<type 'netCDF4._netCDF4.Variable'>
float64 time(time)
long_name: time
standard_name: time
units: days since 1900-01-01 00:00:00Z
axis: T
ancillary_variables: time_quality_flag
data_min: 2447443.375
data_max: 2448005.16667
unlimited dimensions:
current shape = (13484,)
filling off
Here is an excerpt of my code:
imports:
import matplotlib.pyplot as plot
import numpy as np
from netCDF4 import Dataset
import os
from matplotlib.dates import date2num
from datetime import datetime
trying to generate the plots:
path = '/Users/Kyle/Documents/Summer_Research/east_coast_currents/'
currents = [x for x in os.listdir('%s' %(path)) if '.DS' not in x]
for datum in currents:
    working_data = Dataset('%s' %(path+datum), 'r', format = 'NETCDF4')
    u = working_data.variables['u'][:][:100]
    v = working_data.variables['v'][:][:100]
    time = working_data.variables['time'][:][:100]
    q = stick_plot(time, u, v)
    ref = 1
    qk = plot.quiverkey(q, 0.1, 0.85, ref,
                        "%s N m$^{-2}$" % ref,
                        labelpos='N', coordinates='axes')
    _ = plot.xticks(rotation=70)

Joe Kington answered my question. The netCDF4 file read the times in as datetime objects, so all I had to do was replace date2num(time) with time, which fixed everything.
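For reference, if the time variable had instead come back as raw numeric offsets, it could be converted to datetimes explicitly with netCDF4's num2date before plotting. A minimal sketch reusing the names from the excerpt above (the [:100] slice is just illustrative):
from netCDF4 import Dataset, num2date

working_data = Dataset(path + datum, 'r', format='NETCDF4')
time_var = working_data.variables['time']
# Convert the numeric offsets to datetime objects using the units
# string stored in the file ("days since 1900-01-01 00:00:00Z")
times = num2date(time_var[:][:100], units=time_var.units)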

Related

not able to load the image and pass it to preprocessing for the model prediction

I am trying to upload an image from the local system in the same directory. After uploading, when I pass it through OpenCV's split and merge for the b, g, and r channels, I get the error below. Is there any way to debug in Streamlit so that I can track what happens at different lines of code (such as the image path)? When executed in Google Colab as individual .ipynb files, everything runs properly and I get the required classification.
Error:
ValueError: not enough values to unpack (expected 3, got 0)
Traceback:
File "C:\Users\ADARSH\anaconda3\lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 564, in _run_script
exec(code, module.__dict__)
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 76, in <module>
main()
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 68, in main
mask = imageToTensor('image')
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 44, in imageToTensor
b,g,r = cv2.split(bgr_img)
My entire Streamlit app code:
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
from sklearn.utils import shuffle
from tqdm import tqdm_notebook
import streamlit as st
from PIL import Image as impo
from fastai import *
from fastai.vision import *
from torchvision.models import *

class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

def read_image(name):
    image = st.file_uploader("Upload an "+ name, type=["png", "jpg", "jpeg",'tif'])
    if image is not None:
        im = impo.open(image)
        im.filename = image.name
        return image

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b,g,r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r,g,b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H,W,C = rgb_img.shape
    rgb_img = rgb_img[(H-sz)//2:(sz +(H-sz)//2),(H-sz)//2:(sz +(H-sz)//2),:] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_infernce():
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        pred, pred_idx, probs = learn_inference.predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write(f'Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    image = read_image('image')
    mask = imageToTensor('image')
    if mask is not None:
        get_prediction('mask')

if __name__ == "__main__":
    main()
In your main function you are passing the string 'image' instead of your variables, and I also think your read_image is not well structured.
What you should do is first save the uploaded file in a directory, then fetch the file from that directory and pass it to imageToTensor() as a parameter. That's one workaround which gives you full control over the file; otherwise you will get further error messages after the first one is fixed.
You can also automate a few lines of code in a separate Python file to delete uploaded files from the directory after a given duration, as sketched below.
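A minimal cleanup sketch (the directory matches the code below; the one-hour threshold is an arbitrary assumption):
import os
import time

UPLOAD_DIR = 'data/uploadedImages/'
MAX_AGE_SECONDS = 3600  # delete uploads older than one hour

for name in os.listdir(UPLOAD_DIR):
    path = os.path.join(UPLOAD_DIR, name)
    # Remove regular files whose last modification is older than the threshold
    if os.path.isfile(path) and time.time() - os.path.getmtime(path) > MAX_AGE_SECONDS:
        os.remove(path)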
Note: Keep an eye on the imports because I skipped them to keep the code short
class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

# Refactored read_image()
def get_uploaded_image():
    upload = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg",'tif'])
    if upload is not None:
        st.write(upload.name)
        # Create a directory and save the image file before proceeding.
        file_path = os.path.join("data/uploadedImages/", upload.name)
        with open(file_path, "wb") as user_file:
            user_file.write(upload.getbuffer())
        return file_path  # fixed indentation

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b,g,r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r,g,b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H,W,C = rgb_img.shape
    rgb_img = rgb_img[(H-sz)//2:(sz +(H-sz)//2),(H-sz)//2:(sz +(H-sz)//2),:] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_inference():  # note: fixed the original misspelling "learn_infernce"
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        # Load the exported learner and classify the uploaded image
        pred, pred_idx, probs = learn_inference().predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write('Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    # Holds the saved file path
    user_image = get_uploaded_image()
    if user_image is not None:
        # Pass the path to imageToTensor() as a parameter.
        mask = imageToTensor(user_image)
        get_prediction(mask)

if __name__ == "__main__":
    main()

Networkx read from pandas creates 2 nodes for same source

I'm trying to read in from a pandas dataframe using from_pandas_edgelist with the following code:
input = df_from_string("""
        source, target, size
            abc, xyz, 0.25
            abc, def, 0.35
            xyz, ghi, 0.40
 """)
G = nx.from_pandas_edgelist(input, source='source', target='target', edge_attr='size', create_using=nx.DiGraph())
nx.draw(G, with_labels=True)
plt.show()
The result I want is:
abc -> xyz -> ghi.
However, currently I am only getting:
abc -> xyz
xyz -> ghi
Don't use names such as input for variables; this shadows Python's built-in input function.
Check whether the dataframe is loaded correctly (df_from_string doesn't seem to be a pandas function); if you defined it yourself, either post it in your question for debugging, or verify that it does what it should do.
Finally, if you post a problem with code, make sure that the code you post reproduces that problem and doesn't throw other errors.
Meanwhile, if you generate the dataframe in a more standard approach:
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame(
    {
        'source': ['abc', 'abc', 'xyz'],
        'target': ['xyz', 'def', 'ghi'],
        'size': [0.25, 0.35, 0.4]
    }
)
generating the graph works just fine:
G = nx.from_pandas_edgelist(df, source='source', target='target', edge_attr='size', create_using=nx.DiGraph())
nx.draw(G, with_labels=True)
plt.show()
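The duplicate nodes in your version are most likely caused by the leading whitespace in the multiline string: ' xyz' and 'xyz' are different node names, assuming df_from_string keeps the padding around the commas. A hedged sketch of a fix, stripping whitespace before building the graph:
# Strip stray whitespace from the node-name columns
# (assumes both columns hold strings, as in the example above)
df['source'] = df['source'].str.strip()
df['target'] = df['target'].str.strip()
G = nx.from_pandas_edgelist(df, source='source', target='target',
                            edge_attr='size', create_using=nx.DiGraph())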

Format NaN value with CustomJSHover bokeh plot

I would like not to show the NaN values (set with numpy.nan) of the matrix in the Bokeh backend. I tried using CustomJSHover, but I couldn't get anywhere because I got errors even for a simple example.
code:
import numpy as np
import xarray as xr
import holoviews as hv
import geoviews as gv
hv.extension('bokeh','matplotlib')
import cartopy.crs as crs
from bokeh.models import HoverTool, CustomJSHover

x,y = np.mgrid[-50:51, -50:51] * 0.1
r = 0.5*np.sin(np.pi +3*x**2+y**2)+0.5
r[r<0.5]=np.nan
coords=np.arange(0,101)

custom=CustomJSHover(code="""
    return value + " tot"
""")
tooltips=[
    ("value","@image{1.1}"),     # gives the right value but with NaNs (0.1 .. 0.6 .. etc)
    ("value","@image{custom}")   # gives some strange 0th, 1st, ... or NaN
]
hover = HoverTool(tooltips=tooltips, formatters={'image' : custom})
ds = xr.Dataset({'R': (['x', 'y'],r)},coords={'x': (['x'], coords),'y': (['y'], coords)})
ensemble = gv.Image(ds, kdims=['x', 'y'],vdims=[ 'R']).opts(tools=[hover])
ensemble
I would like the values to be shown in the {1.1} format, but the NaN values not shown in the hover at all; they are plotted as white, so they do not appear in the image anyway.
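For what it's worth, one approach that may work (a sketch, untested against this HoloViews/GeoViews combination) is to do all of the formatting inside the CustomJSHover and return an empty string for NaN:
custom = CustomJSHover(code="""
    // "value" is the hovered image value; hide it when it is NaN
    if (isNaN(value)) {
        return "";
    }
    return value.toFixed(1);  // mimics the {1.1} format
""")
tooltips = [("value", "@image{custom}")]
hover = HoverTool(tooltips=tooltips, formatters={'image': custom})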

How to use the format parameter of sliders?

Sliders have a format property; see
https://docs.bokeh.org/en/latest/docs/reference/models/widgets.sliders.html
A) Where is the documentation for this property?
B) Is there an example of using the format attribute?
EDIT: is there a way to pass a function that takes the slider value and returns a string?
Formatting documentation, with multiple examples, can be found on this page. The slider's value can be accessed as slider.value.
I also edited an example, adding a formatter to the amplitude slider; the slider values in this example are used to change the sine wave.
You can run this example by using this command: bokeh serve script.py --show
import numpy as np

from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure

# Set up data
N = 200
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))

# Set up plot
plot = figure(plot_height=400, plot_width=400, title="my sine wave",
              tools="crosshair,pan,reset,save,wheel_zoom",
              x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)

# Set up widgets
text = TextInput(title="title", value='my sine wave')
offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0, step=0.0000001, format='0.000f')  # Slider with different formatting
phase = Slider(title="phase", value=0.0, start=0.0, end=2*np.pi)
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1, step=0.1)

# Set up callbacks
def update_title(attrname, old, new):
    plot.title.text = text.value

text.on_change('value', update_title)

def update_data(attrname, old, new):
    # Get the current slider values
    a = amplitude.value
    b = offset.value
    w = phase.value
    k = freq.value
    # Generate the new curve
    x = np.linspace(0, 4*np.pi, N)
    y = a*np.sin(k*x + w) + b
    source.data = dict(x=x, y=y)

for w in [offset, amplitude, phase, freq]:
    w.on_change('value', update_data)

# Set up layouts and add to document
inputs = column(text, offset, amplitude, phase, freq)
curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "Sliders"
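Regarding the edit: as far as I know you cannot pass a plain Python function, because the label is rendered in the browser, but newer Bokeh versions also accept a TickFormatter model for format, so a JavaScript snippet can compute the string. A hedged sketch (FuncTickFormatter was renamed CustomJSTickFormatter in Bokeh 3.x, so check your version):
from bokeh.models import FuncTickFormatter
from bokeh.models.widgets import Slider

# Inside the JS snippet, "tick" holds the slider's current value
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0, step=0.1,
                   format=FuncTickFormatter(code="return tick.toFixed(2) + ' V'"))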

ImageView.setImage axes parameter does not switch X-Y dimensions

I have modified the ImageView example by adding the statement data[:, ::10, :] = 0, which sets every tenth element of the middle dimension to 0. The program now shows horizontal lines. This is consistent with the documentation of the ImageView.setImage function: the default axes dictionary is {'t':0, 'x':1, 'y':2, 'c':3}. However, when I change this to {'t':0, 'x':2, 'y':1, 'c':3}, nothing changes, whereas I would expect to get vertical lines.
So my question is: how can I give the row dimension higher precedence in PyQtGraph? Of course I can transpose all my arrays myself before passing them to the setImage function, but I would prefer not to, especially since both NumPy and Qt use the row/column convention rather than X-before-Y. I don't see why PyQtGraph chooses the latter.
For completeness, find my modified ImageView example below.
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg

app = QtGui.QApplication([])

## Create window with ImageView widget
win = QtGui.QMainWindow()
win.resize(800,800)
imv = pg.ImageView()
win.setCentralWidget(imv)
win.show()
win.setWindowTitle('pyqtgraph example: ImageView')

## Create random 3D data set with noisy signals
img = pg.gaussianFilter(np.random.normal(size=(200, 200)), (5, 5)) * 20 + 100
img = img[np.newaxis,:,:]
decay = np.exp(-np.linspace(0,0.3,100))[:,np.newaxis,np.newaxis]
data = np.random.normal(size=(100, 200, 200))
data += img * decay
data += 2

## Add time-varying signal
sig = np.zeros(data.shape[0])
sig[30:] += np.exp(-np.linspace(1,10, 70))
sig[40:] += np.exp(-np.linspace(1,10, 60))
sig[70:] += np.exp(-np.linspace(1,10, 30))
sig = sig[:,np.newaxis,np.newaxis] * 3
data[:,50:60,50:60] += sig
data[:, ::10, :] = 0  # Make image asymmetrical

## Display the data and assign each frame a time value from 1.0 to 3.0
imv.setImage(data, xvals=np.linspace(1., 3., data.shape[0]),
             axes={'t':0, 'x':2, 'y':1, 'c':3})  # doesn't help

## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
Looking through ImageView.py, setImage() parses the axes dictionary only to determine which axis is 't' so it can build the frame slider, and that's it. Rearranging the axes appears not to be implemented yet.
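As the question itself notes, the practical workaround for now is to transpose the array before handing it to setImage. A minimal sketch using the data array from the example above:
# Swap the two spatial axes so that rows are drawn as the image's x dimension;
# the time axis (0) stays in front for the frame slider
imv.setImage(data.transpose(0, 2, 1),
             xvals=np.linspace(1., 3., data.shape[0]))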
