NetworkX: draw_networkx not drawing labels; draw results in "'_AxesStack' object is not callable"

I am trying to draw a two-node graph with the latest networkx (3.0) and the latest matplotlib (3.6.3). The drawing does not show labels on the plot:
import networkx as nx
import matplotlib.pyplot as plt
G=nx.Graph()
G.add_node(1, text='foo')
G.add_node(2, text='bar')
G.add_edge(1,2)
print("Node labels: ", nx.get_node_attributes(G, 'text'))
nx.draw_networkx(G, with_labels=True)
>> Node labels: {1: 'foo', 2: 'bar'}
It shows the graph without labels. Why?
And this results in an error:
nx.draw(G)
plt.show()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_16652\3657615245.py in <module>
----> 1 nx.draw(G)
2 plt.show()
~\Anaconda3\lib\site-packages\networkx\drawing\nx_pylab.py in draw(G, pos, ax, **kwds)
111 cf.set_facecolor("w")
112 if ax is None:
--> 113 if cf._axstack() is None:
114 ax = cf.add_axes((0, 0, 1, 1))
115 else:
TypeError: '_AxesStack' object is not callable
<Figure size 640x480 with 0 Axes>

To answer the first question: the nx graph doesn't "know" that you want to look at its text attribute for the label, so it just goes with the initial node name.
Instead, you could pass a relabeled graph into the draw function
import networkx as nx
import matplotlib.pyplot as plt
G=nx.Graph()
G.add_node(1, text='foo')
G.add_node(2, text='bar')
G.add_edge(1,2)
print("Node labels: ", nx.get_node_attributes(G, 'text'))
nx.draw_networkx(nx.relabel_nodes(G, nx.get_node_attributes(G, 'text')),
                 with_labels=True, node_color='orange')
plt.show()
The result:

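Alternatively, draw_networkx accepts a labels dictionary, so the node attribute values can be shown without relabeling the graph. A minimal sketch of that variant:
import networkx as nx
import matplotlib.pyplot as plt

G = nx.Graph()
G.add_node(1, text='foo')
G.add_node(2, text='bar')
G.add_edge(1, 2)

# Use the 'text' attribute values as the drawn labels
nx.draw_networkx(G, labels=nx.get_node_attributes(G, 'text'), node_color='orange')
plt.show()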

not able to load the image and pass it to preprocessing for the model prediction

I am trying to upload an image from the local system, within the same directory. After uploading, when I pass it through OpenCV split and merge for the b, g, and r channels, I get the error ValueError: not enough values to unpack (expected 3, got 0).
Error:
This is the error that is shown. Is there any possibility to debug in Streamlit, where I can track changes at different lines of code (e.g. the image path)? When executed in Google Colab as individual ipynb files, the code runs properly and I get the required classification.
ValueError: not enough values to unpack (expected 3, got 0)
Traceback:
File "C:\Users\ADARSH\anaconda3\lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 564, in _run_script
exec(code, module.__dict__)
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 76, in <module>
main()
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 68, in main
mask = imageToTensor('image')
File "C:\Users\ADARSH\streamlit\deploy_test.py", line 44, in imageToTensor
b,g,r = cv2.split(bgr_img)
My entire streamlit app code
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
from sklearn.utils import shuffle
from tqdm import tqdm_notebook
import streamlit as st
from PIL import Image as impo
from fastai import *
from fastai.vision import *
from torchvision.models import *

class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

def read_image(name):
    image = st.file_uploader("Upload an "+ name, type=["png", "jpg", "jpeg", 'tif'])
    if image is not None:
        im = impo.open(image)
        im.filename = image.name
    return image

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b,g,r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r,g,b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H,W,C = rgb_img.shape
    rgb_img = rgb_img[(H-sz)//2:(sz +(H-sz)//2),(H-sz)//2:(sz +(H-sz)//2),:] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_infernce():
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        pred, pred_idx, probs = learn_inference.predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write(f'Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    image = read_image('image')
    mask = imageToTensor('image')
    if mask is not None:
        get_prediction('mask')

if __name__ == "__main__":
    main()
In your main function you are passing string literals ('image', 'mask') instead of the variables, and I also think your read_image is not well structured.
What you should do is first save the uploaded file to a directory, then fetch the file from that directory and pass it to imageToTensor() as a parameter. That is one workaround which gives you full control over the file. Otherwise you will get other error messages after the first error is fixed.
You can also automate a few lines of code in a separate Python file to delete the uploaded file from the directory after a given duration.
Note: keep an eye on the imports, because I skipped them to keep the code short.
class MyImageItemList(ImageList):
    def open(self, fn:PathOrStr)->Image:
        img = readCroppedImage(fn.replace('/./','').replace('//','/'))
        # This ndarray image has to be converted to tensor before passing on as fastai Image, we can use pil2tensor
        return vision.Image(px=pil2tensor(img, np.float32))

# Refactored read_image()
def get_uploaded_image():
    upload = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg", 'tif'])
    if upload is not None:
        st.write(upload.name)
        # Create the directory (if needed) and save the image file before proceeding.
        os.makedirs("data/uploadedImages/", exist_ok=True)
        file_path = os.path.join("data/uploadedImages/", upload.name)
        with open(file_path, "wb") as user_file:
            user_file.write(upload.getbuffer())
        return file_path

def imageToTensor(image):
    sz = 68
    bgr_img = cv2.imread(image)
    b,g,r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r,g,b])
    # crop to center to the correct size and convert from 0-255 range to 0-1 range
    H,W,C = rgb_img.shape
    rgb_img = rgb_img[(H-sz)//2:(sz +(H-sz)//2),(H-sz)//2:(sz +(H-sz)//2),:] / 256
    return vision.Image(px=pil2tensor(rgb_img, np.float32))

def learn_infernce():
    return load_learner('./')

def get_prediction(image):
    if st.button('Classify'):
        pred, pred_idx, probs = learn_inference.predict(image)
        classes = ['negative', 'tumor']
        st.write(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
    else:
        st.write(f'Click the button to classify')

def main():
    st.set_page_config(page_title='Cancer detection', page_icon=None, layout='centered', initial_sidebar_state='auto')
    # Holds the saved file path
    user_image = get_uploaded_image()
    if user_image is not None:
        # Pass the path to imageToTensor() as a parameter.
        mask = imageToTensor(user_image)
        get_prediction(mask)

if __name__ == "__main__":
    main()
output:

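If you would rather not write the upload to disk at all, the raw bytes returned by st.file_uploader can also be decoded directly with OpenCV. A sketch of that alternative (variable names are illustrative, not from the original code):
import cv2
import numpy as np
import streamlit as st

upload = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg", "tif"])
if upload is not None:
    # Decode the uploaded bytes straight into a BGR array; no temporary file is needed
    file_bytes = np.frombuffer(upload.getvalue(), dtype=np.uint8)
    bgr_img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
    # imdecode returns None if the bytes are not a readable image
    if bgr_img is not None:
        rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)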
rotate a pointcloud in z axis

I am trying to rotate a pcd, but I get the following error. How do I fix it?
import open3d as o3d
import numpy as np
xyz = o3d.io.read_point_cloud("data.pcd")
xyz = xyz.rotate(xyz.get_rotation_matrix_from_xyz((0.7 * np.pi, 0, 0.6 * np.pi)),center=True)
Error -
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: get_rotation_matrix_from_xyz(): incompatible function arguments. The following argument types are supported:
1. (rotation: numpy.ndarray[float64[3, 1]]) -> numpy.ndarray[float64[3, 3]]
Invoked with: array([[-0.30901699, -0.95105652, 0. ],
[-0.55901699, 0.18163563, -0.80901699],
[ 0.76942088, -0.25 , -0.58778525]]); kwargs: center=True
The center argument is not boolean, but should describe the rotation center (see docs):
center (numpy.ndarray[float64[3, 1]]) – Rotation center used for transformation
This would rotate around the origin (0,0,0):
import open3d as o3d
import numpy as np
xyz = o3d.io.read_point_cloud("data.pcd")
R = xyz.get_rotation_matrix_from_xyz((0.7 * np.pi, 0, 0.6 * np.pi))
xyz = xyz.rotate(R, center=(0,0,0))
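If the old center=True was meant to rotate the cloud around its own centroid rather than the origin, the centroid can be passed explicitly. A small sketch using get_center():
import open3d as o3d
import numpy as np

pcd = o3d.io.read_point_cloud("data.pcd")
R = pcd.get_rotation_matrix_from_xyz((0.7 * np.pi, 0, 0.6 * np.pi))
# get_center() returns the mean of the points, so this rotates the cloud about its centroid
pcd = pcd.rotate(R, center=pcd.get_center())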

How do I use custom labels for ticks in Bokeh?

I understand how to specify which ticks to show in Bokeh, but my question is whether there is a way to assign a specific label to show rather than the position. So for example
plot.xaxis[0].ticker=FixedTicker(ticks=[0,1])
will only show the x-axis labels at 0 and 1, but what if instead of showing 0 and 1 I wanted to show Apple and Orange? Something like
plot.xaxis[0].ticker=FixedTicker(ticks=[0,1], labels=['Apple', 'Orange'])
A histogram won't work for the data I am plotting. Is there any way to use custom labels in Bokeh like this?
Fixed ticks can just be passed directly as the "ticker" value, and major label overrides can be provided to explicitly supply custom labels for specific values:
from bokeh.plotting import figure, output_file, show
p = figure()
p.circle(x=[1,2,3], y=[4,6,5], size=20)
p.xaxis.ticker = [1, 2, 3]
p.xaxis.major_label_overrides = {1: 'A', 2: 'B', 3: 'C'}
output_file("test.html")
show(p)
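Applied to the Apple/Orange example from the question, that looks roughly like this (the plotted points are just placeholder data):
from bokeh.models import FixedTicker
from bokeh.plotting import figure, show

plot = figure()
plot.circle(x=[0, 1], y=[3, 7], size=20)
# Show ticks only at 0 and 1, but label them Apple and Orange
plot.xaxis.ticker = FixedTicker(ticks=[0, 1])
plot.xaxis.major_label_overrides = {0: 'Apple', 1: 'Orange'}
show(plot)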
EDIT: Updated for Bokeh 0.12.5 but also see simpler method in the other answer.
This worked for me:
import pandas as pd
from bokeh.charts import Bar, output_file, show
from bokeh.models import TickFormatter
from bokeh.core.properties import Dict, Int, String
class FixedTickFormatter(TickFormatter):
    """
    Class used to allow custom axis tick labels on a bokeh chart
    Extends bokeh.models.formatters.TickFormatter
    """

    JS_CODE = """
        import {Model} from "model"
        import * as p from "core/properties"

        export class FixedTickFormatter extends Model
          type: 'FixedTickFormatter'

          doFormat: (ticks) ->
            labels = @get("labels")
            return (labels[tick] ? "" for tick in ticks)

          @define {
            labels: [ p.Any ]
          }
    """

    labels = Dict(Int, String, help="""
    A mapping of integer tick values to their labels.
    """)

    __implementation__ = JS_CODE

skills_list = ['cheese making', 'squanching', 'leaving harsh criticisms']
pct_counts = [25, 40, 1]
df = pd.DataFrame({'skill': skills_list, 'pct jobs with skill': pct_counts})
p = Bar(df, 'index', values='pct jobs with skill', title="Top skills for ___ jobs", legend=False)

label_dict = {}
for i, s in enumerate(skills_list):
    label_dict[i] = s

p.xaxis[0].formatter = FixedTickFormatter(labels=label_dict)
output_file("bar.html")
show(p)
This can be dealt with as categorical data, see bokeh documentation.
from bokeh.plotting import figure, show
categories = ['A', 'B','C' ]
p = figure(x_range=categories)
p.circle(x=categories, y=[4, 6, 5], size=20)
show(p)

RuntimeWarning: invalid value encountered in true_divide

I have to make a program using the NLMS method to minimize the noise in an ECG signal.
from __future__ import division
from numpy import *
import numpy as np
#import matplotlib as plt
import matplotlib.pyplot as plt
import os
clear = lambda: os.system('cls')
clear()
meu=1e-05
file=open('ecg.txt','r') #this file i attached at below
data=file.readlines()
x=data[:1000]
xx=data[1001:2001]
N=len(x)
NN=len(xx)
t=np.zeros((N,1),float)
X=np.zeros((4,1),float)
Z=np.array([0,0,0,0],float)
w=random.rand(4,1)
y=np.zeros((N,1),float)
yp=np.zeros((N,1),float)
e=np.zeros((N,1))
j=np.zeros((N,1),float)
for n in range(len(x)):
    X[1:len(X)-1]=X[0:len(X)-2]
    X[0]=x[n]
    sum=0
    for k in range(len(X)):
        sum+=(w[k].T*X[k])
    y[n]=sum
    e[n]=subtract(X[0],y[n])
    j[n]=e[n]*e[n]
    for l in range(len(X)):
        w[l] = w[l]+(meu*e[n]*X[l])/((X[l]*X[l])*(X[l]*X[l]))  # when the division takes place it gives the error
MSE=mean(j,1)
plt.plot(10 * log10(MSE))
plt.title('MSE')
plt.show()
#print(' output\n')
#print(y,'\n')
#print(' data\n')
#print(x,'\n')
print(w)
#y.astype(float)
#pp=np.int16(y)
plt.plot(x)
plt.yscale('log')
plt.title('x')
plt.show()
plt.plot(y)
plt.yscale('log')
plt.title('y')
plt.show()
plt.plot(w)
plt.yscale('log')
plt.title('w')
plt.show()
plt.plot(e)
#plt.yscale('log')
plt.title('e')
plt.show()
plt.plot(e)
plt.plot(x)
plt.plot(y)
plt.show()
plt.plot(yp)
plt.title('yp')
plt.show()
#plt.plot(t)
#plt.yscale('log')
#plt.show()
I keep getting this error:
Warning (from warnings module):
w[l] = w[l]+(meu*e[n]*X[l])/((X[l]*X[l])*(X[l]*X[l]));
RuntimeWarning: invalid value encountered in true_divide
Result:
>>w
array([[ 0.86035037],
[ 0.35119551],
[ 0.40570589],
[ nan]])
>>X
array([[ 0.19258605],
[ 0.19442064],
[ 0.19243968],
[ 0. ]])
>>w[l]
array([ nan])
>>X[l]
array([ 0.])
I can't figure it out, what is wrong with the code?
ECG text file:
click this link to view the file
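The arrays dumped above show that X[l] is exactly 0, so the denominator (X[l]*X[l])*(X[l]*X[l]) is 0 and the division produces nan, which then sticks in w. The textbook NLMS update instead normalizes by the squared norm of the whole input vector plus a small constant, which avoids exactly this situation; a minimal sketch of that form (the eps value is an illustrative choice, not from the original code):
import numpy as np

def nlms_update(w, x, e_n, mu, eps=1e-8):
    # w: current weight vector, x: current input vector, e_n: error sample, mu: step size
    # eps keeps the denominator strictly positive even when the input samples are all zero
    norm = np.dot(x, x) + eps
    return w + (mu * e_n / norm) * x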

networkx, undirected graph: for one source node, find directly connected neighbors from target nodes list

In an undirected graph, for a given source node ('sa' in the code / picture below) and a list of target nodes (tlist=['ta','tb','tc','td','te','tf']), I am trying to find the subset of directly connected target nodes, i.e. target nodes that can only be reached via another target node do not go into the subset.
So for undirected graph G:
import networkx as nx
import matplotlib.pyplot as plt
G = nx.Graph()
G.add_path(['te','og','oe','sa','oa','ta','tb'])
#G = nx.Graph()
G.add_path(['tf','oe'])
G.add_path(['sa','of','td','od'])
G.add_path(['sa','ob','tc','oc','td'])
val_map = {'sa': 1.0,
           'ta': 0.5714285714285714,
           'tb': 0.5714285714285714,
           'tc': 0.5714285714285714,
           'td': 0.5714285714285714,
           'te': 0.5714285714285714,
           'tf': 0.5714285714285714}
values = [val_map.get(node, 0.25) for node in G.nodes()]
nx.draw(G, cmap=plt.get_cmap('jet'), node_color=values,with_labels=True)
plt.show()
the resulting subset of target nodes should be ['ta','tc','td','te','tf']
Thanks in advance!
OK, sorry for my bad programming style, but this is only a draft. Anyhow, it seems to work; please test it on other undirected graphs and improve if necessary:
import networkx as nx
import matplotlib.pyplot as plt

G = nx.Graph()
G.add_path(['te','og','oe','sa','oa','ta','tb'])
G.add_path(['tf','oe'])
G.add_path(['sa','of','td','od'])
G.add_path(['sa','ob','tc','oc','td'])

tlist=['ta','tb','tc','td','te','tf']

def deduplicate_list(seq):
    seen = set()
    seen_add = seen.add
    return [ x for x in seq if not (x in seen or seen_add(x))]

def nearest_connected_neighbors(graph,sourcenode,targetnodes):
    templist=[]
    endendlist=[]
    searchlist=[]
    tlist=targetnodes
    G=graph
    nlist=G.neighbors(sourcenode)
    donelist=[sourcenode]
    while len(nlist)>0:
        for n in nlist:
            donelist.append(n)
            if n in tlist:
                endendlist.append(n)
        endendlist=deduplicate_list(endendlist)
        searchlist = list(set(nlist) - set(endendlist))
        for n in searchlist:
            templist.extend(G.neighbors(n))
        templist=deduplicate_list(templist)
        nlist=[]
        nlist=list(set(templist) - set(donelist))
    return endendlist

print nearest_connected_neighbors(G,'sa',tlist)
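The draft above uses the older Python 2 / networkx 1.x style (the G.add_path method and the print statement). For current networkx, the same idea can be written as a breadth-first search that simply does not expand past a target node; a compact sketch (function and variable names are illustrative):
from collections import deque
import networkx as nx

def directly_connected_targets(graph, source, targets):
    # Collect targets reachable from source without passing through another target
    targets = set(targets)
    seen = {source}
    queue = deque([source])
    found = set()
    while queue:
        node = queue.popleft()
        for nbr in graph.neighbors(node):
            if nbr in seen:
                continue
            seen.add(nbr)
            if nbr in targets:
                found.add(nbr)      # record the target, but do not search beyond it
            else:
                queue.append(nbr)
    return sorted(found)

# The example graph from the question, built with the module-level nx.add_path
G = nx.Graph()
for path in (['te','og','oe','sa','oa','ta','tb'], ['tf','oe'],
             ['sa','of','td','od'], ['sa','ob','tc','oc','td']):
    nx.add_path(G, path)

print(directly_connected_targets(G, 'sa', ['ta','tb','tc','td','te','tf']))
# ['ta', 'tc', 'td', 'te', 'tf']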
