Plotting data in a nested for loop and function, then combining all the plots into one plot

I am trying to plot a simple nested function; my MWE goes like this, and in the end I get just an empty plot:
import numpy as np
import matplotlib.pyplot as plt

k = 1.38e-23
h = 6.6e-34
T = 7
Temp = np.array([7, 0.268, 0.02025])
Freq = np.arange(1, 10, 2)

for T in Temp:
    for f in Freq:
        def quanta(f, T):
            return (f * T)
        final = quanta(f, T)
        plt.plot(f, final)
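For what it's worth, a minimal sketch of one way to get visible output (assuming the goal is one curve per temperature): pass whole arrays to plt.plot instead of single scalars, which draw nothing without a marker, and call plt.show() at the end.

import numpy as np
import matplotlib.pyplot as plt

Temp = np.array([7, 0.268, 0.02025])
Freq = np.arange(1, 10, 2)

def quanta(f, T):
    return f * T

for T in Temp:
    # Evaluate over the whole frequency array so plt.plot receives arrays
    # and draws one visible line per temperature.
    plt.plot(Freq, quanta(Freq, T), label=f"T = {T}")

plt.xlabel("f")
plt.ylabel("quanta(f, T)")
plt.legend()
plt.show()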

Related

Why does my model predict the same label?

I am training a graph convolutional neural network to classify EEG signals into emotion classes. The input is an array of size [1280, 32, 16] --> [number of subjects * number of channels (nodes) * features of each node]. The output should be class 0 (Negative) or class 1 (Positive). The data is slightly imbalanced (45% class 0 and 55% class 1). The problem is that my model always predicts label 0 for all inputs during the training stage, regardless of the convolution function.
What is wrong with my code and how can I fix it? Any comments are welcome.
connectivity in the code below is predefined based on the connections of the 32 electrodes (nodes)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, SAGEConv, ResGatedGraphConv, global_mean_pool, BatchNorm
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, f1_score, accuracy_score

labels = np.load("/content/drive/MyDrive/ValenceLabels_thres_5.npy")
labels = np.array(labels, dtype='int64')
labels.shape

class EEGraph(nn.Module):
    def __init__(self, embedding_dim, first_conv, n_layers, conv_layer):
        super(EEGraph, self).__init__()
        self.n_layers = n_layers
        self.convs = []
        self.bns = []
        d_in = embedding_dim
        d_out = first_conv
        for i in range(n_layers):
            self.convs.append(conv_layer(d_in, d_out))
            self.bns.append(BatchNorm(d_out, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True))
            if i < n_layers - 1:
                d_in, d_out = d_out, 2 * d_out
        self.convs = torch.nn.ModuleList(self.convs)
        self.bns = torch.nn.ModuleList(self.bns)
        self.project = nn.Linear(d_out, 3)  # d_in beacu
        self.project.apply(lambda x: nn.init.xavier_normal_(x.weight, gain=1) if type(x) == nn.Linear else None)

    def forward(self, x, edge_index):
        for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
            x = conv(x, edge_index).permute(0, 2, 1)
            x = bn(x)
            x = F.dropout(F.leaky_relu(x, negative_slope=0.01), p=0.5, training=self.training).permute(0, 2, 1)
        out = x.mean(dim=1).squeeze(dim=-1)
        out = self.project(out)
        return F.softmax(out, dim=-1)

device = torch.device("cuda")
connectivity = [[channel_order.index(e[0]), channel_order.index(e[1])] for e in edges]
connectivity = torch.tensor(connectivity).t().contiguous().to(device)

best_f1_score = -1
best_trial_name = None
n_epochs = 500
lr = 1e-3
weight_decay = 1e-5
batch_size = 63
criterion = nn.CrossEntropyLoss()

for node_dim in [16]:
    node_features = np.load(f"/content/deap_graph_valence{node_dim}_1.npy")
    A, Xte, yA, yte = train_test_split(node_features, labels, test_size=0.2, shuffle=True, stratify=labels, random_state=0)
    Xtr, Xtr_valid, ytr, ytr_valid = train_test_split(A, yA, test_size=0.2, shuffle=True, stratify=yA, random_state=0)
    Xtr = torch.tensor(Xtr).float().to(device)
    Xtr_valid = torch.tensor(Xtr_valid).float().to(device)
    Xte = torch.tensor(Xte).float().to(device)
    ytr = torch.tensor(ytr).to(device)
    #ytr_valid = torch.tensor(ytr_valid).to(device)
    #yte = torch.tensor(yte).to(device)
    for conv_fn in [GCNConv, SAGEConv, ResGatedGraphConv]:
        for n_layers in range(1, 4):
            for conv_dim in [32, 64, 128, 256]:
                trial_name = f"node_dim_{node_dim}-conv_fn_{conv_fn.__name__}-conv_layers_{n_layers}-conv_dim_{conv_dim}"
                print(f"#: {trial_name}")
                model = EEGraph(embedding_dim=Xtr.shape[-1],
                                first_conv=conv_dim,
                                n_layers=n_layers,
                                conv_layer=conv_fn).to(device)
                optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
                for epoch in range(n_epochs):
                    model.train()
                    indices = torch.randperm(len(Xtr))
                    for j, batch in enumerate(indices.view(-1, 63)):
                        optimizer.zero_grad()
                        batch_input = Xtr[batch]
                        outputs = model(batch_input, connectivity)
                        loss = criterion(outputs, ytr[batch])
                        loss.backward()
                        optimizer.step()
                    with torch.no_grad():
                        model.eval()
                        outputs = model(Xtr_valid, connectivity)
                        output_classes = torch.argmax(outputs, dim=-1).cpu().numpy()
                        f1 = f1_score(ytr_valid, output_classes, average="macro")
                        if f1 > best_f1_score:
                            best_trial_name = trial_name
                            best_f1_score = f1
                            print("-" * 100)
                            print(f"Best model so far: {best_trial_name}")
                            print(f"Best F1 Score: %{100*best_f1_score:.2f}")
                            test_outputs = model(Xte, connectivity)
                            test_output_classes = torch.argmax(test_outputs, dim=-1).cpu().numpy()
                            print(classification_report(yte, test_output_classes, target_names=["Negative", "Positive"]))
                            print("-" * 100)
                            print()
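Two things in the posted model are worth checking first, since either can produce exactly this "always predicts one class" behaviour: the head is nn.Linear(d_out, 3) although there are only two classes, and forward() returns F.softmax(...) even though nn.CrossEntropyLoss already applies log-softmax internally, so the outputs get squashed twice and the gradients flatten out. A minimal, self-contained sketch of the loss issue (the tensors here are made up purely for illustration):

import torch
import torch.nn as nn

# Hypothetical logits for a batch of 4 samples and 2 classes.
logits = torch.randn(4, 2)
targets = torch.tensor([0, 1, 1, 0])

criterion = nn.CrossEntropyLoss()

# Usual pattern: feed raw logits; CrossEntropyLoss applies log-softmax itself.
loss_ok = criterion(logits, targets)

# What the posted forward() effectively does: softmax first, then
# CrossEntropyLoss on top. The loss surface becomes very flat, which can
# stall training at the majority-class prediction.
loss_double = criterion(torch.softmax(logits, dim=-1), targets)

print(loss_ok.item(), loss_double.item())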

How to create a DataSet of 1000 graphs in python

I need to create a dataset of 1000 graphs. I used the following code:
from random import randint

import numpy as np
import torch
from torch_geometric.data import Data

data_list = []
ngraphs = 1000

for i in range(ngraphs):
    num_nodes = randint(10, 500)
    num_edges = randint(10, num_nodes * (num_nodes - 1))
    f1 = np.random.randint(10, size=(num_nodes))
    f2 = np.random.randint(10, 20, size=(num_nodes))
    f3 = np.random.randint(20, 30, size=(num_nodes))
    f_final = np.stack((f1, f2, f3), axis=1)
    capital = 2 * f1 + f2 - f3
    f1_t = torch.from_numpy(f1)
    f2_t = torch.from_numpy(f2)
    f3_t = torch.from_numpy(f3)
    capital_t = torch.from_numpy(capital)
    capital_t = capital_t.type(torch.LongTensor)
    x = torch.from_numpy(f_final)
    x = x.type(torch.LongTensor)
    edge_index = torch.randint(low=0, high=num_nodes, size=(num_edges, 2), dtype=torch.long)
    edge_attr = torch.randint(low=0, high=50, size=(num_edges, 1), dtype=torch.long)
    data = Data(x=x, edge_index=edge_index.t().contiguous(), y=capital_t, edge_attr=edge_attr)
    data_list.append(data)
This works. But when I run my training function as follows:
for epoch in range(1, 500):
    loss = train()
    print(f'Loss: {loss:.4f}')
I keep getting the following error:
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>()
      1 for epoch in range(1, 500):
----> 2     loss = train()
      3     print(f'Loss: {loss:.4f}')

5 frames
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
   1845     if has_torch_function_variadic(input, weight):
   1846         return handle_torch_function(linear, (input, weight), input, weight, bias=bias)
-> 1847     return torch._C._nn.linear(input, weight, bias)

RuntimeError: expected scalar type Float but found Long
Can someone help me troubleshoot this, or show how to make a 1000-graph dataset that doesn't throw this error?
Change your x and y tensors into FloatTensor, since the Linear layer in PyTorch only accepts FloatTensor inputs.
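A minimal sketch of that change applied to the construction above (assuming the error is raised when the Long node features reach the first Linear layer inside train()):

import numpy as np
import torch

num_nodes = 20
f1 = np.random.randint(10, size=(num_nodes))
f2 = np.random.randint(10, 20, size=(num_nodes))
f3 = np.random.randint(20, 30, size=(num_nodes))
f_final = np.stack((f1, f2, f3), axis=1)

# Cast the node features to float32 before they reach any nn.Linear layer.
x = torch.from_numpy(f_final).float()   # instead of x.type(torch.LongTensor)
print(x.dtype)  # torch.float32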

How to plot a vertical line on a bar plot in Bokeh?

Based on the first example in the Bokeh user guide,
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.models import Span

output_file("bars.html")

fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
counts = [5, 3, 4, 2, 4, 6]

p = figure(x_range=fruits, plot_height=250, title="Fruit Counts",
           toolbar_location=None, tools="")
p.vbar(x=fruits, top=counts, width=0.9)

# these two lines
vline = Span(location='Apples', dimension='height', line_color='blue', line_width=4)
p.renderers.extend([vline])

p.xgrid.grid_line_color = None
p.y_range.start = 0
show(p)
I am trying to add a vertical line to a bar plot whose x-range consists of categories. However, this does not seem to be possible: the code raises "ValueError: expected a value of type Real, got Apples of type str".
location='Apples' does not work as intended, since a numeric value is expected.
One solution is to convert the categorical value to the corresponding numeric value on the plot:
index = p.x_range.factors.index("Apples")
delta = (p.x_range.end - p.x_range.start) / len(p.x_range.factors)
location = delta / 2 + index
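Applied to the snippet from the question (reusing p and fruits; with Bokeh's default synthetic coordinates each category spans one unit, so delta is 1 and the location simplifies to index + 0.5):

from bokeh.models import Span

index = fruits.index("Apples")
# Center of the "Apples" category on the synthetic numeric axis.
vline = Span(location=index + 0.5, dimension='height',
             line_color='blue', line_width=4)
p.renderers.extend([vline])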
If the plot is dynamic (e.g. the values are not known when the plot is built), an auxiliary JS function can do the conversion:
function _value_to_location(x_range, value) {
    var index = x_range.factors.findIndex(x => x == value);
    var delta = (x_range.end - x_range.start) / x_range.factors.length;
    return delta / 2 + index;
}
...
vline.location = _value_to_location(figure.x_range, "Apples");

How to use pykalman filter_update for online regression

I want to apply Kalman regression recursively to an incoming stream of price data using kf.filter_update(), but I can't make it work. Here's the example code framing the problem:
The dataset (i.e. the stream):
DateTime CAT DOG
2015-01-02 09:01:00, 1471.24, 9868.76
2015-01-02 09:02:00, 1471.75, 9877.75
2015-01-02 09:03:00, 1471.81, 9867.70
2015-01-02 09:04:00, 1471.59, 9849.03
2015-01-02 09:05:00, 1471.45, 9840.15
2015-01-02 09:06:00, 1471.16, 9852.71
2015-01-02 09:07:00, 1471.30, 9860.24
2015-01-02 09:08:00, 1471.39, 9862.94
The data is read into a Pandas dataframe and the following code simulates the stream by iterating over the df:
from pykalman import KalmanFilter
import numpy as np
import pandas as pd

df = pd.read_csv('data.txt')
df.dropna(inplace=True)

history = {}
history["spread"] = []
history["state_means"] = []
history["state_covs"] = []

for idx, row in df.iterrows():
    if idx == 0:  # Initialize the Kalman filter
        delta = 1e-9
        trans_cov = delta / (1 - delta) * np.eye(2)
        obs_mat = np.vstack([df.iloc[0].CAT, np.ones(df.iloc[0].CAT.shape)]).T[:, np.newaxis]
        kf = KalmanFilter(n_dim_obs=1, n_dim_state=2,
                          initial_state_mean=np.zeros(2),
                          initial_state_covariance=np.ones((2, 2)),
                          transition_matrices=np.eye(2),
                          observation_matrices=obs_mat,
                          observation_covariance=1.0,
                          transition_covariance=trans_cov)
        state_means, state_covs = kf.filter(np.asarray(df.iloc[0].DOG))
        history["state_means"], history["state_covs"] = state_means, state_covs
        slope = state_means[:, 0]
        print "SLOPE", slope
    else:
        state_means, state_covs = kf.filter_update(history["state_means"][-1],
                                                   history["state_covs"][-1],
                                                   observation=np.asarray(df.iloc[idx].DOG))
        history["state_means"].append(state_means)
        history["state_covs"].append(state_covs)
        slope = state_means[:, 0]
        print "SLOPE", slope
The Kalman filter initializes properly and I get the first regression coefficient, but the subsequent updates throw an exception:
SLOPE [ 6.70319125]
Traceback (most recent call last):
  File "C:/Users/.../KalmanUpdate_example.py", line 50, in <module>
    KalmanOnline(df)
  File "C:/Users/.../KalmanUpdate_example.py", line 43, in KalmanOnline
    state_means, state_covs = kf.filter_update(history["state_means"][-1], history["state_covs"][-1], observation = np.asarray(df.iloc[idx].DOG))
  File "C:\Python27\Lib\site-packages\pykalman\standard.py", line 1253, in filter_update
    2, "observation_matrix"
  File "C:\Python27\Lib\site-packages\pykalman\standard.py", line 38, in _arg_or_default
    + ' You must specify it manually.') % (name,)
ValueError: observation_matrix is not constant for all time. You must specify it manually.

Process finished with exit code 1
It seems intuitively clear that the observation matrix is required (it's provided in the initial step, but not in the updating steps), but I cannot figure out how to set it up properly. Any feedback would be highly appreciated.
Pykalman allows you to declare the observation matrix in two ways:
[n_dim_obs, n_dim_state] - one constant matrix, used for the whole estimation
[n_timesteps, n_dim_obs, n_dim_state] - a separate matrix for each estimation step
In your code you used the time-varying form (that's why "observation_matrix is not constant for all time"). But then you called filter_update in the loop, and Pykalman could not tell which observation matrix to use in each iteration.
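For concreteness, a small sketch of the two accepted shapes (the numbers are CAT values from the sample data; nothing here is pykalman-specific beyond the shapes):

import numpy as np

cat = np.array([1471.24, 1471.75, 1471.81])  # regressor values from the stream

# Constant form, shape [n_dim_obs, n_dim_state]: one matrix reused every step.
obs_mat_const = np.array([[cat[0], 1.0]])                          # (1, 2)

# Time-varying form, shape [n_timesteps, n_dim_obs, n_dim_state]: one per step.
obs_mat_tv = np.vstack([cat, np.ones(len(cat))]).T[:, np.newaxis]  # (3, 1, 2)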
I would declare the observation matrix as a constant 2-element array at initialization and pass the current observation matrix explicitly to each filter_update call:
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('data.txt')
df.dropna(inplace=True)

n = df.shape[0]
n_dim_state = 2
history_state_means = np.zeros((n, n_dim_state))
history_state_covs = np.zeros((n, n_dim_state, n_dim_state))

for idx, row in df.iterrows():
    if idx == 0:  # Initialize the Kalman filter
        delta = 1e-9
        trans_cov = delta / (1 - delta) * np.eye(2)
        obs_mat = [df.iloc[0].CAT, 1]
        kf = KalmanFilter(n_dim_obs=1, n_dim_state=2,
                          initial_state_mean=np.zeros(2),
                          initial_state_covariance=np.ones((2, 2)),
                          transition_matrices=np.eye(2),
                          observation_matrices=obs_mat,
                          observation_covariance=1.0,
                          transition_covariance=trans_cov)
        history_state_means[0], history_state_covs[0] = kf.filter(np.asarray(df.iloc[0].DOG))
        slope = history_state_means[0, 0]
        print "SLOPE", slope
    else:
        obs_mat = np.asarray([[df.iloc[idx].CAT, 1]])
        history_state_means[idx], history_state_covs[idx] = kf.filter_update(history_state_means[idx - 1],
                                                                             history_state_covs[idx - 1],
                                                                             observation=df.iloc[idx].DOG,
                                                                             observation_matrix=obs_mat)
        slope = history_state_means[idx, 0]
        print "SLOPE", slope

plt.figure(1)
plt.plot(history_state_means[:, 0], label="Slope")
plt.grid()
plt.show()
It results in the following output:
SLOPE 6.70322464199
SLOPE 6.70512037269
SLOPE 6.70337808649
SLOPE 6.69956406785
SLOPE 6.6961767953
SLOPE 6.69558438828
SLOPE 6.69581682668
SLOPE 6.69617670459
Pykalman is not well documented and there are mistakes on the official page, so I recommend validating the result against the offline estimation done in one step. In that case the observation matrix has to be declared as you did in your code.
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('data.txt')
df.dropna(inplace=True)

delta = 1e-9
trans_cov = delta / (1 - delta) * np.eye(2)
obs_mat = np.vstack([df.iloc[:].CAT, np.ones(df.iloc[:].CAT.shape)]).T[:, np.newaxis]

kf = KalmanFilter(n_dim_obs=1, n_dim_state=2,
                  initial_state_mean=np.zeros(2),
                  initial_state_covariance=np.ones((2, 2)),
                  transition_matrices=np.eye(2),
                  observation_matrices=obs_mat,
                  observation_covariance=1.0,
                  transition_covariance=trans_cov)

state_means, state_covs = kf.filter(df.iloc[:].DOG)
print "SLOPE", state_means[:, 0]

plt.figure(1)
plt.plot(state_means[:, 0], label="Slope")
plt.grid()
plt.show()
The result is the same.

How to plot a histogram in Jython using Ptolemy?

Does anyone know how to plot an array of float numbers, 30 elements, as a histogram with Ptolemy in Jython? Thank you.
from javax.swing import JButton, JFrame, JPanel, JLabel, JMenuBar
from java.awt import GridBagLayout, GridBagConstraints
from java.awt import BorderLayout as BorderLayout
from javax.swing import WindowConstants
from ptolemy import *
from ptolemy.plot import Plot as Plot
from RainfallAnalysis import RainfallAnalysis
from jarray import array

class Histogram(Plot):
    dataset = 0
    theJFrame = JFrame()

    def __init__(self):
        self.theJFrame.setSize(400, 350)  # outer box
        self.setSize(350, 300)            # graph window
        self.setButtons(True)             # buttons to print, edit, etc.
        self.setMarksStyle("none")        # do not show marks at points

    ##
    # Draw a histogram.
    # It is assumed that all bins are of equal size.
    # @param name The name to give this histogram in the key
    # @param xMin minimum of x-range covered by histogram
    # @param xMax maximum of x-range covered by histogram
    # @param y array of bin heights; the length of the array gives the number of points
    def drawHistogram(self, name, xMin, xMax, y):
        binWidth = (xMax - xMin) / len(y)
        self.setBars(binWidth, 0.0)
        self.setConnected(False)  # do not join bars with a line
        first = True
        self.setYLabel("Rain Measurement")
        self.setXLabel("days")
        for i in range(len(y)):  # loop to add bars to the plot
            x = i
            self.addPoint(self.dataset, x, y[i], not first)
            first = False
        self.addLegend(self.dataset, name)
        self.dataset = self.dataset + 1

    def showIt(self):
        gridbag = GridBagLayout()
        c = GridBagConstraints()
        self.theJFrame.getContentPane().setLayout(gridbag)
        c.gridx = 0
        c.gridy = 0
        c.gridwidth = 1
        gridbag.setConstraints(self, c)
        self.theJFrame.getContentPane().add(self)
        self.theJFrame.setVisible(True)
        self.theJFrame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE)

if __name__ == '__main__':
    h = Histogram()
    rf = RainfallAnalysis()
    min = rf.getMin()
    max = rf.getMax()
    data = rf.getData()
    h.drawHistogram("rainfall", min, max, data)
    h.showIt()
And data is simply an array of double numbers.
