Matplotlib: Cursor snap to plotted data with datetime axis

I have a plot of 3 data sets that have datetime objects on the x axis.
I want to have a cursor that snaps to the data and shows the precise x and y value.
I already have a "snap to cursor", but that only works for scalar x axes.
Can anyone help me modify the snap to cursor so that it works for datetime x axes as well?
Here are my data plots:
import numpy as np
import matplotlib.pyplot as plot
import matplotlib.ticker as mticker
import matplotlib.dates as dates
import datetime
import Helpers

fig = plot.figure(1)

DAU = (  2,  20,  25,  60, 190, 210,  18, 196, 212)
WAU = ( 50, 160, 412, 403, 308, 379, 345, 299, 258)
MAU = (760, 620, 487, 751, 612, 601, 546, 409, 457)

firstDay = datetime.datetime(2012, 1, 15)
#create an array with len(DAU) entries from given starting day
dayArray = [firstDay + datetime.timedelta(days = i) for i in xrange(len(DAU))]

line1 = plot.plot(dayArray, DAU, 'o-', color = '#336699')
line2 = plot.plot(dayArray, WAU, 'o-', color = '#993333')
line3 = plot.plot(dayArray, MAU, 'o-', color = '#89a54e')

ax = plot.subplot(111)
dateLocator = mticker.MultipleLocator(2)
dateFormatter = dates.DateFormatter('%d.%m.%Y')
ax.xaxis.set_major_locator(dateLocator)
ax.xaxis.set_major_formatter(dateFormatter)
fig.autofmt_xdate(rotation = 90, ha = 'center')

yMax = max(np.max(DAU), np.max(WAU), np.max(MAU))
yLimit = 100 - (yMax % 100) + yMax
plot.yticks(np.arange(0, yLimit + 1, 100))

plot.title('Active users', weight = 'bold')
plot.grid(True, axis = 'both')
plot.subplots_adjust(bottom = 0.2)
plot.subplots_adjust(right = 0.82)

legend = plot.legend((line1[0], line2[0], line3[0]),
                     ('DAU', 'WAU', 'MAU'),
                     'upper left',
                     bbox_to_anchor = [1, 1],
                     shadow = True)
frame = legend.get_frame()
frame.set_facecolor('0.80')
for t in legend.get_texts():
    t.set_fontsize('small')

#THIS DOES NOT WORK
cursor = Helpers.SnaptoCursor(ax, dayArray, DAU, 'euro daily')
plot.connect('motion_notify_event', cursor.mouse_move)

plot.show()
And this is my module "Helpers" that contains the "SnaptoCursor" class:
(I got the basic SnaptoCursor class from somewhere else and modified it a little bit)
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plot

def minsec(sec, unused):
    """
    Returns a string of the input seconds formatted as mm'ss''.
    """
    minutes = sec // 60
    sec = sec - minutes * 60
    return '{0:02d}\'{1:02d}\'\''.format(int(minutes), int(sec))

class SnaptoCursor():
    """
    A cursor with crosshair snaps to the nearest x point.
    For simplicity, I'm assuming x is sorted.
    """
    def __init__(self, ax, x, y, formatting, z = None):
        """
        ax: plot axis
        x: plot spacing
        y: plot data
        formatting: string flag for desired formatting
        z: optional second plot data
        """
        self.ax = ax
        self.lx = ax.axhline(color = 'k')  #the horiz line
        self.ly = ax.axvline(color = 'k')  #the vert line
        self.x = x
        self.y = y
        self.z = z
        # text location in axes coords
        self.txt = ax.text(0.6, 0.9, '', transform = ax.transAxes)
        self.formatting = formatting

    def format(self, x, y):
        if self.formatting == 'minsec':
            return 'x={0:d}, y='.format(x) + minsec(y, 0)
        elif self.formatting == 'daily euro':
            return u'day {0:d}: {1:.2f}€'.format(x, y)

    def mouse_move(self, event):
        if not event.inaxes:
            return
        mouseX, mouseY = event.xdata, event.ydata
        #searchsorted: returns an index or indices that suggest where x should be inserted
        #so that the order of the list self.x would be preserved
        indx = np.searchsorted(self.x, [mouseX])[0]
        mouseX = self.x[indx]
        #if z wasn't defined
        if self.z == None:
            mouseY = self.y[indx]
        #if z was defined: compare the distance between mouse and the two plots y and z
        #and use the nearest one
        elif abs(mouseY - self.y[indx]) < abs(mouseY - self.z[indx]):
            mouseY = self.y[indx]
        else:
            mouseY = self.z[indx]
        #update the line positions
        self.lx.set_ydata(mouseY)
        self.ly.set_xdata(mouseX)
        self.txt.set_text(self.format(mouseX, mouseY))
        plot.draw()
Of course this does not work, since I am calling SnaptoCursor with the datetime array dayArray, which later gets compared to the mouse coordinates. Those data types are not comparable.
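To illustrate the mismatch (a minimal sketch of my own; date2num and num2date are the standard matplotlib.dates helpers): on a date axis, event.xdata is a plain float in Matplotlib's date units, not a datetime object, so it has to be converted before it can be compared to the entries of dayArray.

import datetime
import matplotlib.dates as dates

day = datetime.datetime(2012, 1, 16)
num = dates.date2num(day)      # datetime -> float in Matplotlib's date units
print(num)                     # a large float; the exact value depends on the Matplotlib date epoch
print(dates.num2date(num))     # float -> timezone-aware datetime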

I got it!
The problem was these two lines in the __init__ method of the SnaptoCursor class:
self.lx = ax.axhline(color = 'k')  #the horiz line
self.ly = ax.axvline(color = 'k')  #the vert line
They were messing up the datetime x axis (which uses date ordinals of around 730,000, for example), so you just have to initialize the lines' coordinates explicitly:
self.lx = ax.axhline(y = min(y), color = 'k')  #the horiz line
self.ly = ax.axvline(x = min(x), color = 'k')  #the vert line
Then it works just fine!
I'll post my complete SnaptoCursor class below. I have modified it so that it accepts individual formatting strings, and it can take up to 3 input data sets that get snapped to according to your mouse position.
import datetime

import numpy as np
import matplotlib.pyplot as plot
import matplotlib.dates as dates

def percent(x, unused):
    """
    Returns a string of the float number x formatted as %.
    """
    return '{0:1.2f}%'.format(x * 100)

def minsec(sec, unused):
    """
    Returns a string of the input seconds formatted as mm'ss''.
    """
    minutes = sec // 60
    sec = sec - minutes * 60
    return '{0:02d}\'{1:02d}\'\''.format(int(minutes), int(sec))

class SnaptoCursor():
    """
    A cursor with crosshair snaps to the nearest x point.
    For simplicity, I'm assuming x is sorted.
    """
    def __init__(self, ax, x, y, formatting, y2 = None, y3 = None):
        """
        ax: plot axis
        x: plot spacing
        y: plot data
        formatting: string flag for desired formatting
        y2: optional second plot data
        y3: optional third plot data
        """
        self.ax = ax
        self.lx = ax.axhline(y = min(y), color = 'k')  #the horiz line
        self.ly = ax.axvline(x = min(x), color = 'k')  #the vert line
        self.x = x
        self.y = y
        self.y2 = y2
        self.y3 = y3
        # text location in axes coords
        self.txt = ax.text(0.6, 0.9, '', transform = ax.transAxes)
        self.formatting = formatting

    def format(self, x, y):
        if self.formatting == 'minsec':
            return 'x={0:d}, y='.format(x) + minsec(y, 0)
        elif self.formatting == 'decimal':
            return 'x={0:d}, y={1:d}'.format(x, int(y))
        elif self.formatting == 'date decimal':
            return 'x={0:%d.%m.%Y}, y={1:d}'.format(x, int(y))
        elif self.formatting == 'decimal percent':
            return 'x={0:d}, y={1:d}%'.format(x, int(y * 100))
        elif self.formatting == 'float':
            return 'x={0:d}, y={1:.2f}'.format(x, y)
        elif self.formatting == 'float percent':
            return 'x={0:d}, y='.format(x) + percent(y, 0)
        elif self.formatting == 'daily euro':
            return u'day {0:d}: {1:.2f}€'.format(x, y)

    def mouse_move(self, event):
        if not event.inaxes:
            return
        mouseX, mouseY = event.xdata, event.ydata
        #on a datetime axis, event.xdata is a float in date units: convert it back to datetime
        if type(self.x[0]) == datetime.datetime:
            mouseX = dates.num2date(int(mouseX)).replace(tzinfo = None)
        #searchsorted: returns an index or indices that suggest where mouseX should be inserted
        #so that the order of the list self.x would be preserved
        indx = np.searchsorted(self.x, [mouseX])[0]
        #if indx is out of bounds
        if indx >= len(self.x):
            indx = len(self.x) - 1
        #if y2 wasn't defined
        if self.y2 is None:
            mouseY = self.y[indx]
        #if y2 was defined AND y3 wasn't defined
        elif self.y3 is None:
            if abs(mouseY - self.y[indx]) < abs(mouseY - self.y2[indx]):
                mouseY = self.y[indx]
            else:
                mouseY = self.y2[indx]
        #if y2 AND y3 were defined
        elif abs(mouseY - self.y2[indx]) < abs(mouseY - self.y[indx]):
            if abs(mouseY - self.y2[indx]) < abs(mouseY - self.y3[indx]):
                mouseY = self.y2[indx]
            else:
                mouseY = self.y3[indx]
        #lastly, compare y with y3
        elif abs(mouseY - self.y[indx]) < abs(mouseY - self.y3[indx]):
            mouseY = self.y[indx]
        else:
            mouseY = self.y3[indx]
        #update the line positions
        self.lx.set_ydata(mouseY)
        self.ly.set_xdata(mouseX)
        self.txt.set_text(self.format(mouseX, mouseY))
        plot.draw()
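Hooked up to the plot from my question, the usage would look something like this (a sketch using the 'date decimal' flag and the optional y2/y3 parameters defined above):

cursor = Helpers.SnaptoCursor(ax, dayArray, DAU, 'date decimal', y2 = WAU, y3 = MAU)
plot.connect('motion_notify_event', cursor.mouse_move)
plot.show()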

Related

How to plot learning curve for my gan in python

I want to plot the learning curve (loss and accuracy) for my GAN, but I have tried different solutions and still can't plot it. I tried to use the fit and history functions, but I get an error and can't plot the loss and accuracy curves. Can anyone help?
I run my code in Google Colab. Since I only train the discriminator directly, I tried to use fit and the history object in the definitions of the discriminator and the GAN, but I still get an error.
from tensorflow.keras.layers import Dense, Conv2DTranspose, LeakyReLU, Reshape, BatchNormalization, Activation, Conv2D
from tensorflow.keras.models import Model, Sequential
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.layers import Conv2D, Flatten, Dropout
from tensorflow.keras import optimizers
import tensorflow as tf
def image_generator():
    generator = Sequential()
    generator.add(Dense(256*4*4, input_shape = (100,)))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU())
    generator.add(Reshape((4,4,256)))
    generator.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding = "same"))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding = "same"))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding = "same"))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Conv2D(3, kernel_size=3, padding = "same", activation='tanh'))
    return(generator)
model_generator = image_generator()
model_generator.summary()
# Define input data
def generate_data_entry(n_samples):
    X = np.random.randn(100 * n_samples)
    X = X.reshape(n_samples, 100)
    return X

def create_data_fake(model_generator, n_samples):
    input = generate_data_entry(n_samples)
    X = model_generator.predict(input)
    y = np.zeros((n_samples, 1))
    return X, y

number_samples = 4
X, _ = create_data_fake(model_generator, number_samples)

# Visualize results
for i in range(number_samples):
    plt.subplot(2, 2, 1 + i)
    plt.axis('off')
    plt.imshow(X[i])
def images_discriminator():
    discriminator = Sequential()
    discriminator.add(Conv2D(64, kernel_size=3, padding = "same", input_shape = (32,32,3)))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Conv2D(128, kernel_size=3, strides=(2,2), padding = "same"))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Conv2D(128, kernel_size=3, strides=(2,2), padding = "same"))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Conv2D(256, kernel_size=3, strides=(2,2), padding = "same"))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Flatten())
    discriminator.add(Dropout(0.4))
    discriminator.add(Dense(1, activation='sigmoid'))
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    discriminator.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return(discriminator)
model_discriminator = images_discriminator()
model_discriminator.summary()
from tensorflow.keras.datasets import cifar10
import random
import tensorflow as tf
import numpy as np
from tensorflow.keras import backend
import pandas as pd
import matplotlib.pyplot as plt
import keras
%matplotlib inline
(Xtrain, Ytrain), (testx, testy) = cifar10.load_data()
def load_images():
    (Xtrain, Ytrain), (testx, testy) = cifar10.load_data()
    # we stayed with the dogs
    indice = np.where(Ytrain == 0)
    indice = indice[0]
    Xtrain = Xtrain[indice, :,:,:]
    # Normalize the data
    X = Xtrain.astype('float32')
    X = (X - 127.5) / 127.5
    return X
print(load_images().shape)
def load_data_reales(dataset, n_samples):
    ix = np.random.randint(0, dataset.shape[0], n_samples)
    X = dataset[ix]
    y = np.ones((n_samples, 1))
    return X, y

def load_data_fake(n_samples):
    X = np.random.rand(32 * 32 * 3 * n_samples)
    X = -1 + X * 2
    X = X.reshape((n_samples, 32, 32, 3))
    y = np.zeros((n_samples, 1))
    return X, y

def train_discriminator(model, dataset, n_iterations=20, batch=128):
    medium_batch = int(batch/2)
    for i in range(n_iterations):
        X_real, y_real = load_data_reales(dataset, medium_batch)
        los_real, acc_real = model.train_on_batch(X_real, y_real)
        X_fake, y_fake = load_data_fake(medium_batch)
        los_fake, acc_fake = model.train_on_batch(X_fake, y_fake)
        print(str(i+1) + ' Real:' + str(acc_real*100) + ', Fake:' + str(acc_fake*100))
dataset = load_images()
train_discriminator(model_discriminator, dataset)
def my_loss_function(x_real, x_fake):
    la = tf.keras.losses.BinaryCrossentropy()
    my_loss_function = la(x_real, x_fake)
    return my_loss_function

def create_gan(discriminator, generator):
    discriminator.trainable = False
    gan = Sequential()
    gan.add(generator)
    gan.add(discriminator)
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    # compile the combined model so it can be trained with train_on_batch
    gan.compile(loss='binary_crossentropy', optimizer=opt)
    return gan
gan = create_gan(model_discriminator,model_generator)
gan.summary()
from datetime import datetime  # used for the timestamped filenames below

def show_images_generated(data_fake, epoch):
    now = datetime.now()
    now = now.strftime("%Y%m%d_%H%M%S")
    # we make the data go from 0 to 1
    data_fake = (data_fake + 1) / 2.0
    for i in range(10):
        plt.imshow(data_fake[i])
        plt.axis('off')
        name = str(epoch) + '_generated_image_' + str(i) + '.png'
        plt.savefig(name, bbox_inches='tight')
        plt.close()

def evaluate_and_save(model_generator, epoch, medium_dataset):
    # We save the model
    now = datetime.now()
    now = now.strftime("%Y%m%d_%H%M%S")
    name = str(epoch) + '_' + str(now) + "_model_generator_" + '.h5'
    model_generator.save(name)
    # We generate new data
    X_real, Y_real = load_data_reales(dataset, medium_dataset)
    X_fake, Y_fake = create_data_fake(model_generator, medium_dataset)
    # We evaluate the model
    _, acc_real = model_discriminator.evaluate(X_real, Y_real)
    _, acc_fake = model_discriminator.evaluate(X_fake, Y_fake)
    print('Acc Real:' + str(acc_real*100) + '% Acc Fake:' + str(acc_fake*100) + '%')
def training(data, model_generator, model_discriminator, epochs, n_batch, beginning = 0):
    dimension_batch = int(data.shape[0]/n_batch)
    medium_dataset = int(n_batch/2)
    # We iterate over the epochs
    for epoch in range(beginning, beginning + epochs):
        # We iterate over all batches
        for batch in range(n_batch):
            # We load the real data
            X_real, Y_real = load_data_reales(dataset, medium_dataset)
            # We train the discriminator with real data
            cost_discriminator_real, _ = model_discriminator.train_on_batch(X_real, Y_real)
            X_fake, Y_fake = create_data_fake(model_generator, medium_dataset)
            cost_discriminator_fake, _ = model_discriminator.train_on_batch(X_fake, Y_fake)
            # We generate input images for the GAN
            X_gan = generate_data_entry(medium_dataset)
            Y_gan = np.ones((medium_dataset, 1))
            # We train the GAN with fake data
            cost_gan = gan.train_on_batch(X_gan, Y_gan)
        # Every 10 epochs we show the results and cost
        if (epoch+1) % 10 == 0:
            evaluate_and_save(model_generator, epoch = epoch, medium_dataset = medium_dataset)
            show_images_generated(X_fake, epoch = epoch)
training(dataset, model_generator, model_discriminator, epochs = 10, n_batch=128, beginning = 0)
X_fake, _ = create_data_fake(n_samples=49, model_generator=model_generator)
X_fake = (X_fake+1)/2
for i in range(49):
    plt.subplot(7, 7, i+1)
    plt.axis('off')
    plt.imshow(X_fake[i])
# We generate new data
X_real1,Y_real1 = load_data_reales(dataset, int(15))
X_fake1, Y_fake1 = create_data_fake(model_generator,int(15))
# We evaluate the model
_, acc_real = model_discriminator.evaluate(X_real1, Y_real1)
_, acc_fake = model_discriminator.evaluate(X_fake1, Y_fake1)
print('Acc Real:' + str(acc_real*100) + '% Acc Fake:' + str(acc_fake*100)+'%')
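One common way to get a learning curve when training a GAN manually (a sketch of my own, not from the original post; plot_learning_curve is a hypothetical helper) is to collect the values returned by train_on_batch into plain lists inside the training loop, since there is no History object in that case, and plot them afterwards:

import matplotlib.pyplot as plt

# lists to be filled inside the batch loop of training(), e.g.:
#     d_loss_real_hist.append(cost_discriminator_real)
#     d_loss_fake_hist.append(cost_discriminator_fake)
#     g_loss_hist.append(cost_gan)
d_loss_real_hist, d_loss_fake_hist, g_loss_hist = [], [], []

def plot_learning_curve(d_loss_real_hist, d_loss_fake_hist, g_loss_hist):
    plt.plot(d_loss_real_hist, label='discriminator loss (real)')
    plt.plot(d_loss_fake_hist, label='discriminator loss (fake)')
    plt.plot(g_loss_hist, label='generator (GAN) loss')
    plt.xlabel('batch updates')
    plt.ylabel('loss')
    plt.legend()
    plt.show()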

deconvolution (ConvTranspose2d) initialization with bilinear interpolation in pytorch?

I am running a PyTorch implementation of this article (https://arxiv.org/pdf/1604.03650.pdf).
The article says to initialize the deconv layers with bilinear interpolation, which is not done in the code. It states that if the deconvolution scales the data by a factor S, the initial weights are defined as:
[image from the question: the article's formula for the initial deconvolution weights]
Does anyone know how I can implement it?
This is the neural network model designed based on the article.
In other words, I don't know how to initialize the deconvolution layers (for example, the deconv_1 layer in this code).
import torch
import torch.nn as nn
import numpy as np
import torchvision
import torch.nn.functional as F
cfg = [64, 128, 256, 512, 512]
class Deep3d(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, device=torch.device('cpu')):
        super(Deep3d, self).__init__()
        self.device = device
        vgg16 = torchvision.models.vgg16_bn(pretrained=True)

        modules = []
        layer = []
        for l in vgg16.features:
            if isinstance(l, nn.MaxPool2d):
                layer.append(l)
                modules.append(layer)
                layer = []
            else:
                layer.append(l)

        scale = 1
        deconv = []
        layer = []
        for m in range(len(modules)):
            layer.append(nn.Conv2d(cfg[m], cfg[m], kernel_size=3, stride=1, padding=True))
            layer.append(nn.ReLU(inplace=True))
            layer.append(nn.Conv2d(cfg[m], cfg[m], kernel_size=3, stride=1, padding=True))
            layer.append(nn.ReLU(inplace=True))
            if(m==0):
                layer.append(nn.ConvTranspose2d(cfg[m], 65, kernel_size=1, stride=1, padding=(0,0)))
            else:
                scale *= 2
                layer.append(nn.ConvTranspose2d(cfg[m], 65, kernel_size=scale*2, stride=scale, padding=(scale//2, scale//2)))
            deconv.append(layer) # add blocks of layers to deconv part of the network
            layer = []

        self.module_1 = nn.Sequential(*modules[0])
        self.module_2 = nn.Sequential(*modules[1])
        self.module_3 = nn.Sequential(*modules[2])
        self.module_4 = nn.Sequential(*modules[3])
        self.module_5 = nn.Sequential(*modules[4])
        self.deconv_1 = nn.Sequential(*deconv[0])
        self.deconv_2 = nn.Sequential(*deconv[1])
        self.deconv_3 = nn.Sequential(*deconv[2])
        self.deconv_4 = nn.Sequential(*deconv[3])
        self.deconv_5 = nn.Sequential(*deconv[4])

        self.linear_module = nn.Sequential(*[nn.Linear(15360, 4096),   # hyperparam choice
                                             nn.ReLU(inplace=True),
                                             nn.Dropout(p=0.5),
                                             nn.Linear(4096, 1950)])   # 1950=65(disparity range)*10*3(10*3 is feature map size)

        self.deconv_6 = nn.Sequential(*[nn.ConvTranspose2d(65, 65, kernel_size=scale*2, stride=scale, padding=(scale//2, scale//2))])
        self.upconv_final = nn.Sequential(*[nn.ConvTranspose2d(65, 65, kernel_size=(4,4), stride=2, padding=(1,1)),
                                            nn.ReLU(inplace=True),
                                            nn.Conv2d(65, 65, kernel_size=(3,3), stride=1, padding=(1,1)),
                                            nn.Softmax(dim=1)])

        for block in [self.deconv_1, self.deconv_2, self.deconv_3, self.deconv_4, self.deconv_5,
                      self.deconv_6, self.linear_module, self.upconv_final]:
            for m in block:
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.Linear):
                    nn.init.normal_(m.weight, 0, 0.01)
                    nn.init.constant_(m.bias, 0)
    def forward(self, orig_x, x):
        x_copy = orig_x
        pred = []
        out_1 = self.module_1(x)
        out_2 = self.module_2(out_1)
        out_3 = self.module_3(out_2)
        out_4 = self.module_4(out_3)
        out_5 = self.module_5(out_4)
        # print(out_5.shape)
        out_5_flatten = out_5.view(x_copy.shape[0], -1)
        out_6 = self.linear_module(out_5_flatten)

        p1 = self.deconv_1(out_1)
        p2 = self.deconv_2(out_2)
        p3 = self.deconv_3(out_3)
        p4 = self.deconv_4(out_4)
        p5 = self.deconv_5(out_5)
        # print(p5.shape)
        p6 = self.deconv_6(out_6.view(x_copy.shape[0], 65, 3, 10))

        pred.append(p1)
        pred.append(p2)
        pred.append(p3)
        pred.append(p4)
        pred.append(p5)
        pred.append(p6)

        out = torch.zeros(pred[0].shape).to(self.device)
        for p in pred:
            out = torch.add(out, p)

        out = self.upconv_final(out)   # to be elt wise multiplied with shifted left views
        out = F.interpolate(out, scale_factor=4, mode='bilinear')   # upscale to match left input size

        new_right_image = torch.zeros(x_copy.size()).to(self.device)
        stacked_shifted_view = None
        stacked_out = None
        for depth_map_idx in range(-33, 32):
            shifted_input_view = torch.zeros(x_copy.size()).to(self.device)
            if depth_map_idx < 0:
                shifted_input_view[:,:,:,:depth_map_idx] = x_copy[:,:,:,-depth_map_idx:]
            elif depth_map_idx == 0:
                shifted_input_view = x_copy
            else:
                shifted_input_view[:,:,:,depth_map_idx:] = x_copy[:,:,:,:-depth_map_idx]

            if stacked_shifted_view is None:
                stacked_shifted_view = shifted_input_view.unsqueeze(1)
            else:
                stacked_shifted_view = torch.cat((stacked_shifted_view, shifted_input_view.unsqueeze(1)), dim=1)

            if stacked_out is None:
                stacked_out = out[:, depth_map_idx+33:depth_map_idx+34, :, :].unsqueeze(1)
            else:
                stacked_out = torch.cat((stacked_out, out[:, depth_map_idx+33:depth_map_idx+34, :, :].unsqueeze(1)), dim=1)

        softmaxed_stacked_shifted_view = stacked_shifted_view
        mult_soft_shift_out = torch.mul(stacked_out, softmaxed_stacked_shifted_view)
        final_rt_image = torch.sum(mult_soft_shift_out, dim=1)
        return final_rt_image
# if(__name__=='__main__'):
# vgg16 = torchvision.models.vgg16(pretrained=True)
# model = Deep3d().to(torch.device('cpu'))
# out = model(torch.randn(10,3,384,1280),torch.randn(10,3,96,320))
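One way to do the bilinear initialization (a sketch based on the widely used FCN-style bilinear upsampling kernel; make_bilinear_weights is a hypothetical helper name, and it assumes square kernels like the ConvTranspose2d layers above):

import torch
import torch.nn as nn

def make_bilinear_weights(in_channels, out_channels, kernel_size):
    # classic bilinear upsampling kernel, copied onto matching channel pairs
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = torch.arange(kernel_size).float()
    filt_1d = 1 - torch.abs(og - center) / factor
    filt_2d = filt_1d[:, None] * filt_1d[None, :]
    weight = torch.zeros(in_channels, out_channels, kernel_size, kernel_size)
    for i in range(min(in_channels, out_channels)):
        weight[i, i] = filt_2d
    return weight

# example: initialize a ConvTranspose2d like the ones used in the deconv blocks
deconv_up = nn.ConvTranspose2d(65, 65, kernel_size=4, stride=2, padding=1)
with torch.no_grad():
    deconv_up.weight.copy_(make_bilinear_weights(65, 65, 4))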

How to embed plot inside wx.SplitterWindow (right panel)?

I'm trying to embed a plot inside the right panel of a wx.SplitterWindow. How can I add the plot to the splitter window? Please find the link to the dataset here:
https://www.dropbox.com/s/ncy6dlpm79p578s/Dataset.zip?dl=0.
The file contains rows and columns of wavelength and reflectance.
import wx
from pylab import *
import asciitable
import matplotlib.pyplot as plt
import os
from wxmplot import ImageMatrixFrame

class RandomObj(object):
    def __init__(self, name):
        self.name = name

class SLI(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, -1, size=(820, 450))
        splitter = wx.SplitterWindow(self, style = wx.SP_BORDER)
        leftPanel = wx.Panel(splitter, size=(400,100))
        rightPanel = wx.Panel(splitter, size=(400,100))

        ####Tree Widget#####
        self.tree = wx.TreeCtrl(leftPanel)
        leftSizer = wx.BoxSizer(wx.VERTICAL)
        leftSizer.Add(self.tree, 1, wx.EXPAND | wx.ALIGN_CENTER)
        leftPanel.SetSizer(leftSizer)

        rightSizer = wx.BoxSizer(wx.VERTICAL)
        self.display = wx.StaticText(rightPanel, -1, '', (10, 10),
                                     style=wx.ALIGN_CENTRE)
        rightSizer.Add(self.display, -1, wx.EXPAND)
        rightPanel.SetSizer(rightSizer)
        splitter.SplitVertically(leftPanel, rightPanel)
        ##### Splitter ends ####

        root = self.tree.AddRoot('Database')
        self.tree.AppendItem(root, 'USGS')
        files = []
        self.dname = []
        self.test = []
        for dirname, dirnames, filenames in os.walk('.\USGS'):
            for filename in filenames:
                files.append(os.path.join(dirname, filename))
                self.test.append(filename)
                self.tree.AppendItem(self.tree.GetLastChild(root), filename)
        self.dname = files[:]

        self.tree.AppendItem(root, 'ASTER')
        for dirname, dirnames, filenames in os.walk('.\ASTER'):
            for filename in filenames:
                files.append(os.path.join(dirname, filename))
                self.test.append(filename)
                self.tree.AppendItem(self.tree.GetLastChild(root), filename)
        self.dname = files[:]

        self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.ASTER, self.tree)

    def ASTER(self, event):
        self.item = event.GetItem()
        value1 = self.tree.GetItemText(self.item)
        value2 = 0
        value3 = 1
        self.item = None
        for k in self.test:
            if value1 == k:
                value2 += 1
                break
            else:
                value2 += 1
        for i in self.dname:
            if value3 == value2:
                array = []
                f = open(i, 'r')
                for j in xrange(27):
                    f.next()
                for line in f:
                    array.append(line)
                data = asciitable.read(array)
                plot(data.col1, data.col2)
                title(value1)
                show()
                break
            else:
                value3 += 1

app = wx.App(None)
frame = ImageMatrixFrame()
SLI().Show()
app.MainLoop()
How do I insert the plot window inside the right panel of the splitter?
I am not 100% sure I understand your code - there are some formatting and indentation problems for sure, and I am not familiar with asciitable. But, that said, a wxmplot.PlotPanel or ImagePanel can be embedded in a wxPython frame that uses a splitter. An example might look like the code below. I tried to make it short, but also complete, using plain wxPython. For a more complete application, you'd probably want to put the reading of the data files into a separate class, etc. Anyway, this uses your Dataset folder and should mostly work to show the concepts:
#!/usr/bin/env python
import os
import wx
from wxmplot import PlotPanel
# see https://gist.github.com/newville/e805a6454c4e4c0e010bf0b3cc796d52
from asciifile import read_ascii

LEFTSTYLE = wx.ALIGN_LEFT|wx.GROW|wx.ALL

def pack(window, sizer, expand=1.1):
    "simple wxPython pack function"
    tsize = window.GetSize()
    msize = window.GetMinSize()
    window.SetSizer(sizer)
    sizer.Fit(window)
    nsize = (int(1.1*max(msize[0], tsize[0])),
             int(1.1*max(msize[1], tsize[1])))
    window.SetSize(nsize)

class SpectraPlotterFrame(wx.Frame):
    def __init__(self, data_folder):
        wx.Frame.__init__(self, None, size=(800, 450))
        self.SetTitle("Data File Plotter: {:s}".format(data_folder))
        self.data_folder = data_folder
        self.current_filename = None

        splitter = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
        splitter.SetMinimumPaneSize(200)

        # left side: ListBox of File Names
        l_panel = wx.Panel(splitter)
        l_sizer = wx.BoxSizer(wx.VERTICAL)
        self.filelist = wx.ListBox(l_panel)
        self.filelist.Bind(wx.EVT_LISTBOX, self.onFileChoice)
        l_sizer.Add(self.filelist, 1, LEFTSTYLE, 5)
        pack(l_panel, l_sizer)

        # right side: Panel to choose plot array labels, make plot
        r_panel = wx.Panel(splitter)
        r_sizer = wx.GridBagSizer(3, 3)

        self.xarr = wx.Choice(r_panel, choices=[], size=(175, -1))
        self.yarr = wx.Choice(r_panel, choices=[], size=(175, -1))
        xlabel = wx.StaticText(r_panel, label='X:', style=LEFTSTYLE)
        ylabel = wx.StaticText(r_panel, label='Y:', style=LEFTSTYLE)
        plot_btn = wx.Button(r_panel, label='Show Plot', size=(125, -1))
        plot_btn.Bind(wx.EVT_BUTTON, self.onPlot)

        self.plotpanel = PlotPanel(r_panel, size=(650, 450))

        r_sizer.Add(xlabel, (0, 0), (1, 1), LEFTSTYLE, 2)
        r_sizer.Add(self.xarr, (0, 1), (1, 1), LEFTSTYLE, 2)
        r_sizer.Add(ylabel, (0, 2), (1, 1), LEFTSTYLE, 2)
        r_sizer.Add(self.yarr, (0, 3), (1, 1), LEFTSTYLE, 2)
        r_sizer.Add(plot_btn, (0, 4), (1, 1), LEFTSTYLE, 2)
        r_sizer.Add(self.plotpanel, (1, 0), (1, 6), LEFTSTYLE, 2)
        pack(r_panel, r_sizer)

        splitter.SplitVertically(l_panel, r_panel, 1)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(splitter, 1, LEFTSTYLE, 5)
        pack(self, sizer)

        wx.CallAfter(self.read_datafiles)
        self.Show()
        self.Raise()

    def read_datafiles(self):
        self.datasets = {}
        dfolder = os.path.abspath(self.data_folder)
        for fname in sorted(os.listdir(self.data_folder)):
            try:
                self.datasets[fname] = read_ascii(os.path.join(dfolder, fname))
            except:
                print("Could not read file {:s}".format(fname))
            self.filelist.Append(fname)

    def onFileChoice(self, event=None):
        self.current_filename = fname = event.GetString()
        for choice, default in ((self.xarr, 0), (self.yarr, 1)):
            choice.Clear()
            choice.AppendItems(self.datasets[fname].array_labels)
            choice.SetSelection(default)

    def onPlot(self, event=None):
        x = self.xarr.GetSelection()
        y = self.yarr.GetSelection()
        xlab = self.xarr.GetStringSelection()
        ylab = self.yarr.GetStringSelection()
        if self.current_filename is not None:
            dset = self.datasets[self.current_filename]
            self.plotpanel.plot(dset.data[x], dset.data[y], xlabel=xlab,
                                ylabel=ylab, label=self.current_filename,
                                show_legend=True)

class SpectraPlotterApp(wx.App):
    def __init__(self, data_folder='.', **kws):
        self.data_folder = data_folder
        wx.App.__init__(self, **kws)

    def createApp(self):
        frame = SpectraPlotterFrame(data_folder=self.data_folder)
        self.SetTopWindow(frame)

    def OnInit(self):
        self.createApp()
        return True

if __name__ == '__main__':
    SpectraPlotterApp(data_folder='Dataset').MainLoop()

python 'str' object has no attribute 'config'

I tried to create a GUI with a grid of labels; clicking the start button should fill randomly chosen labels with random numbers. I cannot get the code to recognize the randomly chosen label and set its text. The labels are created in a loop for a grid of 3 x 5.
from tkinter import *
import random

lbl1 = {}
lbl2 = {}
lbl3 = {}

def fill_auto():
    for i in range(1, 6):
        rd_row = random.randrange(1, 6)
        rd_col = random.randrange(1, 4)
        rd_num = random.randrange(1, 16)
        print(rd_row, rd_col, rd_num)
        pos = str(rd_col) + str(rd_row)
        box = 'lbl' + str(pos)
        print(box)
        box.config(text=rd_num)

root = Tk()
root.geometry('+0+0')
root.configure(bg='black')

for y in range(1, 6):
    lbl1[str(y)] = Label(root, width=5, relief='solid')
    lbl1[str(y)].grid(row=y, column=0)
    lbl2[str(y)] = Label(root, width=5, relief='solid')
    lbl2[str(y)].grid(row=y, column=1)
    lbl3[str(y)] = Label(root, width=5, relief='solid')
    lbl3[str(y)].grid(row=y, column=2)

btn = Button(root, text='start', command=fill_auto)
btn.grid(row=6, column=1)
root.mainloop()
The error happens because 'lbl' + str(pos) only builds a string such as 'lbl12', not a reference to the Label widget, so calling .config on it fails. If you want a grid of labels, it makes sense to use a 2D list instead:
from tkinter import *
import random

# Create variables for the grid width/height
width = 3
height = 5

def fill_auto():
    for i in range(1, 6):
        rd_row = random.randrange(0, height)
        rd_col = random.randrange(0, width)
        rd_num = random.randrange(1, 16)
        # Set the label text
        matrix[rd_row][rd_col].config(text = str(rd_num))

root = Tk()
root.geometry('+0+0')
root.configure(bg='black')

# Helper function to create a label
def make_label(x, y):
    l = Label(root, width=5, relief='solid')
    l.grid(column=x, row=y)
    return l

# Using list comprehension to create 2d list
matrix = [[make_label(x, y) for x in range(width)] for y in range(height)]

btn = Button(root, text='start', command=fill_auto)
btn.grid(row=6, column=1)
root.mainloop()

"ValueError: year is out of range" when attempting to use matplotlib pyplot

I am attempting to get a matplotlib plotting function to be able to produce a graph with the x-axis set as a time axis. However, when I attempt to plot some values against UNIX times, I encounter the error ValueError: year is out of range. What is going wrong and how can it be addressed?
import os
import time
import matplotlib.dates
import matplotlib.pyplot
import shijian

def main():
    data = [
        [1484611200.0, 844.4333],
        [1484524800.0, 783.3373],
        [1484438400.0, 774.194 ],
        [1484352000.0, 769.2299]
    ]
    save_graph_matplotlib(
        values       = data,
        line         = True,
        line_width   = 0.5,
        title_axis_x = "time",
        title_axis_y = "value",
        #time_axis_x = True
    )

def save_graph_matplotlib(
    values              = None,
    title               = None,
    title_axis_x        = None,
    title_axis_y        = None,
    filename            = None,
    directory           = ".",
    overwrite           = True,
    color               = "black",
    LaTeX               = False,
    markers             = True,
    marker_size         = 1,
    aspect              = None,
    line                = False,
    line_style          = "-",
    line_width          = 0.2,
    font_size           = 20,
    scientific_notation = False,
    time_axis_x         = False
    ):

    # 1D or 2D data
    if isinstance(values[0], list):
        x = [element[0] for element in values]
        y = [element[1] for element in values]
    else:
        x = range(0, len(values))
        y = values

    matplotlib.pyplot.ioff()
    if LaTeX is True:
        matplotlib.pyplot.rc("text", usetex = True)
        matplotlib.pyplot.rc("font", family = "serif")
    if filename is None:
        if title is None:
            filename = "graph.png"
        else:
            filename = shijian.propose_filename(
                filename  = title + ".png",
                overwrite = overwrite
            )
    else:
        filename = shijian.propose_filename(
            filename  = filename,
            overwrite = overwrite
        )

    figure = matplotlib.pyplot.figure()

    if title is not None:
        figure.suptitle(
            title,
            fontsize = font_size
        )
    if markers is True:
        matplotlib.pyplot.scatter(
            x,
            y,
            s          = marker_size,
            c          = color,
            edgecolors = "none",
        )
    if line is True:
        matplotlib.pyplot.plot(
            x,
            y,
            line_style,
            c         = color,
            linewidth = line_width
        )

    # Turn on or off axes scientific notation.
    if scientific_notation is False:
        matplotlib.pyplot.gca().get_xaxis().\
            get_major_formatter().set_scientific(False)
        matplotlib.pyplot.gca().get_yaxis().\
            get_major_formatter().set_scientific(False)

    # Set axes titles.
    if title_axis_x is not None:
        matplotlib.pyplot.xlabel(title_axis_x, fontsize = font_size)
    if title_axis_y is not None:
        matplotlib.pyplot.ylabel(title_axis_y, fontsize = font_size)

    # Set axes font size.
    matplotlib.pyplot.xticks(fontsize = font_size)
    matplotlib.pyplot.yticks(fontsize = font_size)

    # Set or do not set axis x as time.
    if time_axis_x:
        time_formatter = matplotlib.dates.DateFormatter("%Y-%m-%d")
        matplotlib.pyplot.axes().xaxis_date()
        matplotlib.pyplot.axes().xaxis.set_major_formatter(time_formatter)
        matplotlib.pyplot.xticks(rotation = -90)

    # Set the aspect ratio.
    if aspect is None:
        matplotlib.pyplot.axes().set_aspect(
            1 / matplotlib.pyplot.axes().get_data_ratio()
        )
    else:
        matplotlib.pyplot.axes().set_aspect(aspect)

    if not os.path.exists(directory):
        os.makedirs(directory)
    matplotlib.pyplot.savefig(
        directory + "/" + filename,
        dpi = 700
    )
    matplotlib.pyplot.close()

if __name__ == "__main__":
    main()
You need to convert your timestamp-like x data to a python datetime object, which can then be used in matplotlib and be understood by the matplotlib.dates.DateFormatter.
This can be done using the datetime.datetime.fromtimestamp() method.
import datetime
import matplotlib.dates
import matplotlib.pyplot as plt

data = [
    [1484611200.0, 844.4333],
    [1484524800.0, 783.3373],
    [1484438400.0, 774.194 ],
    [1484352000.0, 769.2299]
]

x = [datetime.datetime.fromtimestamp(element[0]) for element in data]
y = [element[1] for element in data]

plt.plot(x, y, ls="-", c="b", linewidth=2)
plt.xlabel("Dates")

time_formatter = matplotlib.dates.DateFormatter("%Y-%m-%d")
plt.axes().xaxis.set_major_formatter(time_formatter)
plt.axes().xaxis_date()  # this is not actually necessary

plt.show()
Whilst not directly addressing the text of the question, the error mentioned in the title can also occur when one attempts to plot data on an existing axis whose timeline units don't match those of the plot data (e.g. seconds vs datetime).
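To illustrate that failure mode with a contrived sketch (my own, not from the answer above): once an axis is in date mode, raw UNIX timestamps such as 1484611200.0 are interpreted as Matplotlib date numbers, which map to years far beyond datetime's range, hence the ValueError; converting the timestamps to datetime objects first keeps the units consistent.

import datetime
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# plotting datetime objects switches the x axis into date mode
ax.plot([datetime.datetime(2017, 1, 16), datetime.datetime(2017, 1, 17)], [1, 2])
# plotting raw UNIX timestamps on the same axis would raise "year is out of range" when drawn:
# ax.plot([1484611200.0, 1484697600.0], [3, 4])
# converting the timestamps first avoids the mismatch
ax.plot([datetime.datetime.fromtimestamp(t) for t in (1484611200.0, 1484697600.0)], [3, 4])
plt.show()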
