With this code I want to plot a heat map of eye-tracking gaze data (x, y coordinates on screen) on top of a PNG. I got really close with the help of other posts here in the forum. The only problem left is some stray dots/blurry patches on the output image (see link below). Maybe somebody can help?
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
import pandas as pd
from PIL import Image
with Image.open("/Users/florianteichmann/Desktop/Gaze_Scratch_Paradigm/stimuli_current/trial_image/32space_drop.png") as img3:
    w = img3.width
    h = img3.height

def myplot(x, y, s, bins=1000):
    heatmap, xedges, yedges = np.histogram2d(x, y, bins=bins)
    heatmap = gaussian_filter(heatmap, sigma=s)
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    return heatmap.T, extent

def transparent_cmap(cmap, N=255):
    mycmap = cmap
    mycmap._init()
    mycmap._lut[:, -1] = np.linspace(0, 1, N + 4)
    return mycmap
# Load the gaze data
df = pd.read_csv('file-path')
x = df['X']
y = df['Y']
mycmap = transparent_cmap(plt.cm.Greens)
img2 = plt.imread('file-path')
#sigmas = [0, 16, 32, 64]
s = 64
fig, ax = plt.subplots()
plt.axis([0, w, 0, h])
img, extent = myplot(x, y, s)
ax.imshow(img2, extent=[0, w, 0, h])
ax.imshow(img, extent=[0, w, 0, h], origin='lower', cmap=mycmap)
plt.show()
[output image: the heatmap overlay showing the stray dots/blurry patches]
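One hedged guess about those stray patches: np.histogram2d bins over the min/max of the data by default, so the heatmap grid need not line up with the [0, w] x [0, h] extent passed to imshow, and outlying gaze samples can get smeared into visible blobs. A minimal sketch that pins the histogram to the screen extent instead (assuming the gaze coordinates are in screen pixels):

def myplot(x, y, s, w, h, bins=1000):
    # Bin over the full screen so the heatmap aligns with the background image
    heatmap, xedges, yedges = np.histogram2d(x, y, bins=bins, range=[[0, w], [0, h]])
    heatmap = gaussian_filter(heatmap, sigma=s)
    return heatmap.T, [xedges[0], xedges[-1], yedges[0], yedges[-1]]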
Related
I am trying to calibrate a camera with a 10-50 mm focal length lens. All the images of the chessboard were taken with a square size of 0.25 cm. When I run the findChessboardCorners function of cv2, it fails to detect the chessboard.
[input chessboard image]
import cv2
import numpy as np
import matplotlib.pyplot as plt

file = "filename"
img = cv2.imread(file)
# Color-segmentation to get binary mask
lwr = np.array([0, 0, 90])
upr = np.array([179, 61, 252])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
msk = cv2.inRange(hsv, lwr, upr)
plt.imshow(msk)
plt.show()
# Extract chess-board
krn = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 30))
dlt = cv2.dilate(msk, krn, iterations=5)
res = 255 - cv2.bitwise_and(dlt, msk)
# Displaying chess-board features
res = np.uint8(res)
plt.imshow(res)
plt.show()
# `gray` was never defined above; build it from the input image.
# (CALIB_CB_EXHAUSTIVE belongs to findChessboardCornersSB, so it is dropped here.)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (9, 7),
                                         flags=cv2.CALIB_CB_ADAPTIVE_THRESH +
                                               cv2.CALIB_CB_FAST_CHECK +
                                               cv2.CALIB_CB_NORMALIZE_IMAGE)
if ret:
    print(corners)
    # pattern size must match the (9, 7) used in the detection call
    fnl = cv2.drawChessboardCorners(img, (9, 7), corners, ret)
    plt.imshow(fnl)
    plt.show()
else:
    print("No Checkerboard Found")
I have created a fit via the Gamma_3P distribution with alpha, beta and gamma.
The problem is that the output y values (red) do not match the distribution (blue).
Thanks in advance for any help.
The code follows:
# test numpy plot
from reliability.Distributions import Gamma_Distribution
from reliability.Fitters import Fit_Gamma_3P
import matplotlib.pyplot as plt
import numpy as np  # needed for np.arange below
x_min = 0
x_max = 10000
alpha_G = 1827.9715783463666
beta_G = 0.5175119001035541
gamma_G = 0.9999
Gamma_fit_created = Gamma_Distribution(alpha=alpha_G,beta=beta_G, gamma=gamma_G)
y_values = Gamma_fit_created.SF(xmin = x_min, xmax = x_max, label='fit function',color='green', show_plot = False)
#since the output array is 200 values within the limits, I adapted the x values
x_steps = (x_max - x_min)/len(y_values)
x_values = np.arange(x_min, x_max,x_steps)
x_values_plot = x_values[1:]
y_values_plot = y_values[1:]
plt.plot(x_values_plot, y_values_plot, label='fit function created sf 2',color='red')
Gamma_fit_created.SF(xmin = x_min, xmax = x_max, label='fit function correct',color='blue')
plt.xlim(0,10000)
plt.ylim(0,1)
#plt.plot(Gamma_fit_created.SF(xmin = 0, xmax = 10000, label='fit function created sf',color='blue'))
plt.legend()
plt.show()
This is my output. The functions should be the same, but they are different.
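A hedged guess at the mismatch: if the library evaluates the SF on np.linspace(xmin, xmax, 200) (both endpoints included), then np.arange(x_min, x_max, x_steps), which excludes x_max, produces a slightly different grid, and slicing off the first element of both arrays shifts the pairing further. A minimal sketch of the alignment fix under that assumption:

# Build the x grid the same way the library does: endpoints included
x_values = np.linspace(x_min, x_max, len(y_values))
plt.plot(x_values, y_values, label='fit function created sf 2', color='red')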
I am running a PyTorch implementation of this article (https://arxiv.org/pdf/1604.03650.pdf).
The article says to initialize the deconv layers with bilinear interpolation, which is not done in the code. It states that if a deconvolution scales the data by a factor S, the initial weights are defined as:
[image: the paper's formula for the bilinear-interpolation initial weights]
Does anyone know how I can implement it?
This is the neural network model designed based on the article. In other words, I don't know how to initialize the deconvolution layers (for example the deconv1 layer in this code).
import torch
import torch.nn as nn
import numpy as np
import torchvision
import torch.nn.functional as F
cfg = [64, 128, 256, 512, 512]
class Deep3d(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, device=torch.device('cpu')):
        super(Deep3d, self).__init__()
        self.device = device
        vgg16 = torchvision.models.vgg16_bn(pretrained=True)
        modules = []
        layer = []
        for l in vgg16.features:
            if isinstance(l, nn.MaxPool2d):
                layer.append(l)
                modules.append(layer)
                layer = []
            else:
                layer.append(l)
        scale = 1
        deconv = []
        layer = []
        for m in range(len(modules)):
            layer.append(nn.Conv2d(cfg[m], cfg[m], kernel_size=3, stride=1, padding=1))
            layer.append(nn.ReLU(inplace=True))
            layer.append(nn.Conv2d(cfg[m], cfg[m], kernel_size=3, stride=1, padding=1))
            layer.append(nn.ReLU(inplace=True))
            if m == 0:
                layer.append(nn.ConvTranspose2d(cfg[m], 65, kernel_size=1, stride=1, padding=(0, 0)))
            else:
                scale *= 2
                layer.append(nn.ConvTranspose2d(cfg[m], 65, kernel_size=scale*2, stride=scale, padding=(scale//2, scale//2)))
            deconv.append(layer)  # add blocks of layers to deconv part of the network
            layer = []
        self.module_1 = nn.Sequential(*modules[0])
        self.module_2 = nn.Sequential(*modules[1])
        self.module_3 = nn.Sequential(*modules[2])
        self.module_4 = nn.Sequential(*modules[3])
        self.module_5 = nn.Sequential(*modules[4])
        self.deconv_1 = nn.Sequential(*deconv[0])
        self.deconv_2 = nn.Sequential(*deconv[1])
        self.deconv_3 = nn.Sequential(*deconv[2])
        self.deconv_4 = nn.Sequential(*deconv[3])
        self.deconv_5 = nn.Sequential(*deconv[4])
        self.linear_module = nn.Sequential(*[nn.Linear(15360, 4096),  # hyperparam choice
                                             nn.ReLU(inplace=True),
                                             nn.Dropout(p=0.5),
                                             nn.Linear(4096, 1950)])  # 1950 = 65 (disparity range) * 10*3 (10*3 is feature map size)
        self.deconv_6 = nn.Sequential(*[nn.ConvTranspose2d(65, 65, kernel_size=scale*2, stride=scale, padding=(scale//2, scale//2))])
        self.upconv_final = nn.Sequential(*[nn.ConvTranspose2d(65, 65, kernel_size=(4, 4), stride=2, padding=(1, 1)),
                                            nn.ReLU(inplace=True),
                                            nn.Conv2d(65, 65, kernel_size=(3, 3), stride=1, padding=(1, 1)),
                                            nn.Softmax(dim=1)])
        for block in [self.deconv_1, self.deconv_2, self.deconv_3, self.deconv_4, self.deconv_5, self.deconv_6, self.linear_module, self.upconv_final]:
            for m in block:
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.Linear):
                    nn.init.normal_(m.weight, 0, 0.01)
                    nn.init.constant_(m.bias, 0)

    def forward(self, orig_x, x):
        x_copy = orig_x
        pred = []
        out_1 = self.module_1(x)
        out_2 = self.module_2(out_1)
        out_3 = self.module_3(out_2)
        out_4 = self.module_4(out_3)
        out_5 = self.module_5(out_4)
        # print(out_5.shape)
        out_5_flatten = out_5.view(x_copy.shape[0], -1)
        out_6 = self.linear_module(out_5_flatten)
        p1 = self.deconv_1(out_1)
        p2 = self.deconv_2(out_2)
        p3 = self.deconv_3(out_3)
        p4 = self.deconv_4(out_4)
        p5 = self.deconv_5(out_5)
        # print(p5.shape)
        p6 = self.deconv_6(out_6.view(x_copy.shape[0], 65, 3, 10))
        pred.append(p1)
        pred.append(p2)
        pred.append(p3)
        pred.append(p4)
        pred.append(p5)
        pred.append(p6)
        out = torch.zeros(pred[0].shape).to(self.device)
        for p in pred:
            out = torch.add(out, p)
        out = self.upconv_final(out)  # to be elt wise multiplied with shifted left views
        out = F.interpolate(out, scale_factor=4, mode='bilinear')  # upscale to match left input size
        new_right_image = torch.zeros(x_copy.size()).to(self.device)
        stacked_shifted_view = None
        stacked_out = None
        for depth_map_idx in range(-33, 32):
            shifted_input_view = torch.zeros(x_copy.size()).to(self.device)
            if depth_map_idx < 0:
                shifted_input_view[:, :, :, :depth_map_idx] = x_copy[:, :, :, -depth_map_idx:]
            elif depth_map_idx == 0:
                shifted_input_view = x_copy
            else:
                shifted_input_view[:, :, :, depth_map_idx:] = x_copy[:, :, :, :-depth_map_idx]
            if stacked_shifted_view is None:
                stacked_shifted_view = shifted_input_view.unsqueeze(1)
            else:
                stacked_shifted_view = torch.cat((stacked_shifted_view, shifted_input_view.unsqueeze(1)), dim=1)
            if stacked_out is None:
                stacked_out = out[:, depth_map_idx+33:depth_map_idx+34, :, :].unsqueeze(1)
            else:
                stacked_out = torch.cat((stacked_out, out[:, depth_map_idx+33:depth_map_idx+34, :, :].unsqueeze(1)), dim=1)
        softmaxed_stacked_shifted_view = stacked_shifted_view
        mult_soft_shift_out = torch.mul(stacked_out, softmaxed_stacked_shifted_view)
        final_rt_image = torch.sum(mult_soft_shift_out, dim=1)
        return final_rt_image

# if __name__ == '__main__':
#     vgg16 = torchvision.models.vgg16(pretrained=True)
#     model = Deep3d().to(torch.device('cpu'))
#     out = model(torch.randn(10, 3, 384, 1280), torch.randn(10, 3, 96, 320))
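To answer the actual question: the usual way to realize the paper's bilinear initialization is the FCN-style recipe, where a deconv that upsamples by a factor S gets kernel_size 2S and the kernel value at (i, j) is (1 - |i/f - c|) * (1 - |j/f - c|), with f = (k + 1) // 2 and c the kernel center. A minimal sketch follows; init_bilinear_ is a hypothetical helper, not part of the original code, and where in_channels != out_channels only the diagonal channel pairs can be seeded this way:

import torch
import torch.nn as nn

def init_bilinear_(deconv: nn.ConvTranspose2d):
    # Hypothetical helper: seed a transposed conv with a bilinear-interpolation kernel
    in_ch, out_ch, k, _ = deconv.weight.shape  # ConvTranspose2d weight: (in, out, kH, kW)
    factor = (k + 1) // 2
    center = factor - 1 if k % 2 == 1 else factor - 0.5
    og = torch.arange(k, dtype=torch.float32)
    filt = 1 - torch.abs(og - center) / factor   # 1D triangle filter
    kernel2d = filt[:, None] * filt[None, :]     # outer product -> 2D bilinear kernel
    w = torch.zeros_like(deconv.weight)
    for c in range(min(in_ch, out_ch)):          # upsample each channel independently
        w[c, c] = kernel2d
    with torch.no_grad():
        deconv.weight.copy_(w)
        if deconv.bias is not None:
            deconv.bias.zero_()

It could then be applied at the end of __init__, alongside the existing Kaiming/normal initialization:

for block in [self.deconv_1, self.deconv_2, self.deconv_3, self.deconv_4, self.deconv_5, self.deconv_6]:
    for m in block:
        if isinstance(m, nn.ConvTranspose2d):
            init_bilinear_(m)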
It seems the window redraws everything each time a new rect is added, even though setUpdatesEnabled is set to False for both win and plt. How can I disable updates?
def f(n):
    import pyqtgraph as pg
    pg.setConfigOption('background', '#a0f0ff')
    win = pg.GraphicsWindow()
    win_size = 1000
    win.setGeometry(500, 30, win_size, win_size)
    plt = win.addPlot()
    # setUpdatesEnabled is a method; the original `win.setUpdatesEnabled = False`
    # merely shadowed it with an attribute instead of disabling updates.
    # (PlotItem is a QGraphicsWidget and has no setUpdatesEnabled at all,
    # so the `plt.setUpdatesEnabled = False` line is dropped.)
    win.setUpdatesEnabled(False)
    y = range(n)
    x = range(n)
    plt.showGrid(x=True, y=True)
    empty_pen = pg.mkPen((0, 0, 0, 0))
    brush = pg.mkBrush((255, 255, 255))
    for i1 in range(n):
        for i0 in range(n):
            print("i1, i0 =", i1, i0)
            rect = pg.QtGui.QGraphicsRectItem(i0, i1, 0.5, 0.5)
            rect.setPen(empty_pen)
            rect.setBrush(brush)
            plt.addItem(rect)
    pg.QtGui.QApplication.exec_()

f(40)
Add
plt.disableAutoRange()
before drawing and
plt.autoRange()
after it. See Plotting large arrays in pyqtgraph
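Applied to the code above, the suggestion looks like this (a sketch; everything else in f stays unchanged):

plt.disableAutoRange()            # stop re-ranging on every addItem
for i1 in range(n):
    for i0 in range(n):
        rect = pg.QtGui.QGraphicsRectItem(i0, i1, 0.5, 0.5)
        rect.setPen(empty_pen)
        rect.setBrush(brush)
        plt.addItem(rect)
plt.autoRange()                   # one range update after all rects exist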
I have this Python code:
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as axes3d
from matplotlib import cm

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Plot solid of revolution along x-axis
def sor_x(ll, ul):
    u = np.linspace(ll, ul, 60)
    v = np.linspace(0, 2 * np.pi, 60)
    U, V = np.meshgrid(u, v)
    X = U
    Y = (U**2) * np.cos(V)
    Z = (U**2) * np.sin(V)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    ax.plot_surface(X, Y, Z, cmap=plt.cm.YlGnBu_r)

if __name__ == '__main__':
    ll, ul = 0, 1
    sor_x(ll, ul)
    plt.show()
This plots the solid of revolution of the function y = x**2 around the x-axis. Now I have to change this into a 3D animation like the one below.
The code for this animation in Mathematica is:
f[r_, ϕ_, z_] := {(2 + Tan[z])Cos[ϕ], (2 + Cos[z]) Sin[ϕ], z}
vase[α_] :=
ParametricPlot3D[f[r, ϕ, z], {z, 0, 2 Pi}, {ϕ, 0, α},
AspectRatio -> Automatic, PlotRange -> {{-3, 3}, {-3, 3}, {0, 6}}];
animation = Table[
vase[ϕ],
{ϕ, 0.1, 2π, π/12}];
Export["rotationskoerper_animation.gif", animation,
ConversionOptions -> {"AnimationDisplayTime" -> 0.1, "Loop" -> True},
ImageSize -> {1000, 1000}]
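A matplotlib counterpart of that Mathematica loop can be sketched with FuncAnimation: redraw the surface each frame while the revolution angle sweeps from 0 to 2*pi. This is a minimal sketch built on the sor_x code above; the frame count, axis limits and interval are arbitrary choices:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

def draw_partial_revolution(angle, ll=0, ul=1):
    # Revolve y = x**2 around the x-axis, but only up to `angle` radians
    u = np.linspace(ll, ul, 60)
    v = np.linspace(0, max(angle, 1e-3), 60)
    U, V = np.meshgrid(u, v)
    X = U
    Y = (U**2) * np.cos(V)
    Z = (U**2) * np.sin(V)
    ax.clear()
    ax.set_xlim(ll, ul)
    ax.set_ylim(-1, 1)
    ax.set_zlim(-1, 1)
    ax.plot_surface(X, Y, Z, cmap=plt.cm.YlGnBu_r)

frames = np.linspace(0.1, 2 * np.pi, 24)   # roughly Mathematica's pi/12 steps
anim = animation.FuncAnimation(fig, draw_partial_revolution, frames=frames, interval=100)
anim.save('rotationskoerper_animation.gif', writer='pillow')
plt.show()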