PyTorch RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed - torch

I’m trying to create a basic binary classifier in PyTorch that classifies whether my player plays on the right or the left side in the game Pong. The input is a 1x42x42 image and the label is my player's side (right = 1 or left = 2). The code:
class Net(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

net = Net(42 * 42, 100, 2)

# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer_net = torch.optim.Adam(net.parameters(), 0.001)

net.train()
while True:
    state = get_game_img()
    state = torch.from_numpy(state)

    # right = 1, left = 2
    current_side = get_player_side()
    target = torch.LongTensor(current_side)

    x = Variable(state.view(-1, 42 * 42))
    y = Variable(target)

    optimizer_net.zero_grad()
    y_pred = net(x)
    loss = criterion(y_pred, y)
    loss.backward()
    optimizer.step()
The error I get:
File "train.py", line 109, in train
loss = criterion(y_pred, y)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/modules/module.py", line 206, in __call__
result = self.forward(*input, **kwargs)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/modules/loss.py", line 321, in forward
self.weight, self.size_average)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/functional.py", line 533, in cross_entropy
return nll_loss(log_softmax(input), target, weight, size_average)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/functional.py", line 501, in nll_loss
return f(input, target)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/_functions/thnn/auto.py", line 41, in forward
output, *self.additional_args)
RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at /py/conda-bld/pytorch_1493676237139/work/torch/lib/THNN/generic/ClassNLLCriterion.c:57

For most deep learning libraries, targets (or labels) should start from 0.
That means your targets should lie in the range [0, n) for n classes.

It looks like PyTorch expects zero-based labels (0/1 in your case), but you are probably feeding it one-based labels (1/2).
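Not from the original answer, just a minimal sketch assuming get_player_side() returns the integer 1 or 2 as in the question: shift the labels down by one so they land in [0, 2) before building the target.
# Hypothetical fix: remap right=1 -> 0, left=2 -> 1 so that the labels
# satisfy 0 <= target < n_classes, which CrossEntropyLoss requires.
current_side = get_player_side()               # 1 or 2, as in the question
target = torch.LongTensor([current_side - 1])  # now 0 or 1
y = Variable(target)
loss = criterion(y_pred, y)                    # assertion no longer fires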

I had the same error in my program, and I realized that the problem was the number of output nodes in my neural network: it did not match the number of labels in the dataset.
The number of outputs was 1 while the number of target labels was 10; after I changed the number of outputs to 10, the error went away.
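In PyTorch terms, a minimal sketch of that second pitfall (the sizes here are illustrative, not taken from either post): the final layer has to emit one score per class for CrossEntropyLoss to index into.
import torch.nn as nn

num_classes = 10        # dataset has labels 0..9
hidden_size = 100       # illustrative hidden width

# wrong: a single output unit cannot represent 10 classes
# fc_out = nn.Linear(hidden_size, 1)

# right: one output per class, matching the label range [0, 10)
fc_out = nn.Linear(hidden_size, num_classes)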

Related

ValueError: shapes (2,1000) and (2,2,1000) not aligned: 1000 (dim 1) != 2 (dim 1)

I'm implementing an MLP to test a simple NN architecture, hoping to scale up to a bigger network with a larger dataset. My end goal is to build a working phone recognizer for TIMIT data as part of my internship.
To build the MLP, I followed the suggestions in this video: https://www.youtube.com/watch?v=Z97XGNUUx9o
and my teacher's proposal to use the following inputs:
X = np.random.rand(5,1000)
y = X[4:5,:]
The error message is the following:
ValueError Traceback (most recent call last)
Cell In [63], line 7
5 build_model()
6 mlp = MLP(1000, [1000], 1000)
----> 7 mlp.train(inputs,targets, 50, 0.1)
8 output = mlp.forward_propagate(input)
Cell In [62], line 117, in MLP.train(self, inputs, targets, epochs, learning_rate)
115 output = self.forward_propagate(input)
116 error = target - output
--> 117 self.back_propagate(error)
118 self.gradient_descent(learning_rate=1)
119 sum_error += self._mse(target,output)
Cell In [62], line 96, in MLP.back_propagate(self, error)
94 current_activations = self.activations[i]
95 current_activations_reshaped = current_activations.reshape(current_activations.shape[0], -1)
---> 96 self.derivatives[i] = np.dot(current_activations, delta)
97 error = np.dot(delta, self.weights[i].T)
98 return error
File <__array_function__ internals>:180, in dot(*args, **kwargs)
ValueError: shapes (2,1000) and (2,2,1000) not aligned: 1000 (dim 1) != 2 (dim 1)
This is the relevant code:
class MLP(object):
    def __init__(self, num_inputs=3, hidden_layers=[3, 3], num_outputs=2):
        self.num_inputs = num_inputs
        self.hidden_layers = hidden_layers
        self.num_outputs = num_outputs

        layers = [num_inputs] + hidden_layers + [num_outputs]

        weights = []
        for i in range(len(layers) - 1):
            w = np.random.rand(layers[i], layers[i + 1])
            weights.append(w)
        self.weights = weights

        activations = []
        for i in range(len(layers)):
            a = np.zeros(layers[i])
            activations.append(a)
        self.activations = activations

        derivatives = []
        for i in range(len(layers) - 1):
            d = np.zeros((layers[i], layers[i + 1]))
            derivatives.append(d)
        self.derivatives = derivatives

    def forward_propagate(self, inputs):
        activations = inputs
        self.activations[0] = inputs
        for i in range(len(self.weights)):
            net_inputs = np.dot(activations, self.weights)
            activations = self._sigmoid(net_inputs)
            self.activations[i + 1] = activations
        return activations

    def back_propagate(self, error):
        for i in reversed(range(len(self.derivatives))):
            activations = self.activations[i + 1]
            delta = error * self._sigmoid_derivative(activations)
            delta_reshaped = delta.reshape(delta.shape[0], -1).T
            current_activations = self.activations[i]
            current_activations_reshaped = current_activations.reshape(current_activations.shape[0], -1)
            self.derivatives[i] = np.dot(current_activations, delta)
            error = np.dot(delta, self.weights[i].T)
        return error

    def _sigmoid_derivative(self, x):
        return x * (1.0 - x)

    def _sigmoid(self, x):
        y = 1.0 / (1 + np.exp(-x))
        return y

    def gradient_descent(self, learning_rate):
        for i in range(len(self.weights)):
            weights = self.weights[i]
            derivatives = self.derivatives[i]
            weights += derivatives + learning_rate

    def _mse(self, target, output):
        return np.average((target - output) ** 2)

    def train(self, inputs, targets, epochs, learning_rate):
        for i in range(epochs):
            sum_error = 0
            for input, target in zip(inputs, targets):
                output = self.forward_propagate(input)
                error = target - output
                self.back_propagate(error)
                self.gradient_descent(learning_rate=1)
                sum_error += self._mse(target, output)
            print("Error: {} at epoch {}".format(sum_error / len(inputs), i))
And this is how I ran it:
if __name__ == "__main__":
X, y = load_dataset()
inputs = X
targets = y
build_model()
mlp = MLP(1000, [1000], 1000)
mlp.train(inputs,targets, 50, 0.1)
output = mlp.forward_propagate(input)
Thanks in advance!
I tried to do what the video said and set up the MLP as my teacher suggested, but I don't know how to solve the shape error.
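One observation, not from the original post: in back_propagate the reshaped 2-D arrays are computed but never used, and forward_propagate multiplies by the whole weight list instead of self.weights[i]. A sketch of how those lines were presumably meant to look (following the video's reference implementation the code is based on):
# In forward_propagate: use the i-th weight matrix, not the whole list,
# otherwise NumPy stacks the list into a 3-D array and the shapes blow up.
net_inputs = np.dot(activations, self.weights[i])

# In back_propagate: use the reshaped 2-D arrays so the product has the
# same (layers[i], layers[i+1]) shape as self.derivatives[i].
self.derivatives[i] = np.dot(current_activations_reshaped, delta_reshaped)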

Keras Error: Graph disconnected

The following code snippet is giving an error (Graph disconnected: cannot obtain value for tensor):
x = Concatenate(axis=1)(submodel_outputs)
parallel_layers = Model(inputs=embedding_layers[0].input, outputs=x)
The value of embedding_layers[0].get_input_at(0) is:
<tf.Tensor 'embedding_8_input:0' shape=(?, 100) dtype=float32>
The variable x is the concatenation of 3 models, such as:
embedding_8 (Embedding) - (None, 100, 300)
dropout_22 (Dropout) - (None, 100, 300)
skip_conv1d_22 (SkipConv1D) - (None, 100, 100)
max_pooling1d_22 (MaxPooling) - (None, 25, 100)
I've tried everything, but the error continues. What do I do to make the following line work:
parallel_layers = Model(inputs=embedding_layers[0].input, outputs=x)
This error means that your model's input and output are not connected. Please post your full model definition; that should help others help you.
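As an illustration (using tensorflow.keras and made-up layer sizes, not the original model): the tensors passed as inputs must be the very Input tensors from which outputs was built, otherwise Keras cannot trace a path between them and reports the graph as disconnected.
from tensorflow.keras.layers import Input, Embedding, Flatten, Concatenate
from tensorflow.keras.models import Model

# One shared Input feeds every branch that ends up in the concatenation.
inp = Input(shape=(100,))
branch_a = Flatten()(Embedding(1000, 300)(inp))
branch_b = Flatten()(Embedding(1000, 300)(inp))
x = Concatenate(axis=1)([branch_a, branch_b])

# `inp` is an ancestor of `x`, so Model can connect inputs to outputs.
parallel_layers = Model(inputs=inp, outputs=x)
parallel_layers.summary()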

What problems can lead to a CuDNNError with ConvolutionND

I am using three-dimensional convolution links (ConvolutionND) in my chain.
The forward computation runs smoothly (I checked the intermediate result shapes to be sure I understood the meaning of the convolution_nd parameters correctly), but during the backward pass a CuDNNError is raised with the message CUDNN_STATUS_NOT_SUPPORTED.
The cover_all parameter of ConvolutionND has its default value of False, so from the docs I don't see what could be causing the error.
Here is how I defined one of the convolution layers:
self.conv1 = chainer.links.ConvolutionND(3, 1, 4, (3, 3, 3)).to_gpu(self.GPU_1_ID)
And the call stack is
File "chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "chainer/functions/connection/convolution_nd.py", line 118, in backward
gy, W, stride=self.stride, pad=self.pad, outsize=x_shape)
File "chainer/functions/connection/deconvolution_nd.py", line 310, in deconvolution_nd
y, = func.apply(args)
File chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "chainer/functions/connection/deconvolution_nd.py", line 128, in forward
return self._forward_cudnn(x, W, b)
File "chainer/functions/connection/deconvolution_nd.py", line 105, in _forward_cudnn
tensor_core=tensor_core)
File "cupy/cudnn.pyx", line 881, in cupy.cudnn.convolution_backward_data
File "cupy/cuda/cudnn.pyx", line 975, in cupy.cuda.cudnn.convolutionBackwardData_v3
File "cupy/cuda/cudnn.pyx", line 461, in cupy.cuda.cudnn.check_status
cupy.cuda.cudnn.CuDNNError: CUDNN_STATUS_NOT_SUPPORTED
So are there special points to take care of when using ConvolutionND?
For instance, a failing piece of code is:
import chainer
from chainer import functions as F
from chainer import links as L
from chainer.backends import cuda
import numpy as np
import cupy as cp

chainer.global_config.cudnn_deterministic = False

NB_MASKS = 60
NB_FCN = 3
NB_CLASS = 17

class MFEChain(chainer.Chain):
    """docstring for Wavelphasenet."""
    def __init__(self,
                 FCN_Dim,
                 gpu_ids=None):
        super(MFEChain, self).__init__()
        self.GPU_0_ID, self.GPU_1_ID = (0, 1) if gpu_ids is None else gpu_ids
        with self.init_scope():
            self.conv1 = chainer.links.ConvolutionND(3, 1, 4, (3, 3, 3)).to_gpu(
                self.GPU_1_ID
            )

    def __call__(self, inputs):
        ### Pad input ###
        processed_sequences = []
        for convolved in inputs:
            ## Transform to sequences
            copy = convolved if self.GPU_0_ID == self.GPU_1_ID else F.copy(convolved, self.GPU_1_ID)
            processed_sequences.append(copy)

        reprocessed_sequences = []
        with cuda.get_device(self.GPU_1_ID):
            for convolved in processed_sequences:
                convolved = F.expand_dims(convolved, 0)
                convolved = F.expand_dims(convolved, 0)
                convolved = self.conv1(convolved)
                reprocessed_sequences.append(convolved)

            states = F.vstack(reprocessed_sequences)

        logits = states
        ret_logits = logits if self.GPU_0_ID == self.GPU_1_ID else F.copy(logits, self.GPU_0_ID)
        return ret_logits

def mfe_test():
    mfe = MFEChain(150)
    inputs = list(
        chainer.Variable(
            cp.random.randn(
                NB_MASKS,
                11,
                in_len,
                dtype=cp.float32
            )
        ) for in_len in [53248]
    )
    val = mfe(inputs)
    grad = cp.ones(val.shape, dtype=cp.float32)
    val.grad = grad
    val.backward()
    for i in inputs:
        print(i.grad)

if __name__ == "__main__":
    mfe_test()
cupy.cuda.cudnn.convolutionBackwardData_v3 is incompatible with some specific parameter combinations, as described in an issue on the official GitHub.
Unfortunately, that issue only dealt with deconvolution_2d.py (not deconvolution_nd.py), so I guess the decision about whether to use cuDNN went wrong in your case.
You can check your parameters by confirming the following:
Check whether a dilation parameter (!= 1) or a group parameter (!= 1) is passed to the convolution.
Print chainer.config.cudnn_deterministic, configuration.config.autotune, and configuration.config.use_cudnn_tensor_core.
Further support may be obtained by raising an issue on the official GitHub.
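If the immediate goal is just to get the backward pass running, a possible workaround (my assumption, not part of the original answer: the failure is confined to the cuDNN code path) is to disable cuDNN around the affected computation, so Chainer falls back to its plain CuPy kernels at the cost of speed.
import chainer

# mfe, inputs and cp are the objects from the question's mfe_test();
# with use_cudnn set to 'never', deconvolution_nd takes the non-cuDNN
# branch in both the forward and the backward pass.
with chainer.using_config('use_cudnn', 'never'):
    val = mfe(inputs)
    val.grad = cp.ones(val.shape, dtype=cp.float32)
    val.backward()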
The code you showed is rather complicated.
To clarify the problem, minimal code like the example below would help.
from chainer import Variable, Chain
from chainer import links as L
from chainer import functions as F
import numpy as np
from six import print_

batch_size = 1
in_channel = 1
out_channel = 1

class MyLink(Chain):
    def __init__(self):
        super(MyLink, self).__init__()
        with self.init_scope():
            self.conv = L.ConvolutionND(3, 1, 1, (3, 3, 3), nobias=True,
                                        initialW=np.ones((in_channel, out_channel, 3, 3, 3)))

    def __call__(self, x):
        return F.sum(self.conv(x))

if __name__ == "__main__":
    my_link = MyLink()
    my_link.to_gpu(0)
    batch = Variable(np.ones((batch_size, in_channel, 3, 3, 3)))
    batch.to_gpu(0)
    loss = my_link(batch)
    loss.backward()
    print_(batch.grad)

OpenMDAO Singular Entry

I'm trying to understand the OpenMDAO error messages
RuntimeError: Singular entry found in '' for column associated with state/residual 'x'.
and
RuntimeError: Singular entry found in '' for row associated with state/residual 'y'.
Can someone explain these? For example, when running the code:
from openmdao.api import Problem, Group, IndepVarComp, ImplicitComponent, ScipyOptimizeDriver, NewtonSolver, DirectSolver, view_model, view_connections

class Test1Comp(ImplicitComponent):
    def setup(self):
        self.add_input('x', 0.5)
        self.add_input('design_x', 1.0)
        self.add_output('z', val=0.0)
        self.add_output('obj')
        self.declare_partials(of='*', wrt='*', method='fd', form='central', step=1.0e-4)

    def apply_nonlinear(self, inputs, outputs, resids):
        x = inputs['x']
        design_x = inputs['design_x']
        z = outputs['z']
        resids['z'] = x*z + z - 4
        resids['obj'] = (z/5.833333 - design_x)**2

if __name__ == "__main__":
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.5))
    model.add_subsystem('d1', IndepVarComp('design_x', 1.0))
    model.add_subsystem('comp', Test1Comp())
    model.connect('p1.x', 'comp.x')
    model.connect('d1.design_x', 'comp.design_x')

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options["optimizer"] = 'SLSQP'
    model.add_design_var("d1.design_x", lower=0.5, upper=1.5)
    model.add_objective('comp.obj')

    model.nonlinear_solver = NewtonSolver()
    model.nonlinear_solver.options['iprint'] = 2
    model.nonlinear_solver.options['maxiter'] = 20
    model.linear_solver = DirectSolver()

    prob.setup()
    prob.run_model()
    print(prob['comp.z'])
I get the error message:
File "C:\Scripts/mockup_component3.py", line 46, in <module>
prob.run_model()
File "C:\Python_32\lib\site-packages\openmdao\core\problem.py", line 315, in run_model
return self.model.run_solve_nonlinear()
File "C:\Python_32\lib\site-packages\openmdao\core\system.py", line 2960, in run_solve_nonlinear
result = self._solve_nonlinear()
File "C:\Python_32\lib\site-packages\openmdao\core\group.py", line 1420, in _solve_nonlinear
result = self._nonlinear_solver.solve()
File "C:\Python_32\lib\site-packages\openmdao\solvers\solver.py", line 602, in solve
fail, abs_err, rel_err = self._run_iterator()
File "C:\Python_32\lib\site-packages\openmdao\solvers\solver.py", line 349, in _run_iterator
self._iter_execute()
File "C:\Python_32\lib\site-packages\openmdao\solvers\nonlinear\newton.py", line 234, in _iter_execute
system._linearize()
File "C:\Python_32\lib\site-packages\openmdao\core\group.py", line 1562, in _linearize
self._linear_solver._linearize()
File "C:\Python_32\lib\site-packages\openmdao\solvers\linear\direct.py", line 199, in _linearize
raise RuntimeError(format_singluar_error(err, system, mtx))
RuntimeError: Singular entry found in '' for column associated with state/residual 'comp.obj'.
I was able to solve this error by adding - outputs['obj'] to the equation for resids['obj']. But I still have little understanding of what the two error messages mean. What matrix is it that is singular? And what does it mean to have
1) a singular entry for a column?
2) a singular entry for a row?
I realized that the cause of the singular row was that I had not defined the partial derivatives for the component. I fixed this by adding a declare_partials call to the top-level system. The traceback gave me the clue that the matrix is related to the linearization.
The singular column seems related to the fact that I had two equations in apply_nonlinear but only one unknown (z).
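For reference, a minimal sketch of the corrected apply_nonlinear following the fix described in the question (subtracting outputs['obj'] so the obj state appears in its own residual and the Jacobian column for 'comp.obj' is no longer all zeros):
def apply_nonlinear(self, inputs, outputs, resids):
    x = inputs['x']
    design_x = inputs['design_x']
    z = outputs['z']

    # implicit equation for z: x*z + z - 4 = 0
    resids['z'] = x * z + z - 4

    # obj now appears in its own residual, so d(resids['obj'])/d(obj) is
    # nonzero and the linearized system has no singular column for 'obj'
    resids['obj'] = (z / 5.833333 - design_x) ** 2 - outputs['obj']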

PyQt: bind a function to dynamically generated push buttons

Using the PyQt library, I am currently having issues with binding a function to dynamically generated push buttons created in a loop.
So far I have managed to generate the buttons and bind a function to them with a lambda. The problem is that each button needs to open a different file, yet all the buttons open the same one: the last value assigned to the variable.
Any idea on how to fix this? As a last note, sorry in case of stupid mistakes; I am new to OOP and PyQt.
def searchTheStuff(self):
    found = 0
    data = MainWindow.intermediateCall()
    f1 = open('Path.txt', 'r')
    path = f1.read()
    f1.close()
    Yinc = 25
    Y = 40
    X = 20
    results = 0
    for path, dirs, files in os.walk(path, topdown=True):
        for name in files:
            if name.endswith('.txt'):
                fullpath = os.path.join(path, name)
                mail = open_close(fullpath)
                if mail.find(data) != -1 and results < 3:
                    found = 1
                    self.buttons.append(QtGui.QPushButton(self))
                    print fullpath
                    command = lambda: webbrowser.open()
                    self.buttons[-1].clicked.connect(command)
                    self.buttons[-1].setText(_translate("self", name, None))
                    self.buttons[-1].setGeometry(QtCore.QRect(X, Y, 220, 20))
                    results = results + 1
                    if results == 33:
                        X = 260
                        Y = 15
                    Y = Y + Yinc
    if found == 0:
        self.label = QtGui.QLabel(self)
        self.label.setGeometry(QtCore.QRect(20, 40, 321, 21))
        self.label.setText(_translate("self", "No templates have been found:", None))
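A sketch of the usual fix for this late-binding behaviour (my assumption: each button should open the fullpath computed in its own loop iteration): freeze the current value when the button is created, either with a lambda default argument or with functools.partial.
import functools
import webbrowser

# Inside the loop from the question, replace the connect() call with one
# of the following. The `checked` parameter absorbs the bool emitted by
# the clicked() signal.
self.buttons[-1].clicked.connect(
    lambda checked=False, path=fullpath: webbrowser.open(path)
)

# Equivalent alternative, arguably clearer:
self.buttons[-1].clicked.connect(functools.partial(webbrowser.open, fullpath))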
