urequests.get can't get an answer anymore - python-requests

I was running code on my NodeMCU ESP8266 to switch the light in my fish tank according to sunset and sunrise. Today the code froze on line 8, and I spent a few hours trying to figure it out, but I can't find a solution. I tried adding headers and proxies to
r = requests.get(url)
but with no success.
boot.py
import network

ssid = 'SSID'
password = 'PASS'

station = network.WLAN(network.STA_IF)
station.active(True)
station.connect(ssid, password)
while not station.isconnected():
    pass
main.py
import urequests as requests
import ujson, ntptime, utime
import ssd1306
from machine import Pin, PWM, SoftI2C
from utime import time, sleep

url = 'https://api.sunrise-sunset.org/json?lat=50.147240&lng=18.838700&formatted=0'
r = requests.get(url)

timezone_hour = 2  # timezone offset (hours)
Blue = PWM(Pin(14), 1000)
Red = PWM(Pin(12), 1000)
White_1 = PWM(Pin(13), 1000)
i2c = SoftI2C(scl=Pin(5), sda=Pin(4))

while True:
    ntptime.settime()
    now = utime.localtime()
    day = now[0], now[1], now[2]
    hours = now[3] + timezone_hour, now[4]
    data = ujson.loads(r.content)
    sunrise = data['results']['sunrise']
    sunset = data['results']['sunset']
    sunrise_time = int(sunrise[11:13]) + timezone_hour, int(sunrise[14:16])
    sunset_time = int(sunset[11:13]) + timezone_hour, int(sunset[14:16])
    hours_string = str(hours)
    sunrise_time_string = str(sunrise_time)
    sunset_time_string = str(sunset_time)
    oled_width = 128
    oled_height = 64
    oled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)
    if hours > sunrise_time and hours < sunset_time:
        Blue.duty(1024)
        Red.duty(1024)
        White_1.duty(1024)
        oled.fill(0)
        oled.text(hours_string, 0, 0)
        oled.text(sunrise_time_string, 0, 10)
        oled.text(sunset_time_string, 0, 20)
        oled.text("sunrise", 0, 30)
        oled.show()
        print(hours_string)
    else:
        Blue.duty(5)
        Red.duty(0)
        White_1.duty(0)
        oled.fill(0)
        oled.text(hours_string, 0, 0)
        oled.text(sunrise_time_string, 0, 10)
        oled.text(sunset_time_string, 0, 20)
        oled.text("sunset", 0, 30)
        oled.show()
    sleep(60)
    if str(sunrise[0:10]) != str(day):
        r = requests.get(url)
The error:
Traceback (most recent call last):
File "<stdin>", line 8, in <module>
File "urequests.py", line 116, in get
File "urequests.py", line 62, in request
OSError: -40
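One way to keep main.py from dying on a transient network/TLS failure is to wrap the request in a retry loop. A minimal sketch (the fetch helper is hypothetical, not from the original code):

import utime
import urequests as requests

def fetch(url, retries=5):
    # On MicroPython, OSError covers socket and SSL failures.
    for attempt in range(retries):
        try:
            return requests.get(url)  # custom headers can be passed via headers={...}
        except OSError as e:
            print('request failed:', e)
            utime.sleep(10)  # back off before retrying
    return None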

Related

TRT inference using onnx - Error Code 1: Cuda Driver (invalid resource handle)

Currently I'm trying to convert a given ONNX file to a TensorRT file, and do inference on the generated TensorRT file.
To do so, I used the TensorRT Python binding API, but
"Error Code 1: Cuda Driver (invalid resource handle)" happens and there is no helpful description of it.
Can anyone help me overcome this situation?
Thanks in advance; below is my code snippet.
def trt_export(self):
    fp_16_mode = True
    ## Obviously, I provided appropriate file names
    trt_file_name = "PATH_TO_TRT_FILE"
    onnx_name = "PATH_TO_ONNX_FILE"

    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
    EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(EXPLICIT_BATCH)
    parser = trt.OnnxParser(network, TRT_LOGGER)

    config = builder.create_builder_config()
    config.max_workspace_size = (1 << 30)
    config.set_flag(trt.BuilderFlag.FP16)
    config.default_device_type = trt.DeviceType.GPU

    profile = builder.create_optimization_profile()
    profile.set_shape('input', (1, 3, IMG_SIZE, IMG_SIZE), (12, 3, IMG_SIZE, IMG_SIZE), (32, 3, IMG_SIZE, IMG_SIZE))  # random numbers for min/opt/max batch
    config.add_optimization_profile(profile)

    with open(onnx_name, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    engine = builder.build_engine(network, config)
    buf = engine.serialize()
    with open(trt_file_name, 'wb') as f:
        f.write(buf)
def validate_trt_result(self, input_path):
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
    trt_file_name = "PATH_TO_TRT_FILE"
    trt_runtime = trt.Runtime(TRT_LOGGER)

    with open(trt_file_name, 'rb') as f:
        engine_data = f.read()
    engine = trt_runtime.deserialize_cuda_engine(engine_data)

    cuda.init()
    device = cuda.Device(0)
    ctx = device.make_context()

    inputs, outputs, bindings = [], [], []
    context = engine.create_execution_context()
    stream = cuda.Stream()

    index = 0
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * -1  # assuming one batch
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        bindings.append(int(device_mem))
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
            context.set_binding_shape(index, [1, 3, IMG_SIZE, IMG_SIZE])
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
        index += 1

    print(context.all_binding_shapes_specified)

    input_img = cv2.imread(input_path)
    input_r = cv2.resize(input_img, dsize=(256, 256))
    input_p = np.transpose(input_r, (2, 0, 1))
    input_e = np.expand_dims(input_p, axis=0)
    input_f = input_e.astype(np.float32)
    input_f /= 255

    numpy_array_input = [input_f]
    hosts = [input.host for input in inputs]
    trt_types = [trt.int32]

    for numpy_array, host, trt_type in zip(numpy_array_input, hosts, trt_types):
        numpy_array = np.asarray(numpy_array).astype(trt.nptype(trt_type)).ravel()
        print(numpy_array.shape)
        np.copyto(host, numpy_array)

    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]

    #### ERROR HAPPENS HERE ####
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    #### ERROR HAPPENS HERE ####

    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    stream.synchronize()

    print("TRT model inference result : ")
    output = outputs[0].host
    for one in output:
        print(one)

    ctx.pop()
It looks like a ctx.push() call is missing before the line with memcpy_htod_async.
Such an error can happen if TensorFlow / PyTorch is also using CUDA in parallel with TensorRT.
See the related question/answer: https://stackoverflow.com/a/73996477/5655977
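A minimal sketch of the suggested fix inside validate_trt_result (variable names as in the question; assuming pycuda's Context.push()/pop()):

ctx.push()  # make the context from device.make_context() current on this thread
try:
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    stream.synchronize()
finally:
    ctx.pop()  # always release the context, even on failure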

Pytorch RuntimeError: Expected object of device type cuda but got device type cpu for argument #1 'self' in call to _th_index_select

I am training a model that takes tokenized strings, which are passed through an embedding layer and then an LSTM. However, there seems to be an error in the input, as it does not pass through the embedding layer.
class DrugModel(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim, drug_embed_dim,
                 lstm_layer, lstm_dropout, bi_lstm, linear_dropout, char_vocab_size,
                 char_embed_dim, char_dropout, dist_fn, learning_rate,
                 binary, is_mlp, weight_decay, is_graph, g_layer,
                 g_hidden_dim, g_out_dim, g_dropout):
        super(DrugModel, self).__init__()

        # Save model configs
        self.drug_embed_dim = drug_embed_dim
        self.lstm_layer = lstm_layer
        self.char_dropout = char_dropout
        self.dist_fn = dist_fn
        self.binary = binary
        self.is_mlp = is_mlp
        self.is_graph = is_graph
        self.g_layer = g_layer
        self.g_dropout = g_dropout
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # For one-hot encoded SMILES
        if not is_mlp:
            self.char_embed = nn.Embedding(char_vocab_size, char_embed_dim,
                                           padding_idx=0)
            self.lstm = nn.LSTM(char_embed_dim, drug_embed_dim, lstm_layer,
                                bidirectional=False,
                                batch_first=True, dropout=lstm_dropout)

        # Distance function
        self.dist_fc = nn.Linear(drug_embed_dim, 1)

        if binary:
            # Binary Cross Entropy
            self.criterion = lambda x, y: y*torch.log(x) + (1-y)*torch.log(1-x)

    def init_lstm_h(self, batch_size):
        return (Variable(torch.zeros(
                    self.lstm_layer*1, batch_size, self.drug_embed_dim)).cuda(),
                Variable(torch.zeros(
                    self.lstm_layer*1, batch_size, self.drug_embed_dim)).cuda())

    # Set Siamese network as basic LSTM
    def siamese_sequence(self, inputs, length):
        # Character embedding
        inputs = inputs.long()
        inputs = inputs.cuda()
        self.char_embed = self.char_embed(inputs.to(self.device))
        c_embed = self.char_embed(inputs)
        # c_embed = F.dropout(c_embed, self.char_dropout)
        maxlen = inputs.size(1)

        if not self.training:
            # Sort c_embed
            _, sort_idx = torch.sort(length, dim=0, descending=True)
            _, unsort_idx = torch.sort(sort_idx, dim=0)
            maxlen = torch.max(length)

            # Pack padded sequence
            c_embed = c_embed.index_select(0, Variable(sort_idx).cuda())
            sorted_len = length.index_select(0, sort_idx).tolist()
            c_packed = pack_padded_sequence(c_embed, sorted_len, batch_first=True)
        else:
            c_packed = c_embed

        # Run LSTM
        init_lstm_h = self.init_lstm_h(inputs.size(0))
        lstm_out, states = self.lstm(c_packed, init_lstm_h)

        hidden = torch.transpose(states[0], 0, 1).contiguous().view(
            -1, 1 * self.drug_embed_dim)

        if not self.training:
            # Unsort hidden states
            outputs = hidden.index_select(0, Variable(unsort_idx).cuda())
        else:
            outputs = hidden
        return outputs

    def forward(self, key1, key2, targets, key1_len, key2_len, status, predict=False):
        if not self.is_mlp:
            output1 = self.siamese_sequence(key1, key1_len)
            output2 = self.siamese_sequence(key2, key2_len)
After instantiating the class I get the following error when passing the input through the embedding layer:
<ipython-input-128-432fcc7a1e39> in forward(self, key1, key2, targets, key1_len, key2_len, status, predict)
129 def forward(self, key1, key2, targets, key1_len, key2_len, status, predict = False):
130 if not self.is_mlp:
--> 131 output1 = self.siamese_sequence(key1, key1_len)
132 output2 = self.siamese_sequence(key2, key2_len)
133 set_trace()
<ipython-input-128-432fcc7a1e39> in siamese_sequence(self, inputs, length)
74 inputs = inputs.cuda()
75
---> 76 self.char_embed = self.char_embed(inputs.to(self.device))
77 set_trace()
78 c_embed = self.char_embed(inputs)
~/miniconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
539 result = self._slow_forward(*input, **kwargs)
540 else:
--> 541 result = self.forward(*input, **kwargs)
542 for hook in self._forward_hooks.values():
543 hook_result = hook(self, input, result)
~/miniconda3/lib/python3.7/site-packages/torch/nn/modules/sparse.py in forward(self, input)
112 return F.embedding(
113 input, self.weight, self.padding_idx, self.max_norm,
--> 114 self.norm_type, self.scale_grad_by_freq, self.sparse)
115
116 def extra_repr(self):
~/miniconda3/lib/python3.7/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1482 # remove once script supports set_grad_enabled
1483 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1484 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1485
1486
RuntimeError: Expected object of device type cuda but got device type cpu for argument #1 'self' in call to _th_index_select
despite the fact that the input (e.g. key1) has already been passed to cuda and has been transformed into long format:
tensor([[25, 33, 30, ..., 0, 0, 0],
[25, 7, 7, ..., 0, 0, 0],
[25, 7, 30, ..., 0, 0, 0],
...,
[25, 7, 33, ..., 0, 0, 0],
[25, 33, 41, ..., 0, 0, 0],
[25, 33, 41, ..., 0, 0, 0]], device='cuda:0')
Setting model.device to cuda does not move your inner modules to the GPU, so self.lstm, self.char_embed, and self.dist_fc are all still on the CPU. The correct way of doing it is DrugModel().to(device).
In general, it's better not to feed a device to your model and instead write it in a device-agnostic way. To make your init_lstm_h function device-agnostic, you can use something like the sketch below.
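The snippet the answer refers to was not included; here is a sketch of a device-agnostic init_lstm_h that derives the device and dtype from an existing parameter (my assumption of what was intended):

def init_lstm_h(self, batch_size):
    # Any parameter of the module tells us which device/dtype to allocate on.
    weight = next(self.parameters())
    h0 = weight.new_zeros(self.lstm_layer * 1, batch_size, self.drug_embed_dim)
    c0 = weight.new_zeros(self.lstm_layer * 1, batch_size, self.drug_embed_dim)
    return (h0, c0)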
In order to make an nn module work on CUDA:
First step: we must move the initial model to CUDA:
device = torch.device('cuda:1')
model = esm.ProteinBertModel(
    args,
    alphabet,
).to(device)
Second step: we should move the loaded model to CUDA:
bert_model = bert_model.to(device)

Librosa Display Waveplot, why are they totally blue and flat?

I followed this example of Music Synchronization with Dynamic Time Warping
However, when I do this:
import matplotlib.pyplot as plt
import librosa
import librosa.display

x_1, fs = librosa.load('musicdata/slow_melody.wav')
plt.figure(figsize=(16, 4))
librosa.display.waveplot(x_1, sr=fs)
plt.title('Slower Version $X_1$')
plt.tight_layout()
and the same for the faster version, I get this result:
I can properly extract the pitch classes of the wav files in their chroma representations, and there are no problems with the wav files.
I created the fast and slow versions of the wav files like this:
# Tone-duration sequence
melody = [('E', 0.3), ('E', 0.3), ('F', 0.3), ('G', 0.3)]
slow_melody = [('E', 0.6), ('E', 0.6), ('F', 0.6), ('G', 0.6)]

melody_output = np.array([])
# Construct the audio signal based on the chord sequence
for item in melody:
    input_tone = item[0]
    duration = item[1]
    synthesized_tone = synthesizer(tone_freq_map[input_tone], duration, amplitude, sampling_freq)
    melody_output = np.append(melody_output, synthesized_tone, axis=0)

# Write to the output file
name = 'melody' + '.wav'
write(name, sampling_freq, melody_output)

slow_melody_output = np.array([])
# Construct the audio signal based on the chord sequence
for item in slow_melody:
    input_tone = item[0]
    duration = item[1]
    synthesized_tone = synthesizer(tone_freq_map[input_tone], duration, amplitude, sampling_freq)
    slow_melody_output = np.append(slow_melody_output, synthesized_tone, axis=0)

# Write to the output file
name = 'slow_melody' + '.wav'
write(name, sampling_freq, slow_melody_output)
I get the tone frequencies from:
{
"A": 440,
"Asharp": 466,
"B": 494,
"C": 523,
"Csharp": 554,
"D": 587,
"Dsharp": 622,
"E": 659,
"F": 698,
"Fsharp": 740,
"G": 784,
"Gsharp": 831
}
The synthesizer is:
def synthesizer(freq, duration, amp=1.0, sampling_freq=44100):
    # Build the time axis
    t = np.linspace(0, duration, duration * sampling_freq)
    # Construct the audio signal
    audio = amp * np.sin(2 * np.pi * freq * t)
    return audio.astype(np.int16)
Also, the input parameters are:
duration = 2
amplitude = 10000
sampling_freq = 44100
So, why couldn't I properly visualize the waveplots? What could be the reason that they appear like this?
I believe there is something wrong in the tutorial you are following. librosa.display.waveplot() doesn't display anything by itself; you still have to call plt.show() to visualize it. From the official documentation, here's an example of its usage:
y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
y_harm, y_perc = librosa.effects.hpss(y)
librosa.display.waveplot(y_harm, sr=sr, alpha=0.25)
plt.tight_layout()
plt.show()
You can find more info here https://librosa.github.io/librosa/generated/librosa.display.waveplot.html
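Applied to the snippet from the question, the fix would look like this (same file name as in the post):

import matplotlib.pyplot as plt
import librosa
import librosa.display

x_1, fs = librosa.load('musicdata/slow_melody.wav')
plt.figure(figsize=(16, 4))
librosa.display.waveplot(x_1, sr=fs)
plt.title('Slower Version $X_1$')
plt.tight_layout()
plt.show()  # the call that actually renders the figure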

What problems can lead to a CuDNNError with ConvolutionND

I am using three-dimensional convolution links (with ConvolutionND) in my chain.
The forward computation runs smoothly (I checked the intermediate result shapes to be sure I understood the meaning of the parameters of convolution_nd correctly), but during the backward pass a CuDNNError is raised with the message CUDNN_STATUS_NOT_SUPPORTED.
The cover_all parameter of ConvolutionND has its default value of False, so from the docs I don't see what the cause of the error could be.
Here is how I defined one of the convolution layers:
self.conv1 = chainer.links.ConvolutionND(3, 1, 4, (3, 3, 3)).to_gpu(self.GPU_1_ID)
And the call stack is
File "chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "chainer/functions/connection/convolution_nd.py", line 118, in backward
gy, W, stride=self.stride, pad=self.pad, outsize=x_shape)
File "chainer/functions/connection/deconvolution_nd.py", line 310, in deconvolution_nd
y, = func.apply(args)
File "chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "chainer/functions/connection/deconvolution_nd.py", line 128, in forward
return self._forward_cudnn(x, W, b)
File "chainer/functions/connection/deconvolution_nd.py", line 105, in _forward_cudnn
tensor_core=tensor_core)
File "cupy/cudnn.pyx", line 881, in cupy.cudnn.convolution_backward_data
File "cupy/cuda/cudnn.pyx", line 975, in cupy.cuda.cudnn.convolutionBackwardData_v3
File "cupy/cuda/cudnn.pyx", line 461, in cupy.cuda.cudnn.check_status
cupy.cuda.cudnn.CuDNNError: CUDNN_STATUS_NOT_SUPPORTED
So, are there special points to take care of when using ConvolutionND?
A failing piece of code is, for instance:
import chainer
from chainer import functions as F
from chainer import links as L
from chainer.backends import cuda
import numpy as np
import cupy as cp

chainer.global_config.cudnn_deterministic = False

NB_MASKS = 60
NB_FCN = 3
NB_CLASS = 17

class MFEChain(chainer.Chain):
    """docstring for Wavelphasenet."""
    def __init__(self,
                 FCN_Dim,
                 gpu_ids=None):
        super(MFEChain, self).__init__()
        self.GPU_0_ID, self.GPU_1_ID = (0, 1) if gpu_ids is None else gpu_ids
        with self.init_scope():
            self.conv1 = chainer.links.ConvolutionND(3, 1, 4, (3, 3, 3)).to_gpu(
                self.GPU_1_ID
            )

    def __call__(self, inputs):
        ### Pad input ###
        processed_sequences = []
        for convolved in inputs:
            ## Transform to sequences)
            copy = convolved if self.GPU_0_ID == self.GPU_1_ID else F.copy(convolved, self.GPU_1_ID)
            processed_sequences.append(copy)

        reprocessed_sequences = []
        with cuda.get_device(self.GPU_1_ID):
            for convolved in processed_sequences:
                convolved = F.expand_dims(convolved, 0)
                convolved = F.expand_dims(convolved, 0)
                convolved = self.conv1(convolved)
                reprocessed_sequences.append(convolved)

            states = F.vstack(reprocessed_sequences)

        logits = states
        ret_logits = logits if self.GPU_0_ID == self.GPU_1_ID else F.copy(logits, self.GPU_0_ID)
        return ret_logits

def mfe_test():
    mfe = MFEChain(150)
    inputs = list(
        chainer.Variable(
            cp.random.randn(
                NB_MASKS,
                11,
                in_len,
                dtype=cp.float32
            )
        ) for in_len in [53248]
    )
    val = mfe(inputs)
    grad = cp.ones(val.shape, dtype=cp.float32)
    val.grad = grad
    val.backward()
    for i in inputs:
        print(i.grad)

if __name__ == "__main__":
    mfe_test()
cupy.cuda.cudnn.convolutionBackwardData_v3 is incompatible with some specific parameters, as described in an issue on the official GitHub.
Unfortunately, the issue only dealt with deconvolution_2d.py (not deconvolution_nd.py), so I guess the decision-making about whether cuDNN is used failed in your case.
You can check your parameters by confirming the following (see the sketch after this list):
- check whether a dilation parameter (!=1) or a group parameter (!=1) is passed to the convolution;
- print chainer.config.cudnn_deterministic, configuration.config.autotune, and configuration.config.use_cudnn_tensor_core.
Further support may be obtained by raising an issue on the official GitHub.
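A small sketch of the configuration check suggested above (assuming a Chainer version where these flags exist):

import chainer
from chainer import configuration

# Flags that influence whether the cuDNN code path is chosen.
print(chainer.config.cudnn_deterministic)
print(configuration.config.autotune)
print(configuration.config.use_cudnn_tensor_core)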
The code you showed is rather complicated.
To clarify the problem, the simpler code below would help.
from chainer import Variable, Chain
from chainer import links as L
from chainer import functions as F
import numpy as np
from six import print_

batch_size = 1
in_channel = 1
out_channel = 1

class MyLink(Chain):
    def __init__(self):
        super(MyLink, self).__init__()
        with self.init_scope():
            self.conv = L.ConvolutionND(
                3, 1, 1, (3, 3, 3), nobias=True,
                initialW=np.ones((out_channel, in_channel, 3, 3, 3),
                                 dtype=np.float32))

    def __call__(self, x):
        return F.sum(self.conv(x))

if __name__ == "__main__":
    my_link = MyLink()
    my_link.to_gpu(0)
    batch = Variable(np.ones((batch_size, in_channel, 3, 3, 3),
                             dtype=np.float32))
    batch.to_gpu(0)
    loss = my_link(batch)
    loss.backward()
    print_(batch.grad)

PyTorch RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed

I'm trying to create a basic binary classifier in PyTorch that classifies whether my player plays on the right or the left side in the game Pong. The input is a 1x42x42 image and the label is my player's side (right = 1 or left = 2). The code:
class Net(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

net = Net(42 * 42, 100, 2)

# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer_net = torch.optim.Adam(net.parameters(), 0.001)

net.train()
while True:
    state = get_game_img()
    state = torch.from_numpy(state)

    # right = 1, left = 2
    current_side = get_player_side()
    target = torch.LongTensor(current_side)

    x = Variable(state.view(-1, 42 * 42))
    y = Variable(target)

    optimizer_net.zero_grad()
    y_pred = net(x)
    loss = criterion(y_pred, y)
    loss.backward()
    optimizer.step()
The error I get:
File "train.py", line 109, in train
loss = criterion(y_pred, y)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/modules/module.py", line 206, in __call__
result = self.forward(*input, **kwargs)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/modules/loss.py", line 321, in forward
self.weight, self.size_average)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/functional.py", line 533, in cross_entropy
return nll_loss(log_softmax(input), target, weight, size_average)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/functional.py", line 501, in nll_loss
return f(input, target)
File "/home/shani/anaconda2/lib/python2.7/site-packages/torch/nn/_functions/thnn/auto.py", line 41, in forward
output, *self.additional_args)
RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at /py/conda-bld/pytorch_1493676237139/work/torch/lib/THNN/generic/ClassNLLCriterion.c:57
For most deep learning libraries, the target (or label) should start from 0.
That means your targets should be in the range [0, n) for n classes.
It looks like PyTorch expects to get zero-based labels (0/1 in your case) and you are probably feeding it one-based labels (1/2); a sketch of the fix follows.
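A minimal sketch applying these answers to the question's loop (get_player_side as in the post; wrapping the label in a list is my assumption about the intended tensor shape):

# right = 1, left = 2 in the game, but CrossEntropyLoss expects zero-based classes
current_side = get_player_side()
target = torch.LongTensor([current_side - 1])  # maps 1/2 -> 0/1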
I had the same error in my program, and I realized that the problem was the number of output nodes in my neural network.
In my program, the number of output nodes of my model was not equal to the number of labels in the dataset.
The number of outputs was 1 while the number of target labels was 10. When I changed the number of outputs to 10, the error disappeared.
