Env:
❯ sw_vers
ProductName: macOS
ProductVersion: 11.6.1
BuildVersion: 20G224
❯ minikube version
minikube version: v1.24.0
commit: 76b94fb3c4e8ac5062daf70d60cf03ddcc0a741b
I made a self-signed certificate example with an NGINX pod. I'm omitting how the certificates and keys were created, since they work on my local Mac; the files are the following:
❯ ll rootCA.*
-rw-r--r--# 1 hansuk staff 1383 1 17 12:37 rootCA.crt
-rw------- 1 hansuk staff 1874 1 17 12:02 rootCA.key
❯ ll localhost.*
-rw------- 1 hansuk staff 1704 1 17 12:09 localhost.key
-rw-r--r-- 1 hansuk staff 1383 1 17 12:37 localhost.pem
Start up the following Kubernetes definitions on minikube (kubectl apply -f nginx.yml -n cert):
apiVersion: v1
kind: Service
metadata:
  name: nginx-cert
  labels:
    app: nginx-cert
spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    name: http
    nodePort: 30080
  - port: 443
    protocol: TCP
    name: https
    nodePort: 30443
  selector:
    app: nginx-cert
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: nginx-cert
  name: nginx-cert
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-cert
  template:
    metadata:
      labels:
        app: nginx-cert
    spec:
      volumes:
      - name: secret-volume
        secret:
          secretName: nginxsecret
      - name: configmap-volume
        configMap:
          name: nginxconfmap
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
        - containerPort: 443
        volumeMounts:
        - mountPath: /etc/nginx/ssl
          name: secret-volume
        - mountPath: /etc/nginx/conf.d
          name: configmap-volume
Create the ConfigMap and Secret for the NGINX config and the TLS files, respectively:
❯ cat default.conf
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;
    listen 443 ssl;
    root /usr/share/nginx/html;
    index index.html;
    server_name locahost;
    ssl_certificate /etc/nginx/ssl/tls.crt;
    ssl_certificate_key /etc/nginx/ssl/tls.key;
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
    location / {
        try_files / =404;
    }
}
❯ kubectl create configmap nginxconfmap --from-file=default.conf -n cert
❯ kubectl create secret tls nginxsecret --key localhost.key --cert localhost.pem -n cert
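Side note: kubectl create secret tls stores the certificate and key under the fixed keys tls.crt and tls.key, which is why default.conf above references /etc/nginx/ssl/tls.crt and /etc/nginx/ssl/tls.key once the secret is mounted at /etc/nginx/ssl. The key names can be double-checked with, for example:
❯ kubectl describe secret nginxsecret -n cert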
The status of the deployment and service, and the event logs, are all OK. No failures:
❯ kubectl get all -n cert
NAME READY STATUS RESTARTS AGE
pod/nginx-cert-76f7f8748f-q2nvl 1/1 Running 0 21m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx-cert NodePort 10.110.115.36 <none> 80:30080/TCP,443:30443/TCP 21m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx-cert 1/1 1 1 21m
NAME DESIRED CURRENT READY AGE
replicaset.apps/nginx-cert-76f7f8748f 1 1 1 21m
❯ kubectl get events -n cert
22m Normal Scheduled pod/nginx-cert-76f7f8748f-q2nvl Successfully assigned cert/nginx-cert-76f7f8748f-q2nvl to minikube
22m Normal Pulling pod/nginx-cert-76f7f8748f-q2nvl Pulling image "nginx"
22m Normal Pulled pod/nginx-cert-76f7f8748f-q2nvl Successfully pulled image "nginx" in 4.345505365s
22m Normal Created pod/nginx-cert-76f7f8748f-q2nvl Created container nginx
22m Normal Started pod/nginx-cert-76f7f8748f-q2nvl Started container nginx
22m Normal SuccessfulCreate replicaset/nginx-cert-76f7f8748f Created pod: nginx-cert-76f7f8748f-q2nvl
22m Normal ScalingReplicaSet deployment/nginx-cert Scaled up replica set nginx-cert-76f7f8748f to
And the SSL handshake works against the minikube service IP:
❯ minikube service --url nginx-cert --namespace cert
http://192.168.64.2:30080
http://192.168.64.2:30443
❯ openssl s_client -CAfile rootCA.crt -connect 192.168.64.2:30443 -showcerts 2>/dev/null < /dev/null
CONNECTED(00000003)
---
Certificate chain
0 s:C = KR, ST = Seoul, L = Seocho-gu, O = Localhost, CN = localhost
i:C = KR, ST = RootState, L = RootCity, O = Root Inc., OU = Root CA, CN = Self-signed Root CA
a:PKEY: rsaEncryption, 2048 (bit); sigalg: RSA-SHA256
v:NotBefore: Jan 17 03:37:15 2022 GMT; NotAfter: Jan 17 03:37:15 2023 GMT
-----BEGIN CERTIFICATE-----
MIIDzzCCAregAwIBAgIUYMe6nRgsZwq9UPMKFgj9dt9z9FIwDQYJKoZIhvcNAQEL
BQAweDELMAkGA1UEBhMCS1IxEjAQBgNVBAgMCVJvb3RTdGF0ZTERMA8GA1UEBwwI
Um9vdENpdHkxEjAQBgNVBAoMCVJvb3QgSW5jLjEQMA4GA1UECwwHUm9vdCBDQTEc
MBoGA1UEAwwTU2VsZi1zaWduZWQgUm9vdCBDQTAeFw0yMjAxMTcwMzM3MTVaFw0y
MzAxMTcwMzM3MTVaMFkxCzAJBgNVBAYTAktSMQ4wDAYDVQQIDAVTZW91bDESMBAG
A1UEBwwJU2VvY2hvLWd1MRIwEAYDVQQKDAlMb2NhbGhvc3QxEjAQBgNVBAMMCWxv
Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALc9retjBorw
RKbuyC1SNx1U9L5LJPPbBkBh4kg98saQxtRX0Wqs5mgswWMZYL3E6yRl0gfwBkdq
t8GVQ49dgg0QO5MbG9ylfCLS9xR3WWjAgxaDJ0W96PyvTzmg295aqqHFKPSaG/nM
JyZgFJDuGoRRgwoWNqZ1pRCDLMIENDx4qgjOnQch529pM9ZRwFQSswKpn4BVkY00
/u8jIvax67kFOg70QGY16paGEg7YfSNle7BFZY0VJ8rIiBoqwRmPH6hbF/djxe5b
yzkI9eqts9bqw8eDLC28S36x62FxkdqkK8pI/rzWAKSV43TWML1zq4vM2bI+vp0k
a06GhSsS1bUCAwEAAaNwMG4wHwYDVR0jBBgwFoAUURHNpOE9zTgXgVYAGvLt94Ym
P+8wCQYDVR0TBAIwADALBgNVHQ8EBAMCBPAwFAYDVR0RBA0wC4IJbG9jYWxob3N0
MB0GA1UdDgQWBBSS1ZHHT6OHTomYIRsmhz6hMJLGnDANBgkqhkiG9w0BAQsFAAOC
AQEAWA23pCdAXtAbdSRy/p8XURCjUDdhkp3MYA+1gIDeGAQBKNipU/KEo5wO+aVk
AG6FryPZLOiwiP8nYAebUxOAqKG3fNbgT9t95BEGCip7Cxjp96KNYt73Kl/OTPjJ
KZUkHQ7MXN4vc5gmca8q+OqwCCQ/daMkzLabPQWNk3R/Hzo/mT42v8ht9/nVh1Ml
u3Dow5QPp8LESrJABLIRyRs0+Tfp+WodgekgDX5hnkkSk77+oXB49r2tZUeG/CVv
Fg8PuUNi+DWpdxX8fE/gIbSzSsamOf29+0sCIoJEPvk7lEVLt9ca0SoJ7rKn/ai4
HxwTiYo9pNcoLwhH3xdXjvbuGA==
-----END CERTIFICATE-----
---
Server certificate
subject=C = KR, ST = Seoul, L = Seocho-gu, O = Localhost, CN = localhost
issuer=C = KR, ST = RootState, L = RootCity, O = Root Inc., OU = Root CA, CN = Self-signed Root CA
---
No client certificate CA names sent
Peer signing digest: SHA256
Peer signature type: RSA-PSS
Server Temp Key: X25519, 253 bits
---
SSL handshake has read 1620 bytes and written 390 bytes
Verification: OK
---
New, TLSv1.2, Cipher is ECDHE-RSA-AES256-GCM-SHA384
Server public key is 2048 bit
Secure Renegotiation IS supported
Compression: NONE
Expansion: NONE
No ALPN negotiated
SSL-Session:
Protocol : TLSv1.2
Cipher : ECDHE-RSA-AES256-GCM-SHA384
Session-ID: EED06A09B8971ADD25F352BF55298096581A490020C88BB457AB9864B9844778
Session-ID-ctx:
Master-Key: 71C686180017B4DB5D681CCFC2C8741A7A70F7364572811AE548556A1DCAC078ABAF34B9F53885C6177C7024991B98FF
PSK identity: None
PSK identity hint: None
SRP username: None
TLS session ticket lifetime hint: 300 (seconds)
TLS session ticket:
0000 - 8b 7f 76 5a c3 4a 1f 40-43 8e 00 e7 ad 35 ae 24 ..vZ.J.#C....5.$
0010 - 5c 63 0b 0c 91 86 d0 74-ef 39 94 8a 07 fa 96 51 \c.....t.9.....Q
0020 - 58 cd 61 99 7d ae 47 87-7b 36 c1 22 89 fa 8e ca X.a.}.G.{6."....
0030 - 52 c2 04 6e 7a 9f 2d 3e-42 25 fc 1f 87 11 5f 02 R..nz.->B%...._.
0040 - 37 b3 26 d4 1f 10 97 a3-29 e8 d1 37 cd 9a a3 8e 7.&.....)..7....
0050 - 61 52 15 63 89 99 8e a8-95 58 a8 e0 12 03 c4 15 aR.c.....X......
0060 - 95 bf 1e b7 48 dc 4e fb-c4 8c 1a 17 eb 19 88 ca ....H.N.........
0070 - eb 16 b0 17 83 97 04 0d-79 ca d9 7d 80 5b 96 8d ........y..}.[..
0080 - d3 bf 6f 4f 55 6d 2f ce-0b b9 24 a9 a2 d0 5b 28 ..oOUm/...$...[(
0090 - 06 10 1d 72 52 a3 ef f1-5c e3 2a 35 83 93 a1 91 ...rR...\.*5....
00a0 - cb 94 6c 4f 3e f7 2e 8d-87 76 a5 46 29 6f 0e 5f ..lO>....v.F)o._
Start Time: 1643011123
Timeout : 7200 (sec)
Verify return code: 0 (ok)
Extended master secret: yes
---
But it fails to connect from the Chrome browser or from curl; each request gets redirected to the container's listening port (30080 -> 80, 30443 -> 443):
# for convenience, ignore the root CA for now; the problem is not there
❯ curl -k https://192.168.64.2:30443
<html>
<head><title>301 Moved Permanently</title></head>
<body>
<center><h1>301 Moved Permanently</h1></center>
<hr><center>nginx/1.21.5</center>
</body>
</html>
❯ curl -kL https://192.168.64.2:30443
curl: (7) Failed to connect to 192.168.64.2 port 443: Connection refused
❯ curl http://192.168.64.2:30080
<html>
<head><title>301 Moved Permanently</title></head>
<body>
<center><h1>301 Moved Permanently</h1></center>
<hr><center>nginx/1.21.5</center>
</body>
</html>
❯ curl -L http://192.168.64.2:30080
curl: (7) Failed to connect to 192.168.64.2 port 80: Connection refused
❯ kubectl logs nginx-cert-76f7f8748f-q2nvl -n cert
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: can not modify /etc/nginx/conf.d/default.conf (read-only file system?)
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
2022/01/24 07:33:25 [notice] 1#1: using the "epoll" event method
2022/01/24 07:33:25 [notice] 1#1: nginx/1.21.5
2022/01/24 07:33:25 [notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)
2022/01/24 07:33:25 [notice] 1#1: OS: Linux 4.19.202
2022/01/24 07:33:25 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576
2022/01/24 07:33:25 [notice] 1#1: start worker processes
2022/01/24 07:33:25 [notice] 1#1: start worker process 24
2022/01/24 07:33:25 [notice] 1#1: start worker process 25
172.17.0.1 - - [24/Jan/2022:07:44:36 +0000] "\x16\x03\x01\x01$\x01\x00\x01 \x03\x03rM&\xF2\xDD\xA3\x04(\xB0\xB2\xBF\x1CTS`\xDC\x90\x86\xF1\xEC\xBD9\x9Cz1c4\x0B\x8F\x13\xC2" 400 157 "-" "-"
172.17.0.1 - - [24/Jan/2022:07:44:48 +0000] "\x16\x03\x01\x01$\x01\x00\x01 \x03\x03'Y\xECP\x15\xD1\xE6\x1C\xC4\xB1v\xC1\x97\xEE\x04\xEBu\xDE\xF9\x04\x95\xC2V\x14\xB5\x7F\x91\x86V\x8F\x05\x83 \xBFtL\xDB\xF6\xC2\xD8\xD4\x1E]\xAE4\xCA\x03xw\x92D&\x1E\x8D\x97c\xB3,\xFD\xCD\xF47\xC4:\xF8\x00>\x13\x02\x13\x03\x13\x01\xC0,\xC00\x00\x9F\xCC\xA9\xCC\xA8\xCC\xAA\xC0+\xC0/\x00\x9E\xC0$\xC0(\x00k\xC0#\xC0'\x00g\xC0" 400 157 "-" "-"
172.17.0.1 - - [24/Jan/2022:07:45:05 +0000] "\x16\x03\x01\x01$\x01\x00\x01 \x03\x03;J\xA7\xD0\xC2\xC3\x1A\xF9LK\xC7\xA8l\xBD>*\x80A$\xA4\xFCw\x19\xE7(\xFAGc\xF6]\xF3I \xFF\x83\x84I\xC2\x8D\xD5}\xEA\x95\x8F\xDB\x8Cfq\xC6\xBA\xCF\xDDyn\xC6v\xBA\xCC\xDC\xCC\xCC/\xAF\xBC\xB2\x00>\x13\x02\x13\x03\x13\x01\xC0,\xC00\x00\x9F\xCC\xA9\xCC\xA8\xCC\xAA\xC0+\xC0/\x00\x9E\xC0$\xC0(\x00k\xC0#\xC0'\x00g\xC0" 400 157 "-" "-"
172.17.0.1 - - [24/Jan/2022:07:49:08 +0000] "GET / HTTP/1.1" 301 169 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
172.17.0.1 - - [24/Jan/2022:07:49:08 +0000] "GET / HTTP/1.1" 301 169 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
172.17.0.1 - - [24/Jan/2022:08:00:24 +0000] "GET / HTTP/1.1" 400 255 "-" "curl/7.64.1"
172.17.0.1 - - [24/Jan/2022:08:01:46 +0000] "GET / HTTP/1.1" 301 169 "-" "curl/7.64.1"
172.17.0.1 - - [24/Jan/2022:08:01:50 +0000] "GET / HTTP/1.1" 301 169 "-" "curl/7.64.1"
172.17.0.1 - - [24/Jan/2022:08:03:04 +0000] "GET / HTTP/1.1" 301 169 "-" "curl/7.64.1"
172.17.0.1 - - [24/Jan/2022:08:03:07 +0000] "GET / HTTP/1.1" 301 169 "-" "curl/7.64.1"
Actually, at first the pod responded with the requested ports, 30080 and 30443, but now it responds with 80 and 443. I have no idea when it changed or what I changed.
I changed server_name in the NGINX config from localhost to 192.168.64.2, but it makes no difference.
I completely recreated your configuration for minikube on Linux. Your Kubernetes configuration is fine. And I got the same response - 301 Moved Permanently.
After that, I changed these lines in the default.conf file:
location / {
    try_files $uri $uri/ =404;
}
And everything works for me now (the NGINX web page from the pod is reachable using curl and a browser).
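For reference, a sketch of the complete default.conf with only that change applied (everything else as in the question's config, with server_name spelled localhost):
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;
    listen 443 ssl;
    root /usr/share/nginx/html;
    index index.html;
    server_name localhost;
    ssl_certificate /etc/nginx/ssl/tls.crt;
    ssl_certificate_key /etc/nginx/ssl/tls.key;
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
    location / {
        try_files $uri $uri/ =404;
    }
}
Remember that the ConfigMap has to be recreated and the pod restarted for NGINX to pick up the new default.conf.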
I'm a newbie with Envoy.
I have been struggling for a week with the error below. My downstream (the server that requests some data/updates) receives this response:
Status code: 503
Headers:
...
Server:"envoy"
X-Envoy-Response-Code-Details:"upstream_reset_before_response_started{connection_failure}"
X-Envoy-Response-Flags: "UF,URX"
Body: upstream connect error or disconnect/reset before headers. reset reason: connection failure
On the other side, my upstream gets a disconnection (context cancelled).
And the upstream service doesn't return 503 codes at all.
All traffic is HTTP/1.
My envoy.yaml:
admin:
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }
static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: http1
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service
              domains: [ "*" ]
              response_headers_to_add: # added for debugging
              - header:
                  key: x-envoy-response-code-details
                  value: "%RESPONSE_CODE_DETAILS%"
              - header:
                  key: x-envoy-response-flags
                  value: "%RESPONSE_FLAGS%"
              routes:
              - match: # consistent routing
                  safe_regex:
                    google_re2: { }
                    regex: SOME_STRANGE_REGEX_FOR_CONSISTENT_ROUTING
                route:
                  cluster: consistent_cluster
                  hash_policy:
                    header:
                      header_name: ":path"
                      regex_rewrite:
                        pattern:
                          google_re2: { }
                          regex: SOME_STRANGE_REGEX_FOR_CONSISTENT_ROUTING
                        substitution: "\\1"
                  retry_policy: # attempt to avoid 503 errors by retries
                    retry_on: "connect-failure,refused-stream,unavailable,cancelled,resource-exhausted,retriable-status-codes"
                    retriable_status_codes: [ 503 ]
                    num_retries: 3
                    retriable_request_headers:
                    - name: ":method"
                      exact_match: "GET"
              - match: { prefix: "/" } # default routing (all routes except consistent)
                route:
                  cluster: default_cluster
                  retry_policy: # attempt to avoid 503 errors by retries
                    retry_on: "connect-failure,refused-stream,unavailable,cancelled,resource-exhausted,retriable-status-codes"
                    retriable_status_codes: [ 503 ]
                    retry_host_predicate:
                    - name: envoy.retry_host_predicates.previous_hosts
                    host_selection_retry_max_attempts: 3
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: consistent_cluster
    connect_timeout: 0.05s
    type: STRICT_DNS
    dns_refresh_rate: 1s
    dns_lookup_family: V4_ONLY
    lb_policy: MAGLEV
    health_checks:
    - timeout: 1s
      interval: 1s
      unhealthy_threshold: 1
      healthy_threshold: 1
      http_health_check:
        path: "/health"
    load_assignment:
      cluster_name: consistent_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: consistent-host
                port_value: 80
  - name: default_cluster
    connect_timeout: 0.05s
    type: STRICT_DNS
    dns_refresh_rate: 1s
    dns_lookup_family: V4_ONLY
    lb_policy: ROUND_ROBIN
    health_checks:
    - timeout: 1s
      interval: 1s
      unhealthy_threshold: 1
      healthy_threshold: 1
      http_health_check:
        path: "/health"
    outlier_detection: # attempt to avoid 503 errors by ejecting unhealthy pods
      consecutive_gateway_failure: 1
    load_assignment:
      cluster_name: default_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: default-host
                port_value: 80
I also tried to add logs:
access_log:
- name: accesslog
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
    path: /tmp/http_access.log
    log_format:
      text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %CONNECTION_TERMINATION_DETAILS% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"
  filter:
    status_code_filter:
      comparison:
        op: GE
        value:
          default_value: 500
          runtime_key: access_log.access_error.status
It gave me nothing, because %CONNECTION_TERMINATION_DETAILS% is always empty ("-"), and I had already seen the response flags in the headers of the downstream responses.
I increased connect_timeout twice (0.01s -> 0.02s -> 0.05s). It didn't help at all, and other services (reached by direct routing) work fine with a connect timeout of 10ms.
BTW, everything works fine for roughly the first 20 minutes after a redeploy, for sure.
Hope to hear your ideas on what this could be and where I should dig)
P.S.: I also sometimes get health check errors (in the logs), but I have no idea why. And everything worked well without Envoy (no errors, no timeouts): health checking, direct requests, etc.
I experienced a similar problem when starting Envoy as a Docker container. In the end, the reason was a missing --network host option in the docker run command, which led to the clusters not being visible from within Envoy's Docker container. Maybe this helps you, too?
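For illustration, the kind of invocation meant here would look roughly like this (the image tag and the mounted config path are my assumptions, not taken from the question):
docker run --rm --network host \
    -v $(pwd)/envoy.yaml:/etc/envoy/envoy.yaml:ro \
    envoyproxy/envoy:v1.21-latest
With --network host the container shares the host's network namespace, so cluster addresses that resolve from the host also resolve from inside Envoy's container.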
I am trying to connect to a ThingsBoard server using the ThingsBoard IoT Gateway. I have followed all the steps given in this link: https://thingsboard.io/docs/iot-gateway/install/source-installation/
When running tb-gateway with the command python3 ./thingsboard_gateway/tb_gateway.py,
I get the error message below:
INFO - [mqtt_connector.py] - mqtt_connector - 157 - Number of rejected mapping handlers: 0
INFO - [mqtt_connector.py] - mqtt_connector - 153 - Number of accepted serverSideRpc handlers: 2
INFO - [mqtt_connector.py] - mqtt_connector - 157 - Number of rejected serverSideRpc handlers: 0
INFO - [mqtt_connector.py] - mqtt_connector - 153 - Number of accepted connectRequests handlers: 2
INFO - [mqtt_connector.py] - mqtt_connector - 157 - Number of rejected connectRequests handlers: 0
INFO - [mqtt_connector.py] - mqtt_connector - 153 - Number of accepted disconnectRequests handlers: 2
INFO - [mqtt_connector.py] - mqtt_connector - 157 - Number of rejected disconnectRequests handlers: 0
ERROR - [mqtt_connector.py] - mqtt_connector - 130 - 'attributeRequests' section missing from configuration
INFO - [mqtt_connector.py] - mqtt_connector - 153 - Number of accepted attributeUpdates handlers: 1
INFO - [mqtt_connector.py] - mqtt_connector - 157 - Number of rejected attributeUpdates handlers: 0
INFO - [tb_gateway_service.py] - tb_gateway_service - 135 - Gateway started.
ERROR - [tb_client.py] - tb_client - 132 - [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond
Traceback (most recent call last):
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python39\lib\site-packages\thingsboard_gateway-2.5.4-py3.9.egg\thingsboard_gateway\gateway\tb_client.py", line 127, in run
self.client.connect(keepalive=keep_alive,
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python39\lib\site-packages\thingsboard_gateway-2.5.4-py3.9.egg\thingsboard_gateway\tb_client\tb_device_mqtt.py", line 167, in connect
self._client.connect(self.__host, self.__port, keepalive=keepalive)
I have made the changes below in the respective files:
mqtt.json:
"broker": {
  "name": "Default Local Broker",
  "host": "IP Address",
  "port": 1883,
  "clientId": "ThingsBoard_gateway",
  "security": {
    "type": "basic",
    "username": "**********",
    "password": "***********"
  }
}
tb_gateway.yaml:
host: "IP Address"
port: 1883
remoteShell: false
remoteConfiguration: false
security:
  accessToken: ********************
qos: 1
storage:
  type: memory
  read_records_count: 100
  max_records_count: 100000
connectors:
  -
    name: MQTT Broker Connector
    type: mqtt
    configuration: mqtt.json
Appreciate your help. Thanks in advance.
Do you have any server? I'll explain my method. For example, I have a TCP server, but I cannot connect my TCP server directly to ThingsBoard as a client. However, anyone can connect to ThingsBoard as an MQTT client. There is just one rule: you need to know your device's access token in ThingsBoard. If you know the access token, you can connect to ThingsBoard as a client using the mqtt://demo.thingsboard.io URL with { username: "ACCESS_TOKEN" } as the connection options, and publish to the v1/devices/me/telemetry topic. Example JS code running on Node.js: var client = mqtt.connect('mqtt://demo.thingsboard.io', { username: "ACCESS_TOKEN" }); and to publish: client.publish('v1/devices/me/telemetry', JSON.stringify(data));
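To sanity-check the access token without writing any code, the same publish can also be done from the command line with the Mosquitto client (assuming mosquitto-clients is installed; the host and payload are just examples):
mosquitto_pub -d -q 1 -h demo.thingsboard.io -p 1883 -t "v1/devices/me/telemetry" -u "ACCESS_TOKEN" -m '{"temperature": 25}'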
nginx.conf - http block:
map $status $loggable {
    ~^[23] 0;
    default 1;
}
Can somebody tell me why this is not working for me?
I always get the default of 1, even when the status code is 200 or 301.
For example, the following map works fine:
map $remote_addr $islocal {
    "127.0.0.1" 0;
    "192.168.178.100" 0;
    default 1;
}
The log file shows me that $status gives the correct status code.
nginx.conf - http block:
log_format main '$loggable $status - [$time_local] - $remote_addr - "$request"';
webpage.conf - server block:
access_log /share/NGinX/var/log/access.log main if=$loggable;
Log file:
1 200 - [20/Jan/2019:12:49:38 +0100] - ...
1 301 - [20/Jan/2019:13:04:43 +0100] - ...
1 500 - [20/Jan/2019:13:11:44 +0100] - ...
1 301 - [20/Jan/2019:13:48:05 +0100] - ...
1 500 - [20/Jan/2019:13:48:06 +0100] - ...
1 200 - [20/Jan/2019:13:59:55 +0100] - ...
1 200 - [20/Jan/2019:13:59:58 +0100] - ...
1 404 - [20/Jan/2019:14:28:03 +0100] - ...
I'm working on a simple HTTP crawler, but I have an issue running the code at the bottom. I request 50 URLs and get the content of only 20+ of them back. I generated a few files of 150 kB each to test the crawler. So I think the 20+ responses are limited by the bandwidth? BUT: how do I tell the Erlang snippet not to quit until the last file has been fetched? The test data server is online, so please try the code out; any hints are welcome :)
-module(crawler).

-define(BASE_URL, "http://46.4.117.69/").

-export([start/0, send_reqs/0, do_send_req/1]).

start() ->
    ibrowse:start(),
    proc_lib:spawn(?MODULE, send_reqs, []).

to_url(Id) ->
    ?BASE_URL ++ integer_to_list(Id).

fetch_ids() ->
    lists:seq(1, 50).

send_reqs() ->
    spawn_workers(fetch_ids()).

spawn_workers(Ids) ->
    lists:foreach(fun do_spawn/1, Ids).

do_spawn(Id) ->
    proc_lib:spawn_link(?MODULE, do_send_req, [Id]).

do_send_req(Id) ->
    io:format("Requesting ID ~p ... ~n", [Id]),
    Result = (catch ibrowse:send_req(to_url(Id), [], get, [], [], 10000)),
    case Result of
        {ok, Status, _H, B} ->
            io:format("OK -- ID: ~2..0w -- Status: ~p -- Content length: ~p~n", [Id, Status, length(B)]);
        Err ->
            io:format("ERROR -- ID: ~p -- Error: ~p~n", [Id, Err])
    end.
That's the output:
Requesting ID 1 ...
Requesting ID 2 ...
Requesting ID 3 ...
Requesting ID 4 ...
Requesting ID 5 ...
Requesting ID 6 ...
Requesting ID 7 ...
Requesting ID 8 ...
Requesting ID 9 ...
Requesting ID 10 ...
Requesting ID 11 ...
Requesting ID 12 ...
Requesting ID 13 ...
Requesting ID 14 ...
Requesting ID 15 ...
Requesting ID 16 ...
Requesting ID 17 ...
Requesting ID 18 ...
Requesting ID 19 ...
Requesting ID 20 ...
Requesting ID 21 ...
Requesting ID 22 ...
Requesting ID 23 ...
Requesting ID 24 ...
Requesting ID 25 ...
Requesting ID 26 ...
Requesting ID 27 ...
Requesting ID 28 ...
Requesting ID 29 ...
Requesting ID 30 ...
Requesting ID 31 ...
Requesting ID 32 ...
Requesting ID 33 ...
Requesting ID 34 ...
Requesting ID 35 ...
Requesting ID 36 ...
Requesting ID 37 ...
Requesting ID 38 ...
Requesting ID 39 ...
Requesting ID 40 ...
Requesting ID 41 ...
Requesting ID 42 ...
Requesting ID 43 ...
Requesting ID 44 ...
Requesting ID 45 ...
Requesting ID 46 ...
Requesting ID 47 ...
Requesting ID 48 ...
Requesting ID 49 ...
Requesting ID 50 ...
OK -- ID: 49 -- Status: "200" -- Content length: 150000
OK -- ID: 47 -- Status: "200" -- Content length: 150000
OK -- ID: 50 -- Status: "200" -- Content length: 150000
OK -- ID: 17 -- Status: "200" -- Content length: 150000
OK -- ID: 48 -- Status: "200" -- Content length: 150000
OK -- ID: 45 -- Status: "200" -- Content length: 150000
OK -- ID: 46 -- Status: "200" -- Content length: 150000
OK -- ID: 10 -- Status: "200" -- Content length: 150000
OK -- ID: 09 -- Status: "200" -- Content length: 150000
OK -- ID: 19 -- Status: "200" -- Content length: 150000
OK -- ID: 13 -- Status: "200" -- Content length: 150000
OK -- ID: 21 -- Status: "200" -- Content length: 150000
OK -- ID: 16 -- Status: "200" -- Content length: 150000
OK -- ID: 27 -- Status: "200" -- Content length: 150000
OK -- ID: 03 -- Status: "200" -- Content length: 150000
OK -- ID: 23 -- Status: "200" -- Content length: 150000
OK -- ID: 29 -- Status: "200" -- Content length: 150000
OK -- ID: 14 -- Status: "200" -- Content length: 150000
OK -- ID: 18 -- Status: "200" -- Content length: 150000
OK -- ID: 01 -- Status: "200" -- Content length: 150000
OK -- ID: 30 -- Status: "200" -- Content length: 150000
OK -- ID: 40 -- Status: "200" -- Content length: 150000
OK -- ID: 05 -- Status: "200" -- Content length: 150000
Update:
Thanks stemm for the hint with wait_workers. I've combined your code and mine, but I see the same behaviour :(
-module(crawler).

-define(BASE_URL, "http://46.4.117.69/").

-export([start/0, send_reqs/0, do_send_req/2]).

start() ->
    ibrowse:start(),
    proc_lib:spawn(?MODULE, send_reqs, []).

to_url(Id) ->
    ?BASE_URL ++ integer_to_list(Id).

fetch_ids() ->
    lists:seq(1, 50).

send_reqs() ->
    spawn_workers(fetch_ids()).

spawn_workers(Ids) ->
    %% collect reference to each worker
    Refs = [ do_spawn(Id) || Id <- Ids ],
    %% wait for response from each worker
    wait_workers(Refs).

wait_workers(Refs) ->
    lists:foreach(fun receive_by_ref/1, Refs).

receive_by_ref(Ref) ->
    %% receive message only from worker with specific reference
    receive
        {Ref, done} ->
            done
    end.

do_spawn(Id) ->
    Ref = make_ref(),
    proc_lib:spawn_link(?MODULE, do_send_req, [Id, {self(), Ref}]),
    Ref.

do_send_req(Id, {Pid, Ref}) ->
    io:format("Requesting ID ~p ... ~n", [Id]),
    Result = (catch ibrowse:send_req(to_url(Id), [], get, [], [], 10000)),
    case Result of
        {ok, Status, _H, B} ->
            io:format("OK -- ID: ~2..0w -- Status: ~p -- Content length: ~p~n", [Id, Status, length(B)]),
            %% send message that work is done
            Pid ! {Ref, done};
        Err ->
            io:format("ERROR -- ID: ~p -- Error: ~p~n", [Id, Err]),
            %% repeat request if there was error while fetching a page,
            do_send_req(Id, {Pid, Ref})
            %% or - if you don't want to repeat request, put there:
            %% Pid ! {Ref, done}
    end.
Running the crawler works fine for a handful of files, but then the code doesn't even fetch the files completely (each file is 150000 bytes); the crawler fetches some files only partially, see the following web server log :(
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /10 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /1 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /3 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /8 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /39 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /7 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /6 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /2 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /5 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /50 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /9 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /44 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /38 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /47 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /49 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /43 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /37 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /46 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /48 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:00 +0200] "GET /36 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /42 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /41 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /45 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /17 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /35 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /16 HTTP/1.1" 200 150000 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /15 HTTP/1.1" 200 17020 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /21 HTTP/1.1" 200 120360 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /40 HTTP/1.1" 200 117600 "-" "-"
82.114.62.14 - - [13/Sep/2012:15:17:01 +0200] "GET /34 HTTP/1.1" 200 60660 "-" "-"
Any hints are welcome. I have no clue what's going wrong there :(
So, if I've understood you correctly, you don't want to return control from the function spawn_workers until every worker has stopped (and fetched a page)? If so, you can change your code this way:
spawn_workers(Ids) ->
    %% collect reference to each worker
    Refs = [ do_spawn(Id) || Id <- Ids ],
    %% wait for response from each worker
    wait_workers(Refs).

wait_workers(Refs) ->
    lists:foreach(fun receive_by_ref/1, Refs).

receive_by_ref(Ref) ->
    %% receive message only from worker with specific reference
    receive
        {Ref, done} ->
            done
    end.

do_spawn(Id) ->
    Ref = make_ref(),
    proc_lib:spawn_link(?MODULE, do_send_req, [Id, {self(), Ref}]),
    Ref.

do_send_req(Id, {Pid, Ref}) ->
    io:format("Requesting ID ~p ... ~n", [Id]),
    Result = (catch ibrowse:send_req(to_url(Id), [], get, [], [], 10000)),
    case Result of
        {ok, Status, _H, B} ->
            io:format("OK -- ID: ~2..0w -- Status: ~p -- Content length: ~p~n", [Id, Status, length(B)]),
            %% send message that work is done
            Pid ! {Ref, done};
        Err ->
            io:format("ERROR -- ID: ~p -- Error: ~p~n", [Id, Err]),
            %% repeat request if there was error while fetching a page,
            do_send_req(Id, {Pid, Ref})
            %% or - if you don't want to repeat request, put there:
            %% Pid ! {Ref, done}
    end.
Edit:
I've noticed that your entry point (the function start) returns control without waiting for all the workers to finish their tasks (because you call spawn there). If you want to wait there too, just do a similar trick:
start() ->
    ibrowse:start(),
    Ref = make_ref(),
    proc_lib:spawn(?MODULE, send_reqs, [self(), Ref]),
    receive_by_ref(Ref).

send_reqs(Pid, Ref) ->
    spawn_workers(fetch_ids()),
    Pid ! {Ref, done}.
You can use a combination of supervisors and the queue module: spawn N fetching children; each child fetches one item from the queue and processes it, and when done notifies the parent process to continue with the next item in the queue. That way you can put a cap on the number of concurrent requests.
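A minimal sketch of that idea, using plain processes instead of an OTP supervisor (the module and function names here are made up, and crawler:do_send_req/1 stands for the fetch function from the first version of the question's module):
-module(crawler_pool).
-export([crawl/2]).

%% Fetch every Id, but never run more than MaxWorkers requests at once.
%% Blocks until all Ids have been processed.
crawl(Ids, MaxWorkers) ->
    loop(queue:from_list(Ids), 0, MaxWorkers).

%% No worker running: either we are done or we start the first one.
loop(Queue, 0, Max) ->
    case queue:out(Queue) of
        {empty, _} -> done;
        {{value, Id}, Rest} ->
            start_worker(Id),
            loop(Rest, 1, Max)
    end;
%% Room in the pool: start another worker if there is work left.
loop(Queue, Running, Max) when Running < Max ->
    case queue:out(Queue) of
        {empty, _} -> wait_one(Queue, Running, Max);
        {{value, Id}, Rest} ->
            start_worker(Id),
            loop(Rest, Running + 1, Max)
    end;
%% Pool is full: wait until one worker reports back.
loop(Queue, Running, Max) ->
    wait_one(Queue, Running, Max).

wait_one(Queue, Running, Max) ->
    receive
        {done, _Id} -> loop(Queue, Running - 1, Max)
    end.

start_worker(Id) ->
    Parent = self(),
    %% the worker does the actual fetch and then reports back
    spawn_link(fun() ->
        crawler:do_send_req(Id),
        Parent ! {done, Id}
    end).
For example, crawler_pool:crawl(lists:seq(1, 50), 5) fetches the 50 test files with at most 5 requests in flight at any time.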
If you spawn 500 requests at a time, ibrowse might get confused. Do you get any errors in the console?
See ibrowse:get_config_value/1 and ibrowse:set_config_value/2
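For example, the fifth argument to ibrowse:send_req/6 in the question's code is the options list, and (if I recall the ibrowse options correctly - worth verifying against the docs of your ibrowse version) something along these lines raises the per-host connection limit from its default:
%% allow more parallel connections to the host and disable pipelining
Options = [{max_sessions, 50}, {max_pipeline_size, 1}],
Result = (catch ibrowse:send_req(to_url(Id), [], get, [], Options, 10000)),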