Ejabberd APIs not working with python requests

I'm using ejabberd from the Docker container; I followed this link to install the ejabberd Docker container.
I tried the Administration APIs from the docs. For example, I registered a user with the API in Postman; it worked and created the user on the server. But when I send the same POST request with the Python requests library, I get a 401 error.
My ejabberd.yml file:
###
###              ejabberd configuration file
###
### The parameters used in this configuration file are explained at
###
###       https://docs.ejabberd.im/admin/configuration
###
### The configuration file is written in YAML.
### *******************************************************
### *******           !!! WARNING !!!               *******
### *******     YAML IS INDENTATION SENSITIVE       *******
### ******* MAKE SURE YOU INDENT SECTIONS CORRECTLY *******
### *******************************************************
### Refer to http://en.wikipedia.org/wiki/YAML for the brief description.
###

hosts:
  - localhost

loglevel: 4
log_rotate_size: 10485760
log_rotate_date: ""
log_rotate_count: 1
log_rate_limit: 100

certfiles:
  - /home/ejabberd/conf/server.pem

ca_file: "/home/ejabberd/conf/cacert.pem"

## When using let's encrypt to generate certificates
##certfiles:
##  - /etc/letsencrypt/live/localhost/fullchain.pem
##  - /etc/letsencrypt/live/localhost/privkey.pem
##
##ca_file: "/etc/letsencrypt/live/localhost/fullchain.pem"

listen:
  -
    port: 5222
    ip: "::"
    module: ejabberd_c2s
    max_stanza_size: 262144
    shaper: c2s_shaper
    access: c2s
    starttls_required: true
  -
    port: 5269
    ip: "::"
    module: ejabberd_s2s_in
    max_stanza_size: 524288
  -
    port: 5443
    ip: "::"
    module: ejabberd_http
    tls: true
    request_handlers:
      "/admin": ejabberd_web_admin
      "/api": mod_http_api
      "/bosh": mod_bosh
      "/captcha": ejabberd_captcha
      "/upload": mod_http_upload
      "/ws": ejabberd_http_ws
      "/oauth": ejabberd_oauth
  -
    port: 5280
    ip: "::"
    module: ejabberd_http
    request_handlers:
      "/admin": ejabberd_web_admin
  -
    port: 5281
    module: ejabberd_http
    ip: 127.0.0.1
    request_handlers:
      /api: mod_http_api
  -
    port: 1883
    ip: "::"
    module: mod_mqtt
    backlog: 1000
  ##
  ## https://docs.ejabberd.im/admin/configuration/#stun-and-turn
  ## ejabberd_stun: Handles STUN Binding requests
  ##
  ##-
  ##  port: 3478
  ##  ip: "0.0.0.0"
  ##  transport: udp
  ##  module: ejabberd_stun
  ##  use_turn: true
  ##  turn_ip: "{{ IP }}"
  ##  auth_type: user
  ##  auth_realm: "example.com"
  ##-
  ##  port: 3478
  ##  ip: "0.0.0.0"
  ##  module: ejabberd_stun
  ##  use_turn: true
  ##  turn_ip: "{{ IP }}"
  ##  auth_type: user
  ##  auth_realm: "example.com"
  ##-
  ##  port: 5349
  ##  ip: "0.0.0.0"
  ##  module: ejabberd_stun
  ##  certfile: "/home/ejabberd/conf/server.pem"
  ##  tls: true
  ##  use_turn: true
  ##  turn_ip: "{{ IP }}"
  ##  auth_type: user
  ##  auth_realm: "example.com"
  ##
  ## https://docs.ejabberd.im/admin/configuration/#sip
  ## To handle SIP (VOIP) requests:
  ##
  ##-
  ##  port: 5060
  ##  ip: "0.0.0.0"
  ##  transport: udp
  ##  module: ejabberd_sip
  ##-
  ##  port: 5060
  ##  ip: "0.0.0.0"
  ##  module: ejabberd_sip
  ##-
  ##  port: 5061
  ##  ip: "0.0.0.0"
  ##  module: ejabberd_sip
  ##  tls: true

s2s_use_starttls: optional

acl:
  local:
    user_regexp: ""
  loopback:
    ip:
      - 127.0.0.0/8
      - ::1/128
      - ::FFFF:127.0.0.1/128
  admin:
    user:
      - "admin@localhost"
  apicommands:
    user:
      - "admin@localhost"

access_rules:
  local:
    allow: local
  c2s:
    deny: blocked
    allow: all
  announce:
    allow: admin
  configure:
    allow: admin
  muc_create:
    allow: local
  pubsub_createnode:
    allow: local
  trusted_network:
    allow: loopback

api_permissions:
  "API used from localhost allows all calls":
    who:
      ip: 127.0.0.1/8
    what:
      - "*"
      - "!stop"
      - "!start"
  "console commands":
    from:
      - ejabberd_ctl
    who: all
    what: "*"
  "admin access":
    who:
      access:
        allow:
          - acl: loopback
          - acl: admin
      oauth:
        scope: "ejabberd:admin"
        access:
          allow:
            - acl: loopback
            - acl: admin
    what:
      - "*"
      - "!stop"
      - "!start"
  "public commands":
    who:
      ip: 127.0.0.1/8
    what:
      - status
      - connected_users_number
  "some playing":
    from:
      - ejabberd_ctl
      - mod_http_api
    who:
      acl: apicommands
    what: "*"

shaper:
  normal: 1000
  fast: 50000

shaper_rules:
  max_user_sessions: 10
  max_user_offline_messages:
    5000: admin
    100: all
  c2s_shaper:
    none: admin
    normal: all
  s2s_shaper: fast

max_fsm_queue: 10000

acme:
  contact: "mailto:example-admin@example.com"
  ca_url: "https://acme-staging-v02.api.letsencrypt.org/directory"

modules:
  mod_adhoc: {}
  mod_admin_extra: {}
  mod_announce:
    access: announce
  mod_avatar: {}
  mod_blocking: {}
  mod_bosh: {}
  mod_caps: {}
  mod_carboncopy: {}
  mod_client_state: {}
  mod_configure: {}
  mod_disco: {}
  mod_fail2ban: {}
  mod_http_api: {}
  mod_http_upload:
    put_url: https://@HOST@:5443/upload
  mod_last: {}
  mod_mam:
    ## Mnesia is limited to 2GB, better to use an SQL backend
    ## For small servers SQLite is a good fit and is very easy
    ## to configure. Uncomment this when you have SQL configured:
    ## db_type: sql
    assume_mam_usage: true
    default: never
  mod_mqtt: {}
  mod_muc:
    access:
      - allow
    access_admin:
      - allow: admin
    access_create: muc_create
    access_persistent: muc_create
    access_mam:
      - allow
    default_room_options:
      allow_subscription: true  # enable MucSub
      mam: false
  mod_muc_admin: {}
  mod_offline:
    access_max_user_messages: max_user_offline_messages
  mod_ping: {}
  mod_privacy: {}
  mod_private: {}
  mod_proxy65:
    access: local
    max_connections: 5
  mod_pubsub:
    access_createnode: pubsub_createnode
    plugins:
      - flat
      - pep
    force_node_config:
      ## Avoid buggy clients to make their bookmarks public
      storage:bookmarks:
        access_model: whitelist
  mod_push: {}
  mod_push_keepalive: {}
  mod_register:
    ## Only accept registration requests from the "trusted"
    ## network (see access_rules section above).
    ## Think twice before enabling registration from any
    ## address. See the Jabber SPAM Manifesto for details:
    ## https://github.com/ge0rg/jabber-spam-fighting-manifesto
    ip_access: trusted_network
  mod_roster:
    versioning: true
  mod_sip: {}
  mod_s2s_dialback: {}
  mod_shared_roster: {}
  mod_stream_mgmt:
    resend_on_timeout: if_offline
  mod_vcard: {}
  mod_vcard_xupdate: {}
  mod_version:
    show_os: false

### Local Variables:
### mode: yaml
### End:
### vim: set filetype=yaml tabstop=8
I can create the user with Postman and it works.
But when I try to create it with the requests library, it doesn't work.
api.py
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

import requests
from requests.auth import HTTPBasicAuth

url = "http://localhost:5443/api/register"
data = {
    "user": "testuser2",
    "host": "localhost",
    "password": "password"
}

# res = requests.post(url, json=data, auth=("admin", "admin_password"))
res = requests.post(url, json=data, auth=HTTPBasicAuth("admin", "root"))
print(res)
The response when I run the above script:
<Response [401]>
I have an admin user on the server with the same credentials that I passed to the auth argument of the post method.
I'm new to XMPP and I'm not sure what I'm missing here.
I'm using the latest version of the ejabberd Docker container, and I disabled SSL certificate verification while using Postman.

The full process, with all the steps that you must perform, or verify are correctly done:
Register the account "admin@localhost":
ejabberdctl register admin localhost somepass
Add the "admin#localhost" account to the "admin" ACL:
acl:
  admin:
    user:
      - "admin@localhost"
Allow the "admin" ACL to perform "register" API calls:
api_permissions:
  "admin access":
    who:
      access:
        allow:
          - acl: admin
    what:
      - "register"
In your query, set the account JID and password, not only the account username:
res = requests.post(url, json=data, auth=HTTPBasicAuth("admin@localhost", "somepass"))
Now run your program:
$ python3 api.py
<Response [200]>
Check the account was registered:
$ ejabberdctl registered_users localhost
admin
testuser2
If you try to register an account that already exists, the API responds with 409:
$ python3 api.py
<Response [409]>
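Putting it all together, here is a minimal sketch of the working client (it assumes the admin@localhost account and the API permissions shown above; host, port and password are placeholders to adjust):

import requests
from requests.auth import HTTPBasicAuth

# Use the full JID as the basic-auth username, not just the node part.
url = "http://localhost:5443/api/register"
data = {"user": "testuser2", "host": "localhost", "password": "password"}

# If the 5443 listener has "tls: true", switch to https:// and pass
# verify=False when the server uses a self-signed certificate.
res = requests.post(url, json=data, auth=HTTPBasicAuth("admin@localhost", "somepass"))

if res.status_code == 200:
    print("registered")
elif res.status_code == 409:
    print("account already exists")
else:
    print("failed:", res.status_code, res.text)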

Related

artifactory docker push error unknown: Method Not Allowed

We are using an Artifactory Pro license and installed Artifactory through Helm on Kubernetes.
When we create a Docker local repo (the repository path method) and push a Docker image,
we get a 405 Method Not Allowed error. (docker login / pull works normally.)
########## error msg
# docker push art2.bee0dev.lge.com/docker-local/hello-world
e07ee1baac5f: Pushing [==================================================>] 14.85kB
unknown: Method Not Allowed
##########
We are using an HAProxy load balancer for TLS termination in front of the NGINX Ingress Controller.
(The NGINX ingress controller's HTTP NodePort is 31071.)
Please help us solve this problem.
The Artifactory and HAProxy settings are as follows.
########## value.yaml
global:
  joinKeySecretName: "artbee-stg-joinkey-secret"
  masterKeySecretName: "artbee-stg-masterkey-secret"
  storageClass: "sa-stg-netapp8300-bee-blk-nonretain"
ingress:
  enabled: true
  defaultBackend:
    enabled: false
  hosts: ["art2.bee0dev.lge.com"]
  routerPath: /
  artifactoryPath: /artifactory/
  className: ""
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_pass_header Server;
      proxy_set_header X-JFrog-Override-Base-Url https://art2.bee0dev.lge.com;
  labels: {}
  tls: []
  additionalRules: []
## Artifactory license.
artifactory:
  name: artifactory
  replicaCount: 1
  image:
    registry: releases-docker.jfrog.io
    repository: jfrog/artifactory-pro
    # tag:
    pullPolicy: IfNotPresent
  labels: {}
  updateStrategy:
    type: RollingUpdate
  migration:
    enabled: false
  customInitContainersBegin: |
    - name: "init-mount-permission-setup"
      image: "{{ .Values.initContainerImage }}"
      imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
      securityContext:
        runAsUser: 0
        runAsGroup: 0
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - NET_RAW
      command:
        - 'bash'
        - '-c'
        - if [ $(ls -la /var/opt/jfrog | grep artifactory | awk -F' ' '{print $3$4}') == 'rootroot' ]; then
            echo "mount permission=> root:root";
            echo "change mount permission to 1030:1030 " {{ .Values.artifactory.persistence.mountPath }};
            chown -R 1030:1030 {{ .Values.artifactory.persistence.mountPath }};
          else
            echo "already set. No change required.";
            ls -la {{ .Values.artifactory.persistence.mountPath }};
          fi
      volumeMounts:
        - mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
          name: artifactory-volume
  database:
    maxOpenConnections: 80
  tomcat:
    maintenanceConnector:
      port: 8091
    connector:
      maxThreads: 200
      sendReasonPhrase: false
      extraConfig: 'acceptCount="100"'
  customPersistentVolumeClaim: {}
  license:
    ## licenseKey is the license key in plain text. Use either this or the license.secret setting
    licenseKey: "???"
    secret:
    dataKey:
  resources:
    requests:
      memory: "2Gi"
      cpu: "1"
    limits:
      memory: "20Gi"
      cpu: "8"
  javaOpts:
    xms: "1g"
    xmx: "12g"
  admin:
    ip: "127.0.0.1"
    username: "admin"
    password: "!swiit123"
    secret:
    dataKey:
  service:
    name: artifactory
    type: ClusterIP
    loadBalancerSourceRanges: []
    annotations: {}
  persistence:
    mountPath: "/var/opt/jfrog/artifactory"
    enabled: true
    accessMode: ReadWriteOnce
    size: 100Gi
    type: file-system
    storageClassName: "sa-stg-netapp8300-bee-blk-nonretain"
nginx:
  enabled: false
##########
########## haproxy config
frontend cto-stage-http-frontend
    bind 10.185.60.75:80
    bind 10.185.60.76:80
    bind 10.185.60.201:80
    bind 10.185.60.75:443 ssl crt /etc/haproxy/ssl/bee0dev.lge.com.pem ssl-min-ver TLSv1.2 ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256
    bind 10.185.60.76:443 ssl crt /etc/haproxy/ssl/bee0dev.lge.com.pem ssl-min-ver TLSv1.2 ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256
    bind 10.185.60.201:443 ssl crt /etc/haproxy/ssl/bee0dev.lge.com.pem ssl-min-ver TLSv1.2 ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256
    mode http
    option forwardfor
    option accept-invalid-http-request
    acl k8s-cto-stage hdr_end(host) -i -f /etc/haproxy/web-ide/cto-stage
    use_backend k8s-cto-stage-http if k8s-cto-stage

backend k8s-cto-stage-http
    mode http
    redirect scheme https if !{ ssl_fc }
    option tcp-check
    balance roundrobin
    server lgestgbee04v 10.185.60.78:31071 check fall 3 rise 2
##########
The request doesn't seem to be landing at the correct endpoint. Remove the semicolon from the docker command and retry. Instead of:
docker push art2.bee0dev.lge.com;/docker-local/hello-world
execute it like this:
docker push art2.bee0dev.lge.com/docker-local/hello-world
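If the push still fails after that, it can help to confirm the repository path actually reaches Artifactory's Docker registry rather than the ingress default backend. A small probe of the Docker Registry HTTP API v2 base endpoint (a sketch; the host is taken from the post above, the credentials are placeholders):

import requests

# /v2/ is the Docker Registry API version-check endpoint: a 200 or 401
# means a registry answered; an HTML 404/405 suggests the request was
# routed somewhere that is not the registry.
res = requests.get(
    "https://art2.bee0dev.lge.com/v2/",
    auth=("admin", "<password>"),  # placeholder credentials
    timeout=10,
)
print(res.status_code, res.headers.get("Content-Type"))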

Basic auth is not working for Traefik v2.1

My issue is that I cannot set up basic authentication for my frontend app through Traefik.
This is how I have configured Traefik:
traefik.yml
global:
  checkNewVersion: true
  sendAnonymousUsage: false

entryPoints:
  https:
    address: :443
  http:
    address: :80
  traefik:
    address: :8080

tls:
  options:
    foo:
      minVersion: VersionTLS12
      cipherSuites:
        - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
        - "TLS_RSA_WITH_AES_256_GCM_SHA384"

providers:
  providersThrottleDuration: 2s
  docker:
    watch: true
    endpoint: unix:///var/run/docker.sock
    exposedByDefault: false
    network: web

api:
  insecure: true
  dashboard: true

log:
  level: INFO

certificatesResolvers:
  default:
    acme:
      storage: /acme.json
      httpChallenge:
        entryPoint: http
docker-compose.yml
version: '3'

services:
  traefik:
    image: traefik:v2.0
    restart: always
    ports:
      - "80:80"
      - "443:443"
      - "8080:8080"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/srv/traefik/traefik.yml:/etc/traefik/traefik.yml"
      - "/srv/traefik/acme.json:/acme.json"
    networks:
      - web

networks:
  web:
    external: true
And here is where my frontend app runs as a Traefik provider, with my basic auth label:
version: '3.7'

services:
  frontend:
    image: git.xxxx.com:7000/dockerregistry/registry/xxxx
    restart: "always"
    networks:
      - web
    volumes:
      - "/srv/config/api.js:/var/www/htdocs/api.js"
      - "/srv/efs/workspace:/var/www/htdocs/stock"
    labels:
      - traefik.enable=true
      - traefik.http.routers.frontend-http.rule=Host(`test.xxxx.com`)
      - traefik.http.routers.frontend-http.service=frontend
      - traefik.http.routers.frontend-http.entrypoints=http
      - traefik.http.routers.frontend.tls=true
      - traefik.http.routers.frontend.tls.certresolver=default
      - traefik.http.routers.frontend.entrypoints=http
      - traefik.http.routers.frontend.rule=Host(`test.xxxx.com`)
      - traefik.http.routers.frontend.service=frontend
      - traefik.http.middlewares.frontend.basicAuth.users=test:$$2y$$05$$c45HvbP0Sq9EzcfaXiGNsuuWMfPhyoFZVYgiTylpMMLtJY2nP1P6m
      - traefik.http.services.frontend.loadbalancer.server.port=8080

networks:
  web:
    external: true
I never get the login prompt, so I'm wondering if I'm missing a container label for this.
Thanks in advance! Joaquin
Firstly, the labels should be in quotation marks ("").
Secondly, I think you are missing a label on the frontend app.
Basic auth takes two steps, defining the middleware and attaching it to the router, and should look like this:
- "traefik.http.routers.frontend.middlewares=frontend-auth"
- "traefik.http.middlewares.frontend-auth.basicauth.users=test:$$2y$$05$$c45HvbP0Sq9EzcfaXiGNsuuWMfPhyoFZVYgiTylpMMLtJY2nP1P6m"
In your Docker Compose file, don't add the "middlewares" label for Traefik; instead do it in a traefik.yml file using the providers.file option, where you define the routers, services, middlewares, etc. In that "providers file" you set middlewares under http.routers.traefik. This may sound confusing at the beginning, but it is not that hard, trust me.
Let's do a YAML case (you can convert it to "TOML" here).
This example assumes you have a Docker Compose file specifically for Traefik. I haven't tried using the same Docker Compose file with any other services in it (like Wordpress, databases or whatever), since I already have a different path for those files.
docker-compose.yml
version: '3.1'

services:
  reverse-proxy:
    image: traefik:v2.4
    [ ... ]
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      # Map the dynamic conf into the container
      - ./traefik/config.yml:/etc/traefik/config.yml:ro
      # Map the static conf into the container
      - ./traefik/traefik.yml:/etc/traefik/traefik.yml:ro
    # Note you don't use "traefik.http.routers.<service>.middlewares etc." here
    [ ... ]
In this case I keep the config files for Traefik in ./traefik (relative to the docker-compose.yml file).
./traefik/config.yml
http:
  routers:
    traefik:
      middlewares: "basicauth"
      [ ... ]
  middlewares:
    basicauth:
      basicAuth:
        removeHeader: true
        users:
          - <user>:<password>
          # password should be generated using `htpasswd` (md5, sha1 or bcrypt)
[ ... ]
Here you can set the basicauth name as you wish (since that's the middleware name you'll see in the Dashboard), so you could do:
http:
  routers:
    traefik:
      middlewares: "super-dashboard-auth"
      [ ... ]
  middlewares:
    super-dashboard-auth:
      basicAuth:
        removeHeader: true
        users:
          - <user>:<password>
          # password should be generated using `htpasswd` (md5, sha1 or bcrypt)
[ ... ]
Note that basicAuth must remain as is. Also, here you don't need to use the "double dollar" method to escape it (as in the label approach), so after creating the user password you should enter it exactly as htpasswd created it.
# BAD
user:$$2y$$10$$nRLqyZT.64JI/CD/ym65UGDn8HaY0D6CBTxhe6JXf9u4wi5bEMdh.
# GOOD
user:$2y$10$nRLqyZT.64JI/CD/ym65UGDn8HaY0D6CBTxhe6JXf9u4wi5bEMdh.
Of course you may want to get this data from an .env file rather than hardcode those strings; in that case you need to pass the environment variable from the docker-compose.yml using environment, like this:
services:
  reverse-proxy:
    image: traefik:v2.4
    container_name: traefik
    [ ... ]
    environment:
      TRAEFIK_DASHBOARD_USER: "${TRAEFIK_DASHBOARD_USER}"
      TRAEFIK_DASHBOARD_PWD: "${TRAEFIK_DASHBOARD_PWD}"
      # And any other env. var. you may need
    [ ... ]
and use it like this in your traefik/config.yml file:
[ ... ]
  middlewares:
    super-dashboard-auth:
      basicAuth:
        removeHeader: true
        users:
          - "{{env "TRAEFIK_DASHBOARD_USER"}}:{{env "TRAEFIK_DASHBOARD_PWD"}}"
[ ... ]
After that, include the previous file in providers.file.filename:
./traefik/traefik.yml
[ ... ]
api:
  dashboard: true
  insecure: false

providers:
  docker:
    endpoint: "unix:///var/run/docker.sock"
    [ ... ]
  file:
    filename: /etc/traefik/config.yml
    watch: true
[ ... ]
And then simply docker-compose up -d
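Once the stack is up, a quick smoke test (hypothetical hostname; substitute the Host rule of your router) confirms the middleware first challenges and then accepts credentials:

import requests

url = "https://traefik.example.com"  # hypothetical host matched by the router

# Without credentials the basicAuth middleware should answer 401.
# (Pass verify=False here if the certificate is self-signed.)
print(requests.get(url).status_code)

# With the user defined in the middleware it should pass through (200).
print(requests.get(url, auth=("test", "secret")).status_code)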
I configure it this way:
Generate a password with apache2-utils, e.g.:
htpasswd -nb admin secure_password
Set up traefik.toml:
[entryPoints]
  [entryPoints.web]
    address = ":80"
    [entryPoints.web.http.redirections.entryPoint]
      to = "websecure"
      scheme = "https"
  [entryPoints.websecure]
    address = ":443"

[api]
  dashboard = true

[certificatesResolvers.lets-encrypt.acme]
  email = "your_email@your_domain"
  storage = "acme.json"
  [certificatesResolvers.lets-encrypt.acme.tlsChallenge]

[providers.docker]
  watch = true
  network = "web"

[providers.file]
  filename = "traefik_dynamic.toml"
Set up traefik_dynamic.toml:
[http.middlewares.simpleAuth.basicAuth]
  users = [
    "admin:$apr1$ruca84Hq$mbjdMZBAG.KWn7vfN/SNK/"
  ]

[http.routers.api]
  rule = "Host(`monitor.your_domain`)"
  entrypoints = ["websecure"]
  middlewares = ["simpleAuth"]
  service = "api@internal"
  [http.routers.api.tls]
    certResolver = "lets-encrypt"
Set up the Traefik service:
services:
  reverse-proxy:
    image: traefik:v2.3
    restart: always
    command:
      - --api.insecure=true
      - --providers.docker
    ports:
      - "80:80"
      - "443:443"
    networks:
      - web
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./traefik.toml:/traefik.toml
      - ./traefik_dynamic.toml:/traefik_dynamic.toml
      - ./acme.json:/acme.json
Regarding this part of the documentation: if you are using Docker labels for your settings, configure them as follows. For example:
labels:
  - "traefik.http.middlewares.foo-add-prefix.addprefix.prefix=/foo"
  - "traefik.http.routers.router1.middlewares=foo-add-prefix@docker"
I had the same issue; I was missing the provider namespace @docker in the middleware name.

Simple gRPC envoy configuration

I'm trying to set up an Envoy proxy as a gRPC front end, and can't get it to work, so I'm trying to get to as simple a test setup as possible and build from there, but I can't get that to work either. Here's what my test setup looks like:
Python server (slightly modified gRPC example code)
# greeter_server.py
from concurrent import futures
import time

import grpc

import helloworld_pb2
import helloworld_pb2_grpc

_ONE_DAY_IN_SECONDS = 60 * 60 * 24


class Greeter(helloworld_pb2_grpc.GreeterServicer):

    def SayHello(self, request, context):
        return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:8081')
    server.start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == '__main__':
    serve()
Python client (slightly modified gRPC example code)
# greeter_client.py
from __future__ import print_function

import grpc

import helloworld_pb2
import helloworld_pb2_grpc


def run():
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    with grpc.insecure_channel('localhost:9911') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
    print("Greeter client received: " + response.message)


if __name__ == '__main__':
    run()
And then my two envoy yaml files:
# envoy-hello-server.yaml
static_resources:
  listeners:
  - address:
      socket_address:
        address: 0.0.0.0
        port_value: 8811
    filter_chains:
    - filters:
      - name: envoy.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
          codec_type: auto
          stat_prefix: ingress_http
          access_log:
          - name: envoy.file_access_log
            typed_config:
              "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
              path: "/dev/stdout"
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              - match:
                  prefix: "/"
                  grpc: {}
                route:
                  cluster: hello_grpc_service
          http_filters:
          - name: envoy.router
            typed_config: {}
  clusters:
  - name: hello_grpc_service
    connect_timeout: 0.250s
    type: strict_dns
    lb_policy: round_robin
    http2_protocol_options: {}
    load_assignment:
      cluster_name: hello_grpc_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: hello_grpc_service
                port_value: 8081

admin:
  access_log_path: "/tmp/envoy_hello_server.log"
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 8881
and
# envoy-hello-client.yaml
static_resources:
  listeners:
  - address:
      socket_address:
        address: 0.0.0.0
        port_value: 9911
    filter_chains:
    - filters:
      - name: envoy.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
          codec_type: auto
          add_user_agent: true
          access_log:
          - name: envoy.file_access_log
            typed_config:
              "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
              path: "/dev/stdout"
          stat_prefix: egress_http
          common_http_protocol_options:
            idle_timeout: 0.840s
          use_remote_address: true
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - grpc
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: backend-proxy
          http_filters:
          - name: envoy.router
            typed_config: {}
  clusters:
  - name: backend-proxy
    type: logical_dns
    dns_lookup_family: V4_ONLY
    lb_policy: round_robin
    connect_timeout: 0.250s
    http_protocol_options: {}
    load_assignment:
      cluster_name: backend-proxy
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: hello_grpc_service
                port_value: 8811

admin:
  access_log_path: "/tmp/envoy_hello_client.log"
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9991
Now, what I expect this would allow is something like hello_client.py (port 9911) -> envoy (envoy-hello-client.yaml) -> envoy (envoy-hello-server.yaml) -> hello_server.py (port 8081)
Instead, what I get is an error from the python client:
$ python3 greeter_client.py
Traceback (most recent call last):
File "greeter_client.py", line 35, in <module>
run()
File "greeter_client.py", line 30, in run
response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
File "/usr/lib/python3/dist-packages/grpc/_channel.py", line 533, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/usr/lib/python3/dist-packages/grpc/_channel.py", line 467, in _end_unary_response_blocking
raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.UNIMPLEMENTED
details = ""
debug_error_string = "{"created":"@1594770575.642032812","description":"Error received from peer","file":"src/core/lib/surface/call.cc","file_line":1017,"grpc_message":"","grpc_status":12}"
>
And in the envoy client log:
[2020-07-14 16:22:10.407][16935][info][main] [external/envoy/source/server/server.cc:652] starting main dispatch loop
[2020-07-14 16:23:25.441][16935][info][runtime] [external/envoy/source/common/runtime/runtime_impl.cc:524] RTDS has finished initialization
[2020-07-14 16:23:25.441][16935][info][upstream] [external/envoy/source/common/upstream/cluster_manager_impl.cc:182] cm init: all clusters initialized
[2020-07-14 16:23:25.441][16935][info][main] [external/envoy/source/server/server.cc:631] all clusters initialized. initializing init manager
[2020-07-14 16:23:25.441][16935][info][config] [external/envoy/source/server/listener_manager_impl.cc:844] all dependencies initialized. starting workers
[2020-07-14 16:23:25.441][16935][warning][main] [external/envoy/source/server/server.cc:537] there is no configured limit to the number of allowed active connections. Set a limit via the runtime key overload.global_downstream_max_connections
[2020-07-14T23:49:35.641Z] "POST /helloworld.Greeter/SayHello HTTP/2" 200 NR 0 0 0 - "10.0.0.56" "grpc-python/1.16.1 grpc-c/6.0.0 (linux; chttp2; gao)" "aa72310a-3188-46b2-8cbf-9448b074f7ae" "localhost:9911" "-"
And nothing in the server log.
Also, weirdly, there is an almost one-second delay between when I run the Python client and when the log message shows up in the client envoy.
What am I missing to make these two scripts talk via envoy?
I know I'm a bit late; hope this helps someone. Since your gRPC server is running on the same host, you can specify the hostname as host.docker.internal (the former docker.for.mac.localhost, deprecated since Docker v18.03.0).
In your case, if you are running in a dockerized environment, you could do the following:
Envoy version: 1.13+
clusters:
- name: backend-proxy
  type: logical_dns
  dns_lookup_family: V4_ONLY
  lb_policy: round_robin
  connect_timeout: 0.250s
  http_protocol_options: {}
  load_assignment:
    cluster_name: backend-proxy
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address:
              address: host.docker.internal
              port_value: 8811
hello_grpc_service won't be resolved to an IP in a dockerized environment.
Note: you can enable Envoy's trace log level for more detailed logs.
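Separately, note the NR flag in the client envoy's access log line: it means no route matched. The client listener's virtual host only accepts the domain grpc, while the Python client sends :authority localhost:9911. A sketch of forcing the expected authority from the client (it assumes the configs above; grpc.default_authority is a standard gRPC channel argument):

import grpc

import helloworld_pb2
import helloworld_pb2_grpc

# Send ":authority: grpc" so the client envoy's virtual host
# (domains: [grpc]) matches instead of answering with NR.
with grpc.insecure_channel(
        'localhost:9911',
        options=[('grpc.default_authority', 'grpc')]) as channel:
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    print(stub.SayHello(helloworld_pb2.HelloRequest(name='you')).message)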

Argo artifacts gives error "http: server gave HTTP response to HTTPS client"

I was setting up Argo in my k8s cluster in the argo namespace.
I also installed MinIO as an artifact repository (https://github.com/argoproj/argo-workflows/blob/master/docs/configure-artifact-repository.md).
I am configuring a workflow that tries to access that non-default artifact repository:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: artifact-passing-
spec:
  entrypoint: artifact-example
  templates:
  - name: artifact-example
    steps:
    - - name: generate-artifact
        template: whalesay
    - - name: consume-artifact
        template: print-message
        arguments:
          artifacts:
          # bind message to the hello-art artifact
          # generated by the generate-artifact step
          - name: message
            from: "{{steps.generate-artifact.outputs.artifacts.hello-art}}"

  - name: whalesay
    container:
      image: docker/whalesay:latest
      command: [sh, -c]
      args: ["cowsay hello world | tee /tmp/hello_world.txt"]
    outputs:
      artifacts:
      # generate hello-art artifact from /tmp/hello_world.txt
      # artifacts can be directories as well as files
      - name: hello-art
        path: /tmp/hello_world.txt
        s3:
          endpoint: argo-artifacts-minio.argo:9000
          bucket: my-bucket
          key: /my-output-artifact.tgz
          accessKeySecret:
            name: argo-artifacts-minio
            key: accesskey
          secretKeySecret:
            name: argo-artifacts-minio
            key: secretkey

  - name: print-message
    inputs:
      artifacts:
      # unpack the message input artifact
      # and put it at /tmp/message
      - name: message
        path: /tmp/message
        s3:
          endpoint: argo-artifacts-minio.argo:9000
          bucket: my-bucket
          accessKeySecret:
            name: argo-artifacts-minio
            key: accesskey
          secretKeySecret:
            name: argo-artifacts-minio
            key: secretkey
    container:
      image: alpine:latest
      command: [sh, -c]
      args: ["cat /tmp/message"]
I created the workflow in the argo namespace with:
argo submit --watch artifact-passing-nondefault-new.yaml -n argo
But the workflow fails with an error:
STEP PODNAME DURATION MESSAGE
✖ artifact-passing-z9g64 child 'artifact-passing-z9g64-150231068' failed
└---⚠ generate-artifact artifact-passing-z9g64-150231068 12s failed to save outputs: Get https://argo-artifacts-minio.argo:9000/my-bucket/?location=: http: server gave HTTP response to HTTPS client
Can someone help me to solve this error?
Since the MinIO setup runs without TLS configured, the workflow should specify that it connects to an insecure artifact repository.
Including the field insecure: true in the s3 definition section of the workflow solves the issue:
s3:
  endpoint: argo-artifacts-minio.argo:9000
  insecure: true
  bucket: my-bucket
  key: /my-output-artifact.tgz
  accessKeySecret:
    name: argo-artifacts-minio
    key: accesskey
  secretKeySecret:
    name: argo-artifacts-minio
    key: secretkey
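To double-check that plain HTTP is indeed what the MinIO service speaks (and that HTTPS is what breaks), a small probe against MinIO's unauthenticated liveness endpoint can be run from a pod in the cluster; a sketch using the service name from above:

import requests

base = "argo-artifacts-minio.argo:9000"

# MinIO serves an unauthenticated liveness check at /minio/health/live.
print(requests.get(f"http://{base}/minio/health/live", timeout=5).status_code)  # expect 200

try:
    requests.get(f"https://{base}/minio/health/live", timeout=5)
except requests.exceptions.SSLError as err:
    print("no TLS on this endpoint:", err)  # mirrors the workflow's error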

Salt pillar setup correct but apache-formula keeps generating default configuration

I'm playing around with SaltStack and wanted to use the apache-formula from github/saltstack-formulas.
My pillar looks like the following:
top.sls
base:
  'ubuntu-xenial-salt':
    - systems.ubuntu-xenial-salt
systems/ubuntu-xenial-salt.sls
include:
  - setups.apache.prod

apache:
  sites:
    ubuntu-salt-xenial:
      enabled: True
      template_file: salt://apache/vhosts/standard.tmpl
      template_engine: jinja
      interface: '*'
      port: '80'
      exclude_listen_directive: True  # Do not add a Listen directive in httpd.conf
      ServerName: ubuntu-salt-xenial
      ServerAlias: ubuntu-salt-xenial
      ServerAdmin: minion@ubuntu-salt-xenial.com
      LogLevel: debug
      ErrorLog: /var/log/apache2/example.com-error.log
      CustomLog: /var/log/apache2/example.com-access.log
      DocumentRoot: /var/www/ubuntu-salt-xenial/
      Directory:
        default:
          Options: -Indexes +FollowSymLinks
          Require: all granted
          AllowOverride: None
setups/apache/prod.sls
include:
  - applications.apache

# ``apache`` formula configuration:
apache:
  register-site:
    # any name as an array index, and you can duplicate this section
    UNIQUE_VALUE_HERE:
      name: 'PROD'
      path: 'salt://path/to/sites-available/conf/file'
      state: 'enabled'
      # Optional - use managed file as Jinja Template
      #template: true
      #defaults:
      #  custom_var: "default value"

  modules:
    enabled:   # List modules to enable
      - rewrite
      - ssl
    disabled:  # List modules to disable
      - ldap

  # KeepAlive: Whether or not to allow persistent connections (more than
  # one request per connection). Set to "Off" to deactivate.
  keepalive: 'On'

  security:
    # can be Full | OS | Minimal | Minor | Major | Prod
    # where Full conveys the most information, and Prod the least.
    ServerTokens: Prod

  # ``apache.mod_remoteip`` formula additional configuration:
  mod_remoteip:
    RemoteIPHeader: X-Forwarded-For
    RemoteIPTrustedProxy:
      - 10.0.8.0/24
      - 127.0.0.1

  # ``apache.mod_security`` formula additional configuration:
  mod_security:
    crs_install: True
    # If not set, default distro's configuration is installed as is
    manage_config: True
    sec_rule_engine: 'On'
    sec_request_body_access: 'On'
    sec_request_body_limit: '14000000'
    sec_request_body_no_files_limit: '114002'
    sec_request_body_in_memory_limit: '114002'
    sec_request_body_limit_action: 'Reject'
    sec_pcre_match_limit: '15000'
    sec_pcre_match_limit_recursion: '15000'
    sec_debug_log_level: '3'
    rules:
      enabled:
        modsecurity_crs_10_setup.conf:
          rule_set: ''
          enabled: True
        modsecurity_crs_20_protocol_violations.conf:
          rule_set: 'base_rules'
          enabled: False
    custom_rule_files:
      # any name as an array index, and you can duplicate this section
      UNIQUE_VALUE_HERE:
        file: 'PROD'
        path: 'salt://path/to/modsecurity/custom/file'
        enabled: True
applications/apache.sls
apache:
  lookup:
    version: '2.4'
    default_charset: 'UTF-8'
    global:
      AllowEncodedSlashes: 'On'
    name_virtual_hosts:
      - interface: '*'
        port: 80
      - interface: '*'
        port: 443
Using this pillar configuration, calling highstate for my minion ubuntu-xenial-salt runs without any error; however, the resulting setup is not the one declared in the pillar.
For example:
the enabled rewrite module is not there.
the virtual-host config is not set up as in the pillar.
everything seems to be pretty much the standard configuration from example.pillar.
When I call
salt 'ubuntu-xenial-salt' pillar.data
I get the pillar data just as I modified it... I can't understand what is happening...
ubuntu-xenial-salt:
    ----------
    apache:
        ----------
        keepalive:
            On
        lookup:
            ----------
            default_charset:
                UTF-8
            global:
                ----------
                AllowEncodedSlashes:
                    On
            name_virtual_hosts:
                |_
                  ----------
                  interface:
                      *
                  port:
                      80
                |_
                  ----------
                  interface:
                      *
                  port:
                      443
            version:
                2.4
        mod_remoteip:
            ----------
            RemoteIPHeader:
                X-Forwarded-For
            RemoteIPTrustedProxy:
                - 10.0.8.0/24
                - 127.0.0.1
        mod_security:
            ----------
            crs_install:
                True
            custom_rule_files:
                ----------
                UNIQUE_VALUE_HERE:
                    ----------
                    enabled:
                        True
                    file:
                        PROD
                    path:
                        salt://path/to/modsecurity/custom/file
            manage_config:
                True
            rules:
                ----------
                enabled:
                    None
                modsecurity_crs_10_setup.conf:
                    ----------
                    enabled:
                        True
                    rule_set:
                modsecurity_crs_20_protocol_violations.conf:
                    ----------
                    enabled:
                        False
                    rule_set:
                        base_rules
            sec_debug_log_level:
                3
            sec_pcre_match_limit:
                15000
            sec_pcre_match_limit_recursion:
                15000
            sec_request_body_access:
                On
            sec_request_body_in_memory_limit:
                114002
            sec_request_body_limit:
                14000000
            sec_request_body_limit_action:
                Reject
            sec_request_body_no_files_limit:
                114002
            sec_rule_engine:
                On
        modules:
            ----------
            disabled:
                - ldap
            enabled:
                - ssl
                - rewrite
        register-site:
            ----------
            UNIQUE_VALUE_HERE:
                ----------
                name:
                    PROD
                path:
                    salt://path/to/sites-available/conf/file
                state:
                    enabled
        security:
            ----------
            ServerTokens:
                Prod
        sites:
            ----------
            ubuntu-salt-xenial:
                ----------
                CustomLog:
                    /var/log/apache2/example.com-access.log
                Directory:
                    ----------
                    default:
                        ----------
                        AllowOverride:
                            None
                        Options:
                            -Indexes +FollowSymLinks
                        Require:
                            all granted
                DocumentRoot:
                    /var/www/ubuntu-salt-xenial/
                ErrorLog:
                    /var/log/apache2/example.com-error.log
                LogLevel:
                    debug
                ServerAdmin:
                    minion@ubuntu-salt-xenial.com
                ServerAlias:
                    ubuntu-salt-xenial
                ServerName:
                    ubuntu-salt-xenial
                enabled:
                    True
                exclude_listen_directive:
                    True
                interface:
                    *
                port:
                    80
                template_engine:
                    jinja
                template_file:
                    salt://apache/vhosts/standard.tmpl
Does someone know what's happening here and can help me get it running?
