I want to use Nginx as a load balancer for a Consul cluster. The Consul cluster is reachable only over TLS.
Here I've tried to reverse proxy a single Consul server to check that the TLS certificates are working:
server {
    listen 80;
    listen [::]:80;
    location /consul/ {
        resolver 127.0.0.1;
        proxy_pass https://core-consul-server-1-dev.company.io:8500;
        sub_filter_types text/css application/javascript;
        sub_filter_once off;
        sub_filter /v1/ /consul_v1/;
        proxy_ssl_certificate /etc/nginx/certs/agent.crt;
        proxy_ssl_certificate_key /etc/nginx/certs/agent.key;
        proxy_ssl_trusted_certificate /etc/nginx/certs/ca.crt;
        proxy_ssl_verify on;
        proxy_ssl_verify_depth 4;
    }
}
This configuration works fine, and I can call it with:
curl http://core-proxy-server-1-dev.company.io/consul/consul_v1/agent/members
Now I've tried to define an upstream like this:
upstream consul {
    server core-consul-server-1-dev.company.io:8500;
    server core-consul-server-2-dev.company.io:8500;
}
server {
    listen 80;
    listen [::]:80;
    location /consul/ {
        resolver 127.0.0.1;
        proxy_pass https://consul;
        sub_filter_types text/css application/javascript;
        sub_filter_once off;
        sub_filter /v1/ /consul_v1/;
        proxy_ssl_certificate /etc/nginx/certs/agent.crt;
        proxy_ssl_certificate_key /etc/nginx/certs/agent.key;
        proxy_ssl_trusted_certificate /etc/nginx/certs/ca.crt;
        proxy_ssl_verify on;
        proxy_ssl_verify_depth 4;
    }
}
When calling the same curl command as before, I get the following error:
2021/04/20 08:38:59 [debug] 3364#3364: *1 X509_check_host(): no match
2021/04/20 08:38:59 [error] 3364#3364: *1 upstream SSL certificate does not match "consul" while SSL handshaking to upstream, client: 10.10.xx.xxx, server: , request: "GET /consul/consul_v1/agent/members HTTP/1.1", upstream: "https://10.10.yy.yyy:8500/consul/consul_v1/agent/members", host: "core-proxy-server-1-dev.company.io"
Then I tried it like this:
upstream consul_1 {
    server core-consul-server-1-dev.company.io:8500;
}
upstream consul_2 {
    server core-consul-server-2-dev.company.io:8500;
}
map $http_host $backend {
    core-consul-server-1-dev.company.io consul_1;
    core-consul-server-2-dev.company.io consul_2;
}
server {
    listen 80;
    listen [::]:80;
    location /consul/ {
        resolver 127.0.0.1;
        proxy_pass https://$backend;
        sub_filter_types text/css application/javascript;
        sub_filter_once off;
        sub_filter /v1/ /consul_v1/;
        proxy_ssl_certificate /etc/nginx/certs/agent.crt;
        proxy_ssl_certificate_key /etc/nginx/certs/agent.key;
        proxy_ssl_trusted_certificate /etc/nginx/certs/ca.crt;
        proxy_ssl_verify on;
        proxy_ssl_verify_depth 4;
    }
}
but also no luck:
2021/04/20 08:45:05 [error] 3588#3588: *1 invalid URL prefix in "https://", client: 10.10.xx.xxx, server: , request: "GET /consul/consul_v1/agent/members HTTP/1.1", host: "core-proxy-server-1-dev.company.io"
Any ideas? Can someone please help me?
I figured it out.
In this variation:
upstream consul {
    server core-consul-server-1-dev.company.io:8500;
    server core-consul-server-2-dev.company.io:8500;
}
server {
    listen 80;
    listen [::]:80;
    location /consul/ {
        resolver 127.0.0.1;
        proxy_pass https://consul;
        sub_filter_types text/css application/javascript;
        sub_filter_once off;
        sub_filter /v1/ /consul_v1/;
        proxy_ssl_certificate /etc/nginx/certs/agent.crt;
        proxy_ssl_certificate_key /etc/nginx/certs/agent.key;
        proxy_ssl_trusted_certificate /etc/nginx/certs/ca.crt;
        proxy_ssl_verify on;
        proxy_ssl_verify_depth 4;
    }
}
the upstream name consul also has to match the alt_names I defined in my certificate. So changing the configuration to the one below did the trick:
upstream core-consul-server-1-dev.company.io {
    server core-consul-server-1-dev.company.io:8500;
    server core-consul-server-2-dev.company.io:8500;
}
server {
    listen 80;
    listen [::]:80;
    location /consul/ {
        resolver 127.0.0.1;
        proxy_pass https://core-consul-server-1-dev.company.io;
        sub_filter_types text/css application/javascript;
        sub_filter_once off;
        sub_filter /v1/ /consul_v1/;
        proxy_ssl_certificate /etc/nginx/certs/agent.crt;
        proxy_ssl_certificate_key /etc/nginx/certs/agent.key;
        proxy_ssl_trusted_certificate /etc/nginx/certs/ca.crt;
        proxy_ssl_verify on;
        proxy_ssl_verify_depth 4;
    }
}
Later I should just add a generic name to alt_names so that I can reference the upstream as core-consul-server-dev.company.io.
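An alternative worth noting: instead of naming the upstream block after a certificate name, nginx lets you override the name used for upstream certificate verification with proxy_ssl_name (and send it as SNI with proxy_ssl_server_name). A minimal sketch, assuming the servers' certificates list core-consul-server-1-dev.company.io in their alt_names:
upstream consul {
    server core-consul-server-1-dev.company.io:8500;
    server core-consul-server-2-dev.company.io:8500;
}
server {
    listen 80;
    location /consul/ {
        proxy_pass https://consul;
        # verify the upstream certificate against this name instead of the upstream block name "consul"
        # (assumes this name is present in the certificates' alt_names)
        proxy_ssl_name core-consul-server-1-dev.company.io;
        # also send that name as SNI so the backend can pick the matching certificate
        proxy_ssl_server_name on;
        proxy_ssl_certificate /etc/nginx/certs/agent.crt;
        proxy_ssl_certificate_key /etc/nginx/certs/agent.key;
        proxy_ssl_trusted_certificate /etc/nginx/certs/ca.crt;
        proxy_ssl_verify on;
    }
}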
Related
What I am trying to do is configure NGINX to forward HTTPS requests to the corresponding containers (by hostname) running on the same machine, with TLS passthrough so that TLS termination is done at the containers. Right now I only have bw.domain.com.
Here is the nginx config I am trying to use:
stream {
    map $ssl_preread_server_name $name {
        bw.domain.com bw;
    }
    upstream bw {
        server 127.0.0.1:4443;
    }
    server {
        listen 443;
        proxy_pass $name;
        ssl_preread on;
    }
}
Here is the nginx config generated by the self-hosted Bitwarden (the upstream):
#######################################################################
# WARNING: This file is generated. Do not make changes to this file. #
# They will be overwritten on update. You can manage various settings #
# used in this file from the ./bwdata/config.yml file for your #
# installation. #
#######################################################################
server {
    listen 8080 default_server;
    listen [::]:8080 default_server;
    server_name bw.domain.com;
    return 301 https://bw.domain.com$request_uri;
}
server {
    listen 8443 ssl http2;
    listen [::]:8443 ssl http2;
    server_name bw.domain.com;
    ssl_certificate /etc/ssl/fullchain.pem;
    ssl_certificate_key /etc/ssl/privkey.pem;
    ssl_session_timeout 30m;
    ssl_session_cache shared:SSL:20m;
    ssl_session_tickets off;
    # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
    ssl_dhparam /etc/ssl/dhparam.pem;
    ssl_protocols TLSv1.2;
    ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256";
    # Enables server-side protection from BEAST attacks
    ssl_prefer_server_ciphers on;
    # OCSP Stapling ---
    # Fetch OCSP records from URL in ssl_certificate and cache them
    ssl_stapling on;
    ssl_stapling_verify on;
    # Verify chain of trust of OCSP response using Root CA and Intermediate certs
    ssl_trusted_certificate /etc/ssl/fullchain.pem;
    resolver 1.1.1.1 1.0.0.1 9.9.9.9 149.112.112.112 valid=300s;
    include /etc/nginx/security-headers-ssl.conf;
    include /etc/nginx/security-headers.conf;
    location / {
        proxy_pass http://web:5000/;
        include /etc/nginx/security-headers-ssl.conf;
        include /etc/nginx/security-headers.conf;
        add_header Content-Security-Policy "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https://haveibeenpwned.com https://www.gravatar.com; child-src 'self' https://*.duosecurity.com https://*.duofederal.com; frame-src 'self' https://*.duosecurity.com https://*.duofederal.com; connect-src 'self' wss://bw.domain.com https://api.pwnedpasswords.com https://2fa.directory; object-src 'self' blob:;";
        add_header X-Frame-Options SAMEORIGIN;
        add_header X-Robots-Tag "noindex, nofollow";
    }
    location /alive {
        return 200 'alive';
        add_header Content-Type text/plain;
    }
    location = /app-id.json {
        proxy_pass http://web:5000/app-id.json;
        include /etc/nginx/security-headers-ssl.conf;
        include /etc/nginx/security-headers.conf;
        proxy_hide_header Content-Type;
        add_header Content-Type $fido_content_type;
    }
    location = /duo-connector.html {
        proxy_pass http://web:5000/duo-connector.html;
    }
    location = /webauthn-connector.html {
        proxy_pass http://web:5000/webauthn-connector.html;
    }
    location = /webauthn-fallback-connector.html {
        proxy_pass http://web:5000/webauthn-fallback-connector.html;
    }
    location = /sso-connector.html {
        proxy_pass http://web:5000/sso-connector.html;
    }
    location /attachments/ {
        proxy_pass http://attachments:5000/;
    }
    location /api/ {
        proxy_pass http://api:5000/;
    }
    location /icons/ {
        proxy_pass http://icons:5000/;
    }
    location /notifications/ {
        proxy_pass http://notifications:5000/;
    }
    location /notifications/hub {
        proxy_pass http://notifications:5000/hub;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
    }
    location /events/ {
        proxy_pass http://events:5000/;
    }
    location /sso {
        proxy_pass http://sso:5000;
        include /etc/nginx/security-headers-ssl.conf;
        include /etc/nginx/security-headers.conf;
        add_header X-Frame-Options SAMEORIGIN;
    }
    location /identity {
        proxy_pass http://identity:5000;
        include /etc/nginx/security-headers-ssl.conf;
        include /etc/nginx/security-headers.conf;
        add_header X-Frame-Options SAMEORIGIN;
    }
    location /admin {
        proxy_pass http://admin:5000;
        include /etc/nginx/security-headers-ssl.conf;
        include /etc/nginx/security-headers.conf;
        add_header X-Frame-Options SAMEORIGIN;
    }
}
Right now, it throws this error when I use Firefox:
Secure Connection Failed
An error occurred during a connection to bw.domain.com.
PR_END_OF_FILE_ERROR
Error code: PR_END_OF_FILE_ERROR
Here are the NGINX logs:
2023/01/07 23:09:37 [error] 28#28: *1 connect() failed (111: Connection refused) while connecting to upstream, client: 172.16.1.1, server: 0.0.0.0:443, upstream: "127.0.0.1:4443", bytes from/to client:0/0, bytes from/to upstream:0/0
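For what it's worth, the "Connection refused" from 127.0.0.1:4443 means nothing was listening on that port, while the generated config above shows Bitwarden's own nginx terminating TLS on 8443. A minimal sketch of the stream block pointed at that port instead (assuming the Bitwarden container publishes 8443 on the host's loopback; adjust to the actual port mapping):
stream {
    map $ssl_preread_server_name $name {
        bw.domain.com bw;
    }
    upstream bw {
        # port where the Bitwarden nginx listens for TLS (8443 in its generated config)
        server 127.0.0.1:8443;
    }
    server {
        listen 443;
        ssl_preread on;
        proxy_pass $name;
    }
}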
This is my nginx configuration:
upstream itw_upstream {
    server 127.0.0.1:2019 fail_timeout=3s;
}
proxy_cache_path /var/cache/nginx/mpword levels=2:2 keys_zone=itw_cache:10m inactive=300d max_size=1g;
proxy_temp_path /var/cache/nginx/tmp;
#
# the reverse proxy server as www
#
server {
    listen 80;
    server_name blog.demo.com;
    root /opt/mpword/none;
    access_log /opt/mpword/log/www_access.log;
    error_log /opt/mpword/log/www_error.log;
    client_max_body_size 2m;
    gzip on;
    gzip_min_length 1024;
    gzip_buffers 4 8k;
    gzip_types text/css application/x-javascript application/json;
    sendfile on;
    location = /favicon.ico {
        proxy_pass http://source.blog.demo.com; # this is line 14
    }
    location = /robots.txt {
        proxy_pass http://source.blog.demo.com;
    }
    location ~ /static/ {
        rewrite ^(.*) http://static.blog.demo.com$1 permanent;
    }
    location ~ /files/ {
        rewrite ^(.*) http://static.blog.demo.com$1 permanent;
    }
    location / {
        proxy_pass http://itw_upstream;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
#
# the source server that serves static files and uploaded files
#
server {
    listen 80;
    server_name source.blog.demo.com;
    root /opt/mpword/../src/main/resources;
    access_log /opt/mpword/log/source_access.log;
    error_log /opt/mpword/log/source_error.log;
    client_max_body_size 1m;
    gzip on;
    gzip_min_length 1024;
    gzip_buffers 4 8k;
    gzip_types text/css application/x-javascript application/json;
    sendfile on;
    location ~ /static/ {
    }
    location ~ /files/ {
        proxy_pass http://itw_upstream;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_cache itw_cache;
        proxy_cache_key $uri;
        proxy_cache_valid 1d;
        expires 1d;
    }
}
#
# the simulated CDN server
#
server {
    listen 80;
    server_name static.blog.demo.com;
    access_log /opt/mpword/log/static_access.log;
    error_log /opt/mpword/log/static_error.log;
    client_max_body_size 1m;
    gzip on;
    gzip_min_length 1024;
    gzip_buffers 4 8k;
    gzip_types text/css application/x-javascript application/json;
    sendfile on;
    location ~ /static/ {
        add_header "Access-Control-Allow-Origin" "http://blog.demo.com";
        add_header "Access-Control-Allow-Methods" "GET, POST";
        proxy_pass http://source.blog.demo.com;
        proxy_read_timeout 3s;
    }
    location ~ /files/ {
        add_header "Access-Control-Allow-Origin" "http://blog.demo.com";
        add_header "Access-Control-Allow-Methods" "GET, POST";
        proxy_pass http://source.blog.demo.com;
        proxy_read_timeout 3s;
    }
}
but when I run nginx -t I get an error:
nginx: [emerg] host not found in upstream "source.blog.demo.com" in /etc/nginx/conf.d/blog.demo.com.conf:14
nginx: configuration file /etc/nginx/nginx.conf test failed
I can't find the issue. How can I solve it?
This is your config:
server {
    server_name blog.demo.com;
    location = /favicon.ico {
        proxy_pass http://source.blog.demo.com; # here you ask for an upstream server
    }
}
server {
    server_name source.blog.demo.com; # here you spin up the upstream server
}
Didn't test it, but I think the problem is that nginx tries to resolve the hostname given in proxy_pass when it loads the configuration, and source.blog.demo.com cannot be resolved at that point (nginx does not look it up in your own server_name blocks, it uses the system resolver).
So 1.) make source.blog.demo.com resolvable when nginx starts, e.g. with a DNS record or an /etc/hosts entry on this machine.
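For option 1, a minimal sketch assuming all three server blocks run on this same machine:
# /etc/hosts on the nginx host - lets the names resolve when the config is loaded
127.0.0.1   source.blog.demo.com static.blog.demo.com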
Or 2.) use the workaround of putting the hostname in a variable, which keeps nginx from resolving it at load time (it is then resolved at request time through the resolver directive):
server {
    resolver 8.8.8.8 valid=30s; # or any other reachable DNS, e.g. 127.0.0.11 for docker
    location = /favicon.ico {
        set $upstream_source source.blog.demo.com;
        proxy_pass http://$upstream_source/favicon.ico;
    }
}
I have an old nginx-based OSM tile caching proxy configured per https://coderwall.com/p/--wgba/nginx-reverse-proxy-cache-for-openstreetmap, but since the source tile server migrated to HTTPS, this solution no longer works: 421 Misdirected Request.
I based my fix on the article https://kimsereyblog.blogspot.com/2018/07/nginx-502-bad-gateway-after-ssl-setup.html. Unfortunately, after days of experiments, I'm still getting a 502 error.
My theory is that the root cause is the upstream servers' SSL certificate, which uses a wildcard: *.tile.openstreetmap.org. But all attempts to use $http_host, $host, proxy_ssl_name and proxy_ssl_session_reuse in different combinations didn't help: 421 or 502 every time.
My current nginx config is:
worker_processes auto;
events {
    worker_connections 768;
}
http {
    access_log /etc/nginx/logs/access_log.log;
    error_log /etc/nginx/logs/error_log.log;
    client_max_body_size 20m;
    proxy_cache_path /etc/nginx/cache levels=1:2 keys_zone=openstreetmap-backend-cache:8m max_size=500000m inactive=1000d;
    proxy_temp_path /etc/nginx/cache/tmp;
    proxy_ssl_trusted_certificate /etc/nginx/ca.crt;
    proxy_ssl_verify on;
    proxy_ssl_verify_depth 2;
    proxy_ssl_session_reuse on;
    proxy_ssl_name *.tile.openstreetmap.org;
    sendfile on;
    upstream openstreetmap_backend {
        server a.tile.openstreetmap.org:443;
        server b.tile.openstreetmap.org:443;
        server c.tile.openstreetmap.org:443;
    }
    server {
        listen 80;
        listen [::]:80;
        server_name example.com www.example.com;
        include /etc/nginx/mime.types;
        root /dist/browser/;
        location ~ ^/osm-tiles/(.+) {
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X_FORWARDED_PROTO http;
            proxy_set_header Host $http_host;
            proxy_cache openstreetmap-backend-cache;
            proxy_cache_valid 200 302 365d;
            proxy_cache_valid 404 1m;
            proxy_redirect off;
            if (!-f $request_filename) {
                proxy_pass https://openstreetmap_backend/$1;
                break;
            }
        }
    }
}
But it still produces an error when accessing https://example.com/osm-tiles/12/2392/1188.png:
2021/02/28 15:05:47 [error] 23#23: *1 upstream SSL certificate does not match "*.tile.openstreetmap.org" while SSL handshaking to upstream, client: 172.28.0.1, server: example.com, request: "GET /osm-tiles/12/2392/1188.png HTTP/1.0", upstream: "https://151.101.2.217:443/12/2392/1188.png", host: "localhost:3003"
The host OS is Ubuntu 20.04 (HTTPS is handled there), nginx is running in Docker from the nginx:latest image, and ca.crt is Ubuntu's default CA bundle.
Please help.
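For context: nginx verifies the upstream certificate against the value of proxy_ssl_name (the log above shows it being compared against the literal string *.tile.openstreetmap.org), and it does not send SNI to the upstream unless proxy_ssl_server_name is on. A hedged sketch of the combination usually needed in this situation; the concrete hostname is an assumption, and any name covered by the wildcard certificate should do:
# inside the existing "location ~ ^/osm-tiles/(.+)" block
# verify the upstream certificate against a real hostname instead of the wildcard string
# (a.tile.openstreetmap.org is an assumption - any name the certificate covers should work)
proxy_ssl_name a.tile.openstreetmap.org;
# pass that name as SNI to the upstream; this is off by default
proxy_ssl_server_name on;
# a matching Host header may also be needed to avoid the earlier 421 responses
proxy_set_header Host a.tile.openstreetmap.org;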
I have an Odoo app running on port 8069. While this setup worked fine on my old server, my new server uses Engintron, which seems to handle vhosts differently. The standout issue is that the location / block in common_http.conf duplicates the one needed in the vhost that runs the app, and it is part of the automatically generated config that gets overwritten whenever a cPanel account is created or deleted, or when Engintron is updated.
What would be the correct way to set this up within Engintron?
common_http.conf
location / {
    try_files $uri $uri/ @backend;
}
# This location / ends up getting included in the custom vhost,
# which is needed for all of the sites except this Odoo app.
custom_vhost.com.conf
upstream example {
    server 127.0.0.1:8069 weight=1 fail_timeout=0;
}
upstream example-chat {
    server 127.0.0.1:8072 weight=1 fail_timeout=0;
}
server {
    listen [::]:80;
    server_name delegates.example.com;
    return 301 https://delegates.example.com$request_uri;
}
server {
    listen [::]:80;
    server_name vendors.example.com;
    return 301 https://vendors.example.com$request_uri;
}
server {
    listen [::]:80;
    server_name example.com;
    return 301 https://example.com;
}
server {
    listen [::]:80;
    server_name *.example.com;
    return 301 https://example.com;
}
server {
    listen [::]:443 ssl;
    server_name pgadmin.example.com;
    # well-known_start
    location ^~ /.well-known {
        add_header Host-Header 192fc2e7e50945beb8231a492d6a8024;
        root /home/example/public_html;
    }
    # well-known_end
    ssl_certificate /var/cpanel/ssl/apache_tls/*.example.com/combined;
    ssl_certificate_key /var/cpanel/ssl/apache_tls/*.example.com/combined;
    add_header Strict-Transport-Security "max-age=31536000; includeSubdomains";
    add_header X-Content-Type-Options nosniff;
    add_header Cache-Control public;
    location / {
        deny all;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_pass http://127.0.0.1:5050;
    }
}
server {
    listen [::]:443 ssl;
    server_name example.com www.example.com;
    return 301 https://example.com;
}
server {
    listen [::]:443 ssl http2;
    server_name vendors.example.com delegates.example.com;
    client_max_body_size 200m;
    proxy_read_timeout 720s;
    proxy_connect_timeout 720s;
    proxy_send_timeout 720s;
    proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-NginX-Proxy true;
    #proxy_set_header X-Odoo-dbfilter ^%d\Z;
    proxy_redirect off;
    proxy_buffering off;
    # well-known_start
    location ^~ /.well-known {
        add_header Host-Header 192fc2e7e50945beb8231a492d6a8024;
        root /home/example/public_html;
    }
    # well-known_end
    ssl_certificate /var/cpanel/ssl/apache_tls/*.example.com/combined;
    ssl_certificate_key /var/cpanel/ssl/apache_tls/*.example.com/combined;
    access_log /var/log/nginx/odoo.access.log;
    error_log /var/log/nginx/odoo.error.log;
    # adds gzip options
    gzip on;
    gzip_types text/css text/plain text/xml application/xml application/javascript application/x-javascript text/javascript application/json text/x-json;
    gzip_proxied no-store no-cache private expired auth;
    #gzip_min_length 1000;
    gzip_disable "MSIE [1-6]\.";
    location /longpolling {
        proxy_pass http://example-chat;
    }
    location ~* /web/static/ {
        gzip_static on;
        proxy_cache_valid 200 90m;
        proxy_buffering on;
        expires 864000;
        add_header Cache-Control public;
        proxy_pass http://example;
    }
    location / {
        error_page 403 = https://example.com;
        proxy_pass http://example;
        proxy_redirect off;
        gzip_static on;
    }
    # The above location becomes a duplicate of the previous default location - which in turn fails the validity of the configuration.
    location ~* /web/content/ {
        gzip_static on;
        proxy_cache_valid 200 90m;
        proxy_buffering on;
        expires 864000;
        add_header Cache-Control public;
        proxy_pass http://example;
    }
    location /web/database/manager {
        deny all;
        error_page 403 https://example.com;
        proxy_pass http://example;
    }
}
Since the conf files are included in alphabetical order and any conflicting or duplicate settings are ignored, I ended up renaming the file so that it's included before the other ones. I also made the file immutable with the following command:
chattr +ai 1_custom_vhost.com.conf
I'm quite sure this is not a graceful solution, but it does the job for now.
We have two files in our nginx/sites-available/ folder: odoo-80 and odoo-443. How can we add an exception for one specific path without creating a loop? If we add the exception to the 443 file, it redirects back to the 80 file and vice versa... I normally work with Apache rather than nginx, so I would be very happy to get some help!
To sum up: we want the path /pos/web to be served over HTTP and everything else over HTTPS.
odoo-80 file
server {
    listen 80;
    server_name odoo.server.com;
    access_log /var/log/nginx/odoo.access.log;
    error_log /var/log/nginx/odoo.error.log;
    location / {
        rewrite ^/(.*) https://odoo.server.com:443/$1 permanent;
    }
}
odoo-443 file
#odoo server
upstream odoo {
    server 127.0.0.1:8069;
}
upstream odoochat {
    server 127.0.0.1:8072;
}
server {
    listen 443;
    server_name odoo.server.com;
    proxy_read_timeout 720s;
    proxy_connect_timeout 720s;
    proxy_send_timeout 720s;
    # Add Headers for odoo proxy mode
    proxy_set_header X-Forwarded-Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Real-IP $remote_addr;
    # SSL parameters
    ***
    # log
    ***
    # Redirect longpoll requests to odoo longpolling port
    location /longpolling {
        proxy_pass http://odoochat;
    }
    # Redirect requests to odoo backend server
    location / {
        proxy_redirect off;
        proxy_pass http://odoo;
    }
    # common gzip
    gzip_types text/css text/less text/plain text/xml application/xml application/json application/javascript;
    gzip on;
}
#pos20.07
#server {
# listen 443 default_server ssl;
# server_name odoo.server.com;
# Force pos in http for the posbox
# location ~ ^/pos/web {
# rewrite ^(.*)$ http://$host:80$1 permanent;
#}
You can declare a location {} block in your odoo-80 file; it should override the catch-all you have in place, so those requests aren't sent to 443 in the first place.
server {
    listen 80;
    server_name odoo.server.com;
    access_log /var/log/nginx/odoo.access.log;
    error_log /var/log/nginx/odoo.error.log;
    location ~ ^/pos/web {
        proxy_redirect off;
        proxy_pass http://odoo;
    }
    location / {
        rewrite ^/(.*) https://odoo.server.com:443/$1 permanent;
    }
}
Also, as Richard Smith mentioned, add the same block to 443 just in case someone types https://, and heed his note about HSTS (which I doubt is set up).
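A sketch of that 443-side block, adapted from the commented-out #pos20.07 section in the question (it assumes the same /pos/web prefix; note that a browser with a cached HSTS policy will ignore the downgrade):
# inside the existing server { listen 443; ... } block in the odoo-443 file
location ~ ^/pos/web {
    # send POS traffic back to plain HTTP on port 80
    rewrite ^(.*)$ http://$host:80$1 permanent;
}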
"we" should also be posting on Serverfault by the way :D