I just installed Nginx and a service on port 8069. I am able to access my service from outside using odoo.domain.com, but the problem is that I can't access it from the local network (LAN).
If I browse to Nginx's IP I can see the Nginx default page, but if I add :8069 to the IP it still doesn't work.
Below is my Nginx config.
server {
server_name odoo.domain.com;
listen 80;
access_log /var/log/nginx/testing-access.log;
error_log /var/log/nginx/testing-error.log;
#return 301 https://$host$request_uri;
listen 443 ssl http2;
#rewrite ^(.*) https://$host$1 permanent;
ssl_certificate /etc/letsencrypt/live/odoo.domain.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/odoo.domain.com/privkey.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
# ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
# ssl_prefer_server_ciphers off;
location /longpolling {
proxy_connect_timeout 3600;
proxy_read_timeout 3600;
proxy_send_timeout 3600;
send_timeout 3600;
proxy_pass http://127.0.0.1:8072;
}
location / {
proxy_connect_timeout 3600;
proxy_read_timeout 3600;
proxy_send_timeout 3600;
send_timeout 3600;
proxy_pass http://127.0.0.1:8069/;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
gzip on;
gzip_min_length 1000;
}
upstream odoo {
server 127.0.0.1:8069 weight=1 fail_timeout=0;
}
upstream odoo-im {
server 127.0.0.1:8072 weight=1 fail_timeout=0;
}
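One way to reach the service from the LAN without a DNS name would be an extra server block keyed on the Nginx host's LAN IP that proxies to the same backend. A minimal sketch, where 192.168.1.10 is a placeholder for the actual LAN IP and the assumption is that Odoo itself is only listening on 127.0.0.1:8069:
server {
    listen 80;
    server_name 192.168.1.10;                # placeholder: the Nginx host's LAN IP
    location / {
        proxy_pass http://odoo;              # reuses the upstream defined above
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
Browsing to http://192.168.1.10/ would then reach Odoo through Nginx; ip:8069 can only work if Odoo is bound to the LAN interface itself and the firewall allows that port.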
Related
I have a real-time chat application that I'm unable to scale.
I have a frontend Nginx that proxies to a backend Nginx, and the backend Nginx passes requests on to an upstream of multiple Flask-SocketIO instances.
server {
listen 443 ssl;
#server_name xx.xxx.xx.xxx;
client_max_body_size 300M;
ssl_certificate /etc/nginx/ssl/newssl/ssl_bundle.crt;
ssl_certificate_key /etc/nginx/ssl/newssl/example.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers 'xxx+xxxx+xxxx';
set_real_ip_from 0.0.0.0/0;
real_ip_header X-Real-IP;
real_ip_recursive on;
add_header X-Frame-Options "SAMEORIGIN" ;
add_header Access-Control-Allow-Origin *;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_redirect off;
proxy_read_timeout 120s;
proxy_connect_timeout 120s;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 65s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 100 128k;
client_max_body_size 0M;
proxy_pass http://live_chatnodes;
}
}
Backend Nginx:
server {
listen 80;
server_name 10.0.2.9;
client_max_body_size 300M;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_redirect off;
proxy_read_timeout 120s;
proxy_connect_timeout 120s;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 65s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 100 128k;
client_max_body_size 0M;
proxy_pass http://live_nodes;
}
}
This now passes requests to the upstream, which has 15 nodes; however, all the requests are going to one Flask-SocketIO instance, which is bringing my system down. One last piece of information: the upstream uses ip_hash, as per the Flask-SocketIO documentation.
All my clients are using a VPN, so they have static IPs as well.
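For reference, the ip_hash upstream mentioned above presumably looks something like the sketch below (the actual upstream block was not included in the post, so the server addresses are placeholders). Note that ip_hash keys on the client address the backend Nginx actually sees, so if every connection arrives from the frontend proxy, or from a single VPN egress address, every request hashes to the same node:
upstream live_nodes {
    ip_hash;                     # sticky sessions keyed on the connecting client's address
    server 10.0.2.21:5000;       # Flask-SocketIO instance 1 (placeholder)
    server 10.0.2.22:5000;       # Flask-SocketIO instance 2 (placeholder)
    # ... remaining instances
}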
I can't get my .NET MVC app to be hosted correctly over SSL (HTTPS); it only works over HTTP. The following are my relevant Nginx files (with "example.org" used instead of my domain).
/etc/nginx/sites-enabled/default
# Default server configuration
#
server {
server_name example.org *.example.org;
location / {
proxy_pass http://127.0.0.1:5000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# SSL configuration
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/example.org/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/example.org/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
listen 80 default_server;
# listen [::]:80 default_server deferred;
return 444;
}
server {
if ($host = example.org) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80;
server_name example.org *.example.org;
return 404; # managed by Certbot
}
/etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_types application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
/etc/nginx/proxy.conf
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
This makes my site work over "http://example.org" but not over "https://example.org", and I don't know why it won't work over HTTPS. I tried altering my /etc/nginx/nginx.conf file to match the configuration recommended in Microsoft's documentation for hosting ASP.NET behind Nginx. Here's my new /etc/nginx/nginx.conf file.
/etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
include /etc/nginx/proxy.conf;
limit_req_zone $binary_remote_addr zone=one:10m rate=5r/s;
server_tokens off;
sendfile on;
# Adjust keepalive_timeout to the lowest possible value that makes sense
# for your use case.
keepalive_timeout 29;
client_body_timeout 10; client_header_timeout 10; send_timeout 10;
upstream my-app{
server 127.0.0.1:5000;
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name example.org *.example.org;
ssl_certificate /etc/letsencrypt/live/example.org/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.org/privkey.pem;
ssl_session_timeout 1d;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_tickets off;
ssl_stapling off;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
#Redirects all traffic
location / {
proxy_pass http://my-app;
limit_req zone=one burst=10 nodelay;
}
}
}
When I change my /etc/nginx/nginx.conf file to the above, both "http://example.org" and "https://example.org" fail. So how do I get this app to work over HTTPS?
The problem was actually my ufw firewall. When I was setting up the droplet I ran these commands:
sudo ufw enable
sudo ufw allow OpenSSH
sudo ufw default deny incoming
sudo ufw allow 'Nginx HTTP'
The problem above is that I was supposed to run sudo ufw allow 'Nginx Full' and then sudo reboot. After this, my original Nginx configuration worked!
Try the config below (don't forget to disable HTTPS redirection in your app: remove app.UseHttpsRedirection(); from your Program.cs (or Startup.cs) and remove the "https://localhost:5001" applicationUrl reference from launchSettings.json):
worker_processes 1;
events { worker_connections 1024; }
http {
sendfile on;
access_log /var/log/nginx/access.log combined;
error_log /var/log/nginx/error.log;
upstream web-api {
server 127.0.0.1:5000;
}
server {
listen 80;
server_name yourdomain.com;
location / {
return 301 https://$host$request_uri;
}
}
server {
listen 443 ssl;
server_name yourdomain.com;
ssl_certificate /etc/ssl/certs/yourdomain.com.crt;
ssl_certificate_key /etc/ssl/private/yourdomain.com.key;
location / {
proxy_pass http://web-api;
proxy_redirect off;
proxy_http_version 1.1;
proxy_cache_bypass $http_upgrade;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $server_name;
}
}
}
I have an Odoo 11 installation behind an Nginx proxy. It's been working for a while, but now when you access it, it shows "Index of /" instead of the Odoo login page (see screenshot).
Here is my Nginx configuration:
#odoo server
upstream odoo {
server 127.0.0.1:8069;
}
upstream odoochat {
server 127.0.0.1:8072;
}
# http -> https
server {
listen 80;
server_name businessapps.enone.tech;
#server_name odoo;
rewrite ^(.*) https://$host$1 permanent;
}
server {
listen 443;
server_name businessapps.enone.tech;
#server_name odoo;
proxy_read_timeout 720s;
proxy_connect_timeout 720s;
proxy_send_timeout 720s;
# Add Headers for odoo proxy mode
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
# SSL parameters
ssl on;
ssl_certificate /etc/letsencrypt/live/businessapps.enone.tech/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/businessapps.enone.tech/privkey.pem;
ssl_session_timeout 30m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers '<replaced cipher>';
ssl_prefer_server_ciphers on;
# log
access_log /var/log/nginx/odoo.access.log;
error_log /var/log/nginx/odoo.error.log;
# Redirect longpoll requests to odoo longpolling port
location /longpolling {
proxy_pass http://odoochat;
}
# Redirect requests to odoo backend server
location / {
proxy_redirect off;
proxy_pass http://odoo;
}
# common gzip
gzip_types text/css text/less text/plain text/xml application/xml application/json application/javascript;
gzip on;
}
Can someone point out what the issue could be? Sometimes I can access the login page; other times I get the "Index of /" page. The installation is on Ubuntu 16.0.
You need to specify the directory in the Nginx config. I had this error previously and found that even after changing my config I still had weird errors with Odoo, so I recommend you do what I did: completely reinstall Nginx and make sure you reboot your server after doing so!
Add a location block inside the server {} block.
I have used the Nginx configuration below:
upstream odooapp {
server odoo:8069;
keepalive 8;
}
upstream longpolling {
server odoo:8072;
keepalive 8;
}
server {
listen 80;
listen [::]:80;
server_name businessapps.enone.tech;
access_log /var/log/nginx/access.log mainlog;
error_log /var/log/nginx/error.log;
return 301 https://businessapps.enone.tech:443/web/login;
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name businessapps.enone.tech;
access_log /var/log/nginx/access.log mainlog;
error_log /var/log/nginx/error.log;
ssl_ciphers ALL:!ADH:!MD5:!EXPORT:!SSLv2:RC4+RSA:+HIGH:+MEDIUM;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_certificate <PATH_TO_CERT>;
ssl_certificate_key <PATH_TO_KEY>;
add_header Strict-Transport-Security "max-age=2592000; preload;" always;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_connect_timeout 300;
proxy_send_timeout 300;
proxy_read_timeout 300;
send_timeout 300;
client_max_body_size 20M;
proxy_pass http://odooapp/;
proxy_redirect off;
}
location /longpolling {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://longpolling/longpolling;
proxy_redirect off;
}
}
I want the following scenario:
The client makes a browser request to http://my-domain.com.
Nginx A intercepts that request and forwards it to Nginx B, which is hosting my website.
I have the configuration below, but I am getting ERR_TOO_MANY_REDIRECTS.
Nginx A (landing host proxy)
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
server_tokens off;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl default_server;
listen [::]:443 ssl default_server;
ssl_certificate /my/ssl/my-domain.com.crt;
ssl_certificate_key /my/ssl/my-domain.com.key;
ssl_dhparam /my/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
ssl_ciphers
'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK';
return 404;
}
upstream client_proxy {
server my_internal_server:80;
}
server {
server_name my-domain.com;
listen 443 ssl;
ssl_certificate /my/ssl/my-domain.com.crt;
ssl_certificate_key /my/ssl/my-domain.com.key;
ssl_dhparam /my/ssl/dhparam.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK';
location / {
rewrite ^ http://my-domain.com/;
index index.html index.htm;
charset utf-8;
auth_basic off;
allow all;
proxy_pass http://client_proxy/;
proxy_ignore_headers Set-Cookie Cache-Control Expires;
proxy_hide_header "Set-Cookie";
proxy_redirect off;
proxy_set_header Host my-domain.com;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_connect_timeout 90s;
proxy_send_timeout 90s;
proxy_read_timeout 90s;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
send_timeout 90s;
}
}
Nginx B (Web server)
server {
listen 80;
root /my/www;
index index.html;
try_files $uri $uri/ /index.html;
# ... other location blocks which are irrelevant here
}
As stated by Richard Smith in the comments, removing the rewrite rule fixed it.
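Concretely, the location block on Nginx A keeps only the proxy directives. A trimmed sketch of the fix, assuming the rest of the block stays exactly as posted:
location / {
    # The 'rewrite ^ http://my-domain.com/;' line is removed: it sent every request
    # back to http://, which the port-80 server redirected to https again, creating the loop.
    proxy_pass http://client_proxy/;
    proxy_set_header Host my-domain.com;
    # ... remaining proxy_set_header / timeout / buffer directives unchanged
}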
Is it possible to use an Nginx reverse proxy with SSL pass-through so that it can pass requests to a server that requires client certificate authentication?
That is, the backend server would handle the client's certificate itself, and the Nginx reverse proxy server would not need a certificate of its own.
I'm not sure how well this will work in your situation, but newer (1.9.3+) versions of Nginx can pass (encrypted) TLS packets directly to an upstream server, using the stream block:
stream {
server {
listen 443;
proxy_pass backend.example.com:443;
}
}
If you want to target multiple upstream servers, distinguished by their hostnames, you can do this with the nginx modules ngx_stream_ssl_preread and ngx_stream_map. The concept behind this is TLS Server Name Indication (SNI).
Dave T. outlines a solution nicely. See his answer on this network.
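A minimal sketch of that SNI-based routing (hostnames and backend addresses are placeholders):
stream {
    # Route on the server name the client sends in the TLS handshake (SNI),
    # without terminating TLS on this proxy.
    map $ssl_preread_server_name $backend {
        app1.example.com  10.0.0.11:443;
        app2.example.com  10.0.0.12:443;
        default           10.0.0.11:443;
    }
    server {
        listen 443;
        ssl_preread on;          # required so $ssl_preread_server_name gets populated
        proxy_pass $backend;
    }
}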
Since we want to do SSL pass-through, SSL termination takes place at the backend Nginx server. Also, I haven't seen an answer that takes care of plain HTTP connections as well.
The optimal solution is an Nginx instance acting as a Layer 7 and Layer 4 proxy at the same time. Something else that is rarely discussed is IP address redirection: when we use a proxy, this must be configured on the proxy, not on the backend server as usual.
Lastly, the client IP address must be preserved, hence we must use the PROXY protocol to do this correctly.
Sounds confusing? It's not, really.
I came up with a solution that I'm currently using in production, and it works flawlessly.
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
variables_hash_bucket_size 1024;
variables_hash_max_size 1024;
map_hash_max_size 1024;
map_hash_bucket_size 512;
types_hash_bucket_size 512;
server_names_hash_bucket_size 512;
sendfile on;
tcp_nodelay on;
tcp_nopush on;
autoindex off;
server_tokens off;
keepalive_timeout 15;
client_max_body_size 100m;
upstream production_server {
server backend1:3080;
}
upstream staging_server {
server backend2:3080;
}
upstream ip_address {
server backend1:3080; #or backend2:3080 depending on your preference.
}
server {
server_name server1.tld;
listen 80;
listen [::]:80;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header Connection "";
#add_header X-Upstream $upstream_addr;
proxy_redirect off;
proxy_connect_timeout 300;
proxy_http_version 1.1;
proxy_buffers 16 16k;
proxy_buffer_size 64k;
proxy_cache_background_update on;
proxy_pass http://production_server$request_uri;
}
}
server {
server_name server2.tld;
listen 80;
listen [::]:80;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header Connection "";
#add_header X-Upstream $upstream_addr;
proxy_redirect off;
proxy_connect_timeout 300;
proxy_http_version 1.1;
proxy_buffers 16 16k;
proxy_buffer_size 16k;
proxy_cache_background_update on;
proxy_pass http://staging_server$request_uri;
}
}
server {
server_name 192.168.1.1; #replace with your own main ip address
listen 80;
listen [::]:80;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header Connection "";
#add_header X-Upstream $upstream_addr;
proxy_redirect off;
proxy_connect_timeout 300;
proxy_http_version 1.1;
proxy_buffers 16 16k;
proxy_buffer_size 16k;
proxy_cache_background_update on;
proxy_pass http://ip_address$request_uri;
}
}
}
stream {
map $ssl_preread_server_name $domain {
server1.tld production_server_https;
server2.tld staging_server_https;
192.168.1.1 ip_address_https;
default staging_server_https;
}
upstream production_server_https {
server backend1:3443;
}
upstream staging_server_https {
server backend2:3443;
}
upstream ip_address_https {
server backend1:3443;
}
server {
ssl_preread on;
proxy_protocol on;
tcp_nodelay on;
listen 443;
listen [::]:443;
proxy_pass $domain;
}
log_format proxy '$protocol $status $bytes_sent $bytes_received $session_time';
access_log /var/log/nginx/access.log proxy;
error_log /var/log/nginx/error.log debug;
}
Now the only thing left to do is to enable the PROXY protocol on the backend servers. The example below will get you going:
server {
real_ip_header proxy_protocol;
set_real_ip_from proxy;
server_name www.server1.tld;
listen 3080;
listen 3443 ssl http2;
listen [::]:3080;
listen [::]:3443 ssl http2;
include ssl_config;
# Non-www redirect
return 301 https://server1.tld$request_uri;
}
server {
real_ip_header proxy_protocol;
set_real_ip_from 1.2.3.4; # <--- proxy ip address, or proxy container hostname for docker
server_name server1.tld;
listen 3443 ssl http2 proxy_protocol; #<--- proxy protocol to the listen directive
listen [::]:3443 ssl http2 proxy_protocol; # <--- proxy protocol to the listen directive
root /var/www/html;
charset UTF-8;
include ssl_config;
#access_log logs/host.access.log main;
location ~ /.well-known/acme-challenge {
allow all;
root /var/www/html;
default_type "text/plain";
}
location / {
index index.php;
try_files $uri $uri/ =404;
}
error_page 404 /404.php;
# place rest of the location stuff here
}
Now everything should work like a charm.
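If you want to double-check that the original client address actually survives both hops, a log format along these lines on the backend can help (the format name and log path are arbitrary placeholders):
# $proxy_protocol_addr is the address carried in the PROXY protocol header,
# $remote_addr is the client address after the real_ip rules above are applied.
log_format client_check '$proxy_protocol_addr (proxy protocol) | $remote_addr (realip) "$request"';
access_log /var/log/nginx/client_check.log client_check;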