How to use nginx as a reverse proxy with multiple ports

Is it possible to use nginx as a reverse proxy like this?
I was looking at this post: https://www.digitalocean.com/community/questions/self-hosted-ngrok-or-serveo-alternative
and wanted to get it to work with a dynamic port. For example:
http://1234.mydomain.com --> http://localhost:1234
http://3333.mydomain.com --> http://localhost:3333
Is this possible with nginx?
I tried to create a wildcard DNS record, and in nginx's sites-available I put this:
server {
    #server_name tunnel.mydomain.com;
    server_name *.mydomain.com; #<----------
    server_name ~^(?<port>[\w-]+)\.mydomain\.com$; #<----------
    access_log /var/log/nginx/tunnel/$host;

    listen 443 ssl;
    listen [::]:443;

    ssl_certificate /etc/letsencrypt/live/mydomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mydomain.com.ml/privkey.pem;

    location / {
        #proxy_pass http://localhost:3333/;
        proxy_pass http://localhost:$port/; #<----------
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_redirect off;
    }

    error_page 502 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
But it's not working; this configuration does not distinguish between the different ports.
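For reference, here is an untested sketch of the approach, assuming the leading subdomain label really is a numeric port, the backends listen on 127.0.0.1, and the certificate actually covers *.mydomain.com. Two details matter: a wildcard server_name takes precedence over a regex one during name matching (so keeping both means $port is never set), and once proxy_pass contains a variable, nginx needs an IP address (or a resolver) plus the request URI appended explicitly.

server {
    listen 443 ssl;
    listen [::]:443 ssl;

    # Keep only the regex name; a wildcard server_name would win the match
    # and leave $port empty.
    server_name ~^(?<port>\d+)\.mydomain\.com$;

    ssl_certificate /etc/letsencrypt/live/mydomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mydomain.com/privkey.pem;

    location / {
        # Variable in proxy_pass: use a literal IP (or configure a resolver)
        # and pass the original URI explicitly.
        proxy_pass http://127.0.0.1:$port$request_uri;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
    }
}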

Related

How to remove port number in nginx when redirecting

This is my domain.conf file in nginx:
server {
    listen 80;
    listen 8080;
    server_name EXAMPLE.COM www.EXAMPLE.COM;
    return 301 https://EXAMPLE.COM$request_uri;
}

server {
    listen 443 ssl;
    root /home/path;
    ssl_certificate /etc/letsencrypt/live/EXAMPLE.COM/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/EXAMPLE.COM/privkey.pem;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:8080;
        proxy_redirect off;

        # Socket.IO Support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
Now when I go to http://EXAMPLE.COM:8080 or http://EXAMPLE.COM:8080/some_folder/, my website served on port 8080 works, but I want to remove the port number from the URL.
What I want is: whenever I type http://EXAMPLE.COM:8080/folder, it should redirect to https://EXAMPLE.COM/folder.
I think what you are looking for is the proxy_redirect directive, which goes right after proxy_pass.
This nginx configuration sample may be useful (take a look at the proxy_redirect line):
location /one/ {
    proxy_pass http://upstream:port/two/;
    proxy_redirect http://upstream:port/two/ /one/;
}
In your case, adding something like this should do the trick:
proxy_redirect http://127.0.0.1:8080/ /;
You can find full documentation and examples in the nginx documentation.
http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect
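Applied to the configuration in the question, a minimal sketch (assuming the backend on 127.0.0.1:8080 sends absolute redirects containing that internal address) would place the directive right after proxy_pass:

location / {
    proxy_set_header Host $http_host;
    proxy_pass http://127.0.0.1:8080;
    # Rewrite the Location/Refresh headers the backend sends back so the
    # internal address and port never reach the client.
    proxy_redirect http://127.0.0.1:8080/ /;
}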

nginx configuration server_name is wrong but website is still working?

I only want to allow access to my server from one domain. Let's say my domain is called "mydomain.mydomain.com" (yes, it is a subdomain).
Normally I would write server_name mydomain.mydomain.com everywhere, but I changed it to a non-existent domain and I can still reach the website. Why does my website also work from other domains? I know nginx normally uses the first server block if no matching server_name is found, but my first server block is my catch-all block: I defined server_name _; and default_server, and yet my website still works.
I have the following configuration:
server {
    # If server_name mydomain.mydomain.com is not found, return 444
    listen 80 default_server;
    server_name _;
    return 444;
}

# redirect all traffic to https if the domain is mydomain.mydomain.com (server_name)
server {
    listen 80;
    listen [::]:80;
    #-------------------------------------------
    # I CHANGE HERE TO A NON-EXISTING DOMAIN AND MY WEBSITE IS STILL WORKING?!?!?
    #-------------------------------------------
    server_name nonExistingDomain.com;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    root /config/www;
    index index.html index.htm index.php;
    #-------------------------------------------
    # I CHANGE HERE TO A NON-EXISTING DOMAIN AND MY WEBSITE IS STILL WORKING?!?!?
    #-------------------------------------------
    server_name nonExistingDomain.com;

    # enable subfolder method reverse proxy confs
    include /config/nginx/proxy-confs/*.subfolder.conf;
    # all ssl related config moved to ssl.conf
    include /config/nginx/ssl.conf;

    client_max_body_size 0;
    error_page 404 =200 /portal;
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options "SAMEORIGIN";

    location = / {
        return 301 https://mydomain.mydomain.com/portal;
        #try_files $uri $uri/ /index.html /index.php?$args =404;
    }

    location /pea {
        proxy_set_header X-Forwarded-Host $host:$server_port;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://localhost:8080/pea;
        # do not pass the CORS header from the response of the proxied server to the client
        #proxy_hide_header 'Access-Control-Allow-Origin';
    }

    location /portal {
        proxy_set_header X-Forwarded-Host $host:$server_port;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://localhost:8180/portal;
    }

    location /auth {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://localhost:8280/auth;
    }
}
You are listening on the IPv6 network socket in the server blocks where you changed the domain to a non-existent one. Since there are no other server blocks listening on those IPv6 sockets, they are the default servers for those IPv6 ports.
Note that your first server block is the default only for the IPv4 socket (listen 80 default_server;).
Thus the behavior can only be explained by the fact that you are connecting/testing over IPv6.
To avoid the inconsistency, use default_server on all of your listen directives. For example, in the first server block add a default server for IPv6 too:
server {
    # If server_name mydomain.mydomain.com is not found, return 444
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    return 444;
}
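If HTTPS requests for unknown host names should be refused as well, a similar catch-all can be added on port 443. The sketch below assumes nginx 1.19.4 or newer, where ssl_reject_handshake makes a certificate-less default server possible:

server {
    # default server for HTTPS requests whose SNI/Host matches no other server_name
    listen 443 ssl default_server;
    listen [::]:443 ssl default_server;
    server_name _;
    # refuse the TLS handshake instead of presenting a certificate (nginx >= 1.19.4)
    ssl_reject_handshake on;
}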

Keycloak Admin Console behind Nginx configured to use HTTPS

I'm trying to set up Keycloak. The tutorials expect me to visit http://localhost:8080, but I'm setting it up on a remote host and need to access the admin console externally, so I've tried to expose it via Nginx. The Keycloak Administration Console seems to work with the new domain name and port seamlessly, but it still tries to use the "http" URLs instead of the "https" ones (I have Nginx configured to redirect HTTP to HTTPS and want to keep it that way for security reasons). I have found that the problem is that it internally sets a variable:
var authServerUrl = 'http://example.com/auth';
while the correct URL would be https://example.com/auth.
As a result, when I open https://example.com/auth/admin/master/console/ in the browser, I get the error:
Refused to frame 'http://example.com/' because it violates the following Content Security Policy directive: "frame-src 'self'".
How do I fix that? The Nginx config I use is:
server {
    server_name example.com;
    listen 80;
    listen [::]:80;
    location / {
        return 301 https://$server_name$request_uri;
    }
}

ssl_session_cache shared:ssl_session_cache:10m;

server {
    server_name example.com;
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    # ... <SSL and Gzip config goes here> ...
    location / {
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://127.0.0.1:8080;
        client_max_body_size 16m;
    }
}
You are doing SSL offloading in nginx, but you also need to forward the information that the https scheme was used to Keycloak (the X-Forwarded-Proto header). Try this:
server {
    server_name example.com;
    listen 80;
    listen [::]:80;
    location / {
        return 301 https://$server_name$request_uri;
    }
}

ssl_session_cache shared:ssl_session_cache:10m;

server {
    server_name example.com;
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    # ... <SSL and Gzip config goes here> ...
    location / {
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://127.0.0.1:8080;
        client_max_body_size 16m;
    }
}
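Note also that Keycloak typically honours these forwarded headers only when it is itself configured to run behind a reverse proxy; depending on the distribution and version this means something like proxy-address-forwarding="true" on the WildFly-based versions or the edge proxy mode (e.g. KC_PROXY=edge) on the Quarkus-based ones. Without that, the X-Forwarded-Proto header above may simply be ignored.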

Configure Varnish for my app that already has proxy_pass in nginx

I'm trying to figure out how to configure my website to pass through Varnish. I'm using Ubuntu 18.04. I've tried some methods I found online, but I can only make it work for HTTP, not for HTTPS. Here is my current nginx.conf. My website is built in React, and as you can see, I already have a proxy_pass in my Nginx config.
server {
    listen 80;
    listen [::]:80;
    return 301 https://$host$request_uri;
}

server {
    # SSL configuration
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    include snippets/ssl-params.conf;
    client_max_body_size 15M;
    ssl_certificate_key /srv/www/dev.site.com/ssl/dev.key;
    ssl_certificate /srv/www/dev.site.com/ssl/dev.chain.crt;
    access_log /srv/www/dev.site.com/logs/temp_access.log;
    error_log /srv/www/dev.site.com/logs/temp_error.log;

    error_page 502 /502.html;
    location = /502.html {
        root /usr/share/nginx/html/;
        allow all;
        internal;
    }

    # root /srv/www/dev.site.com/html;
    # index index.php index.html;
    server_name www.dev.site.com dev.site.com;

    location / {
        proxy_pass http://127.0.0.1:3000/;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Client-Verify SUCCESS;
        proxy_set_header X-Client-DN $ssl_client_s_dn;
        proxy_set_header X-SSL-Subject $ssl_client_s_dn;
        proxy_set_header X-SSL-Issuer $ssl_client_i_dn;
        proxy_read_timeout 1800;
        proxy_connect_timeout 1800;

        if ($request_uri ~* "\.(ico|css|js|gif|jpe?g|png|json)$") {
            expires 30d;
            access_log off;
            add_header Pragma public;
            add_header Cache-Control "public";
            break;
        }
    }
}
Thanks
HTTP/1.1
For regular HTTP/1.1 requests, this one should do the trick:
server {
    listen 443 ssl;
    server_name example.com;
    ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem;
    ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key;
    location / {
        proxy_pass http://127.0.0.1:80;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Forwarded-Port 443;
        proxy_set_header Host $host;
    }
}
Please make sure you include the right certificates, and proxy through to the right hostname/port.
HTTP/2
For HTTP/2 requests, you can use the following Nginx config:
server {
    listen 443 ssl http2;
    server_name example.com;
    ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem;
    ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key;
    location / {
        proxy_pass http://127.0.0.1:80;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Forwarded-Port 443;
        proxy_set_header Host $host;
    }
}
And for Varnish, you need to make sure the -p feature=+http2 runtime flag is added to the varnishd process, which could then look like this:
varnishd -a:80 -f /etc/varnish/default.vcl -s malloc,2g -p feature=+http2
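Also keep in mind that in this layout nginx terminates TLS and hands the request to Varnish on port 80, so Varnish still needs its backend in /etc/varnish/default.vcl pointed at the actual application (presumably 127.0.0.1 on port 3000 for the React app from the question); otherwise Varnish has nothing to fetch from.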

Nginx reverse proxy for tomcat

I am having issues getting my nginx + Tomcat 7 reverse proxy setup working.
Basically, I want https://192.168.10.101 to serve content from the upstream cluster's /webapp/; however, I am getting a 404 page from my application.
Any hints on what's going wrong would be greatly appreciated.
My configuration is given below.
server {
    server_name 192.168.10.101;
    access_log /var/log/nginx/mysite-access.log;
    listen 443;
    ssl on;
    ssl_certificate /etc/nginx/ssl/mysite.crt;
    ssl_certificate_key /etc/nginx/ssl/private/mysite_pvt.key;

    location / {
        proxy_redirect off;
        proxy_pass https://tccluster/webapp/;
        rewrite_log on;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_max_temp_file_size 0;
    }
}

upstream tccluster {
    server 192.168.56.103:8443;
    server 192.168.56.104:8443;
}
Finally figured it out. The app has a filter that redirects to /webapp/index.html, so the browser's next request comes back to nginx as /webapp/index.html, which proxy_pass then maps to /webapp/webapp/index.html on the backend, giving the 404.
I added a rewrite rule:
location / {
    proxy_pass https://backend/webapp/;
    proxy_redirect off;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    rewrite ^/webapp/(.*)$ /$1 last;
}
And this seems to be working for now!
Full nginx config to pass to a Tomcat context:
server {
    listen 80; # e.g., listen 192.168.1.1:80; In most cases *:80 is a good idea
    listen [::]:80;
    server_name tomcat-context.domain.com;

    # individual nginx logs for this vhost
    access_log /var/log/nginx/tomcat-context_domain_access.log main;
    error_log /var/log/nginx/tomcat-context_domain_error.log;

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/www;
    }

    location / {
        proxy_pass http://127.0.0.1:10080/tomcat-context/;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        rewrite ^/tomcat-context/(.*)$ /$1 last;
    }

    location /tomcat-context {
        rewrite ^/tomcat-context(.*)$ $1 redirect;
    }
}
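If the public URL can keep the same /tomcat-context prefix that Tomcat uses, a simpler variant (a sketch reusing the same hypothetical backend address) avoids the rewrites altogether, because the location prefix and the proxy_pass URI match:

location /tomcat-context/ {
    # identical prefixes on both sides: the request URI is passed through unchanged
    proxy_pass http://127.0.0.1:10080/tomcat-context/;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}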
