Reverse proxy from NGINX to Keycloak with 2FA

I have a problem with NGINX. Below I provide my configuration file and a picture of the architecture ( https://ibb.co/niZCRx ).
I want to access Keycloak through NGINX and log in to it. I use Keycloak as an identity management system with two-factor authentication: a username/password login plus a client certificate check. My problem is that when I access it through NGINX in the browser, I do not get the popup to present my user certificate; I go straight to the second step and enter a username and password, but after that Keycloak tells me I am missing a certificate.
Something I have tried that partly works: if I add proxy_ssl_certificate and proxy_ssl_certificate_key to the configuration file, the certificate is passed on, but only for one user. For example, if proxy_ssl_certificate and proxy_ssl_certificate_key point to the certificate and key of the user joncheski, then logging in to Keycloak as joncheski succeeds. But if I log in as another user it fails, because the certificate and the username do not match.
I need your help: how can I set this up so that it works for multiple users?
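For reference, the partial workaround mentioned above amounts to hard-coding one user's certificate as the client certificate NGINX presents to Keycloak, roughly like this (the paths here are only illustrative):

    proxy_ssl_certificate     /etc/nginx/certs/joncheski.crt;
    proxy_ssl_certificate_key /etc/nginx/certs/joncheski.key;

Because only one certificate can be configured this way, every login proxied through NGINX looks to Keycloak like that single user.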
nginx.conf:
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    server {
        listen 443 ssl;
        server_name #SET NAME OF SERVER#;

        # HTTPS and mTLS towards the backend
        proxy_ssl_verify on;
        proxy_ssl_verify_depth 2;
        proxy_ssl_session_reuse on;
        proxy_ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        proxy_ssl_trusted_certificate #PATH OF PUBLIC CA CERTIFICATE#;

        ssl_prefer_server_ciphers on;
        ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';
        ssl_certificate #PATH OF PUBLIC CERTIFICATE FROM SDP GATEWAY#;
        ssl_certificate_key #PATH OF PRIVATE KEY FROM SDP GATEWAY#;
        ssl_trusted_certificate #PATH OF PUBLIC CA CERTIFICATE#;
        ssl_client_certificate #PATH OF PUBLIC CA CERTIFICATE#;
        ssl_verify_client off;
        ssl_session_tickets on;
        ssl_session_cache shared:SSL:40m;
        ssl_session_timeout 4h;

        location '/auth' {
            proxy_pass #SET HOST TO IDENTITY MANAGEMENT#;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_http_version 1.1;
        }

        location '/' {
            proxy_pass #SET HOST TO REDIRECT GUI REQUEST#;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_cache_bypass $http_upgrade;
        }
    }
}
Best regards,
Goce Joncheski
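One direction that is often suggested for this kind of setup (a sketch only, reusing the placeholders from the question; the Keycloak side would additionally need its nginx X.509 certificate lookup provider enabled, which is not shown here): let NGINX itself request and verify the browser's client certificate, then forward whichever certificate was presented to Keycloak in a request header instead of using proxy_ssl_certificate:

    server {
        listen 443 ssl;
        server_name #SET NAME OF SERVER#;

        ssl_certificate        #PATH OF PUBLIC CERTIFICATE FROM SDP GATEWAY#;
        ssl_certificate_key    #PATH OF PRIVATE KEY FROM SDP GATEWAY#;
        ssl_client_certificate #PATH OF PUBLIC CA CERTIFICATE#;
        ssl_verify_client optional;   # NGINX itself asks the browser for the client certificate

        location '/auth' {
            proxy_pass #SET HOST TO IDENTITY MANAGEMENT#;
            proxy_http_version 1.1;
            # Forward whichever certificate the user presented (PEM, URL-encoded).
            proxy_set_header ssl-client-cert $ssl_client_escaped_cert;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header Host $host;
        }
    }

The header name must match whatever the Keycloak lookup provider is configured to read; ssl-client-cert is only an assumption here. With ssl_verify_client set to optional (or on), the certificate prompt comes from NGINX for every user, so the forwarded certificate is no longer tied to a single hard-coded account.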

Related

What is the relationship of server's setting in both nginx.conf and proxy.conf?

I am very new to NGINX.
In my project, a server is defined in both etc/nginx/nginx.conf and etc/nginx/conf.d/proxy.conf, and etc/nginx/conf.d/proxy.conf is included from nginx.conf.
I do not understand the relationship between the server settings in these two files. For example, in nginx.conf the server block has listen 80; and listen [::]:80;, while in proxy.conf it has listen 80 proxy_protocol;.
In the above example, which setting is used for real traffic?
Does the server setting in proxy.conf overwrite the server setting in nginx.conf,
or is the server setting in proxy.conf merged into the server setting in nginx.conf?
Please find the full conf files as below:
etc/nginx/conf.d/proxy.conf
content: |
    client_max_body_size 500M;
    server_names_hash_bucket_size 128;

    upstream backend {
        server unix:///var/run/puma/my_app.sock;
    }

    server {
        listen 80 proxy_protocol;

        access_log /var/log/nginx/access.log;
        error_log /var/log/nginx/error.log;

        large_client_header_buffers 8 32k;

        set_real_ip_from 10.0.0.0/8;
        real_ip_header proxy_protocol;

        location / {
            proxy_http_version 1.1;
            proxy_set_header X-Real-IP $proxy_protocol_addr;
            proxy_set_header X-Forwarded-For $proxy_protocol_addr;
            proxy_set_header Host $http_host;
            proxy_set_header X-NginX-Proxy true;
            proxy_buffers 8 32k;
            proxy_buffer_size 64k;
            proxy_pass http://backend;
            proxy_redirect off;

            # Enables WebSocket support
            location /v1/cable {
                proxy_pass http://backend;
                proxy_http_version 1.1;
                proxy_set_header Upgrade "websocket";
                proxy_set_header Connection "Upgrade";
                proxy_set_header X-Real-IP $proxy_protocol_addr;
                proxy_set_header X-Forwarded-For $proxy_protocol_addr;
            }
        }
    }
etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    include /etc/nginx/conf.d/*.conf;

    server {
        listen 80;
        listen [::]:80;
        server_name localhost;
        root /usr/share/nginx/html;

        location / {
        }
    }
}
Nginx selects a server block to process a request based on the values of the listen and server_name directives.
If a matching server name cannot be found, the default server for that port is used.
In the configuration in your question, the server block in proxy.conf is encountered first, so it becomes the de facto default server for port 80.
The server block in nginx.conf will only match requests that use the corresponding host name, i.e. http://localhost.
See this document for details.
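As a minimal illustration of that selection rule (the names are hypothetical), a request with Host: app.example.com lands in the first block below, and any other host name on port 80 falls back to the block marked default_server:

    server {
        listen 80;
        server_name app.example.com;   # matched only by Host: app.example.com
        return 200 "app";
    }

    server {
        listen 80 default_server;      # catches every other Host on port 80
        return 200 "default";
    }

Marking a block default_server makes the fallback explicit instead of depending on which file happens to be included first.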

Nginx Slow/Failing requests when hosting multiple sites

I think this is a somewhat interesting issue, and being new to nginx I'm not sure what configuration I need to look at to resolve it. I am hosting two web applications behind an nginx reverse proxy. If a request comes in for myapp1.com it is routed to my app at 127.0.0.1:3000; if it asks for myapp2.com it is routed to my app at 127.0.0.1:3001. The problem is when I try to hit both of them in a relatively short amount of time (e.g. open http://myapp1.com and http://myapp2.com in the browser). It will usually serve the first one and then the request for the other takes too long and fails. What's going wrong with nginx? Below are my configuration files.
Edit:
After connecting to a VPN the issue does not happen. Is this because I am on the same LAN as the host? Is there any way to get around that?
/etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
    # multi_accept on;
}

http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##
    log_format main_ext '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for" '
                        '"$host" sn="$server_name" ' 'rt=$request_time '
                        'ua="$upstream_addr" us="$upstream_status" '
                        'ut="$upstream_response_time" ul="$upstream_response_length" '
                        'cs=$upstream_cache_status';
    access_log /var/log/nginx/access.log main_ext;
    error_log /var/log/nginx/error.log warn;

    ##
    # Gzip Settings
    ##
    gzip on;

    ##
    # Virtual Host Configs
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
/etc/nginx/sites-enabled/default
server {
    listen 80;
    server_name myapp1.com www.myapp1.com;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/myapp1.com/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/myapp1.com/privkey.pem; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

server {
    listen 80;
    server_name myapp2.com www.myapp2.com;

    location / {
        proxy_pass http://127.0.0.1:3001;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}

NGINX: How do I remove a port when performing a reverse proxy?

I have an nginx reverse proxy set up which is being used as an SSL offload for several servers, such as Confluence. I've got it successfully handling http://confluence and https://confluence, but when I try to redirect http://confluence:8090, it goes to https://confluence:8090 and fails.
How can I remove the port from the URL?
The config below is a bit trimmed but maybe helpful. Is the $server_port bit in the headers causing the problem?
server {
    listen 8090;
    server_name confluence;
    return 301 https://confluence$request_uri;
}

server {
    listen 443 ssl http2;
    server_name confluence;

    location / {
        proxy_http_version 1.1;
        proxy_pass http://confbackend:8091;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $server_name:$server_port;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Upgrade $http_upgrade; # WebSocket support
        proxy_set_header Connection $connection_upgrade; # WebSocket support
    }
}
Seems like a lot of answers here involve http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect but I find no solace in that confusing mess.
I also would have thought you'd have a single server block, but I was trying the advice from https://serverfault.com/questions/815797/nginx-rewrite-to-new-protocol-and-port
I tried messing with the port_in_redirect off; option, but maybe I was using it wrong?
EDIT 1: Add conf files
The files below are modifications of the Artifactory nginx setup. I used their setup initially and added additional conf files (in ./conf.d/) for other reverse-proxy endpoints.
Confluence.conf
server {
    listen 8090 ssl http2;
    server_name confluence.domain.com confluence;
    ## return 301 https://confluence.domain.com$request_uri;
    proxy_redirect https://confluence.domain.com:8090 https://confluence.domain.com;
}

server {
    ## add ssl entries when https has been set in config
    ssl_certificate /data/rpssl/confluence.pem;
    ssl_certificate_key /data/rpssl/confluence_unencrypted.key;

    ## server configuration
    listen 443 ssl http2;
    server_name confluence.domain.com confluence;

    add_header Strict-Transport-Security max-age=31536000;

    if ($http_x_forwarded_proto = '') {
        set $http_x_forwarded_proto $scheme;
    }

    ## Application specific logs
    access_log /var/log/nginx/confluence-access.log timing;
    error_log /var/log/nginx/confluence-error.log;

    client_max_body_size 0;
    proxy_read_timeout 1200;
    proxy_connect_timeout 240;

    location / {
        proxy_http_version 1.1;
        proxy_pass http://backendconfluence.domain.com:8091;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $server_name:$server_port;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Upgrade $http_upgrade; # WebSocket support
        proxy_set_header Connection $connection_upgrade; # WebSocket support
    }
}
nginx.conf
# Main Nginx configuration file
worker_processes 4;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
worker_rlimit_nofile 4096;

events {
    worker_connections 2048;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    variables_hash_max_size 1024;
    variables_hash_bucket_size 64;
    server_names_hash_max_size 4096;
    server_names_hash_bucket_size 128;
    types_hash_max_size 2048;
    types_hash_bucket_size 64;

    proxy_read_timeout 2400s;
    client_header_timeout 2400s;
    client_body_timeout 2400s;
    proxy_connect_timeout 75s;
    proxy_send_timeout 2400s;
    proxy_buffer_size 32k;
    proxy_buffers 40 32k;
    proxy_busy_buffers_size 64k;
    proxy_temp_file_write_size 250m;
    proxy_http_version 1.1;
    client_body_buffer_size 128k;

    map $http_upgrade $connection_upgrade { # WebSocket support
        default upgrade;
        '' '';
    }

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    log_format timing 'ip = $remote_addr '
                      'user = \"$remote_user\" '
                      'local_time = \"$time_local\" '
                      'host = $host '
                      'request = \"$request\" '
                      'status = $status '
                      'bytes = $body_bytes_sent '
                      'upstream = \"$upstream_addr\" '
                      'upstream_time = $upstream_response_time '
                      'request_time = $request_time '
                      'referer = \"$http_referer\" '
                      'UA = \"$http_user_agent\"';

    access_log /var/log/nginx/access.log timing;

    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;
    #gzip on;

    include /etc/nginx/conf.d/*.conf;
}
Your problem is the STS header:
add_header Strict-Transport-Security max-age=31536000;
When you add the STS header, the first request to http://example.com:8090 generates a redirect to https://example.com.
That https://example.com response then returns the STS header, and the browser remembers that example.com must always be served over https, no matter what; the port makes no difference.
Now when you make another request to http://example.com:8090, STS kicks in and converts it to https://example.com:8090, which is your problem here.
Because a single port can only speak either http or https, you can't use 8090 both to redirect plain http to https and to redirect https on 8090 to https on 443.
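If browsers have already cached the STS policy, one way out (a sketch based on the Confluence.conf above, not a tested fix) is to accept TLS on 8090 as well and redirect to the canonical host without a port, replacing the first server block:

    server {
        listen 8090 ssl http2;
        server_name confluence.domain.com confluence;

        # Reuse the same certificate as the 443 block so https://confluence.domain.com:8090 handshakes cleanly.
        ssl_certificate /data/rpssl/confluence.pem;
        ssl_certificate_key /data/rpssl/confluence_unencrypted.key;

        # Send everything to the canonical URL, dropping the port.
        return 301 https://confluence.domain.com$request_uri;
    }

Plain-http requests to port 8090 would then get an error rather than a redirect, which is consistent with the point above that one port cannot serve both.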

Nginx defaulting to first config for all www. requests

I host some websites with Node.js and use nginx so that I can host multiple sites on the same server. I've just started getting this problem where, for example, if I go to examplesite1 it works fine, www.examplesite1 works fine as well, and examplesite2 works fine, but when I go to www.examplesite2 it shows the content from examplesite1. Here is the config I'm using:
nginx.conf:
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;
    #gzip on;

    include /etc/nginx/conf.d/*.conf;
    server_names_hash_bucket_size 64;
}
conf.d/blog.jakewalker.xyz.conf:
server {
    listen 80;
    server_name blog.jakewalker.xyz;

    location / {
        proxy_pass http://ghost:2368; # this is to a docker container
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
conf.d/jakewalker.xyz.conf:
server {
    listen 80;
    server_name jakewalker.xyz;

    location / {
        proxy_pass http://sites:2100; # also for a docker container
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
I followed this guide https://www.digitalocean.com/community/tutorials/how-to-host-multiple-node-js-applications-on-a-single-vps-with-nginx-forever-and-crontab on how to set it up.
I'm not sure if it'll help, but here is the DNS for my website:
A jakewalker.xyz {ip of server}
CNAME www.jakewalker.xyz #
A blog.jakewalker.xyz {ip of server}
Am I doing anything wrong?
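Based on the server-name matching behaviour described earlier on this page (a guess, not a confirmed answer): www.jakewalker.xyz is not listed in any server_name, so those requests fall through to whichever server block is loaded first. Listing the www host explicitly would look roughly like this:

    server {
        listen 80;
        server_name jakewalker.xyz www.jakewalker.xyz;   # also match the www host

        location / {
            proxy_pass http://sites:2100; # also for a docker container
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_cache_bypass $http_upgrade;
        }
    }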

use nginx for websockets with two different host/ports

I had my Play project running without SSL, so I decided to use nginx as a front-end proxy server for SSL. I now have two Play projects deployed: one is used for the portal and one for the REST application.
Within my portal project (the front-end UI), I call WebSocket URLs on the rest-api project to fetch data.
The problem is that I get a security error when I use SSL for the portal. I need to understand how I can pass the WebSockets through. Here is my nginx.conf:
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log debug;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;
    #gzip on;
    #include /etc/nginx/conf.d/*.conf;

    server {
        listen 443 ssl;
        server_name 10.100.10.99;

        ssl on;
        ssl_certificate /etc/ssl/certs/punvm-core06.crt;
        ssl_certificate_key /etc/ssl/private/punvm-core06.key;
        ssl_session_timeout 5m;
        ssl_protocols SSLv2 SSLv3 TLSv1;
        ssl_ciphers HIGH:!aNULL:!MD5;
        ssl_prefer_server_ciphers on;

        location / {
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_set_header X-NginX-Proxy true;
            proxy_pass http://matrix09:9701; # portal url
            proxy_redirect off;

            # WebSocket support (nginx 1.4)
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header Host $http_host;
            proxy_read_timeout 86400;
        }
    }
}
The WebSockets are opened from the JavaScript in my portal Play project. From what I understand of nginx, I can give only one proxy_pass.
So I have two URLs:
1. http://matrix09:9700 - my REST URL
2. http://matrix09:9701 - my portal URL
In JavaScript, I open the WebSocket like this:
var WS_appstatus = window['MozWebSocket'] ? MozWebSocket : WebSocket;
var appstate = new WS_appstatus("ws://matrix09:9700/services/reports/v1/realtimestreaming/smpApplicationStatus");
appstate.onmessage = function(event) {
    var datapoint = jQuery.parseJSON(event.data);
    app_status(datapoint);
};
Please suggest.
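A hedged sketch, not from the original post: since the portal page is now served over https, the browser blocks plain ws:// connections as mixed content. One option is to route the rest-api backend through the same HTTPS server block with a second location (nginx allows one proxy_pass per location, not per server) and open the socket as wss:// through nginx. The /services/ prefix is taken from the URL in the JavaScript above:

    # Added inside the existing server { listen 443 ssl; ... } block.
    location /services/ {
        proxy_pass http://matrix09:9700;            # rest-api backend
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;     # WebSocket upgrade
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_read_timeout 86400;
    }

The page would then connect to wss://10.100.10.99/services/reports/v1/realtimestreaming/smpApplicationStatus instead of going directly to ws://matrix09:9700.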
