Nginx redirect config issue

I have a Bitnami nginx container deployed in OpenShift that serves my application. The issue I am facing is that the redirect is not working: in the logs there is no indication that the request is ever caught by a proxy_pass location block.
The idea is that a request to app.com/backend1/api/something should be forwarded to service1.com/backend1/api/something, and the same goes for service2.
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    upstream service1 {
        server service1.com;
    }

    upstream service2 {
        server service2.com;
    }

    server {
        listen 8443 ssl;
        listen [::]:8443 http2 ssl;
        server_name app.com;

        error_log /opt/bitnami/nginx/error.log debug;

        ssl on;
        ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
        ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
        ssl_dhparam /etc/ssl/certs/dhparam.pem;
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_prefer_server_ciphers on;
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
        ssl_session_cache shared:SSL:10m;
        ssl_session_timeout 10m;
        keepalive_timeout 70;

        include /opt/bitnami/nginx/conf/mime.types;
        root /opt/bitnami/nginx/html;

        location ~ ^/backend1/api/(.*)$ {
            proxy_pass https://service1/backend1/api/$1;
            proxy_http_version 1.1;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        location ~ ^/backend2/api/(.*)$ {
            proxy_pass https://service2/backend2/api/$1;
            proxy_http_version 1.1;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        location / {
            try_files $uri /index.html;
        }
    }
}
I have also tried changing the order of the location blocks, as well as moving the root directive, but without success.
Any ideas on how to resolve this issue?
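One thing that may be worth checking, purely as an assumption since the logs are not shown: with proxy_pass https://service1, nginx connects to the port declared in the upstream block, which defaults to 80 when none is given, and it does not send SNI to the upstream unless told to. Below is a minimal sketch of the backend1 route rewritten as a prefix location, with the upstream port and SNI made explicit (hostnames are taken from the question; everything else is illustrative, not a confirmed fix):
upstream service1 {
    server service1.com:443;               # assumption: the backend really listens on 443
}

location /backend1/api/ {
    # With no URI part on proxy_pass, the original request URI is passed through
    # unchanged, so /backend1/api/x is forwarded to service1 as /backend1/api/x
    # without needing a regex capture.
    proxy_pass https://service1;
    proxy_http_version 1.1;
    proxy_ssl_server_name on;              # send SNI to the upstream
    proxy_ssl_name service1.com;           # name used for SNI and certificate checks
    proxy_set_header Host service1.com;    # assumption: the backend routes by Host
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}
The same pattern would apply to the backend2 block. If requests still never reach these locations, the debug error_log configured above should at least show which location nginx selects for each request.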

Related

Jenkins HTTPS reverse proxy configuration is not working in CentOS

I have installed nginx and a Jenkins server on CentOS. I want to reach my Jenkins server over HTTPS, so I have configured a reverse proxy, but it's not working and I'm getting an error.
Below are my configuration values.
File: /etc/nginx/sites-enabled/default
server {
    listen 80;
    return 301 https://$host$request_uri;
}

server {
    listen 443;
    server_name 134.68.44.235;

    ssl_certificate /etc/nginx/cert.crt;
    ssl_certificate_key /etc/nginx/cert.key;
    ssl on;
    ssl_session_cache builtin:1000 shared:SSL:10m;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
    ssl_prefer_server_ciphers on;

    access_log /var/log/nginx/jenkins.access.log;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Fix the "It appears that your reverse proxy set up is broken" error.
        proxy_pass http://localhost:8080;
        proxy_read_timeout 90;

        proxy_redirect http://localhost:8080 https://134.68.44.235;
    }
}
File: /etc/default/jenkins
JENKINS_ARGS="--webroot=/var/cache/jenkins/war --httpListenAddress=127.0.0.1 --httpPort=$HTTP_PORT -ajp13Port=$AJP_PORT"
Note: I can access the Jenkins site using http://134.68.44.235:8080, but I can't access it over HTTPS. I installed the certificates and followed the steps from this article.
I'm not sure what I'm missing; could anyone kindly advise me on this?
The snippet below should work well. You'll have to edit the placeholder sections to match the FQDN or IP you want Jenkins to serve the web UI on, together with the valid SSL certificate path and SSL key path if you want to provide HTTPS.
upstream app_server {
    server 127.0.0.1:8080 fail_timeout=3;
}

server {
    listen 80;
    server_name <FQDN OR IP>;
    return 301 https://$server_name$request_uri;
}

server {
    listen 443;
    server_name <FQDN OR IP>;

    ssl on;
    ssl_certificate /<PATH>/<TO>/<YOUR SSL CERT>;
    ssl_certificate_key /<PATH>/<TO>/<YOUR SSL KEY>;
    ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers RC4:HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    # Redirect any /* request to port 8080
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        if ($request_method = 'GET') {
            add_header 'Access-Control-Allow-Origin' '*';
            add_header 'Access-Control-Allow-Headers' '*';
            add_header 'Access-Control-Allow-Methods' 'GET, POST';
            add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range,Location';
        }

        proxy_redirect http:// https://;
        proxy_pass http://app_server;
    }
}

Why does nginx still redirect URLs when not configured for these URLs - reverse proxy setup

I have the following conf
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 65;

    server {
        listen 8443 ssl;
        server_name unifi.bob.net;

        ssl on;
        ssl_protocols TLSv1.2;
        ssl_prefer_server_ciphers on;
        ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5;
        ssl_certificate /var/lib/docker/volumes/letsencrypt/_data/live/unifi.bob.net/fullchain.pem;
        ssl_certificate_key /var/lib/docker/volumes/letsencrypt/_data/live/unifi.bob.net/privkey.pem;

        location /wss/ {
            proxy_pass https://192.168.1.3:8443;
            proxy_http_version 1.1;
            proxy_buffering off;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";
            proxy_read_timeout 86400;
        }

        location / {
            proxy_pass https://192.168.1.3:8443/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forward-For $proxy_add_x_forwarded_for;
        }
    }

    server {
        listen 8443 ssl;
        server_name nas.bob.net;

        ssl on;
        ssl_protocols TLSv1.2;
        ssl_prefer_server_ciphers on;
        ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5;
        ssl_certificate /var/lib/docker/volumes/letsencrypt/_data/live/nas.bob.net/fullchain.pem;
        ssl_certificate_key /var/lib/docker/volumes/letsencrypt/_data/live/nas.bob.net/privkey.pem;

        location / {
            proxy_pass http://192.168.1.254:8080/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forward-For $proxy_add_x_forwarded_for;
        }
    }

    server {
        listen 880;
        server_name unifi.bob.net;
        return 301 https://unifi.bob.net$request_uri;
    }

    server {
        listen 880;
        server_name nas.bob.net;
        return 301 https://nas.bob.net$request_uri;
    }
}
This all works fine: if I hit http://nas.bob.net I get redirected to https://nas.bob.net and through to the internal resource, and the same works for unifi.bob.net.
However, if I try my external IP or an A record that isn't configured here, I get redirected to the unifi resource.
Should it not just do nothing, or am I missing something in the config?
Thanks
Found the answer: I had no default_server set in any config. Now that I've added one, everything works as expected.
Thanks
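For reference, a minimal sketch of what such a catch-all block could look like for the 8443 listener (the certificate paths are reused from the question purely as placeholders, and returning 444 instead of serving anything is my own choice):
server {
    # default_server handles any request whose Host header matches no other
    # server_name on this port; without it, nginx falls back to the first
    # server block defined for the port (here, the unifi one).
    listen 8443 ssl default_server;
    server_name _;
    ssl_certificate /var/lib/docker/volumes/letsencrypt/_data/live/unifi.bob.net/fullchain.pem;
    ssl_certificate_key /var/lib/docker/volumes/letsencrypt/_data/live/unifi.bob.net/privkey.pem;
    return 444;    # close the connection without sending a response
}
A similar default_server block on port 880 would keep the plain-HTTP redirect servers from catching unknown hosts as well.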

How to set NGINX to URL-mask a CNAME record

I am migrating some infrastructure and I am having the following issue.
I have a CNAME record pointing api.oldserver.com to api.newserver.com, and I am using a multi-domain SSL certificate with Nginx. It all works great!
(nginx config below)
The issue is that whenever someone navigates to api.oldserver.com, the URL bar shows that the user is actually on api.newserver.com. I wish the user could still see api.oldserver.com.
server {
    listen 80;
    server_name _;
    return 301 http://$http_host$request_uri;
}

server {
    listen 443 ssl;
    server_name api.newerserver.com;

    ssl_certificate "/etc/nginx/domain.crt";
    ssl_certificate_key "/etc/nginx/domain.key";
    ssl_session_timeout 10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_pass http://127.0.0.1:9000;
    }
}
This is happening because of the redirect. Consider creating another server block for api.oldserver.com that has the appropriate certificate and the same proxy setup, so requests to the old name are handled the same way instead of being redirected.
server {
    listen 80;
    server_name _;
    return 301 http://$http_host$request_uri;
}

server {
    listen 443 ssl;
    server_name api.newerserver.com;

    ssl_certificate "/etc/nginx/domain.crt";
    ssl_certificate_key "/etc/nginx/domain.key";
    ssl_session_timeout 10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_pass http://127.0.0.1:9000;
    }
}

server {
    listen 443 ssl;
    server_name api.oldserver.com;

    ssl_certificate "/etc/nginx/olddomain.crt";
    ssl_certificate_key "/etc/nginx/olddomain.key";
    ssl_session_timeout 10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_pass http://127.0.0.1:9000;
    }
}
Or something like that...

Deploying a Node.js app with proxy_pass and SSL enabled

I have a Meteor app which I am running as is the norm, and it runs on my server at
http://my-ip:3000
I have nginx installed, and I can access the Meteor app using this sites-enabled configuration.
My file looks like this:
server {
    listen *:80;
    server_name _;

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
    }
}
I am following this tutorial to get Let's Encrypt to work: https://gist.github.com/cecilemuller/a26737699a7e70a7093d4dc115915de8
How would I enable SSL in my configuration above?
To run with SSL, make sure you have a Let's Encrypt certificate. This is my configuration:
server {
    listen 80;
    return 301 https://$host$request_uri;
}

server {
    listen 443;
    server_name domain.com;

    ssl_certificate /etc/letsencrypt/live/domain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/domain.com/privkey.pem;
    ssl_trusted_certificate /etc/letsencrypt/live/domain.com/fullchain.pem;
    ssl on;
    ssl_session_cache builtin:1000 shared:SSL:10m;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
    ssl_prefer_server_ciphers on;

    access_log /var/log/nginx/meteor.access.log;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Fix the "It appears that your reverse proxy set up is broken" error.
        proxy_pass http://localhost:3000;
        proxy_read_timeout 90;

        proxy_redirect http://localhost:3000 https://domain.com;
    }
}
The above runs the Meteor app with SSL enabled.

How to DRY nginx configuration

I have nginx config for the current and the legacy application, where the only difference between the two server blocks is the DNS-specific entries and the root path. How can I put the shared parts of the config in a variable or something similar and then reference it in both server blocks?
server {
    listen 0.0.0.0:443 ssl;
    server_name mysite.com;

    ssl_certificate /etc/ssl/server.crt;
    ssl_certificate_key /etc/ssl/server.key;
    ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers RC4:HIGH:!aNULL:!MD5;
    ssl_session_cache shared:SSL:15m;
    ssl_session_timeout 15m;

    root /home/deployer/apps/myapp/current/public;

    if ($request_method !~ ^(GET|HEAD|POST)$ ) {
        return 444;
    }

    if ($http_user_agent ~* LWP::Simple|BBBike|wget) {
        return 403;
    }

    if ($http_user_agent ~* (spider|AcoiRobot|msnbot|scrapbot|catall|wget) ) {
        return 403;
    }

    location ^~ /assets/ {
        gzip_static on;
        gzip_vary on;
        expires max;
        add_header Cache-Control public;
    }

    location ~ \.(gif|png|jpe?g|JPE?G|GIF|PNG)$ {
        valid_referers none blocked mysite.com *.mysite.com;
        if ($invalid_referer) {
            return 403;
        }
    }

    location /evil/ {
        valid_referers none blocked mysite.com *.mysite.com;
        if ($invalid_referer) {
            return 403;
        }
    }

    try_files $uri/index.html $uri @puma;

    location @puma {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Host $http_host;
        proxy_redirect off;
        proxy_pass http://puma;
    }

    error_page 500 502 503 504 /500.html;
    client_max_body_size 4G;
    keepalive_timeout 10;
}
How can I DRY up everything below the root line?
Time has proven Alexey Ten's comment about using include to be the right way to go.
We use this in production:
File structure in /etc/nginx
nginx.conf
sites-enabled/app_config
modules/shared_serve
modules/shared_ssl_settings
In /etc/nginx/sites-enabled/app_config:
upstream puma {
    server unix:/tmp/puma.socket fail_timeout=1;
}

server {
    server_name example.com;
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    include modules/shared_ssl_settings;
    include modules/shared_serve;
}
In /etc/nginx/modules/shared_ssl_settings:
listen 443 ssl;
listen [::]:443;
ssl on;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers On;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:30m;
ssl_stapling on;
ssl_stapling_verify on;
add_header Strict-Transport-Security max-age=15768000;
In /etc/nginx/modules/shared_serve:
location ~ \.(php|aspx|asp|myadmin)$ { return 444; log_not_found off; }

root /home/deployer/apps/example_app/current/public;
try_files $uri/index.html $uri @puma;

location @puma {
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto https;
    proxy_set_header Host $http_host;
    proxy_redirect off;
    proxy_pass http://puma;
}

error_page 500 502 503 504 /500.html;
client_max_body_size 4G;
keepalive_timeout 10;
The only gotcha is that your deploy script has to ensure the file structure in /etc/nginx. Naturally, you can name your module directory anything else. You might even keep the includable files right in /etc/nginx without a subdirectory.
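For completeness, a rough sketch of the top-level include that pulls the per-app file in (assuming a Debian-style layout; skip this if your nginx.conf already does it):
# Inside /etc/nginx/nginx.conf
http {
    include /etc/nginx/mime.types;

    # Each file in sites-enabled contributes a server {} block, which in turn
    # includes the shared files from /etc/nginx/modules.
    include /etc/nginx/sites-enabled/*;
}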
You could use a map to define which app root to use based on $host:
map $host $app_root {
    default          /home/deployer/apps/myapp/current/public;
    legacy.mysite.lv /home/deployer/apps/myapp/legacy/public;
}
Add another server_name directive to match your legacy app (use the same name in the map). Then use the variable in your root directive:
root $app_root;
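A rough sketch of how the two pieces could fit together (hostnames are taken from the map example above; the rest of the server block is elided):
map $host $app_root {
    default          /home/deployer/apps/myapp/current/public;
    legacy.mysite.lv /home/deployer/apps/myapp/legacy/public;
}

server {
    listen 0.0.0.0:443 ssl;
    # Both hostnames share this block; $app_root is resolved per request.
    server_name mysite.com legacy.mysite.lv;

    root $app_root;

    # ... shared SSL settings and location blocks from the question ...
}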
