Nginx Slow/Failing requests when hosting multiple sites

I think this is a somewhat interesting issue, and being new to nginx I'm not sure which configuration I need to look at to resolve it. I am hosting two web applications behind an nginx reverse proxy. If a request comes in for myapp1.com it is routed to my app hosted at 127.0.0.1:3000; if it is for myapp2.com it is routed to my app hosted at 127.0.0.1:3001. The problem is when I try to hit both of them in a relatively short amount of time (i.e., open http://myapp1.com and http://myapp2.com in the browser). It will usually serve the first one and then the request for the other takes too long and fails. What is going wrong with nginx? Below are my configuration files.
Edit:
After connecting to a VPN this issue does not happen. Is this an issue because I am on the same LAN as the host? Is there any way to get around that?
/etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
    # multi_accept on;
}

http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;
    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##
    log_format main_ext '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for" '
                        '"$host" sn="$server_name" rt=$request_time '
                        'ua="$upstream_addr" us="$upstream_status" '
                        'ut="$upstream_response_time" ul="$upstream_response_length" '
                        'cs=$upstream_cache_status';
    access_log /var/log/nginx/access.log main_ext;
    error_log /var/log/nginx/error.log warn;

    ##
    # Gzip Settings
    ##
    gzip on;

    ##
    # Virtual Host Configs
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
/etc/nginx/sites-enabled/default
server {
    listen 80;
    server_name myapp1.com www.myapp1.com;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/myapp1.com/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/myapp1.com/privkey.pem; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

server {
    listen 80;
    server_name myapp2.com www.myapp2.com;

    location / {
        proxy_pass http://127.0.0.1:3001;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
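One thing worth noting about the config above: only the myapp1.com block listens on 443, so HTTPS requests for myapp2.com fall through to that block. A dedicated HTTPS server block for myapp2.com might look roughly like the sketch below (the certificate paths are hypothetical, of the kind Certbot would typically generate):

server {
    listen 443 ssl;
    server_name myapp2.com www.myapp2.com;
    # hypothetical paths; in practice Certbot would manage these lines
    ssl_certificate /etc/letsencrypt/live/myapp2.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/myapp2.com/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    location / {
        proxy_pass http://127.0.0.1:3001;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}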

Related

"Not Found" problem with QuestDB behind NGINX

I am running a QuestDB 6.6.1 server. Now I want to increase the protection of this server and put the web GUI behind an NGINX reverse proxy, as described in the QuestDB blog post that shows how to set up basic authentication.
When I try to open the QuestDB web GUI, the login popup is displayed and I can enter name and password without issues. However, after successfully passing the login popup, I only see the bare text "Not Found" in the browser (note: NOT the NGINX 403 Not Found screen, which I know from other cases). Neither nginx.log nor questdb.log show any entries.
It IS possible to reach the QuestDB web GUI via <server.domain>:9000; no issues there.
The "location" settings are defined in a file reverse_proxy.conf:
server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name server.domain;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;
    ssl_certificate <path>/nginx.crt;
    ssl_certificate_key <path>/nginx.key;
    root /var/www/server.domain/html;
    index index.html index.htm;
    server_name server.domain;

    location /location1 {
        proxy_pass https://localhost:port1;
        proxy_set_header Host $host;
    }

    location /location2 {
        proxy_pass http://localhost:port2/location2;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        client_max_body_size 10M;
    }

    location = /questdb/ {
        auth_basic "Restricted QuestDB";
        auth_basic_user_file <path>/.htpasswd;
        proxy_pass http://localhost:9000;
        proxy_set_header Host $host;
        proxy_read_timeout 300;
        proxy_connect_timeout 120;
        proxy_send_timeout 300;
        proxy_set_header Host $host;
    }
}
reverse_proxy.conf is included from nginx.conf, which looks like this:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
}

http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # For suppression of server version number
    server_tokens off;
    server_names_hash_bucket_size 64;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##
    gzip on;
    gzip_disable "msie6";

    ##
    # Virtual Host Configs
    ##
    map_hash_max_size 262144;
    map_hash_bucket_size 262144;
    include /etc/nginx/conf.d/*.conf;
}
I see you are using QuestDB on a directory. You are proxying to http://localhost:9000/questdb, and QuestDB is saying "Not Found".
To avoid that, you would need to add a slash at the end of proxy_pass http://localhost:9000;, as in proxy_pass http://localhost:9000/;.
The problem then is that relative URLs (/assets, /exec, ...) will not work, and you will need to rewrite them in nginx.
It would probably be easier to just use a subdomain rather than a directory.
Update: this is the nginx config I use. As explained, relative links are broken:
location /questdb/ {
    proxy_pass http://localhost:9000/;
    index index.html index.htm;
    auth_basic "Restricted Content";
    auth_basic_user_file /opt/homebrew/etc/nginx.htpasswd;
}
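For comparison, a subdomain-based setup might look roughly like the sketch below (the server_name and certificate paths are hypothetical placeholders; reuse your existing TLS material or issue a certificate that covers the subdomain):

server {
    listen 443 ssl;
    server_name questdb.server.domain;   # hypothetical subdomain
    ssl_certificate <path>/nginx.crt;
    ssl_certificate_key <path>/nginx.key;

    auth_basic "Restricted QuestDB";
    auth_basic_user_file <path>/.htpasswd;

    location / {
        proxy_pass http://localhost:9000;   # no sub-path, so relative URLs keep working
        proxy_set_header Host $host;
        proxy_read_timeout 300;
    }
}

Because the proxy sits at the root of its own host, QuestDB's relative asset URLs resolve without any rewriting.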

How to find the page nginx is serving

I have inherited an nginx instance. I am barely more than a newbie when it comes to nginx.
When I navigate to the IP address of the box, http://192.168.1.10, I get back a custom page that the previous dev created somehow.
The nginx.conf looks like this:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
    # multi_accept on;
}

http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##
    gzip on;

    ##
    # Virtual Host Configs
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
The conf.d directory is empty.
The sites-enabled directory contains a symlink named default which points to default in sites-available. Here is that file:
server {
    location / {
        proxy_pass http://localhost:3002;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }

    listen 80;
    listen [::]:80;
}
I have looked in /var/www/html and found the default index.nginx-debian.html, but that is not the page being served.
So my question is: where is the page I'm getting back served from? I'm running on an Ubuntu server distro.

nginx: how to set up server blocks to allow subdomain

I have openresty running on my AWS instance (Instance A), and the IP address of the server is already tied to the domain name myapp.john.com.
My app is running on another AWS instance (Instance B) within the same private network. It has the private IP address 192.42.56.87 and the app is running on port 80.
I want to set up my openresty/nginx such that when visiting prod.myapp.john.com, nginx directs me to 192.42.56.87:80, and when visiting test.myapp.john.com, nginx directs me to another instance (Instance C) running the test version of my app, say on 192.xx.xx.xx:80.
Below is the code on Instance A:
Main config file /usr/local/openresty/nginx/conf/nginx.conf is defined as:
# Main NGINX Config File
#user www-data;
worker_processes auto;
pid logs/nginx.pid;
error_log logs/error.log info;
error_log logs/error.log notice;
error_log logs/error.log debug;

events {
    worker_connections 1024;
}

http {
    include mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log logs/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 100000;
    resolver 8.8.8.8 valid=30s ipv6=off;

    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    gzip on;

    # Include all the sites for the domain
    include /usr/local/openresty/nginx/sites/*;
}
/usr/local/openresty/nginx/sites/prod.myapp.john.com is defined as:
server {
    listen 80;
    listen [::]:80;
    server_name prod.myapp.john.com;   # this does not work; but "myapp.john.com" works
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name prod.myapp.john.com;   # this does not work; but "myapp.john.com" works
    ssl_certificate /etc/letsencrypt/live/myapp.john.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/myapp.john.com/privkey.pem;

    location / {
        proxy_pass http://192.42.56.87:80/;
        expires 0;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffer_size 128k;
        proxy_buffers 4 256k;
        proxy_busy_buffers_size 256k;
    }
}
Now, in the Chrome browser, when I visit prod.myapp.john.com there is no response at all, since the request never gets to my Instance A;
However, if I change
server_name prod.myapp.john.com
to
server_name myapp.john.com
it works and the web page gets rendered.
Why?
How can I include more site files in /usr/local/openresty/nginx/sites/ and set up the server blocks correctly so as to serve more subdomains on my site?
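As a rough sketch of what an additional site file could look like (the test-instance IP is a placeholder from the question, the certificate paths simply reuse the existing ones, and the subdomains must also resolve in DNS to Instance A for any of this to be reached):

# /usr/local/openresty/nginx/sites/test.myapp.john.com  (hypothetical file)
server {
    listen 80;
    listen [::]:80;
    server_name test.myapp.john.com;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name test.myapp.john.com;
    # assumes the certificate actually covers this subdomain (wildcard or SAN)
    ssl_certificate /etc/letsencrypt/live/myapp.john.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/myapp.john.com/privkey.pem;

    location / {
        proxy_pass http://192.xx.xx.xx:80/;   # Instance C, placeholder IP from the question
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}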

Nginx is not able to handle a large number of requests

I have a scenario where nginx is in front of an Artifactory server.
Recently, while trying to pull a large number of Docker images in a for loop, all at the same time (the first test was with 200 images, the second test with 120 images), access to Artifactory gets blocked, as nginx is busy processing all the requests and users are not able to reach it.
My nginx server is running with 4 CPU cores and 8192 MB of RAM.
I have tried to improve the handling of files on the server by adding the below:
sendfile on;
sendfile_max_chunk 512k;
tcp_nopush on;
This made it a bit better (though, of course, pulls of images of 1 GB+ take much more time, due to the chunk size); still, access to the UI would cause a lot of timeouts.
Is there something else that I can do to improve nginx performance whenever a bigger load is pushed through it?
I think my last option is to increase the size of the machine (more CPUs) as well as the number of worker processes in nginx (8 to 16).
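Before resizing the machine, a couple of commonly used directives may be worth experimenting with; the sketch below is illustrative only (the values are guesses, the upstream name is made up, and {{ appserver }} is the same placeholder used in the full config that follows):

events {
    worker_connections 19000;
    multi_accept on;                 # let each worker accept several new connections per wake-up
}

http {
    # reuse connections to Artifactory instead of opening a new one per request
    upstream artifactory_backend {
        server {{ appserver }}:8081;
        keepalive 32;                # idle keepalive connections kept per worker
    }

    server {
        location / {
            proxy_pass http://artifactory_backend/artifactory/;
            proxy_http_version 1.1;
            proxy_set_header Connection "";   # required for upstream keepalive
        }
    }
}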
The full nginx.conf file follows below:
user www-data;
worker_processes 8;
pid /var/run/nginx.pid;

events {
    worker_connections 19000;
}

http {
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    gzip on;
    gzip_disable "msie6";
    sendfile on;
    sendfile_max_chunk 512k;
    tcp_nopush on;
    set_real_ip_from 138.190.190.168;
    real_ip_header X-Forwarded-For;

    log_format custome '$remote_addr - $realip_remote_addr - $remote_user [$time_local] $request_time'
                       '"$request" $status $body_bytes_sent '
                       '"$http_referer" "$http_user_agent"';

    server {
        listen 80 default;
        listen [::]:80 default;
        server_name _;
        return 301 https://$server_name$request_uri;
    }

    ###########################################################
    ## this configuration was generated by JFrog Artifactory ##
    ###########################################################
    ## add ssl entries when https has been set in config
    ssl_certificate /etc/ssl/certs/{{ hostname }}.cer;
    ssl_certificate_key /etc/ssl/private/{{ hostname }}.key;
    ssl_session_cache shared:SSL:1m;
    ssl_prefer_server_ciphers on;

    ## server configuration
    server {
        listen 443 ssl;
        server_name ~(?<repo>.+)\.{{ hostname }} {{ hostname }} _;

        if ($http_x_forwarded_proto = '') {
            set $http_x_forwarded_proto $scheme;
        }

        ## Application specific logs
        access_log /var/log/nginx/{{ hostname }}-access.log custome;
        error_log /var/log/nginx/{{ hostname }}-error.log warn;

        rewrite ^/$ /webapp/ redirect;
        rewrite ^//?(/webapp)?$ /webapp/ redirect;
        rewrite ^/(v1|v2)/(.*) /api/docker/$repo/$1/$2;

        chunked_transfer_encoding on;
        client_max_body_size 0;

        location / {
            proxy_read_timeout 900;
            proxy_max_temp_file_size 10240m;
            proxy_pass_header Server;
            proxy_cookie_path ~*^/.* /;
            proxy_pass http://{{ appserver }}:8081/artifactory/;
            proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
            proxy_set_header Host $http_host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }
    }
}
Thanks for the tips.
Cheers,
Ricardo

use nginx for websockets with two different host/ports

I have my Play project running without SSL, so I decided to use nginx as a front-end proxy server for SSL. Now I have two Play projects deployed: one is used for the portal and one for the REST application stuff.
Within my portal project (the front-end UI), I call WebSocket URLs that access the rest-api project to fetch data.
The problem is that I get a security error when I use SSL for the portal. I need to understand how I can pass the WebSockets through. Here is my nginx.conf:
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log debug;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;
    #gzip on;
    #include /etc/nginx/conf.d/*.conf;

    server {
        listen 443 ssl;
        server_name 10.100.10.99;

        ssl on;
        ssl_certificate /etc/ssl/certs/punvm-core06.crt;
        ssl_certificate_key /etc/ssl/private/punvm-core06.key;
        ssl_session_timeout 5m;
        ssl_protocols SSLv2 SSLv3 TLSv1;
        ssl_ciphers HIGH:!aNULL:!MD5;
        ssl_prefer_server_ciphers on;

        location / {
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_set_header X-NginX-Proxy true;
            proxy_pass http://matrix09:9701; # portal url
            proxy_redirect off;

            # WebSocket support (nginx 1.4)
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header Host $http_host;
            proxy_read_timeout 86400;
        }
    }
}
The WebSockets are called from my JavaScript in my portal Play project. From what I understand of nginx, I can give only one proxy_pass.
So I have two sets of URLs:
1. http://matrix09:9700 - my REST URL
2. http://matrix09:9701 - my portal URL
In JavaScript, I call the WebSocket like this:
var WS_appstatus = window['MozWebSocket'] ? MozWebSocket : WebSocket;
var appstate = new WS_appstatus("ws://matrix09:9700/services/reports/v1/realtimestreaming/smpApplicationStatus");
appstate.onmessage = function(event) {
    var datapoint = jQuery.parseJSON(event.data);
    app_status(datapoint);
};
Please suggest.
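One pattern that is often used for this is a second location block in the same SSL server, so that the REST backend's WebSocket endpoints are reachable over the same HTTPS origin as the portal. A rough sketch, assuming the REST WebSocket paths all start with /services/ as in the JavaScript above:

# inside the existing "listen 443 ssl" server block
location /services/ {
    proxy_pass http://matrix09:9700;          # REST backend
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $http_host;
    proxy_read_timeout 86400;
}

The JavaScript would then connect to wss://10.100.10.99/services/... so that the WebSocket traffic shares the page's TLS origin instead of going to ws://matrix09:9700 directly.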
