Proxy Pass to the same domain but different endpoint using TLS - nginx

I'm trying to add two endpoints to my nginx.conf file. The main purpose is to be able to send a POST request to the first endpoint so I can log the request body to the console (stdout). The second endpoint is needed because I have to proxy_pass to another endpoint in order for the body to be written to stdout (I'm following this tutorial: https://matthias-kainer.de/blog/posts/logging-client-console-errors-with-nginx/).
The problem I'm facing is that the server name can be anything, so I have the server_name directive set to _. I have tried a lot of things, but I always get some error: 502 - Bad Gateway, 400 - Bad Request, or 400 - No required SSL certificate was sent.
My nginx.conf file (my current attempt) is this:
worker_processes 1;
error_log /dev/stdout warn;

events {
    worker_connections 1024;
}

http {
    resolver 127.0.0.11 valid=30s;
    resolver_timeout 20s;

    access_log stdout;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    log_format error_trace '$remote_addr - $remote_user $request_time $upstream_response_time '
                           '[$time_local] "$request" $status $body_bytes_sent "Client Error: $request_body" "$http_referer" '
                           '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /dev/stdout main;
    error_log /dev/stdout debug;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    default_type application/octet-stream;

    gzip on;
    gzip_http_version 1.1;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_proxied any;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript text/x-js;
    gzip_buffers 16 8k;
    gzip_disable "MSIE [1-6].(?!.*SV1)";

    server {
        listen 60000 ssl http2 default_server;
        root /var/www/html;
        server_name _;
        server_tokens off;

        client_body_buffer_size 1k;
        client_header_buffer_size 1k;
        client_max_body_size 1k;
        large_client_header_buffers 4 16k;

        ssl_certificate /some/path/some-file.pem;
        ssl_certificate_key /some/path/some-file-key.key;
        ssl_trusted_certificate /some/path/some-certificate.pem;
        ssl_session_cache shared:SSL:50m;
        ssl_session_timeout 180m;
        ssl_session_tickets off;
        ssl_protocols TLSv1.2;
        ssl_prefer_server_ciphers on;
        ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:ECDHE-RSA-AES128-GCM-SHA256:AES256+EECDH:DHE-RSA-AES128-GCM-SHA256:AES256+EDH:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
        ssl_stapling on;
        ssl_stapling_verify on;
        ssl_verify_client on;
        ssl_verify_depth 10;
        ssl_client_certificate /some/path/some-certificate.pem;

        add_header Strict-Transport-Security "max-age=31536000" always;
        add_header X-Frame-Options SAMEORIGIN;
        add_header X-Content-Type-Options nosniff;
        add_header X-XSS-Protection "1; mode=block";

        location = / {
            try_files $uri$args $uri$args/ /index.html;
        }

        location = /client_error_trace {
            access_log /dev/stdout error_trace;
            proxy_pass https://127.0.0.1:60000/client_error_trace_proxy;
            proxy_redirect off;
            proxy_set_header Host $host;
        }

        location = /client_error_trace_proxy {
            access_log off;
            return 200 'Error logged';
        }

        error_page 404 /;
    }
}
With this file, I'm getting a 400 - Bad Request - No required SSL certificate was sent error. Any hint or help would be much appreciated >_<
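For context, ssl_verify_client on applies to every TLS connection accepted on port 60000, including the one nginx opens to itself via proxy_pass https://127.0.0.1:60000/..., and that internal connection never presents a client certificate, which matches the 400 error text. A minimal sketch of one way around it, assuming the internal hop can stay on plain HTTP (the loopback-only port 60001 and the extra server block are illustrative, not part of the original config):

# Separate plain-HTTP server bound to loopback, so the self-referential
# proxy_pass is not subject to ssl_verify_client on the TLS server.
server {
    listen 127.0.0.1:60001;
    access_log off;

    location = /client_error_trace_proxy {
        return 200 'Error logged';
    }
}

# ...and inside the existing TLS server block:
location = /client_error_trace {
    access_log /dev/stdout error_trace;   # $request_body is populated because the request is proxied
    proxy_pass http://127.0.0.1:60001/client_error_trace_proxy;
    proxy_redirect off;
    proxy_set_header Host $host;
}

Alternatively, keeping the TLS hop and adding proxy_ssl_certificate / proxy_ssl_certificate_key to the /client_error_trace location would let nginx present a client certificate to itself.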

Related

Nginx config: Embedded variables are showing as empty when checking the container logs

I'm trying to use the proxy_set_header directive in my nginx config to add a request header. However, my nginx container can't start because of this error.
When I check the nginx config in my container, it looks like the variable is empty, so nginx treats it as a missing argument.
Here is my nginx.conf:
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    server_names_hash_bucket_size 256;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [cache:$upstream_cache_status] [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;

    gzip on;
    gzip_disable "msie6";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/rss+xml text/javascript font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;

    include /etc/nginx/conf.d/*.conf;

    server {
        listen 80;
        server_name ${DOMAIN_NAME_CLIENT};
        large_client_header_buffers 4 16k;
        proxy_buffer_size 128k;
        proxy_buffers 4 256k;
        proxy_busy_buffers_size 256k;
        proxy_read_timeout 1800;
        proxy_connect_timeout 1800;

        location /graphql {
            auth_basic off;
            proxy_pass http://${PRIVATE_IP_CLIENT}:3000;
            proxy_set_header HOST nginx;
            proxy_pass_request_headers on;
            limit_except GET POST OPTIONS { deny all; }
        }

        location / {
            auth_basic off;
            limit_except GET POST { deny all; }
            proxy_pass http://${PRIVATE_IP_CLIENT}:3000;
            proxy_pass_request_headers on;
            proxy_set_header proxied nginx;
        }
    }
}
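For context, nginx never expands ${...} environment variables in its config files; something has to render the file before nginx starts. A minimal sketch of the template approach used by the official nginx Docker image (1.19 and later), where files under /etc/nginx/templates/*.template are run through envsubst into /etc/nginx/conf.d/ at container start; the template file name below is illustrative, and on a different base image the same rendering would have to happen in a custom entrypoint:

# /etc/nginx/templates/default.conf.template
# Rendered to /etc/nginx/conf.d/default.conf by the image's entrypoint via envsubst,
# so ${DOMAIN_NAME_CLIENT} and ${PRIVATE_IP_CLIENT} become literal values before nginx loads it.
server {
    listen 80;
    server_name ${DOMAIN_NAME_CLIENT};

    location / {
        proxy_pass http://${PRIVATE_IP_CLIENT}:3000;
        proxy_pass_request_headers on;
        proxy_set_header proxied nginx;
    }
}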

Nginx https reverse proxy is too slow

I have implemented an Nginx cache with an HTTPS reverse proxy on CentOS. The response time is more than 1.5 seconds for each request. The nginx server has 4 cores and 8 GB of RAM.
My configuration looks like this (nginx.conf):
user nginx;
worker_processes auto;
worker_rlimit_nofile 100000;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;

# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 80000;
    use epoll;
    multi_accept on;
}

http {
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    log_format rt_cache '$remote_addr - $upstream_cache_status [$time_local] '
                        '"$request" $status $body_bytes_sent '
                        '"$http_referer" "$http_user_agent"';
    # Below pattern will print
    # Time stamp | Client IP | client Dev apps Name | Request | Status Returned | Time taken in ms | size Returned in bytes | Referer | hit or miss | User agent
    log_format bf_log_format '[$time_local]|'
                             '$remote_addr|'
                             '$http_x_developer_username|$http_x_forwarded_for|'
                             '"$request"|'
                             '$status|$upstream_response_time|$body_bytes_sent|'
                             '"$http_referer"|'
                             '"$upstream_cache_status"|'
                             '"$http_user_agent"';
    log_format json_log_format escape=json '{'
        '"time": "$time_iso8601",'
        '"trace_id": "$request_id",'
        '"http": {'
            '"body_bytes_sent": "$body_bytes_sent",'
            '"x_developer_username": "$http_x_developer_username",'
            '"remote_addr": "$remote_addr",'
            '"method": "$request_method",'
            '"request": "$request_uri",'
            '"schema": "$scheme",'
            '"request_time": "$request_time",'
            '"host": "$host",'
            '"uri": "$uri",'
            '"user_agent": "$http_user_agent",'
            '"status": "$status"'
        '},'
        '"proxy": {'
            '"host": "$proxy_host"'
        '},'
        '"upstream": {'
            '"response_time": "$upstream_response_time sec",'
            '"cache_status": "$upstream_cache_status"'
        '}'
    '}';
    # access_log /var/log/nginx/access.log main;
    # access_log /var/log/nginx/access.log json_log_format;
    access_log off;

    sendfile on;
    sendfile_max_chunk 512k;
    # directio 4m;
    # directio_alignment 512;
    tcp_nopush on;
    tcp_nodelay on;
    reset_timedout_connection on;
    keepalive_requests 100000;
    types_hash_max_size 2048;

    # reduce the data that needs to be sent over network -- for testing environment
    gzip on;
    # gzip_static on;
    gzip_min_length 10240;
    gzip_comp_level 1;
    gzip_vary on;
    gzip_disable msie6;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types
        text/css
        text/javascript
        text/xml
        text/plain
        text/x-component
        application/javascript
        application/x-javascript
        application/json
        application/xml
        application/rss+xml
        application/atom+xml
        font/truetype
        font/opentype
        application/vnd.ms-fontobject
        image/svg+xml;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    proxy_cache_path /opt/nginx/cache levels=1:2 keys_zone=api-cache:3000m max_size=100g inactive=43200m use_temp_path=off;
    proxy_temp_path /opt/nginx/cache/other;

    include /etc/nginx/conf.d/ssl.conf;
}
My ssl.conf looks like this:
server {
    server_name _;
    root /usr/share/nginx/html;
    listen 443 ssl http2 default_server;
    listen [::]:443 ssl;

    ssl_certificate "/etc/private/ssl/cert.pem";
    ssl_certificate_key "/etc/private/ssl/key.pem";
    # ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;
    keepalive_timeout 100;

    # Load configuration files for the default server block.
    include /etc/nginx/default.d/*.conf;

    location / {
    }

    error_page 404 /404.html;
    location = /40x.html {
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
    }

    location /health {
        default_type application/json;
        return 200 '{"status":"UP"}';
    }

    location /nginx-status {
        stub_status;
    }

    location /trellotest {
        proxy_cache_bypass $http_no_cache_purge $arg_nocache;
        proxy_cache_methods GET POST;
        add_header Cache-Control "public";
        proxy_cache api-cache;
        proxy_cache_valid 200 40320m;
        add_header X-Cache $upstream_cache_status;
        add_header X-Time $request_time;
        proxy_ignore_headers X-Accel-Expires Expires Cache-Control;
        proxy_pass https://mytrelloapp;
    }
}
If possible, could anyone please advise me whether there is any way to improve the above configuration?
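One thing that may be worth checking, since every proxied request here otherwise pays for a fresh TLS handshake to the backend: reusing upstream connections. A minimal sketch, assuming mytrelloapp is an HTTPS backend; the upstream name trello_backend and the keepalive size are illustrative, the upstream block goes at http level, and the proxy_* lines go inside the existing /trellotest location alongside the cache settings:

upstream trello_backend {
    server mytrelloapp:443;
    keepalive 32;                    # pool of idle upstream connections kept per worker
}

location /trellotest {
    proxy_http_version 1.1;          # required for upstream keepalive
    proxy_set_header Connection "";  # don't forward "Connection: close"
    proxy_ssl_server_name on;        # send SNI if the backend expects it
    proxy_pass https://trello_backend;
    # existing proxy_cache directives stay as they are
}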

nginx: nginx: [emerg] "upstream" directive is not allowed here in /etc/nginx/sites-enabled/www.example.com.conf

Please, can anyone help me? I have to set up load balancing on a reverse proxy server that was not configured by me.
When I add the upstream directive, it gives an error. I have already tried placing it inside http {} and inside the included site configuration.
My nginx.conf:
load_module /usr/lib64/nginx/modules/ngx_stream_module.so;
user nginx;
worker_processes 16;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    client_max_body_size 20m;
    proxy_read_timeout 3600;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Security #
    add_header Strict-Transport-Security "max-age=63072000; includeSubdomains";
    ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
    keepalive_timeout 750;
    server_tokens off;
    #more_clear_headers 'Server' 'X-Powered-By' 'X-Content-Powered-By';
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Content-Type-Options nosniff;
    add_header Content-Security-Policy "frame-ancestors digital.fortaleza.ce.gov.br epgm.pgm.fortaleza.ce.gov.br revista.pgm.fortaleza.ce.gov.br";
    add_header X-Frame-Options "ALLOW-FROM digital.fortaleza.ce.gov.br epgm.pgm.fortaleza.ce.gov.br";
    add_header X-Frame-Options SAMEORIGIN;
    add_header X-XSS-Protection "1; mode=block";

    # Compression
    gzip on;
    gzip_proxied any;
    gzip_vary on;
    gzip_disable "MSIE [1-6]\.(?!.*SV1)";
    gzip_disable "msie6";
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

    # SSL Certificate Security
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_dhparam /etc/nginx/ssl/dhparam4096.pem;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    # Proxy
    proxy_hide_header on;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $host;
    proxy_set_header X-Forwarded-Server $host;

    server {
        listen 80;
        error_page 404 /404.html;

        location = /basic_status {
            stub_status;
            allow 172.30.50.100;
            allow 10.0.10.100;
            deny all;
        }

        location / {
            return 301 https://www.example.com.br;
        }
    }

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*.conf;
}
My www.example.com.br config file inside sites-enabled:
upstream example {
    server 192.168.0.1;
    server 192.168.0.2;
}

server {
    listen 443 ssl;
    server_name www.example.com.br;

    ssl_certificate /etc/nginx/certificates/bundle-pgm.crt;
    ssl_certificate_key /etc/nginx/certificates/privatekey.key;

    access_log /var/log/nginx/www.example.com.br/www.example.com.br_access.log;
    error_log /var/log/nginx/www.example.com.br/www.example.com.br error;

    location / {
        proxy_pass http://example;
    }
}

server {
    if ($host = www.example.com.br) {
        return 301 https://$host$request_uri;
    }
    listen 80;
    server_name www.example.com.br;
    return 404;
}
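For reference, the upstream block is only valid directly inside the http {} context; the [emerg] usually means the directive ends up somewhere else, for example inside a server {} block or in a file that is also included from one. A minimal sketch of the nesting nginx expects, using the paths from the question:

# /etc/nginx/nginx.conf
http {
    # ...other http-level directives...
    include /etc/nginx/sites-enabled/*.conf;   # pulled in at http level
}

# /etc/nginx/sites-enabled/www.example.com.br.conf
upstream example {                             # top level of the included file = http context
    server 192.168.0.1;
    server 192.168.0.2;
}

server {
    listen 443 ssl;
    server_name www.example.com.br;

    location / {
        proxy_pass http://example;
    }
}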

Nginx not saving cached 404s to disk

Here's my nginx config (using nginx 1.16.1):
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 100000;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format upstream_time '$remote_addr - $remote_user [$time_local] '
                             '"$request" $status $body_bytes_sent '
                             '"$http_referer" "$http_user_agent"'
                             'rt=$request_time uct="$upstream_connect_time" uht="$upstream_header_time" urt="$upstream_response_time"';

    ##
    # Logging Settings
    ##
    error_log /var/log/nginx/error.log warn;
    access_log /var/log/nginx/access.log upstream_time;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Gzip Settings
    ##
    gzip on;
    gzip_disable "msie6";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_min_length 256;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;

    # Nginx cache
    proxy_cache_path /nginx_cache/product levels=1:2 keys_zone=product_cache:100m max_size=20g inactive=2d use_temp_path=off;

    # Serve HTML, JS, CSS & Go requests
    server {
        client_max_body_size 102M;
        listen 443 ssl http2;
        server_name example.com;
        root /html;
        index /;

        error_page 404 /404.html;
        error_page 500 /500.html;
        error_page 502 =503 /maintenance.html;

        location = /404.html {
            add_header x-nginx-cache-status $upstream_cache_status always;
        }

        location ~^/([a-zA-Z0-9/]+)$ {
            set $product_id $1;
            rewrite ^ /product?id=$product_id break;
            proxy_cache product_cache;
            proxy_http_version 1.1;
            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
            proxy_cache_background_update on;
            proxy_cache_lock on;
            proxy_cache_key product-$product_id;
            add_header x-nginx-cache-key product-$product_id always;
            add_header x-nginx-cache-status $upstream_cache_status always;
            proxy_cache_valid 200 404 1d;
            proxy_cache_bypass $nocache;
            proxy_ignore_headers Cache-Control; #force cache
            proxy_ignore_headers Set-Cookie;
            proxy_intercept_errors on;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $remote_addr;
            proxy_set_header Host $host;
            proxy_set_header REQUEST_URI $request_uri;
            proxy_pass http://go:2053;
            proxy_buffer_size 128k;
            proxy_buffers 4 256k;
            proxy_busy_buffers_size 256k;
            error_log /var/log/nginx/error.log;
            access_log /var/log/nginx/access.log;
        }
    }
}
With this configuration, I can see that 404s are actually a cache HIT after the first request; however, they are not saved in the nginx_cache folder as configured.
Other requests with 200 responses are cached appropriately and saved to disk as expected.
I've also tried adding the same caching config to the location = /404.html block, but that had no effect on whether the file was saved to disk.
I'm guessing this has to do with overriding the error page via proxy_intercept_errors and error_page, so nginx is no longer caching it using the parameters I set.
Is there a way to achieve this?
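One hedged thing to test, assuming the goal is to have the upstream's 404 stored on disk like the 200s: let the 404 response pass through uncaught so proxy_cache_valid applies to the body that is actually served, rather than replacing it via proxy_intercept_errors and error_page. A minimal sketch of only the lines that change in the product location (everything else from the original block stays as-is):

location ~ ^/([a-zA-Z0-9/]+)$ {
    set $product_id $1;
    rewrite ^ /product?id=$product_id break;

    proxy_cache product_cache;
    proxy_cache_key product-$product_id;
    proxy_cache_valid 200 404 1d;     # applies to the upstream body that is served to the client
    # proxy_intercept_errors off;     # the default: the upstream 404 body is returned and cached as-is
    proxy_pass http://go:2053;
}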

Nginx: multiple downloads freeze my server

We have a problem with Nginx. We have a converter server that converts MP4 videos to MP3 files, with about 300 users online. When they all start downloading their MP3 files at the same time, the server response time becomes huge, as if it were frozen, even though vCPU usage doesn't exceed 10% while the conversion runs with the MPEG library.
My server configuration: 16 vCPU, 30 GB RAM, 5 TB data transfer.
Nginx configuration (nginx.conf):
user www-data;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
worker_rlimit_nofile 20240;

events {
    worker_connections 4000;
    multi_accept on;
    use epoll;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log off;

    limit_req_zone $binary_remote_addr zone=one:10m rate=1r/s;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    server_tokens off;

    client_header_timeout 10m;
    client_body_timeout 10m;
    send_timeout 10m;
    client_max_body_size 700m;
    connection_pool_size 256;
    client_body_buffer_size 1024k;
    client_header_buffer_size 8k;
    keepalive_timeout 30;
    keepalive_requests 100000;
    reset_timedout_connection on;

    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    gzip on;
    gzip_min_length 10240;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
    gzip_disable "MSIE [1-6]\.";

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
    #include /etc/nginx/conf.d/*.conf;
}
Web site nginx configuration:
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;
    root /var/www/html;
    index index.php index.html index.htm;

    client_max_body_size 700m;
    connection_pool_size 256;
    client_body_buffer_size 1024k;
    client_header_buffer_size 8k;
    limit_rate 125k;
    limit_req zone=one burst=5;

    # Make site accessible from http://localhost/
    server_name localhost;

    location / {
        try_files $uri $uri/ =404;
    }

    error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }

    location ~ \.php$ {
        include snippets/fastcgi-php.conf;
        fastcgi_pass unix:/run/php/php7.0-fpm.sock;
    }
}
Do you know where the problem may come from?
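One direction that may be worth testing, assuming the MP3 files are served as static files from a dedicated path (the /downloads/ location, thresholds, and buffer sizes below are illustrative, not from the original config, and aio threads requires an nginx build with thread-pool support): move large file reads off the worker's event loop so hundreds of concurrent downloads don't stall it.

location /downloads/ {
    aio threads;          # hand blocking disk reads to the thread pool
    directio 4m;          # bypass the page cache for files larger than 4 MB
    sendfile on;          # files below the directio threshold still use sendfile
    output_buffers 2 512k;
    limit_rate_after 1m;  # let the first megabyte burst, then throttle
    limit_rate 125k;
}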
