I deployed a nginx service on windows server to proxy http requests,here is my nginx config:
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 60000;
gzip on;
gzip_min_length 10k;
gzip_buffers 4 16k;
gzip_http_version 1.1;
gzip_comp_level 5;
gzip_types text/plain application/x-javascript text/css application/xml text/javascript application/x-httpd-php application/javascript application/json;
gzip_disable "MSIE [1-6]\.";
gzip_vary on;
location /api/{
proxy_pass http://www.******.com/;
proxy_read_timeout 3600s;
proxy_send_timeout 3600s;
}
}
But every time after running for a few days, the following error message will appear:
8480#12136: *32175 connect() failed (10060: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond) while connecting to upstream, client: 172.10.2.100, server: , request: "POST /api/address/someapi HTTP/1.1", upstream: "http://222.218.***.**:80/address/xxxxxx", host: "341o932a61.xxx.vip"
When I restart nginx, everything goes back to normal.
What is the reason for this?
Environment:
System: Windows Server 2019 Standard 1809
Nginx: 1.20.1
Related
I have Implemented the Nginx cache with https reverse proxy in centos, My response time taking more than 1.5 seconds for each request. My nginx server configuration was 4 core, 8gb ram.
My configuration looks like below (nginx.config)
`user nginx;
worker_processes auto;
worker_rlimit_nofile 100000;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 80000;
use epoll;
multi_accept on;
}
http {
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format rt_cache '$remote_addr - $upstream_cache_status [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
# Below pattern will print
# Time stamp | Client IP | client Dev apps Name| Request | Status Returned| Time taken in ms| size Returned in bytes| Referer | hit or miss | User agent
log_format bf_log_format '[$time_local]|'
'$remote_addr|'
'$http_x_developer_username|$http_x_forwarded_for|'
'"$request"|'
'$status|$upstream_response_time|$body_bytes_sent|'
'"$http_referer"|'
'"$upstream_cache_status"|'
'"$http_user_agent"';
log_format json_log_format escape=json '{'
'"time": "$time_iso8601",'
'"trace_id": "$request_id",'
'"http": {'
'"body_bytes_sent": "$body_bytes_sent",'
'"x_developer_username": "$http_x_developer_username",'
'"remote_addr": "$remote_addr",'
'"method": "$request_method",'
'"request": "$request_uri",'
'"schema": "$scheme",'
'"request_time": "$request_time",'
'"host": "$host",'
'"uri": "$uri",'
'"user_agent": "$http_user_agent",'
'"status": "$status"'
'},'
'"proxy": {'
'"host": "$proxy_host"'
'},'
'"upstream": {'
'"response_time": "$upstream_response_time sec",'
'"cache_status": "$upstream_cache_status"'
'}'
'}';
# access_log /var/log/nginx/access.log main;
# access_log /var/log/nginx/access.log json_log_format;
access_log off;
sendfile on;
sendfile_max_chunk 512k;
# directio 4m;
# directio_alignment 512;
tcp_nopush on;
tcp_nodelay on;
reset_timedout_connection on;
keepalive_requests 100000;
types_hash_max_size 2048;
# reduce the data that needs to be sent over network -- for testing environment
gzip on;
# gzip_static on;
gzip_min_length 10240;
gzip_comp_level 1;
gzip_vary on;
gzip_disable msie6;
gzip_proxied expired no-cache no-store private auth;
gzip_types
text/css
text/javascript
text/xml
text/plain
text/x-component
application/javascript
application/x-javascript
application/json
application/xml
application/rss+xml
application/atom+xml
font/truetype
font/opentype
application/vnd.ms-fontobject
image/svg+xml;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
proxy_cache_path /opt/nginx/cache levels=1:2 keys_zone=api-cache:3000m max_size=100g inactive=43200m use_temp_path=off;
proxy_temp_path /opt/nginx/cache/other;
include /etc/nginx/conf.d/ssl.conf;
}`
My ssl.confg looks like below
server {
server_name _;
root /usr/share/nginx/html;
listen 443 ssl http2 default_server;
listen [::]:443 ssl;
ssl_certificate "/etc/private/ssl/cert.pem";
ssl_certificate_key "/etc/private/ssl/key.pem";
# ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
keepalive_timeout 100;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
location /health {
default_type application/json;
return 200 '{"status":"UP"}';
}
location /nginx-status {
stub_status;
}
location /trellotest {
proxy_cache_bypass $http_no_cache_purge $arg_nocache;
proxy_cache_methods GET POST;
add_header Cache-Control "public";
proxy_cache api-cache;
proxy_cache_valid 200 40320m;
add_header X-Cache $upstream_cache_status;
add_header X-Time $request_time;
proxy_ignore_headers X-Accel-Expires Expires Cache-Control;
proxy_pass https://mytrelloapp;
}
}
If possible, Anyone could you please advise me if we have anyway to improve the above configurations?
My upstream server returns extremely large JSON responses (5~8GB).
I'm trying to condense those responses by enabling gzip on nginx. This is my config
server {
listen 0.0.0.0:8080;
location / {
gzip on;
gzip_comp_level 1;
gzip_types *;
gzip_proxied any;
proxy_pass http://localhost:8081;
}
}
This config technically works. At least, it works for smaller responses (~150MB before compression). When I try to download large response (~7.5GB before compression)
curl -v --compressed --output /path_to_file -X POST http://localhost:8080 --data '{data}'
It gets cut off in the middle, i.e. I see this message from curl
curl: (18) transfer closed with outstanding read data remaining
and the response itself is incomplete (on average it only downloads ~5.3GB out of ~7.5GB)
I also see this log from nginx
2022/04/20 01:18:45 [error] 37#37: *135 upstream prematurely closed connection while reading upstream, client: 127.0.0.1, server: , request: "POST / HTTP/1.1", upstream: "http://127.0.0.1:8081/", host: "localhost:8080"
I tried increasing proxy_max_temp_file_size, and, I tried disabling buffering. nothing works
Any ideas?
Edit: this is the nginx.conf that's built in in the docker image I'm using
worker_processes auto;
error_log "/opt/bitnami/nginx/logs/error.log";
pid "/opt/bitnami/nginx/tmp/nginx.pid";
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log "/opt/bitnami/nginx/logs/access.log" main;
add_header X-Frame-Options SAMEORIGIN;
client_body_temp_path "/opt/bitnami/nginx/tmp/client_body" 1 2;
proxy_temp_path "/opt/bitnami/nginx/tmp/proxy" 1 2;
fastcgi_temp_path "/opt/bitnami/nginx/tmp/fastcgi" 1 2;
scgi_temp_path "/opt/bitnami/nginx/tmp/scgi" 1 2;
uwsgi_temp_path "/opt/bitnami/nginx/tmp/uwsgi" 1 2;
sendfile on;
tcp_nopush on;
tcp_nodelay off;
gzip on;
gzip_http_version 1.0;
gzip_comp_level 2;
gzip_proxied any;
gzip_types text/plain text/css application/javascript text/xml application/xml+rss;
keepalive_timeout 65;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
client_max_body_size 80M;
server_tokens off;
absolute_redirect off;
port_in_redirect off;
include "/opt/bitnami/nginx/conf/server_blocks/*.conf";
# HTTP Server
server {
# Port to listen on, can also be set in IP:PORT format
listen 8080;
include "/opt/bitnami/nginx/conf/bitnami/*.conf";
location /status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
}
}
in similar case this line in location block solved the problem
proxy_http_version 1.1;
it seems like gzip'ed response somehow incompatible with http2
After typing service nginx start on my server text below appears:
Job for nginx.service failed because the control process exited with error code.
See "systemctl status nginx.service" and "journalctl -xe" for details.
Does anyone know what's going on? Thanks for any information and help given. bless you.
user www-data;
worker_processes auto;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
multi_accept on;
use epoll;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
#Ustaw limity połączeń. Layer-7 Security Julek
#limit_req_zone $binary_remote_addr zone=one:10m rate=30r/m;
#limit_conn_zone $binary_remote_addr zone=addr:10m;
# INCLUDE ALL WEBSITE
#include /etc/nginx/conf.d/bialogardzianin.conf;
include /etc/nginx/conf.d/vigilance.conf;
#include /etc/nginx/conf.d/envigilance.conf;
include /etc/nginx/conf.d/kasacja-aut.conf;
include /etc/nginx/conf.d/mahulaboni.conf;
include /etc/nginx/conf.d/kowalewscy.conf;
#include /etc/nginx/conf.d/playstories.conf;
#include /etc/nginx/conf.d/odnovit-centrum.conf;
include /etc/nginx/conf.d/lzs.conf;
include /etc/nginx/conf.d/alpha.conf;
#include /etc/nginx/conf.d/testvigi.conf;
include /etc/nginx/conf.d/malinowskafashion.conf;
include /etc/nginx/conf.d/tarasewicz.conf;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
# #Buffers
client_body_buffer_size 10K;
client_header_buffer_size 1k;
client_max_body_size 8m;
large_client_header_buffers 2 1k;
ignore_invalid_headers on;
## GZIP
gzip on;
gzip_vary on;
gzip_comp_level 6;
gzip_proxied any;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript text/x-js;
gzip_buffers 16 8k;
gzip_disable "MSIE [1-6]\.(?!.*SV1)";
# Cache most accessed static files
open_file_cache max=100000 inactive=360m;
open_file_cache_valid 2m;
open_file_cache_min_uses 1;
open_file_cache_errors on;
## Hide the Nginx version number.
server_tokens off;
## Fill in with your own resolver.
resolver 8.8.8.8;
## Timeouts.
client_body_timeout 12;
client_header_timeout 12;
keepalive_timeout 15;
send_timeout 10;
}
Gzip not working.
I am getting response header as gzip but the response is not zipped.
I verified it in https://checkgzipcompression.com/ as well.
Its sitting in front of golang app but I dont think it will affect this in any case.
# Elastic Beanstalk Nginx Configuration File
user nginx;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
worker_processes auto;[enter image description here][1]
worker_rlimit_nofile 133979;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
include conf.d/*.conf;
map $http_upgrade $connection_upgrade {
default "upgrade";
}
server {
listen 80 default_server;
access_log /var/log/nginx/access.log main;
client_header_timeout 60;
client_body_timeout 60;
keepalive_timeout 60;
gzip on;
gzip_vary on;
gzip_min_length 10240;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_disable "MSIE [1-6]\.";
# Include the Elastic Beanstalk generated locations
include conf.d/elasticbeanstalk/*.conf;
}
}
I have the following nginx config:
location ~ ^/system/filedir/scripts {
autoindex on;
}
location ~ ^/filedir/scripts {
alias /system/filedir/scripts/;
}
When I visit http://myserver.com/system/filedir/scriptseverything works fine and I can see the list of files contained by the scripts directory.
But when I visit http://myserver.com/filedir/scripts I have the following error:
[error] 2083#0: *83335 "/system/filedir/index.html" is not found (2: No such file or directory), request: "GET /filedir/scripts/ HTTP/1.1"
I cannot get rid of the index.html addition and get the file listing.
The same is happening if I try
location ~ ^/filedir/scripts {
root /system/;
}
Here's my nginx.conf:
# daemon off;
# user nobody;
worker_processes 1;
error_log logs/error.log;
# error_log logs/error.log notice;
# error_log logs/error.log info;
pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
gzip on;
gzip_http_version 1.0;
gzip_proxied any;
gzip_min_length 500;
gzip_disable "MSIE [1-6]\.";
gzip_types text/plain
text/html
text/xml
text/css
text/comma-separated-values
text/javascript application/x-javascript
application/atom+xml
application/json
application/xml
application/rss+xml
image/svg+xml
application/vnd.ms-fontobject
application/x-font-ttf;
include my_sites/*;
}
You can use a rewrite:
server {
location ~ ^/system/filedir/scripts/ {
autoindex on;
}
location ~ ^/filedir/scripts/ {
rewrite ^/filedir/scripts/(.*)$ /system/filedir/scripts/$1 last;
}
Or a proxy_pass:
server {
resolver 8.8.8.8;
location ~ ^/system/filedir/scripts/ {
autoindex on;
}
location ~ ^/filedir/scripts/ {
proxy_pass http://$host/system/$uri;
}
I've tried combinations with alias, root, but they all failed.