$upstream_cache_status is HIT, but $request_time sometimes lasts 5s or more. What's the problem?
My nginx.conf
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
gzip on;
client_max_body_size 30M;
proxy_temp_path /tmp/proxy_temp_dir;
proxy_cache_path /tmp/proxy_cache_dir levels=1:2 keys_zone=cache:500m inactive=1d max_size=500m;
log_format cache_log '$remote_addr - [$request_time] $status $upstream_cache_status "$request"';
server {
access_log logs/access.log cache_log;
error_log logs/error.log error;
proxy_cache cache;
proxy_cache_valid 10m;
location / {
proxy_next_upstream http_502 error timeout;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://xxxxxx;
}
}
include /usr/local/openresty/nginx/conf/vhosts/*.conf;
}
And access.log:
x.x.x.x - [5.076] 200 HIT "GET /xxx"
x.x.x.x - [0.092] 200 HIT "GET /xxx"
Same request URL, both hit the cache, so why does $request_time last 5s or more?
Thanks.
It turned out to be a disk I/O problem: I moved proxy_cache_path to another SSD and the problem was solved.
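For reference, a minimal sketch of that fix, with /mnt/ssd standing in for whatever mount point the faster disk actually has (the path is a placeholder, not from the original config):
# Keep both the cache and its temp directory on the fast disk
proxy_temp_path /mnt/ssd/proxy_temp_dir;
proxy_cache_path /mnt/ssd/proxy_cache_dir levels=1:2 keys_zone=cache:500m inactive=1d max_size=500m;
Keeping proxy_temp_path on the same filesystem as the cache also avoids an extra copy: nginx writes a response to a temporary file first and then renames it into the cache, and that rename is only cheap when both paths share a filesystem.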
Related
I have a problem calling an API endpoint on my server.
I receive a 500 error, and the page content says
The page you are looking for is temporarily unavailable.
Please try again later.
But this problem occurs only on some browsers/computers; it works normally on others.
For example, I have the problem on Firefox while it works on Chrome,
and one of my customers gets the error on Chrome.
On the server, I found this error log entry (/nginx/localhost):
94.xxx.xxx.xxx:- - - [05/Oct/2022:11:10:33 +0000] "POST /api/auth/login HTTP/3" 500 383 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:104.0) Gecko/20100101 Firefox/104.0" "-" "www.mydomain.com"
This server is hosted on a Jelastic platform and is used as a load balancer to redirect traffic to other servers.
My nginx config file is as follows (I have masked IPs and domains):
######## HTTP SECTION PROTOTYPE ########
http {
server_tokens off ;
include /etc/nginx/mime.types;
default_type application/octet-stream;
set_real_ip_from 192.xxx.xxx.xxx/16;
set_real_ip_from 10.xxx.xxx.xxx/8;
set_real_ip_from 172.xxx.xxx.xxx/16;
real_ip_header X-Forwarded-For;
real_ip_recursive on;
log_format main '$remote_addr:$http_x_remote_port - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" '
'"$host" sn="$server_name" '
'rt=$request_time '
'ua="$upstream_addr" us="$upstream_status" '
'ut="$upstream_response_time" ul="$upstream_response_length" '
'cs=$upstream_cache_status' ;
client_header_timeout 10m;
client_body_timeout 10m;
send_timeout 10m;
client_max_body_size 100m;
proxy_read_timeout 300s;
connection_pool_size 256;
client_header_buffer_size 1k;
large_client_header_buffers 4 32k; # 8k to 32k
request_pool_size 4k;
#Allow large token
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
#proxy_buffering off;
# gzip on;
gzip_min_length 1100;
gzip_buffers 4 8k;
gzip_types text/plain;
output_buffers 1 32k;
postpone_output 1460;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 75 20;
ignore_invalid_headers on;
### UPSTREAMS LIST FOLLOWS HERE ###
#upstream nodes{ server XXX.XXX.XXX.XXX; server 127.0.0.1:8001 backup # UPSTREAMPROTO # This is upstream prototype line, do not remove this! }
#This config is auto-generated. DO NOT modify the weight property. If changing the rest of settings, please, remember that you are doing this at your own risk.
upstream common { check interval=30000 rise=2 fall=5 timeout=10000 default_down=false type=http; check_http_send "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n"; keepalive 100; server mydomain.jcloud-ver-jpe.ik-server.com weight=100 ; } ### UPSTREAMPROTO for common ###
#GFADMIN
server {
listen *:80;
listen [::]:80;
server_name _;
access_log /var/log/nginx/localhost.access_log main;
error_log /var/log/nginx/localhost.error_log info;
proxy_temp_path /var/nginx/tmp/;
proxy_connect_timeout 5s;
error_page 500 502 503 504 /50x.html;
proxy_next_upstream error timeout http_500;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Host $http_host;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Remote-Port $http_x_remote_port;
proxy_set_header X-URI $request_uri;
proxy_set_header X-ARGS $args;
proxy_set_header Refer $http_refer;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
if ($http_x_remote_port = '' ) {
set $http_x_remote_port $remote_port;
}
location = /50x.html {
root html;
}
location / {
proxy_pass http://common;
}
#USERLOCATIONS
}
# server {
# listen *:8001;
# server_name backup.local;
#
# location / {
# proxy_pass http://default_upstream;
# add_header Set-Cookie "SRVGROUP=$group; path=/; HttpOnly";
# proxy_http_version 1.1;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Host $http_host;
# proxy_set_header X-Forwarded-For $http_x_forwarded_for;
# proxy_set_header X-URI $request_uri;
# proxy_set_header X-ARGS $args;
# proxy_set_header Refer $http_refer;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection "upgrade";
# }
# }
include /etc/nginx/conf.d/*.conf;
}
######## TCP SECTION PROTOTYPE ########
It's the first time I have looked at an nginx config file, so any advice or ideas are welcome :)
I am using Nginx as a reverse proxy to my backend (a Java app with Spring Boot). Overall (avg, p50, p90, p95, p99 latencies) it performs well, but from time to time I see latency spikes of around 100-200 milliseconds. When I enabled the access logs, I saw that the upstream response time ($upstream_response_time) is very low even though the request time ($request_time) is high. For example,
[25/Apr/2020:18:28:17 +0000] "XXX" XXX - request="POST /v1/composite-monitoring-data HTTP/1.1" status=429 request_time=0.081 trace_id="Root=1-5ea48141-2f8e07a4c7c71a1360d9c5f5" request_length=9864 bytes_sent=979 body_bytes_sent=623 upstream_addr=127.0.0.1:5000 upstream_status=429 upstream_response_time=0.004 upstream_connect_time=0.000 upstream_header_time=0.004 user_agent="okhttp/3.10.0" current_time_msec=1587839297.256
...
[25/Apr/2020:18:28:17 +0000] "XXX" XXX - request="POST /v1/composite-monitoring-data HTTP/1.1" status=429 request_time=0.084 trace_id="Root=1-5ea48141-51f0d12a6f7c4b0651f6ef42" request_length=20534 bytes_sent=979 body_bytes_sent=623 upstream_addr=127.0.0.1:5000 upstream_status=429 upstream_response_time=0.000 upstream_connect_time=0.000 upstream_header_time=0.000 user_agent="okhttp/3.10.0" current_time_msec=1587839297.278
Also here is my nginx.conf file:
user nginx;
pid /var/run/nginx.pid;
error_log /var/log/nginx/error.log;
worker_processes auto;
worker_rlimit_nofile 32768;
events {
worker_connections 4096;
use epoll;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
include /etc/nginx/conf.d/*.conf;
default_type application/json;
sendfile on;
tcp_nopush off;
tcp_nodelay on;
keepalive_timeout 300;
keepalive_requests 10000;
client_body_timeout 15;
client_header_timeout 15;
client_body_buffer_size 4m;
client_max_body_size 4m;
log_format main '[$time_local] "$http_x_forwarded_for" $remote_addr - '
'request="$request" status=$status request_time=$request_time trace_id="$http_x_amzn_trace_id" '
'request_length=$request_length bytes_sent=$bytes_sent body_bytes_sent=$body_bytes_sent '
'upstream_addr=$upstream_addr '
'upstream_status=$upstream_status '
'upstream_response_time=$upstream_response_time '
'upstream_connect_time=$upstream_connect_time '
'upstream_header_time=$upstream_header_time '
'user_agent="$http_user_agent" '
'current_time_msec=$msec';
access_log /var/log/nginx/access.log main;
upstream http_backend {
server 127.0.0.1:5000;
keepalive 1024;
}
server {
listen 80;
listen [::]:80;
server_name _ localhost;
location /v1 {
proxy_pass http://http_backend/v1;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Request-Start $msec;
proxy_set_header Connection "";
proxy_http_version 1.1;
keepalive_timeout 300;
keepalive_requests 10000;
}
location /ping {
proxy_pass http://http_backend/ping;
}
}
}
What might cause this big difference between the request time and the upstream response time? Is there anything I need to configure that I have not configured properly?
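For reference when reading the numbers above, the timing variables in that log_format cover different parts of the request lifecycle. A short annotated sketch, with definitions paraphrased from the nginx documentation (the log_format name "timings" is only illustrative):
# $request_time           - time elapsed since the first bytes were read from the client
#                           (so it also covers reading the request body and sending the
#                           response back to the client)
# $upstream_connect_time  - time spent establishing a connection with the upstream server
# $upstream_header_time   - time spent receiving the response header from the upstream server
# $upstream_response_time - time spent receiving the response from the upstream server
log_format timings 'request_time=$request_time '
                   'upstream_connect_time=$upstream_connect_time '
                   'upstream_header_time=$upstream_header_time '
                   'upstream_response_time=$upstream_response_time';
Any gap between $request_time and the upstream_* timers is therefore time spent outside the nginx-to-upstream exchange, for example on the client side of the connection.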
I currently have Filebeat reading nginx logs and pushing them to Logstash. I am trying to determine which application a log entry is coming from by looking at the URI context root (not sure if this is the correct way to do it), but the issue arises when there is no context root: Logstash will just parse whatever value comes right after the host.
Here is my nginx config.
server {
listen 443;
server_name MyServer.com;
access_log /var/log/nginx/access_dev.log main if=$loggable;
error_log /var/log/nginx/error_dev.log;
ssl on;
ssl_certificate ssl/bundle.pem;
ssl_certificate_key ssl/wildcard.key;
ssl_session_timeout 5m;
proxy_ssl_verify off;
ssl_protocols TLSv1.2;
ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:!aNULL;
ssl_prefer_server_ciphers on;
add_header X-Forwarded-For $host;
add_header X-Forwarded-Proto $scheme;
add_header Host $host;
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
location / {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host:$server_port;
proxy_set_header Host $host;
proxy_pass https://app01.domain.local:443/;
}
location /applicationOne {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host:$server_port;
proxy_set_header Host $host;
proxy_read_timeout 120s;
proxy_pass https://app02.domain.local:443;
}
}
Is it possible to add a variable to a specific location to say what the application name is? So for location / I would add, let's say, "Portal", and then the nginx log would log "Portal" at the end?
Here is my current nginx.conf
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" -- '
'$sent_http_x_username';
map $request_uri $loggable {
default 1;
~*\.(ico|css|js|gif|jpg|jpeg|png|svg|woff|woff2|ttf|eot)$ 0;
}
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
Here is an example of what I am asking for, with "Portal" at the end of a log line:
198.143.37.12 - - [31/Jul/2019:10:44:13 -0400] "GET /nosession HTTP/1.1" 200 3890 "-" "Safari/14607.2.6.1.1 CFNetwork/978.0.7 Darwin/18.6.0 (x86_64)" "199.83.71.22" "Portal"
And if that is not possible, are there any other ideas on how to solve this?
I used the approach of adding a header and then logging that header.
In my location block:
location / {
add_header x-application "App Name";
}
In nginx.conf,
$sent_http_x_application
was added to the log_format.
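Putting the two pieces together, a minimal sketch of that approach (the "Portal" value and the upstream address are placeholders taken from the question, not a canonical setup):
location / {
    # Emit a response header identifying the application served by this location
    add_header x-application "Portal";
    proxy_set_header Host $host;
    proxy_pass https://app01.domain.local:443/;
}
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                '$status $body_bytes_sent "$http_referer" '
                '"$http_user_agent" "$http_x_forwarded_for" '
                '"$sent_http_x_application"';
nginx exposes the response header x-application as the variable $sent_http_x_application (lowercase, dashes replaced by underscores). Two caveats: add_header only applies to a fixed set of success and redirect status codes unless the always parameter is added, and the header is also sent to clients; if that is unwanted, a set $application "Portal"; in each location, logged as $application, keeps the label log-only.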
I have an Nginx reverse proxy set up which is being used as an SSL offload for several servers, such as Confluence. I've got it successfully working for http://confluence and https://confluence, but when I try to redirect http://confluence:8090, it tries to go to https://confluence:8090 and fails.
How can I remove the port from the URL?
The config below is a bit trimmed but maybe helpful? Is the $server_port bit in the headers causing the problem?
server {
listen 8090;
server_name confluence;
return 301 https://confluence$request_uri;
}
server {
listen 443 ssl http2;
server_name confluence;
location / {
proxy_http_version 1.1;
proxy_pass http://confbackend:8091;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $server_name:$server_port;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Upgrade $http_upgrade; #WebSocket Support
proxy_set_header Connection $connection_upgrade; #WebSocket Support
}
}
Seems like a lot of answers here involve http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect but I find no solace in that confusing mess.
I also would have thought you'd have a single server block, but I was trying the advice from https://serverfault.com/questions/815797/nginx-rewrite-to-new-protocol-and-port
I tried messing with the port_in_redirect off; option but maybe I was using it wrong?
EDIT 1: Add conf files
The files below are modifications of the Artifactory nginx setup. I used their setup initially and added additional conf files (in ./conf.d/) for other reverse proxy endpoints.
Confluence.conf
server {
listen 8090 ssl http2;
server_name confluence.domain.com confluence;
## return 301 https://confluence.domain.com$request_uri;
proxy_redirect https://confluence.domain.com:8090 https://confluence.domain.com;
}
server {
## add ssl entries when https has been set in config
ssl_certificate /data/rpssl/confluence.pem;
ssl_certificate_key /data/rpssl/confluence_unencrypted.key;
## server configuration
listen 443 ssl http2;
server_name confluence.domain.com confluence;
add_header Strict-Transport-Security max-age=31536000;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/confluence-access.log timing;
error_log /var/log/nginx/confluence-error.log;
client_max_body_size 0;
proxy_read_timeout 1200;
proxy_connect_timeout 240;
location / {
proxy_http_version 1.1;
proxy_pass http://backendconfluence.domain.com:8091;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $server_name:$server_port;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Upgrade $http_upgrade; # WebSocket Support
proxy_set_header Connection $connection_upgrade; # WebSocket support
}
}
nginx.conf
# Main Nginx configuration file
worker_processes 4;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
worker_rlimit_nofile 4096;
events {
worker_connections 2048;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_max_size 4096;
server_names_hash_bucket_size 128;
types_hash_max_size 2048;
types_hash_bucket_size 64;
proxy_read_timeout 2400s;
client_header_timeout 2400s;
client_body_timeout 2400s;
proxy_connect_timeout 75s;
proxy_send_timeout 2400s;
proxy_buffer_size 32k;
proxy_buffers 40 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 250m;
proxy_http_version 1.1;
client_body_buffer_size 128k;
map $http_upgrade $connection_upgrade { #WebSocket support
default upgrade;
'' '';
}
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format timing 'ip = $remote_addr '
'user = \"$remote_user\" '
'local_time = \"$time_local\" '
'host = $host '
'request = \"$request\" '
'status = $status '
'bytes = $body_bytes_sent '
'upstream = \"$upstream_addr\" '
'upstream_time = $upstream_response_time '
'request_time = $request_time '
'referer = \"$http_referer\" '
'UA = \"$http_user_agent\"';
access_log /var/log/nginx/access.log timing;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
Your problem is the STS header
add_header Strict-Transport-Security max-age=31536000;
When you add the STS header, the first request to http://example.com:8090 generates a redirect to https://example.com.
That https://example.com response then returns the STS header, and the browser remembers that example.com must always be served over HTTPS, no matter what; the port makes no difference.
Now when you make another request to http://example.com:8090, STS kicks in and converts it to https://example.com:8090, which is your problem here.
Because a port can only serve HTTP or HTTPS, not both, you can't use 8090 to redirect HTTP to HTTPS and at the same time redirect HTTPS on 8090 to HTTPS on 443.
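If requests to :8090 still need to land somewhere sensible, one option is a sketch along these lines, reusing the certificate paths and the commented-out return 301 from Confluence.conf above; it assumes terminating TLS on 8090 is acceptable:
server {
    listen 8090 ssl http2;
    server_name confluence.domain.com confluence;
    ssl_certificate /data/rpssl/confluence.pem;
    ssl_certificate_key /data/rpssl/confluence_unencrypted.key;
    # Send everything to the canonical host on 443, dropping the :8090
    return 301 https://confluence.domain.com$request_uri;
}
Once a browser has cached the STS policy it upgrades http://confluence:8090 to https://confluence:8090 before the request is even sent, so only an HTTPS listener on 8090 can still catch it and redirect.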
This is my first question on Stack Overflow. I could not find a solution to this while searching the web.
I am working on an OpenBSD server running Nginx, which I did not set up, that is throwing a 502 Bad Gateway error in the browser. The Nginx error log shows this error:
2014/04/29 09:43:49 [error] 5236#0: *263 connect() to unix:/tmp/unicorn.sock failed (61: Connection refused) while connecting to upstream, client: ###.###.###.###, server: , request: "GET / HTTP/1.1", upstream: "http://unix:/tmp/unicorn.sock:/", host: "sub.domain.com"
Here are the contents of nginx.conf:
user _nginx;
worker_processes 2;
pid /var/run/nginx.pid;
error_log /var/log/httpd.err debug;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format access '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent $bytes_sent '
'"$http_referer" "$http_user_agent" "$sent_http_content_type"';
access_log /var/log/httpd.log access;
upload_progress proxied 1m;
sendfile on;
server_name_in_redirect off;
client_body_timeout 120;
client_header_timeout 120;
keepalive_timeout 20;
send_timeout 120;
upstream mongrel {
server unix:/tmp/unicorn.sock;
}
gzip on;
gzip_vary on;
gzip_min_length 0;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_proxied any;
gzip_types text/plain text/javascript text/css text/stylesheet application/x-javascript application/javascript;
gzip_disable "MSIE [1-6]\.";
client_max_body_size 128000M;
client_body_buffer_size 512k;
ssl_session_timeout 60m;
ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:1m;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/private/server.key;
server {
listen 80;
listen [::]:80;
listen 443 default ssl;
listen [::]:443 default ssl;
charset utf-8;
root /var/sfta/current/public;
location ~* ^/(message\/create|dropbox\/create|attachment\/create|attachments) {
upload_pass @internal_upload;
upload_resumable on;
upload_pass_args on;
upload_store /var/data/tmp;
upload_state_store /var/data/tmp/resume;
upload_store_access user:rw group:rw all:rw;
chunked_transfer_encoding on;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X_Forwarded_Proto $scheme;
if ($request_method != POST) {
proxy_pass http://mongrel;
break;
}
# Set specified fields in request body
upload_set_form_field $upload_field_name.name "$upload_file_name";
upload_set_form_field $upload_field_name.content_type "$upload_content_type";
upload_set_form_field $upload_field_name.path "$upload_tmp_path";
upload_aggregate_form_field "$upload_field_name.sha1" "$upload_file_sha1";
upload_aggregate_form_field "$upload_field_name.size" "$upload_file_size";
upload_max_output_body_len 0;
upload_pass_form_field "^authenticity_token$|message|dropbox";
upload_cleanup 400 404 499 500-505;
# track uploads in the 'proxied' zone
# remember connections for 30s after they finished
track_uploads proxied 120s;
}
location ~* ^/(send|messages) {
rewrite ^(.*)$ /message redirect;
}
location / {
proxy_pass http://mongrel;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X_Forwarded_Proto $scheme;
proxy_read_timeout 120;
proxy_connect_timeout 120;
# track uploads in the 'proxied' zone
# remember connections for 30s after they finished
track_uploads proxied 120s;
}
location @internal_upload {
proxy_pass http://mongrel;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X_Forwarded_Proto $scheme;
proxy_read_timeout 120;
proxy_connect_timeout 120;
}
location ~* ^/send {
rewrite ^(.*)$ /message redirect;
}
location ^~ /files/ {
alias /var/data/files/;
chunked_transfer_encoding on;
post_action @protected_done;
if_modified_since off;
gzip off;
internal;
}
location @protected_done {
internal;
proxy_pass http://mongrel;
proxy_set_header RateBytes $body_bytes_sent;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass_request_body off;
proxy_pass_request_headers off;
}
location ^~ /images/custom {
alias /var/data/assets/images;
}
location ~* /(javascripts|stylesheets|images)/.*\.(ico|css|gif|js|jp?g|png)(\?[0-9]+)?$ {
access_log off;
expires 1w;
break;
}
location = /favicon.ico {
alias /var/data/assets/images/favicon.ico;
access_log off;
}
location = /alive {
access_log off;
return 200;
}
location ^~ /progress {
access_log off;
report_uploads proxied;
upload_progress_json_output;
}
error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /500.html;
}
}
I have very little experience with Nginx. Any help is greatly appreciated.