How to stop nginx setting the HTTP Connection header to close?

I have Spring Security servers running behind an nginx proxy. Currently I am having an issue with nginx setting the HTTP header Connection: close; as a result, Tomcat socket connections are closed too early.
NGINX configuration file:
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 4096;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    include /etc/nginx/conf.d/*.conf;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_certificate /etc/nginx/cert.crt;
    ssl_certificate_key /etc/nginx/cert.key;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    server {
        listen 443 ssl;
        listen [::]:443 ssl;
        server_name _;
        root /data/www;
        absolute_redirect off;
        location / {
            index index.html index.php;
            proxy_set_header Referer "";
        }
        location /authenticate {
            proxy_http_version 1.1;
            proxy_pass http://127.0.0.1:8080/authenticate;
            proxy_redirect http://auth-server:9000/ https://111.111.11.111:22222/;
            proxy_redirect http://127.0.0.1:8080/ https://111.111.11.111:22222/;
        }
        location /oauth2/authenticate {
            proxy_http_version 1.1;
            proxy_pass http://127.0.0.1:8080//oauth2/authenticate;
            proxy_redirect http://auth-server:9000/ https://111.111.11.111:22222/;
        }
...
The Spring Security client server logs (below) contain lines showing that:
- the socket status is set to "Status in: [OPEN_READ], State out: [CLOSED]"
- the HTTP request header has Connection: close (I expected this to be keep-alive by default, but it is not)
- the requested session id is invalid
2022-07-07 15:00:17.596 DEBUG 11690 --- [nio-8080-exec-8] o.s.s.web.DefaultRedirectStrategy : Redirecting to http://auth-server:9000/oauth2/authorize?response_type=code&client_id=client&scope=openid&state=giGRLf_DyH5GziZfHvjWlVlMo-cDELAX6E5sfLJkPM8%3D&redirect_uri=http://127.0.0.1:8080/login/oauth2/code/client-oidc&nonce=2GNlYXwjZS2bqoZ-73kSkqc_TFu_eImnH05iEsxO2UE
2022-07-07 15:00:17.596 DEBUG 11690 --- [nio-8080-exec-8] w.c.HttpSessionSecurityContextRepository : Did not store empty SecurityContext
2022-07-07 15:00:17.596 DEBUG 11690 --- [nio-8080-exec-8] w.c.HttpSessionSecurityContextRepository : Did not store empty SecurityContext
2022-07-07 15:00:17.596 DEBUG 11690 --- [nio-8080-exec-8] s.s.w.c.SecurityContextPersistenceFilter : Cleared SecurityContextHolder to complete request
2022-07-07 15:00:17.596 DEBUG 11690 --- [nio-8080-exec-8] o.apache.coyote.http11.Http11Processor : Socket: [org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper#278d5e31:org.apache.tomcat.util.net.NioChannel#2be48f79:java.nio.channels.SocketChannel[connected local=/127.0.0.1:8080 remote=/192.111.111.222:55200]], Status in: [OPEN_READ], State out: [CLOSED]
2022-07-07 15:00:17.596 DEBUG 11690 --- [nio-8080-exec-8] o.apache.tomcat.util.threads.LimitLatch : Counting down[http-nio-8080-exec-8] latch=1
2022-07-07 15:00:17.596 DEBUG 11690 --- [nio-8080-exec-8] org.apache.tomcat.util.net.NioEndpoint : Calling [org.apache.tomcat.util.net.NioEndpoint#64e25661].closeSocket([org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper#278d5e31:org.apache.tomcat.util.net.NioChannel#2be48f79:java.nio.channels.SocketChannel[connected local=/127.0.0.1:8080 remote=/192.111.111.222:55200]])
2022-07-07 15:01:02.647 DEBUG 11690 --- [o-8080-Acceptor] o.apache.tomcat.util.threads.LimitLatch : Counting up[http-nio-8080-Acceptor] latch=1
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] o.a.coyote.http11.Http11InputBuffer : Before fill(): parsingHeader: [true], parsingRequestLine: [true], parsingRequestLinePhase: [0], parsingRequestLineStart: [0], byteBuffer.position(): [0], byteBuffer.limit(): [0], end: [629]
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] o.a.tomcat.util.net.SocketWrapperBase : Socket: [org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper#462a89b9:org.apache.tomcat.util.net.NioChannel#2be48f79:java.nio.channels.SocketChannel[connected local=/127.0.0.1:8080 remote=/192.111.111.222:55200]], Read from buffer: [0]
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] org.apache.tomcat.util.net.NioEndpoint : Socket: [org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper#462a89b9:org.apache.tomcat.util.net.NioChannel#2be48f79:java.nio.channels.SocketChannel[connected local=/127.0.0.1:8080 remote=/192.111.111.222:55200]], Read direct from socket: [556]
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] o.a.coyote.http11.Http11InputBuffer : Received [GET /authenticate HTTP/1.1
Host: 127.0.0.1:8080
Connection: close
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:102.0) Gecko/20100101 Firefox/102.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8
Accept-Language: en-GB,en;q=0.5
Accept-Encoding: gzip, deflate, br
Cookie: JSESSIONID=CBE7BC7B1ABAC7F69908F6D0536A32CE
Upgrade-Insecure-Requests: 1
Sec-Fetch-Dest: document
Sec-Fetch-Mode: navigate
Sec-Fetch-Site: same-origin
Sec-Fetch-User: ?1
Pragma: no-cache
Cache-Control: no-cache
]
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] o.a.t.util.http.Rfc6265CookieProcessor : Cookies: Parsing b[]: JSESSIONID=CBE7BC7B1ABAC7F69908F6D0536A32CE
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] o.a.catalina.connector.CoyoteAdapter : Requested cookie session id is CBE7BC7B1ABAC7F69908F6D0536A32CE
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] o.a.c.authenticator.AuthenticatorBase : Security checking request GET /authenticate
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] org.apache.catalina.realm.RealmBase : No applicable constraints defined
2022-07-07 15:01:02.648 DEBUG 11690 --- [nio-8080-exec-9] o.a.c.authenticator.AuthenticatorBase : Not subject to any constraint
2022-07-07 15:01:02.649 DEBUG 11690 --- [nio-8080-exec-9] o.s.security.web.FilterChainProxy : Securing GET /authenticate
2022-07-07 15:01:02.649 DEBUG 11690 --- [nio-8080-exec-9] s.s.w.c.SecurityContextPersistenceFilter : Set SecurityContextHolder to empty SecurityContext
2022-07-07 15:01:02.649 DEBUG 11690 --- [nio-8080-exec-9] o.s.s.w.a.AnonymousAuthenticationFilter : Set SecurityContextHolder to anonymous SecurityContext
2022-07-07 15:01:02.649 DEBUG 11690 --- [nio-8080-exec-9] org.apache.tomcat.util.http.Parameters : Set encoding to UTF-8
2022-07-07 15:01:02.649 DEBUG 11690 --- [nio-8080-exec-9] o.s.s.w.session.SessionManagementFilter : Request requested invalid session id CBE7BC7B1ABAC7F69908F6D0536A32CE
2022-07-07 15:01:02.649 DEBUG 11690 --- [nio-8080-exec-9] o.s.s.w.a.i.FilterSecurityInterceptor : Failed to authorize filter invocation [GET /authenticate] with attributes [authenticated]
2022-07-07 15:01:02.650 DEBUG 11690 --- [nio-8080-exec-9] o.s.s.w.s.HttpSessionRequestCache
...
I suspect the socket connection is closed because Connection is set to close in the HTTP request header. Is it expected behaviour for nginx to set the Connection header to close? How can I control this?
Many thanks
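For context: by default nginx speaks HTTP/1.0 to proxied backends and rewrites the Connection header to "close", so this is expected behaviour. A minimal sketch of the usual way to keep upstream connections to Tomcat alive, assuming the /authenticate location above (the upstream name tomcat_backend is made up for illustration):
upstream tomcat_backend {
    server 127.0.0.1:8080;
    keepalive 16;                        # pool of idle keepalive connections kept per worker
}
server {
    ...
    location /authenticate {
        proxy_http_version 1.1;          # keepalive requires HTTP/1.1
        proxy_set_header Connection "";  # stop nginx from sending "Connection: close" upstream
        proxy_pass http://tomcat_backend/authenticate;
    }
}
Clearing the header is what removes the "Connection: close" line; the keepalive pool in the upstream block is what actually keeps the Tomcat sockets open between requests.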

Related

Nginx Ingress Controller cache not being hit

We are using the Nginx Ingress Controller image as described here (https://docs.nginx.com/nginx-ingress-controller/) in our Kubernetes (EKS) environment, and we are having big problems trying to implement caching.
We have a JSON-based service sitting behind our ingress controller.
The Ingress generates Nginx config that looks like this:
# configuration for dcjson-mlang25/terminology-ingress
upstream dcjson-mlang25-terminology-ingress-mlang25.test.domain-jsonserver-authoring-8080 {
    zone dcjson-mlang25-terminology-ingress-mlang25.test.domain-jsonserver-authoring-8080 256k;
    random two least_conn;
    server 10.220.2.66:8080 max_fails=1 fail_timeout=10s max_conns=0;
}
server {
    listen 80;
    listen [::]:80;
    listen 443 ssl;
    listen [::]:443 ssl;
    ssl_certificate /etc/nginx/secrets/dcjson-mlang25-jsonserver-tls-secret;
    ssl_certificate_key /etc/nginx/secrets/dcjson-mlang25-jsonserver-tls-secret;
    server_tokens on;
    server_name mlang25.test.domain;
    set $resource_type "ingress";
    set $resource_name "terminology-ingress";
    set $resource_namespace "dcjson-mlang25";
    if ($scheme = http) {
        return 301 https://$host:443$request_uri;
    }
    location /authoring/ {
        set $service "jsonserver-authoring";
        proxy_http_version 1.1;
        proxy_cache STATIC;
        proxy_cache_valid 200 1d;
        proxy_cache_use_stale error timeout updating http_404 http_500 http_502 http_503 http_504;
        proxy_cache_revalidate on;
        proxy_set_header Connection "";
        proxy_hide_header 'Access-Control-Allow-Origin';
        proxy_hide_header 'Access-Control-Allow-Methods';
        proxy_hide_header 'Access-Control-Allow-Headers';
        proxy_hide_header 'Access-Control-Expose-Headers';
        proxy_hide_header 'Access-Control-Allow-Credentials';
        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header 'Access-Control-Allow-Methods' 'PUT, GET, POST, DELETE, OPTIONS' always;
        add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,AcceptX-FHIR-Starter,Origin,Accept,X-Requested-With,Content-Type,Access-Control-Request-Method,Access-Control-Request-Headers,Authorization,Prefer,Pragma,If-Match,If-None-Match' always;
        add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always;
        add_header 'Access-Control-Allow-Credentials' 'true';
        add_header X-Cache-Status $upstream_cache_status;
        proxy_connect_timeout 60s;
        proxy_read_timeout 1800s;
        proxy_send_timeout 1800s;
        client_max_body_size 4096m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_buffers 4 256k;
        proxy_buffer_size 128k;
        proxy_max_temp_file_size 4096m;
        proxy_pass http://dcjson-mlang25-terminology-ingress-mlang25.test.domain-jsonserver-authoring-8080/;
    }
}
The Nginx.conf file itself declares the cache like so:
http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    proxy_cache_path /tmp/nginx_cache levels=1:2 keys_zone=STATIC:32m inactive=24h max_size=10g;
    proxy_cache_key $scheme$proxy_host$request_uri;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    map $upstream_trailer_grpc_status $grpc_status {
        default $upstream_trailer_grpc_status;
        '' $sent_http_grpc_status;
    }
    ** snipped **
}
The backend app does not return any Set-Cookie headers, which I know can be an issue - it's not that.
When placing a simple GET request, I see this in the nginx logs:
2023/02/07 20:46:49 [debug] 416#416: *171 http script var: "https"
2023/02/07 20:46:49 [debug] 416#416: *171 http script var: "dcjson-mlang25-terminology-ingress-mlang25.test.domain-jsonserver-authoring-8080"
2023/02/07 20:46:49 [debug] 416#416: *171 http script var: "/authoring/fhir/CodeSystem/genenames.geneId-small"
2023/02/07 20:46:49 [debug] 416#416: *171 http cache key: "httpsdcjson-mlang25-terminology-ingress-mlang25.test.domain-jsonserver-authoring-8080/authoring/fhir/CodeSystem/genenames.geneId-small"
2023/02/07 20:46:49 [debug] 416#416: *171 add cleanup: 000055C5DDA4ED00
2023/02/07 20:46:49 [debug] 416#416: shmtx lock
2023/02/07 20:46:49 [debug] 416#416: slab alloc: 120 slot: 4
2023/02/07 20:46:49 [debug] 416#416: slab alloc: 00007FECD6324080
2023/02/07 20:46:49 [debug] 416#416: shmtx unlock
2023/02/07 20:46:49 [debug] 416#416: *171 http file cache exists: -5 e:0
2023/02/07 20:46:49 [debug] 416#416: *171 cache file: "/tmp/nginx_cache/8/b4/9ac307cbf4540372616c09cd894b9b48"
Repeated requests seconds later look exactly the same.
To my eyes, this is saying the cache isn't being hit?
Every set of response headers looks something like this, with the status always being MISS:
2023/02/07 20:46:49 [debug] 416#416: *171 HTTP/1.1 200
Server: nginx/1.23.2
Date: Tue, 07 Feb 2023 20:46:49 GMT
Content-Type: application/fhir+json;charset=UTF-8
Transfer-Encoding: chunked
Connection: keep-alive
X-Request-Id: sJ4yXmP1ziSF3fJt
Cache-Control: no-cache
Vary: Accept,Origin,Accept-Encoding,Accept-Language,Authorization
X-Powered-By: HAPI FHIR 6.0.0 REST Server (FHIR Server; FHIR 4.0.1/R4)
ETag: W/"1"
Content-Location: https://mlang25.test.domain/authoring/fhir/CodeSystem/genenames.geneId-small/_history/1
Last-Modified: Tue, 07 Feb 2023 20:08:35 GMT
Content-Encoding: gzip
X-Content-Type-Options: nosniff
X-XSS-Protection: 1; mode=block
Strict-Transport-Security: max-age=31536000 ; includeSubDomains
X-Frame-Options: DENY
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: PUT, GET, POST, DELETE, OPTIONS
Access-Control-Allow-Headers: DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,AcceptX-FHIR-Starter,Origin,Accept,X-Requested-With,Content-Type,Access-Control-Request-Method,Access-Control-Request-Headers,Authorization,Prefer,Pragma,If-Match,If-None-Match
Access-Control-Expose-Headers: Content-Length,Content-Range
Access-Control-Allow-Credentials: true
X-Cache-Status: MISS
I am really struggling to work out why the cache is never being hit.
For anyone who stumbles across this: a third party changed our backend so that it started returning Cache-Control: no-cache, which means nginx will never cache the result.
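If the backend cannot be changed, nginx can be told to cache despite that header; a minimal sketch, assuming the /authoring/ location from the generated config above (this deliberately overrides the backend's Cache-Control, so it is only safe if the responses really are cacheable):
location /authoring/ {
    proxy_cache STATIC;
    proxy_cache_valid 200 1d;
    proxy_ignore_headers Cache-Control Expires;  # don't let "Cache-Control: no-cache" disable caching
    # proxy_hide_header Cache-Control;           # optionally hide the header from clients as well
    ...
    proxy_pass http://dcjson-mlang25-terminology-ingress-mlang25.test.domain-jsonserver-authoring-8080/;
}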

grpc_send_timeout doesn't work, Nginx closes GRPC streams unexpectedly

Hi everyone!
I have a config for a TLS NGINX server, which proxies streams (bidirectional/unidirectional) to my golang gRPC server. I use these params in the NGINX conf (server context):
grpc_read_timeout 7d;
grpc_send_timeout 7d;
But my bidirectional streams close after 60s (the server sends data frequently; the client doesn't send any data within 60s), as if grpc_send_timeout were set to its default value (60s).
However, if I send echo requests from the client every 20s, it works fine!
I have no idea why grpc_send_timeout doesn't work!
nginx.conf:
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log debug;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    resolver 127.0.0.1 valid=10s;
    resolver_timeout 10s;
    access_log /var/log/nginx/access.log main;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    include /etc/nginx/conf.d/*.conf;
}
conf.d/my.service.conf
server {
    listen 443 ssl http2;
    ssl_certificate my-cert.crt;
    ssl_certificate_key my-key.key;
    access_log "/var/log/nginx/my.service.access.log" main;
    error_log "/var/log/nginx/my.service.error.log" debug;
    grpc_set_header x-real-ip $remote_addr;
    grpc_set_header x-ray-id $request_id;
    grpc_read_timeout 7d;
    grpc_send_timeout 7d;  # why does this not work?????
    location /MyGoPackage.MyService {
        grpc_pass grpc://my.service.host:4321;
    }
}
nginx logs:
/ # cat /var/log/nginx/my_host_access.log
59.932 192.168.176.1 - - [06/May/2021:14:57:30 +0000] "POST /MyGoPackege.MyService/MyStreamEndpoint HTTP/2.0" 200 1860 "-" "grpc-go/1.29.1" "-"
client logs (with GRPC debug logs)
2021-05-06T17:56:30.609+0300 DEBUG grpc_mobile_client/main.go:39 open connection {"address": "localhost:443"}
INFO: 2021/05/06 17:56:30 parsed scheme: ""
INFO: 2021/05/06 17:56:30 scheme "" not registered, fallback to default scheme
INFO: 2021/05/06 17:56:30 ccResolverWrapper: sending update to cc: {[{localhost:443 <nil> 0 <nil>}] <nil> <nil>}
INFO: 2021/05/06 17:56:30 ClientConn switching balancer to "pick_first"
INFO: 2021/05/06 17:56:30 Channel switches to new LB policy "pick_first"
INFO: 2021/05/06 17:56:30 Subchannel Connectivity change to CONNECTING
INFO: 2021/05/06 17:56:30 Subchannel picks a new address "localhost:443" to connect
INFO: 2021/05/06 17:56:30 pickfirstBalancer: HandleSubConnStateChange: 0xc0004b2d60, {CONNECTING <nil>}
INFO: 2021/05/06 17:56:30 Channel Connectivity change to CONNECTING
INFO: 2021/05/06 17:56:30 Subchannel Connectivity change to READY
INFO: 2021/05/06 17:56:30 pickfirstBalancer: HandleSubConnStateChange: 0xc0004b2d60, {READY <nil>}
INFO: 2021/05/06 17:56:30 Channel Connectivity change to READY
2021-05-06T17:56:30.628+0300 DEBUG main.go:54 open stream {"address": localhost:443"}
2021-05-06T17:56:30.974+0300 INFO main.go:81 new msg from server {"msg": "hello world"}
// some logs within a 60s
2021-05-06T17:57:30.567+0300 FATAL main.go:79 receive new msg from stream {"error": "rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR"}
server logs (only this one at the moment of connect closing, GRPC debug log):
INFO: 2021/05/06 17:57:30 transport: loopyWriter.run returning. connection error: desc = "transport is closing"
Adding these params to the nginx conf solved the problem:
client_header_timeout 7d;
client_body_timeout 7d;
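For reference, a minimal sketch of the server block with those directives added, assuming the conf.d/my.service.conf above (client_body_timeout is the one that appears to matter here, since it defaults to 60s and fires when the client side of the stream sends nothing):
server {
    listen 443 ssl http2;
    ssl_certificate my-cert.crt;
    ssl_certificate_key my-key.key;

    grpc_read_timeout 7d;
    grpc_send_timeout 7d;
    client_header_timeout 7d;
    client_body_timeout 7d;   # default 60s; closes streams where the client stays silent

    location /MyGoPackage.MyService {
        grpc_pass grpc://my.service.host:4321;
    }
}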

Nginx - Is it possible to know which server is chosen?

In Nginx, is it possible to find out which server was chosen (for each request) when using load balancing?
Thanks!!
First, add a new logging format:
log_format upstreamlog '[$time_local] $remote_addr - $remote_user - $server_name to: $upstream_addr: $request upstream_response_time $upstream_response_time msec $msec request_time $request_time';
and then redefine access_log as:
access_log /var/log/nginx/access.log upstreamlog;

How to record reverse proxy upstream server serving request in Nginx log?

We use Nginx as a reverse proxy with this setup:
upstream frontends {
    server 127.0.0.1:8000;
    server 127.0.0.1:8001;
    server 127.0.0.1:8002;
    [...]
}
server {
    location / {
        proxy_pass http://frontends;
        [...]
    }
    [...]
}
As part of the access log, I would like to record the upstream server that has served the request, which in our case just means the associated localhost port.
The variables in the documentation (http://wiki.nginx.org/HttpProxyModule#Variables) mention $proxy_host and $proxy_port but in the log they always end up with the values "frontends" and "80".
First, add a new logging format:
log_format upstreamlog '[$time_local] $remote_addr - $remote_user - $server_name $host to: $upstream_addr: $request $status upstream_response_time $upstream_response_time msec $msec request_time $request_time';
Example output:
[18/Nov/2019:10:08:15 -0700] <request IP> - - - <config host> <request host> to: 127.0.0.1:8000: GET /path/requested HTTP/1.1 200 upstream_response_time 0.000 msec 1574096895.474 request_time 0.001
and then redefine access_log as:
access_log /var/log/nginx/access.log upstreamlog;
log_format goes in the http {} section; access_log can be inside a location.
Use $upstream_addr and you will get, for example, 127.0.0.1:8000 or unix:/home/my_user/www/my_site/tmp/.unicorn.sock
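Putting it together, a minimal sketch of where each directive goes, using the upstream from the question (the exact log fields are just an example):
http {
    log_format upstreamlog '[$time_local] $remote_addr to: $upstream_addr: '
                           '"$request" $status request_time $request_time';

    upstream frontends {
        server 127.0.0.1:8000;
        server 127.0.0.1:8001;
        server 127.0.0.1:8002;
    }

    server {
        listen 80;
        location / {
            access_log /var/log/nginx/access.log upstreamlog;
            proxy_pass http://frontends;
        }
    }
}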

Reverse Proxy from nginx won't run. Sites are normal

There's some problem with my nginx. At first, startup is OK and surfing through the proxy is fast enough. But after a while, 5-10 visits later, the proxy becomes slower and slower until it stops working. Even if I try to stop nginx using "-s stop", double-check whether any nginx.exe is still running, and restart nginx, it's still not working.
Nginx.exe is still running.
The port is still in use.
I am running on Windows Server 2003 Enterprise SP2 with IIS 6.
This is what I read from the error log:
2010/08/20 21:14:37 [debug] 1688#3548: posted events 00000000
2010/08/20 21:14:37 [debug] 1688#3548: worker cycle
2010/08/20 21:14:37 [debug] 1688#3548: accept mutex lock failed: 0
2010/08/20 21:14:37 [debug] 1688#3548: select timer: 500
2010/08/20 21:14:37 [debug] 1580#5516: select ready 0
2010/08/20 21:14:37 [debug] 1580#5516: timer delta: 500
2010/08/20 21:14:37 [debug] 1580#5516: posted events 00000000
2010/08/20 21:14:37 [debug] 1580#5516: worker cycle
2010/08/20 21:14:37 [debug] 1580#5516: accept mutex locked
2010/08/20 21:14:37 [debug] 1580#5516: select event: fd:176 wr:0
2010/08/20 21:14:37 [debug] 1580#5516: select timer: 500
2010/08/20 21:14:38 [debug] 1688#3548: select ready 0
2010/08/20 21:14:38 [debug] 1688#3548: timer delta: 500
2010/08/20 21:14:38 [debug] 1688#3548: posted events 00000000
2010/08/20 21:14:38 [debug] 1688#3548: worker cycle
2010/08/20 21:14:38 [debug] 1688#3548: accept mutex lock failed: 0
2010/08/20 21:14:38 [debug] 1688#3548: select timer: 500
And this is the config file I wrote:
#user deploy;
worker_processes 2;
error_log /app/nginx/logs/error.log debug;
events {
    worker_connections 64;
}
http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;
    tcp_nodelay on;
    gzip on;
    gzip_min_length 1100;
    gzip_buffers 4 8k;
    gzip_types text/plain;
    upstream mongrel {
        server 127.0.0.1:5000;
        server 127.0.0.1:5001;
        server 127.0.0.1:5002;
        #server 127.0.0.1:5003;
        #server 127.0.0.1:5004;
        #server 127.0.0.1:5005;
        #server 127.0.0.1:5006;
    }
    server {
        listen 81;
        server_name site.com;
        root C:/app/sub/public;
        index index.html index.htm;
        try_files $uri/index.html $uri.html $uri @mongrel;
        location @mongrel {
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_redirect off;
            proxy_pass http://mongrel;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }
}
