Caching not working using Nginx and 3scale

I'm using the Nginx (OpenResty) bundle with the default configuration from 3scale, plus a few additions. No matter what I do, every request shows up as a cache MISS in the logs.
The headers from the upstream server:
HTTP/1.1 200
Content-Length: 1517
Content-Type: application/xml
Date: Thu, 6 Oct 2016 11:03:56 UTC
Expires: Thu, 6 Oct 2016 11:04:11 UTC
Cache-Control: max-age=15
Length: 1517 (1.5K) [application/xml]
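For anyone reproducing this, the same header check can be done with curl against one of the upstreams from the config below (the path here is illustrative, not from the original post):
curl -s -D - -o /dev/null http://10.3.1.177:4001/some/endpoint.xml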
The Nginx config file:
# NEED CHANGE (defines the user of the nginx workers)
# user user group;
## THESE PARAMETERS CAN BE SAFELY OVERRIDDEN BY YOUR DEFAULT NGINX CONF
worker_processes 2;
env THREESCALE_DEPLOYMENT_ENV;
# error_log stderr notice;
# daemon off;
error_log logs/error.log warn;
events {
worker_connections 256;
}
http {
include mime.types;
#caching setup
proxy_cache_path /cache levels=1:2
keys_zone=main:10m
max_size=5g;
proxy_temp_path /cache/tmp;
proxy_ignore_headers "Set-Cookie";
proxy_hide_header "Set-Cookie";
##extra logging
log_format rt_cache '$remote_addr - $upstream_cache_status [$time_local] '
'Cache-Control: $upstream_http_cache_control '
'upstream_cache_status: $upstream_cache_status '
'Expires: $upstream_http_expires '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent"'
'origin="$upstream_addr"'
'rt=$request_time uct="$upstream_connect_time" uht="$upstream_header_time" urt="$upstream_response_time"';
lua_shared_dict api_keys 10m;
server_names_hash_bucket_size 128;
lua_package_path ";;$prefix/?.lua;$prefix/conf/?.lua";
init_by_lua 'math.randomseed(ngx.time()) ; cjson = require("cjson")';
resolver 8.8.8.8 8.8.4.4;
upstream backend_* {
# service name: API
server 10.3.1.177:4001 max_fails=1 fail_timeout=15s;
server 10.3.1.111:4001 max_fails=1 fail_timeout=15s;
server 10.3.1.177:4002 max_fails=1 fail_timeout=15s;
server 10.3.1.111:4002 max_fails=1 fail_timeout=15s;
}
upstream local {
server 127.0.0.1:81;
}
# server {
# server_name testapi.itoworld.com
# listen 8088;
#
# location / {
# proxy_pass $proxy_pass ;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header Host 10.3.1.36;
# proxy_cache my_zone;
# add_header X-Proxy-Cache $upstream_cache_status;
#
#
# }
# }
server {
server_name apcheck1.itoworld.com;
access_log /var/log/nginx/local.access.log rt_cache;
location / {
proxy_pass http://local/check.html;
}
}
server {
# Enabling the Lua code cache is strongly encouraged for production use; it is enabled here.
lua_code_cache on;
listen 80;
## CHANGE YOUR SERVER_NAME TO YOUR CUSTOM DOMAIN OR LEAVE IT BLANK IF ONLY HAVE ONE
#server_name $hostname;
server_name api.itoworld.com;
underscores_in_headers on;
set_by_lua $deployment 'return os.getenv("THREESCALE_DEPLOYMENT_ENV")';
set $threescale_backend "https://su1.3scale.net:443";
access_log /var/log/nginx/api.access.log rt_cache;
location = /threescale_authrep {
internal;
set $provider_key "******";
proxy_pass $threescale_backend/transactions/authrep.xml?provider_key=$provider_key&service_id=$service_id&$usage&$credentials&log%5Bcode%5D=$arg_code&log%5Brequest%5D=$arg_req&log%5Bresponse%5D=$arg_resp;
proxy_set_header Host "su1.3scale.net";
proxy_set_header X-3scale-User-Agent "nginx$deployment";
proxy_set_header X-3scale-Version "2016-06-17T15:47:50Z";
}
location = /out_of_band_authrep_action {
internal;
proxy_pass_request_headers off;
##set $provider_key "*";
##needs to be in both places; better not to have it in location / because of potential security issues (these requests are internal)
set $provider_key "******";
content_by_lua "require('nginx_*').post_action_content()";
}
location / {
set $provider_key null;
set $cached_key null;
set $credentials null;
set $usage null;
set $service_id *********;
set $proxy_pass null;
set $secret_token null;
set $resp_body null;
set $resp_headers null;
proxy_cache main;
proxy_cache_key $host$uri$is_args$args;
proxy_cache_valid 200 301 302 30m ;
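# Note (added): nginx honors upstream X-Accel-Expires, Cache-Control and
# Expires headers ahead of proxy_cache_valid, which only applies when none
# of those are present or when they are listed in proxy_ignore_headers.
# Optional debug aid: uncomment to expose the cache status to clients.
# add_header X-Cache-Status $upstream_cache_status always;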
proxy_ignore_client_abort on;
## CHANGE THE PATH TO POINT TO THE RIGHT FILE ON YOUR FILESYSTEM IF NEEDED
access_by_lua "require('nginx_*').access()";
body_filter_by_lua 'ngx.ctx.buffered = (ngx.ctx.buffered or "") .. string.sub(ngx.arg[1], 1, 1000)
if ngx.arg[2] then ngx.var.resp_body = ngx.ctx.buffered end';
header_filter_by_lua 'ngx.var.resp_headers = cjson.encode(ngx.resp.get_headers())';
proxy_pass $proxy_pass ;
proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header Host 10.3.1.36;
proxy_set_header X-3scale-proxy-secret-token $secret_token;
post_action /out_of_band_authrep_action;
}
}
}

I have just tried running a configuration with the same cache setup, and it works as expected: the access log shows MISS, then HIT, then EXPIRED.
Feel free to send us the complete configuration along with the calls that you are making to support@3scale.net, and we can take a look :)
Cheers,
Daria

Related

Kubernetes Nginx Ingress file upload returning 502

I am trying to upload files from a client through an nginx ingress. I have set the following annotations on the ingress after receiving a 413 response:
Annotations: nginx.ingress.kubernetes.io/body-size: 1024m
nginx.ingress.kubernetes.io/client-body-buffer-size: 50m
nginx.ingress.kubernetes.io/client-max-body-size: 50m
nginx.ingress.kubernetes.io/proxy-body-size: 1024m
nginx.ingress.kubernetes.io/proxy-buffer-size: 32k
nginx.ingress.kubernetes.io/proxy-buffers-number: 8
The client is an Angular application that sends a base64 string of the file in the request body. I have tried uploading images of a few KB, so I'm definitely not hitting any of these limits. I'm new to Kubernetes; do I need to restart the ingress for these annotations to take effect?
I have also tried creating a ConfigMap:
apiVersion: v1
kind: ConfigMap
metadata:
name: app-ingress-configuration
namespace: development
labels:
app.kubernetes.io/name: [name of ingress]
app.kubernetes.io/part-of: [name of ingress]
data:
proxy-connect-timeout: "50"
proxy-read-timeout: "120"
proxy-send-timeout: "120"
body-size: "1024m"
client-body-buffer-size: "50m"
client-max-body-size: "50m"
proxy-body-size: "1024m"
proxy-buffers: "8 32k"
proxy-buffer-size: "32k"
Still getting a 502.
I'm not sure how to access the nginx.conf through kubectl; from the docs it seems that if I update this ConfigMap the settings are applied to nginx anyway.
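For reference, the rendered config can be dumped straight from the controller pod (the namespace and pod name below are assumptions; adjust to your cluster):
kubectl get pods -n ingress-nginx
kubectl exec -n ingress-nginx <controller-pod-name> -- cat /etc/nginx/nginx.conf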
Any help appreciated.
UPDATE
nginx.conf
# Configuration checksum: 1961171210939107273
# setup custom paths that do not require root access
pid /tmp/nginx.pid;
daemon off;
worker_processes 2;
worker_rlimit_nofile 523264;
worker_shutdown_timeout 240s ;
events {
multi_accept on;
worker_connections 16384;
use epoll;
}
http {
client_max_body_size 100M;
lua_package_path "/etc/nginx/lua/?.lua;;";
lua_shared_dict balancer_ewma 10M;
lua_shared_dict balancer_ewma_last_touched_at 10M;
lua_shared_dict balancer_ewma_locks 1M;
lua_shared_dict certificate_data 20M;
lua_shared_dict certificate_servers 5M;
lua_shared_dict configuration_data 20M;
init_by_lua_block {
collectgarbage("collect")
-- init modules
local ok, res
ok, res = pcall(require, "lua_ingress")
if not ok then
error("require failed: " .. tostring(res))
else
lua_ingress = res
lua_ingress.set_config({
use_forwarded_headers = false,
use_proxy_protocol = false,
is_ssl_passthrough_enabled = false,
http_redirect_code = 308,
listen_ports = { ssl_proxy = "442", https = "443" },
hsts = true,
hsts_max_age = 15724800,
hsts_include_subdomains = true,
hsts_preload = false,
})
end
ok, res = pcall(require, "configuration")
if not ok then
error("require failed: " .. tostring(res))
else
configuration = res
end
ok, res = pcall(require, "balancer")
if not ok then
error("require failed: " .. tostring(res))
else
balancer = res
end
ok, res = pcall(require, "monitor")
if not ok then
error("require failed: " .. tostring(res))
else
monitor = res
end
ok, res = pcall(require, "certificate")
if not ok then
error("require failed: " .. tostring(res))
else
certificate = res
end
ok, res = pcall(require, "plugins")
if not ok then
error("require failed: " .. tostring(res))
else
plugins = res
end
-- load all plugins that'll be used here
plugins.init({})
}
init_worker_by_lua_block {
lua_ingress.init_worker()
balancer.init_worker()
monitor.init_worker()
plugins.run()
}
geoip_country /etc/nginx/geoip/GeoIP.dat;
geoip_city /etc/nginx/geoip/GeoLiteCity.dat;
geoip_org /etc/nginx/geoip/GeoIPASNum.dat;
geoip_proxy_recursive on;
aio threads;
aio_write on;
tcp_nopush on;
tcp_nodelay on;
log_subrequest on;
reset_timedout_connection on;
keepalive_timeout 75s;
keepalive_requests 100;
client_body_temp_path /tmp/client-body;
fastcgi_temp_path /tmp/fastcgi-temp;
proxy_temp_path /tmp/proxy-temp;
ajp_temp_path /tmp/ajp-temp;
client_header_buffer_size 1M;
client_header_timeout 60s;
large_client_header_buffers 4 5M;
client_body_buffer_size 1M;
client_body_timeout 60s;
http2_max_field_size 1M;
http2_max_header_size 5M;
http2_max_requests 1000;
http2_max_concurrent_streams 128;
types_hash_max_size 2048;
server_names_hash_max_size 1024;
server_names_hash_bucket_size 64;
map_hash_bucket_size 64;
proxy_headers_hash_max_size 512;
proxy_headers_hash_bucket_size 64;
variables_hash_bucket_size 256;
variables_hash_max_size 2048;
underscores_in_headers off;
ignore_invalid_headers on;
limit_req_status 503;
limit_conn_status 503;
include /etc/nginx/mime.types;
default_type text/html;
gzip on;
gzip_comp_level 5;
gzip_http_version 1.1;
gzip_min_length 256;
gzip_types application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component;
gzip_proxied any;
gzip_vary on;
# Custom headers for response
server_tokens on;
# disable warnings
uninitialized_variable_warn off;
# Additional available variables:
# $namespace
# $ingress_name
# $service_name
# $service_port
log_format upstreaminfo '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] [$proxy_alternative_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id';
map $request_uri $loggable {
default 1;
}
access_log /var/log/nginx/access.log upstreaminfo if=$loggable;
error_log /var/log/nginx/error.log notice;
resolver 10.245.0.10 valid=30s;
# See https://www.nginx.com/blog/websocket-nginx
map $http_upgrade $connection_upgrade {
default upgrade;
# See http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive
'' '';
}
# Reverse proxies can detect if a client provides a X-Request-ID header, and pass it on to the backend server.
# If no such header is provided, it can provide a random value.
map $http_x_request_id $req_id {
default $http_x_request_id;
"" $request_id;
}
# Create a variable that contains the literal $ character.
# This works because the geo module will not resolve variables.
geo $literal_dollar {
default "$";
}
server_name_in_redirect off;
port_in_redirect off;
ssl_protocols TLSv1.2;
ssl_early_data off;
# turn on session caching to drastically improve performance
ssl_session_cache builtin:1000 shared:SSL:10m;
ssl_session_timeout 10m;
# allow configuring ssl session tickets
ssl_session_tickets on;
# slightly reduce the time-to-first-byte
ssl_buffer_size 4k;
# allow configuring custom ssl ciphers
ssl_ciphers '';
ssl_prefer_server_ciphers on;
ssl_ecdh_curve auto;
# PEM sha: ---
ssl_certificate /etc/ingress-controller/ssl/default-fake-certificate.pem;
ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;
proxy_ssl_session_reuse on;
upstream upstream_balancer {
### Attention!!!
#
# We no longer create "upstream" section for every backend.
# Backends are handled dynamically using Lua. If you would like to debug
# and see what backends ingress-nginx has in its memory you can
# install our kubectl plugin https://kubernetes.github.io/ingress-nginx/kubectl-plugin.
# Once you have the plugin you can use "kubectl ingress-nginx backends" command to
# inspect current backends.
#
###
server 0.0.0.1; # placeholder
balancer_by_lua_block {
balancer.balance()
}
keepalive 32;
keepalive_timeout 60s;
keepalive_requests 100;
}
# Cache for internal auth checks
proxy_cache_path /tmp/nginx-cache-auth levels=1:2 keys_zone=auth_cache:10m max_size=128m inactive=30m use_temp_path=off;
# Global filters
## start server _
server {
server_name _ ;
listen 80 default_server reuseport backlog=511 ;
listen [::]:80 default_server reuseport backlog=511 ;
listen 443 default_server reuseport backlog=511 ssl http2 ;
listen [::]:443 default_server reuseport backlog=511 ssl http2 ;
set $proxy_upstream_name "-";
ssl_certificate_by_lua_block {
certificate.call()
}
location / {
set $namespace "";
set $ingress_name "";
set $service_name "";
set $service_port "";
set $location_path "/";
rewrite_by_lua_block {
lua_ingress.rewrite({
force_ssl_redirect = false,
ssl_redirect = false,
force_no_ssl_redirect = false,
use_port_in_redirects = false,
})
balancer.rewrite()
plugins.run()
}
# be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
# will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
# other authentication method such as basic auth or external auth useless - all requests will be allowed.
#access_by_lua_block {
#}
header_filter_by_lua_block {
lua_ingress.header()
plugins.run()
}
body_filter_by_lua_block {
}
log_by_lua_block {
balancer.log()
monitor.call()
plugins.run()
}
access_log off;
port_in_redirect off;
set $balancer_ewma_score -1;
set $proxy_upstream_name "upstream-default-backend";
set $proxy_host $proxy_upstream_name;
set $pass_access_scheme $scheme;
set $pass_server_port $server_port;
set $best_http_host $http_host;
set $pass_port $pass_server_port;
set $proxy_alternative_upstream_name "";
client_max_body_size 1m;
proxy_set_header Host $best_http_host;
# Pass the extracted client certificate to the backend
# Allow websocket connections
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header X-Request-ID $req_id;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Host $best_http_host;
proxy_set_header X-Forwarded-Port $pass_port;
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
proxy_set_header X-Scheme $pass_access_scheme;
# Pass the original X-Forwarded-For
proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
# mitigate HTTPoxy Vulnerability
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
proxy_set_header Proxy "";
# Custom headers to proxied server
proxy_connect_timeout 5s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
proxy_buffering off;
proxy_buffer_size 5M;
proxy_buffers 4 5M;
proxy_max_temp_file_size 1024M;
proxy_request_buffering on;
proxy_http_version 1.1;
proxy_cookie_domain off;
proxy_cookie_path off;
# In case of errors try the next upstream server before returning an error
proxy_next_upstream error timeout;
proxy_next_upstream_timeout 0;
proxy_next_upstream_tries 3;
proxy_pass http://upstream_balancer;
proxy_redirect off;
}
# health checks in cloud providers require the use of port 80
location /healthz {
access_log off;
return 200;
}
# this is required to avoid error if nginx is being monitored
# with an external software (like sysdig)
location /nginx_status {
allow 127.0.0.1;
allow ::1;
deny all;
access_log off;
stub_status on;
}
}
## end server _
## start server dev-api
server {
server_name dev-api ;
listen 80 ;
listen [::]:80 ;
listen 443 ssl http2 ;
listen [::]:443 ssl http2 ;
set $proxy_upstream_name "-";
ssl_certificate_by_lua_block {
certificate.call()
}
location / {
set $namespace "development";
set $ingress_name "app-ingress";
set $service_name "app-api-svc";
set $service_port "80";
set $location_path "/";
rewrite_by_lua_block {
lua_ingress.rewrite({
force_ssl_redirect = false,
ssl_redirect = true,
force_no_ssl_redirect = false,
use_port_in_redirects = false,
})
balancer.rewrite()
plugins.run()
}
# be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
# will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
# other authentication method such as basic auth or external auth useless - all requests will be allowed.
#access_by_lua_block {
#}
header_filter_by_lua_block {
lua_ingress.header()
plugins.run()
}
body_filter_by_lua_block {
}
log_by_lua_block {
balancer.log()
monitor.call()
plugins.run()
}
port_in_redirect off;
set $balancer_ewma_score -1;
set $proxy_upstream_name "development-app-api-svc-80";
set $proxy_host $proxy_upstream_name;
set $pass_access_scheme $scheme;
set $pass_server_port $server_port;
set $best_http_host $http_host;
set $pass_port $pass_server_port;
set $proxy_alternative_upstream_name "";
client_max_body_size 1024M;
client_body_buffer_size 50M;
proxy_set_header Host $best_http_host;
# Pass the extracted client certificate to the backend
# Allow websocket connections
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header X-Request-ID $req_id;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Host $best_http_host;
proxy_set_header X-Forwarded-Port $pass_port;
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
proxy_set_header X-Scheme $pass_access_scheme;
# Pass the original X-Forwarded-For
proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
# mitigate HTTPoxy Vulnerability
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
proxy_set_header Proxy "";
# Custom headers to proxied server
proxy_connect_timeout 50s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
proxy_buffering off;
proxy_buffer_size 5M;
proxy_buffers 8 5M;
proxy_max_temp_file_size 1024M;
proxy_request_buffering on;
proxy_http_version 1.1;
proxy_cookie_domain off;
proxy_cookie_path off;
# In case of errors try the next upstream server before returning an error
proxy_next_upstream error timeout;
proxy_next_upstream_timeout 0;
proxy_next_upstream_tries 3;
proxy_pass http://upstream_balancer;
proxy_redirect off;
}
}
## end server dev-api
.......
UPDATE 2
Log from the kubectl logs command on the nginx-ingress-controller-XXX pod:
127.0.0.1 - - [16/Jul/2020:10:11:14 +0000] "POST [Ingress/Service endpoint] HTTP/2.0" 502 4 "https://[client-host-name]/[client-path]" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36 Edg/83.0.478.58" 9351 0.659 [service-name-80] [] 10.244.1.72:80 14 0.652 502 7b7bdf8a9319e88c80ba3444372daf2d
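Reading that line against the log_format above, the trailing fields say the upstream pod 10.244.1.72:80 itself answered with a 502 in 0.652s, which points at the application rather than the ingress. One way to check (the deployment name here is an assumption; the namespace and service name come from the config above):
kubectl get pods -n development
kubectl logs -n development deploy/app-api
kubectl describe svc app-api-svc -n development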
You need to make sure the file size limit is set on the Ingress resource itself; nginx will pick up the settings. Try the following. For more information about the annotations, see https://docs.nginx.com/nginx-ingress-controller/configuration/ingress-resources/advanced-configuration-with-annotations/
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
name: service-api-tls-ingress
namespace: production
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: 8m
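Apply it and confirm the annotation is visible on the Ingress, e.g.:
kubectl apply -f ingress.yaml
kubectl describe ingress service-api-tls-ingress -n production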
My original issue was nginx, but after I changed the limits it was forwarding the request to the service; I just wasn't checking the correct logs. So @mWatney was correct to double-check directly against the service/pod.
For anyone else, the issue I was seeing was down to running a .NET Core 3.1 application in a Linux Alpine container. Within the app I was using a version of System.Drawing.Common that throws exceptions under Linux, which look like this:
System.TypeInitializationException: The type initializer for 'Gdip' threw an exception.
---> System.DllNotFoundException: Unable to load shared library 'libgdiplus'
The solution was to add this to the Dockerfile:
RUN apk add libgdiplus-dev fontconfig ttf-dejavu --update-cache --repository http://dl-3.alpinelinux.org/alpine/edge/testing/ --allow-untrusted
This allows the use of System.Drawing.Common under Linux by making the shared library libgdiplus loadable.
Credit here: https://github.com/dotnet/dotnet-docker/issues/618#issuecomment-467619498
The more permanent solution is to remove the dependency on System.Drawing.Common from the application entirely. Thanks @mWatney for the help, you got me on the right track.

Google App Engine Docker Container 502 Bad Gateway

I am trying to deploy my Docker image to Google App Engine. I successfully managed to build the image, push it to GCR, and deploy it using gcloud app deploy --image 'link-to-image-on-gcr'.
But when accessing the application I'm getting a 502 Bad Gateway. I SSHed into the server, checked the logs of the nginx container in Docker, and found the log line below:
2020/05/04 00:52:50 [error] 33#33: *127 connect() failed (111: Connection refused) while connecting to upstream, client: 74.125.24.153, server: , request: "GET /wp-login.php HTTP/1.1", upstream: "http://172.17.0.1:8080/wp-login.php", host: "myappengineservice-myrepo.ue.r.appspot.com"
My Docker image has only one container (it's a WordPress image). When deployed, I suppose App Engine starts my container and exposes the frontend via an Nginx proxy, so all requests are routed through the Nginx proxy.
After playing around for a while, I edited the Nginx configuration file and came across these lines:
location / {
proxy_pass http://app_server;
I edited this and replaced it with my WordPress container's internal IP address:
(proxy_pass http://172.17.0.6;)
And voilà, it seemed to work; requests are now being routed to my Docker container.
This was obviously a temporary fix. How can I make it permanent, and does anyone have an idea why this is happening?
app.yaml
runtime: custom
service: my-wordpress
env: flex
nginx.conf (inside the Nginx container)
daemon off;
worker_processes auto;
events {
worker_connections 4096;
multi_accept on;
}
http {
include mime.types;
server_tokens off;
variables_hash_max_size 2048;
# set max body size to 32m as appengine supports.
client_max_body_size 32m;
tcp_nodelay on;
tcp_nopush on;
underscores_in_headers on;
# GCLB uses a 10 minutes keep-alive timeout. Setting it to a bit more here
# to avoid a race condition between the two timeouts.
keepalive_timeout 650;
# Effectively unlimited number of keepalive requests in the case of GAE flex.
keepalive_requests 4294967295;
upstream app_server {
keepalive 192;
server gaeapp:8080;
}
geo $source_type {
default ext;
127.0.0.0/8 lo;
169.254.0.0/16 sb;
35.191.0.0/16 lb;
130.211.0.0/22 lb;
172.16.0.0/12 do;
}
map $http_upgrade $ws_connection_header_value {
default "";
websocket upgrade;
}
# ngx_http_realip_module gets the second IP address from the last of the X-Forwarded-For header
# X-Forwarded-For: [USER REQUEST PROVIDED X-F-F.]USER-IP.GCLB_IP
set_real_ip_from 0.0.0.0/0;
set_real_ip_from 0::/0;
real_ip_header X-Forwarded-For;
iap_jwt_verify off;
iap_jwt_verify_project_number 96882395728;
iap_jwt_verify_app_id my-project-id;
iap_jwt_verify_key_file /iap_watcher/iap_verify_keys.txt;
iap_jwt_verify_iap_state_file /iap_watcher/iap_state;
iap_jwt_verify_state_cache_time_sec 300;
iap_jwt_verify_key_cache_time_sec 43200;
iap_jwt_verify_logs_only on;
server {
iap_jwt_verify on;
# self signed ssl for load balancer traffic
listen 8443 default_server ssl;
ssl_certificate /etc/ssl/localcerts/lb.crt;
ssl_certificate_key /etc/ssl/localcerts/lb.key;
ssl_protocols TLSv1.2;
ssl_ciphers EECDH+AES256:!SHA1;
ssl_prefer_server_ciphers on;
ssl_session_timeout 3h;
proxy_pass_header Server;
gzip on;
gzip_proxied any;
gzip_types text/html text/plain text/css text/xml text/javascript application/json application/javascript application/xml application/xml+rss application/protobuf application/x-protobuf;
gzip_vary on;
# Allow more space for request headers.
large_client_header_buffers 4 32k;
# Allow more space for response headers. These settings apply for response
# only, not requests which buffering is disabled below.
proxy_buffer_size 64k;
proxy_buffers 32 4k;
proxy_busy_buffers_size 72k;
# Explicitly set client buffer size matching nginx default.
client_body_buffer_size 16k;
# If version header present, make sure it's correct.
if ($http_x_appengine_version !~ '(?:^$)|(?:^my-wordpress:20200504t053100(?:\..*)?$)') {
return 444;
}
set $x_forwarded_for_test "";
# If request comes from sb, lo, or do, do not care about x-forwarded-for header.
if ($source_type !~ sb|lo|do) {
set $x_forwarded_for_test $http_x_forwarded_for;
}
# For local health checks only.
if ($http_x_google_vme_health_check = 1) {
set $x_forwarded_for_test "";
}
location / {
proxy_pass http://app_server;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $server_name;
proxy_send_timeout 3600s;
proxy_read_timeout 3600s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $ws_connection_header_value;
proxy_set_header X-AppEngine-Api-Ticket $http_x_appengine_api_ticket;
proxy_set_header X-AppEngine-Auth-Domain $http_x_appengine_auth_domain;
proxy_set_header X-AppEngine-BlobChunkSize $http_x_appengine_blobchunksize;
proxy_set_header X-AppEngine-BlobSize $http_x_appengine_blobsize;
proxy_set_header X-AppEngine-BlobUpload $http_x_appengine_blobupload;
proxy_set_header X-AppEngine-Cron $http_x_appengine_cron;
proxy_set_header X-AppEngine-Current-Namespace $http_x_appengine_current_namespace;
proxy_set_header X-AppEngine-Datacenter $http_x_appengine_datacenter;
proxy_set_header X-AppEngine-Default-Namespace $http_x_appengine_default_namespace;
proxy_set_header X-AppEngine-Default-Version-Hostname $http_x_appengine_default_version_hostname;
proxy_set_header X-AppEngine-Federated-Identity $http_x_appengine_federated_identity;
proxy_set_header X-AppEngine-Federated-Provider $http_x_appengine_federated_provider;
proxy_set_header X-AppEngine-Https $http_x_appengine_https;
proxy_set_header X-AppEngine-Inbound-AppId $http_x_appengine_inbound_appid;
proxy_set_header X-AppEngine-Inbound-User-Email $http_x_appengine_inbound_user_email;
proxy_set_header X-AppEngine-Inbound-User-Id $http_x_appengine_inbound_user_id;
proxy_set_header X-AppEngine-Inbound-User-Is-Admin $http_x_appengine_inbound_user_is_admin;
proxy_set_header X-AppEngine-QueueName $http_x_appengine_queuename;
proxy_set_header X-AppEngine-Request-Id-Hash $http_x_appengine_request_id_hash;
proxy_set_header X-AppEngine-Request-Log-Id $http_x_appengine_request_log_id;
proxy_set_header X-AppEngine-TaskETA $http_x_appengine_tasketa;
proxy_set_header X-AppEngine-TaskExecutionCount $http_x_appengine_taskexecutioncount;
proxy_set_header X-AppEngine-TaskName $http_x_appengine_taskname;
proxy_set_header X-AppEngine-TaskRetryCount $http_x_appengine_taskretrycount;
proxy_set_header X-AppEngine-TaskRetryReason $http_x_appengine_taskretryreason;
proxy_set_header X-AppEngine-Upload-Creation $http_x_appengine_upload_creation;
proxy_set_header X-AppEngine-User-Email $http_x_appengine_user_email;
proxy_set_header X-AppEngine-User-Id $http_x_appengine_user_id;
proxy_set_header X-AppEngine-User-Is-Admin $http_x_appengine_user_is_admin;
proxy_set_header X-AppEngine-User-Nickname $http_x_appengine_user_nickname;
proxy_set_header X-AppEngine-User-Organization $http_x_appengine_user_organization;
proxy_set_header X-AppEngine-Version "";
add_header X-AppEngine-Flex-AppLatency $request_time always;
}
include /var/lib/nginx/extra/*.conf;
}
server {
# expose /nginx_status but on a different port (8090) to avoid
# external visibility / conflicts with the app.
listen 8090;
location /nginx_status {
stub_status on;
access_log off;
}
location / {
root /dev/null;
}
}
server {
# expose health checks on a different port to avoid
# external visibility / conflicts with the app.
listen 10402 ssl;
ssl_certificate /etc/ssl/localcerts/lb.crt;
ssl_certificate_key /etc/ssl/localcerts/lb.key;
ssl_protocols TLSv1.2;
ssl_ciphers EECDH+AES256:!SHA1;
ssl_prefer_server_ciphers on;
ssl_session_timeout 3h;
location = /liveness_check {
if ( -f /tmp/nginx/lameducked ) {
return 503 'lameducked';
}
if ( -f /var/lib/google/ae/unhealthy/sidecars ) {
return 503 'unhealthy sidecars';
}
if ( !-f /var/lib/google/ae/disk_not_full ) {
return 503 'disk full';
}
if ( -f /tmp/nginx/app_lameducked ) {
return 200 'ok';
}
return 200 'ok';
}
location = /readiness_check {
if ( -f /tmp/nginx/lameducked ) {
return 503 'lameducked';
}
if ( -f /var/lib/google/ae/unhealthy/sidecars ) {
return 503 'unhealthy sidecars';
}
if ( !-f /var/lib/google/ae/disk_not_full ) {
return 503 'disk full';
}
if ( -f /tmp/nginx/app_lameducked ) {
return 503 'app lameducked';
}
return 200 'ok';
}
}
# Add session affinity entry to log_format line i.i.f. the GCLB cookie
# is present.
map $cookie_gclb $session_affinity_log_entry {
'' '';
default sessionAffinity=$cookie_gclb;
}
# Output nginx access logs in the standard format, plus additional custom
# fields containing "X-Cloud-Trace-Context" header, the current epoch
# timestamp, the request latency, and "X-Forwarded-For" at the end.
# If you make changes to the log format below, you MUST validate this against
# the parsing regex at:
# GoogleCloudPlatform/appengine-sidecars-docker/fluentd_logger/managed_vms.conf
# (In general, adding to the end of the list does not require a change if the
# field does not need to be logged.)
log_format custom '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'tracecontext="$http_x_cloud_trace_context" '
'timestampSeconds="${msec}000000" '
'latencySeconds="$request_time" '
'x-forwarded-for="$http_x_forwarded_for" '
'uri="$uri" '
'appLatencySeconds="$upstream_response_time" '
'appStatusCode="$upstream_status" '
'upgrade="$http_upgrade" '
'iap_jwt_action="$iap_jwt_action" '
'$session_affinity_log_entry';
access_log /var/log/nginx/access.log custom;
error_log /var/log/nginx/error.log warn;
}
/etc/hosts (inside Nginx container)
root@f9c9cb5df8e2:/etc/nginx# cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.1 gaeapp
172.17.0.5 f9c9cb5df8e2
docker ps result
I was able to solve the issue by exposing my WordPress site on port 8080 from my Docker container; it was exposed on port 80 before. In hindsight this adds up: the nginx sidecar proxies to gaeapp:8080 (see the upstream above and /etc/hosts), so the app container has to listen on 8080; while WordPress listened on 80, the proxy's connect() to :8080 was refused, hence the 502.
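A minimal sketch of that change for the official Debian/Apache-based WordPress image (the file paths are assumptions about the base image, so verify them in yours):
FROM wordpress:latest
# Move Apache to port 8080, which is where the App Engine nginx sidecar
# proxies ("server gaeapp:8080" in the upstream above).
RUN sed -i 's/Listen 80/Listen 8080/' /etc/apache2/ports.conf \
 && sed -i 's/:80>/:8080>/' /etc/apache2/sites-available/000-default.conf
EXPOSE 8080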

Odoo 13 Web Assets Failure

I have an Odoo instance on a VPS behind an nginx reverse proxy. When I access the Odoo instance directly at the VPS IP:8069 or domain:8069, the page is displayed correctly and there are no asset errors. If I try to access it through the proxy, using the IP or domain name (default port 80), some JavaScript resources don't load and/or images are truncated.
nginx.conf
user nobody;
worker_processes 1;
error_log logs/error.log;
worker_rlimit_nofile 8192;
events {
worker_connections 4096; ## Default: 1024
}
http {
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
server {
listen 80;
server_name www.mysite.com;
rewrite ^(.*) https://$host$1 permanent;
}
server {
listen 443 ssl http2;
server_name www.mysite.com;
proxy_read_timeout 720s;
proxy_connect_timeout 720s;
proxy_send_timeout 720s;
# Add Headers for odoo proxy mode
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
#SSL parameters
ssl on;
ssl_certificate /etc/mycert/location/fullcert.pem;
ssl_certificate_key /etc/mycert/location/pkey.pem;
ssl_session_timeout 30m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers EECDH+CHACHA20:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_prefer_server_ciphers on;
ssl_session_tickets off;
#log
access_log /var/log/nginx/odoo.access.log;
error_log /var/log/nginx/odoo.error.log;
location ~* / {
proxy_redirect off;
proxy_pass http://odoo;
}
location /longpolling {
proxy_pass http://odoo-chat;
}
# gzip
gzip_types text/css text/less text/plain text/xml application/xml application/json application/javascript;
gzip on;
}
upstream odoo {
server 127.0.0.1:8069;
}
upstream odoo-chat {
server 127.0.0.1:8072;
}
}
I've tried Odoo 12 and 13 with Python 3.6, based on some Python errors mentioned in a forum; no change. I have proxy = true in my odoo.conf file.
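(Side note: the usual spelling of that option is proxy_mode; a minimal excerpt, assuming a standard odoo.conf layout:)
[options]
; let Odoo trust the X-Forwarded-* headers set by nginx
proxy_mode = True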
2019/10/21 11:56:46 [crit] 2803#0: *1 open() "/var/lib/nginx/tmp/proxy/1/00/0000000001" failed (13: Permission denied) while reading upstream, client: 99.99.254.254, server: www.example.com, request: "GET /web/image/366?access_token=ff2c51a8-716b-448f-98b7-85b97349d39a HTTP/1.1", upstream: "http://127.0.0.1:8069/web/image/366?access_token=ff2c51a8-716b-448f-98b7-85b97349d39a", host: "example.com", referrer: "https://example.com/"
I experienced this problem just now. It happens because nginx dynamically creates temporary files under /var/lib/nginx/tmp/proxy, and chown-ing that directory will not solve the issue. What I learned was to set proxy_temp_path:
Create a directory in which the user nobody has write permissions, e.g. mkdir /home/nobody/tmp
Set proxy_temp_path in nginx.conf: proxy_temp_path /home/nobody/tmp 1 2;
An excerpt of config is shown below:
location ~* / {
proxy_temp_path /home/nobody/tmp 1 2;
proxy_redirect off;
proxy_pass http://odoo;
}
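If the same path should apply everywhere, proxy_temp_path is also valid at the http level, so one directive can cover all proxied locations; a sketch using the same directory:
http {
    # one temp path for every proxied location
    proxy_temp_path /home/nobody/tmp 1 2;
    # ... rest of the existing http block ...
}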

NGINX configuration for gunicorn and prerender.io

I am currently serving my website using Nginx and Gunicorn.
In particular, Nginx serves the static files and Gunicorn serves the REST API.
This is my current Nginx configuration:
worker_processes 2;
user nobody nogroup;
# 'user nobody nobody;' for systems with 'nobody' as a group instead
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024; # increase if you have lots of clients
accept_mutex on; # set to 'on' if nginx worker_processes > 1
# 'use epoll;' to enable for Linux 2.6+
# 'use kqueue;' to enable for FreeBSD, OSX
}
http {
include mime.types;
# fallback in case we can't determine a type
default_type application/octet-stream;
access_log /var/log/nginx/access.log combined;
sendfile on;
proxy_connect_timeout 300;
proxy_send_timeout 300;
proxy_read_timeout 300;
send_timeout 300;
upstream app_server {
# fail_timeout=0 means we always retry an upstream even if it failed
# to return a good HTTP response
# for UNIX domain socket setups
# server unix:/tmp/gunicorn.sock fail_timeout=0;
# for a TCP configuration
server 127.0.0.1:8181 fail_timeout=0;
}
server {
listen 80;
listen [::]:80;
server_name www.miralytics.social;
return 301 https://www.miralytics.social$request_uri;
}
server {
# if no Host match, close the connection to prevent host spoofing
listen 443 default ssl;
ssl_certificate /certificates/fullchain1.pem;
ssl_certificate_key /certificates/privkey1.pem;
server_name www.miralytics.social;
gzip on;
gzip_vary on;
gzip_types text/plain text/html text/xml text/css application/x-javascript image/png image/jpeg application/javascript application/octet-stream application/json;
gzip_proxied any;
gzip_http_version 1.1;
gzip_min_length 0;
gzip_comp_level 9;
gzip_buffers 16 8k;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
keepalive_timeout 5;
# path for static files
root /home/edge7/UIBackend/dist;
location ~ ^/(images|javascript|js|css|flash|media|static)/ {
root /home/edge7/UIBackend/dist;
expires 1d;
}
location /auth/register {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
proxy_pass http://localhost:8181;
}
location /auth/login {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
proxy_pass http://localhost:8181;
}
location / {
# checks for static file, if not found proxy to app
try_files $uri @proxy_to_app;
}
location @proxy_to_app {
proxy_pass http://localhost:8181;
}
add_header Cache-Control no-cache; #(no cache for testing reasons)
}
}
Here is the official Prerender configuration for Nginx, but as you can see it does not fit my current configuration, because I already have @proxy_to_app.
Does anyone have experience with this?
You can just modify your config a little bit. Where you have this:
location / {
# checks for static file, if not found proxy to app
try_files $uri @proxy_to_app;
}
You would want to change that to:
location / {
proxy_set_header X-Prerender-Token YOUR_TOKEN;
set $prerender 0;
if ($http_user_agent ~* "googlebot|bingbot|yandex|baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
set $prerender 1;
}
if ($args ~ "_escaped_fragment_") {
set $prerender 1;
}
if ($http_user_agent ~ "Prerender") {
set $prerender 0;
}
if ($uri ~* "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff|svg|eot)") {
set $prerender 0;
}
#resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
resolver 8.8.8.8;
if ($prerender = 1) {
#setting prerender as a variable forces DNS resolution since nginx caches IPs and doesn't play well with load balancing
set $prerender "service.prerender.io";
rewrite .* /$scheme://$host$request_uri? break;
proxy_pass http://$prerender;
}
# checks for static file, if not found proxy to app
try_files $uri @proxy_to_app;
}
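A quick way to sanity-check the result (any page URL works; the host is the one from the config above): request a page with a crawler user agent, which should route through prerender.io, then with a browser agent, which should hit the app:
curl -A "googlebot" -sI https://www.miralytics.social/
curl -A "Mozilla/5.0" -sI https://www.miralytics.social/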

How do I make nginx accept unencrypted http traffic on the local network?

I'm running the nginx bundled with GitLab, and it has an SSL cert, but the cert is only for the public domain, so nginx won't accept traffic that isn't encrypted; because of this I can't access my server from the local network (which is my home network). Is there a way to change this so that nginx accepts unencrypted traffic only on the local network?
EDIT: Similar to this question.
here is my nginx config:
user gitlab-www gitlab-www;
worker_processes 4;
error_log stderr;
pid nginx.pid;
daemon off;
events {
worker_connections 10240;
}
http {
log_format gitlab_access '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"';
log_format gitlab_ci_access '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"';
log_format gitlab_mattermost_access '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"';
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
gzip on;
gzip_http_version 1.0;
gzip_comp_level 2;
gzip_proxied any;
gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript application/json;
include /opt/gitlab/embedded/conf/mime.types;
include /var/opt/gitlab/nginx/conf/gitlab-http.conf;
}
here is the gitlab-http config:
upstream gitlab {
server unix:/var/opt/gitlab/gitlab-rails/sockets/gitlab.socket fail_timeout=0;
}
upstream gitlab-workhorse {
server unix:/var/opt/gitlab/gitlab-workhorse/socket;
}
## Redirects all HTTP traffic to the HTTPS host
server {
listen 0.0.0.0:80;
listen [::]:80;
server_name git.team2roblox.tk;
server_tokens off; ## Don't show the nginx version number, a security best practice
return 301 https://git.team2roblox.tk:443$request_uri;
access_log /var/log/gitlab/nginx/gitlab_access.log gitlab_access;
error_log /var/log/gitlab/nginx/gitlab_error.log;
}
server {
listen 0.0.0.0:443 ssl spdy;
listen [::]:443 ssl spdy;
server_name git.team2roblox.tk;
server_tokens off; ## Don't show the nginx version number, a security best practice
root /opt/gitlab/embedded/service/gitlab-rails/public;
## Increase this if you want to upload large attachments
## Or if you want to accept large git objects over http
client_max_body_size 250m;
## Strong SSL Security
## https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html & https://cipherli.st/
ssl on;
ssl_certificate /etc/gitlab/ssl/cert.pem;
ssl_certificate_key /etc/gitlab/ssl/fullchain.pem;
# GitLab needs backwards compatible ciphers to retain compatibility with Java IDEs
ssl_ciphers 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4';
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_session_cache builtin:1000 shared:SSL:10m;
ssl_session_timeout 5m;
## Individual nginx logs for this GitLab vhost
access_log /var/log/gitlab/nginx/gitlab_access.log gitlab_access;
error_log /var/log/gitlab/nginx/gitlab_error.log;
location / {
## Serve static files from defined root folder.
## @gitlab is a named location for the upstream fallback, see below.
try_files $uri /index.html $uri.html @gitlab;
}
location /uploads/ {
## If you use HTTPS make sure you disable gzip compression
## to be safe against BREACH attack.
gzip off;
## https://github.com/gitlabhq/gitlabhq/issues/694
## Some requests take more than 30 seconds.
proxy_read_timeout 300;
proxy_connect_timeout 300;
proxy_redirect off;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Ssl on;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Frame-Options SAMEORIGIN;
proxy_pass http://gitlab;
}
## If a file, which is not found in the root folder is requested,
## then the proxy passes the request to the upsteam (gitlab unicorn).
location @gitlab {
## If you use HTTPS make sure you disable gzip compression
## to be safe against BREACH attack.
gzip off;
## https://github.com/gitlabhq/gitlabhq/issues/694
## Some requests take more than 30 seconds.
proxy_read_timeout 300;
proxy_connect_timeout 300;
proxy_redirect off;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Ssl on;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Frame-Options SAMEORIGIN;
proxy_pass http://gitlab;
}
location ~ ^/[\w\.-]+/[\w\.-]+/gitlab-lfs/objects {
client_max_body_size 0;
# 'Error' 418 is a hack to re-use the @gitlab-workhorse block
error_page 418 = @gitlab-workhorse;
return 418;
}
location ~ ^/[\w\.-]+/[\w\.-]+/(info/refs|git-upload-pack|git-receive-pack)$ {
client_max_body_size 0;
# 'Error' 418 is a hack to re-use the @gitlab-workhorse block
error_page 418 = @gitlab-workhorse;
return 418;
}
location ~ ^/[\w\.-]+/[\w\.-]+/repository/archive {
client_max_body_size 0;
# 'Error' 418 is a hack to re-use the @gitlab-workhorse block
error_page 418 = @gitlab-workhorse;
return 418;
}
location ~ ^/api/v3/projects/.*/repository/archive {
client_max_body_size 0;
# 'Error' 418 is a hack to re-use the @gitlab-workhorse block
error_page 418 = @gitlab-workhorse;
return 418;
}
# Build artifacts should be submitted to this location
location ~ ^/[\w\.-]+/[\w\.-]+/builds/download {
client_max_body_size 0;
# 'Error' 418 is a hack to re-use the @gitlab-workhorse block
error_page 418 = @gitlab-workhorse;
return 418;
}
# Build artifacts should be submitted to this location
location ~ /ci/api/v1/builds/[0-9]+/artifacts {
client_max_body_size 0;
# 'Error' 418 is a hack to re-use the @gitlab-workhorse block
error_page 418 = @gitlab-workhorse;
return 418;
}
location @gitlab-workhorse {
client_max_body_size 0;
## If you use HTTPS make sure you disable gzip compression
## to be safe against BREACH attack.
gzip off;
## https://github.com/gitlabhq/gitlabhq/issues/694
## Some requests take more than 30 seconds.
proxy_read_timeout 300;
proxy_connect_timeout 300;
proxy_redirect off;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Ssl on;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_pass http://gitlab-workhorse;
}
## Enable gzip compression as per rails guide:
## http://guides.rubyonrails.org/asset_pipeline.html#gzip-compression
## WARNING: If you are using relative urls remove the block below
## See config/application.rb under "Relative url support" for the list of
## other files that need to be changed for relative url support
location ~ ^/(assets)/ {
root /opt/gitlab/embedded/service/gitlab-rails/public;
gzip_static on; # to serve pre-gzipped version
expires max;
add_header Cache-Control public;
}
error_page 502 /502.html;
}
Take a look at iptables or whatever firewall you have; the specifics may differ on another OS.
On the NGINX server (assuming you're using a Linux derivative), use iptables to allow local-network connections only and block everything else. The first entry below is the local network CIDR range, which might be different for your network; the second is the loopback range; the last entry drops everything else.
iptables -A INPUT -p tcp --dport 80 -s 192.168.0.0/24 -j ACCEPT
iptables -A INPUT -p tcp --dport 80 -s 127.0.0.0/8 -j ACCEPT
iptables -A INPUT -p tcp --dport 80 -j DROP
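An alternative at the nginx layer, if you'd rather not rely on the firewall: replace the redirect-only port-80 server block above with one that proxies for local addresses and denies everyone else. A rough sketch (the CIDR range is an assumption about your LAN, and note that omnibus GitLab regenerates this file, so a persistent change belongs in gitlab.rb rather than a hand edit):
server {
  listen 0.0.0.0:80;
  server_name git.team2roblox.tk;
  allow 192.168.0.0/24;  ## your LAN range
  allow 127.0.0.0/8;     ## loopback
  deny all;              ## public clients get 403 instead of the 301
  location / {
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass http://gitlab-workhorse;
  }
}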
