nginx uwsgi flask ERR_CONTENT_LENGTH_MISMATCH 200 (OK)
The first time I call the GET API with a ~2 MB response body, the data comes through.
The second time I call the same GET API with a ~2 MB response body, I get the error: ERR_CONTENT_LENGTH_MISMATCH 200 (OK)
Dockerfile
FROM python:3.7.6
RUN apt-get update
RUN apt-get install -y --no-install-recommends \
libatlas-base-dev gfortran nginx supervisor
RUN pip3 install uwsgi
RUN useradd --no-create-home nginx
RUN rm /etc/nginx/sites-enabled/default
RUN rm -r /root/.cache
COPY nginx.conf /etc/nginx/
COPY flask-site-nginx.conf /etc/nginx/conf.d/
COPY uwsgi.ini /etc/uwsgi/
COPY supervisord.conf /etc/
flask-site-nginx.conf
server {
listen 80;
client_max_body_size 100M;
location / {
try_files $uri @application;
client_max_body_size 100M;
}
location @application {
include uwsgi_params;
uwsgi_pass unix:///tmp/uwsgi.sock;
uwsgi_read_timeout 17200;
uwsgi_send_timeout 17200;
proxy_send_timeout 17200;
proxy_read_timeout 17200;
client_max_body_size 100M;
}
}
nginx.conf
user nginx;
worker_processes auto;
pid /tmp/nginx.pid;
daemon off;
pcre_jit on;
error_log /var/log/nginx/error.log warn;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
# Define the default file type that is returned to the user
default_type text/html;
# Don't tell nginx version to clients.
server_tokens off;
# Specifies the maximum accepted body size of a client request, as
# indicated by the request header Content-Length. If the stated content
# length is greater than this size, then the client receives the HTTP
# error code 413. Set to 0 to disable.
#client_max_body_size 0;
client_max_body_size 100M;
# Define the format of log messages.
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
# Define the location of the log of access attempts to NGINX
access_log /var/log/nginx/access.log main;
# Define the parameters to optimize the delivery of static content
sendfile on;
tcp_nopush on;
tcp_nodelay on;
# Define the timeout value for keep-alive connections with the client
#keepalive_timeout 7200;
keepalive_timeout 65;
types_hash_max_size 2048;
# Define the usage of the gzip compression algorithm to reduce the amount of data to transmit
gzip on;
# Include additional parameters for virtual host(s)/server(s)
include /etc/nginx/conf.d/*.conf;
}
uwsgi.ini
[uwsgi]
module = app
callable = app
uid = nginx
gid = nginx
socket = /tmp/uwsgi.sock
chown-socket = nginx:nginx
chmod-socket = 666
master = true
enable-threads = true
vacuum = true
die-on-term = true
need-app = true
cheaper = 50
#cheaper-step=2
#cheaper-algo=spare
#limit-post = 7516192768
harakiri = 120
max-requests = 5000
processes = 51
http-timeout=120
py-autoreload = 1
Please let me know if I am missing something.
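One thing worth checking (a guess based on the symptoms, not a confirmed diagnosis): ERR_CONTENT_LENGTH_MISMATCH with a 200 status usually means nginx advertised a Content-Length but closed the connection before the whole body was sent. With a ~2 MB response, a common cause is that nginx buffers the uwsgi reply to a temp directory the worker user cannot write to; /var/log/nginx/error.log would then show a "Permission denied" open() error while reading upstream. A minimal sketch of that workaround, assuming /tmp is writable by the nginx user created in the Dockerfile:
http {
    # Buffer upstream responses somewhere the nginx worker can actually write to...
    uwsgi_temp_path /tmp/uwsgi_temp;
    proxy_temp_path /tmp/proxy_temp;
    # ...or stream large responses without disk buffering for this app:
    # uwsgi_buffering off;
}
If the error log is clean, another suspect is py-autoreload = 1 in uwsgi.ini, which is a development feature that reloads workers and is usually switched off in production.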
Related
I am using centos 7 with python 2.7.15 and uwsgi + nginx to host my app.
Step by step I am getting closer to making it work.
I had to set python 2.7.15 to run as python instead of 2.7.5,
then I had some uwsgi problems with the emperor service.
But now the app works when I run uwsgi through
uwsgi --http :8000 --chdir /opt/web2py -w wsgihandler:application
But when I try to put it together with nginx, I cannot access the page.
My nginx config ATM is
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
map $request_uri $loggable {
~/engine/getTasks.* 0;
~/static/* 0;
default 1;
}
access_log /var/log/nginx/access.log main if=$loggable;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
include /etc/nginx/conf.d/*.conf;
server {
client_max_body_size 10M;
listen 80 default_server;
listen [::]:80 default_server;
server_name localhost;
root /usr/share/nginx/html;
include /etc/nginx/default.d/*.conf;
location ^~ /.well-known/acme-challenge/ {
default_type "text/plain";
root /opt/web2py_cert/web2py.com;
}
location / {
uwsgi_pass unix:/run/uwsgi/web2py.sock;
include uwsgi_params;
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
}
And my uwsgi.ini file
[uwsgi]
plugin = python2.7
logto = /opt/web2py/uwsgi.log
chdir = /opt/web2py
http = 0.0.0.0:80
module = wsgihandler:application
master = true
processes = 5
uid = woshi
socket = /run/uwsgi/web2py.sock
chown-socket = woshi:nginx
chmod-socket = 660
vacuum = true
Any suggestions? Thank you.
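A hedged observation, not a verified fix: the ini file both binds http = 0.0.0.0:80 (which collides with nginx listening on port 80) and defines the unix socket that nginx points at. When nginx fronts the app over the socket, the usual shape is to drop the http line, let uwsgi speak only the uwsgi protocol, and make sure the socket is accessible to the nginx group, e.g.:
[uwsgi]
plugin = python2.7
logto = /opt/web2py/uwsgi.log
chdir = /opt/web2py
module = wsgihandler:application
master = true
processes = 5
uid = woshi
gid = nginx
# nginx owns port 80; uwsgi only serves the uwsgi protocol on the socket
socket = /run/uwsgi/web2py.sock
chown-socket = woshi:nginx
chmod-socket = 660
vacuum = true
With chmod-socket = 660, the nginx worker has to be in the socket's group (here nginx, via chown-socket), otherwise the nginx error log will show permission errors on the socket.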
I'm trying to reverse proxy to another endpoint on /alertmanager, but it fails to connect. Weirdly enough, I'm able to connect to the endpoint directly from inside the pod running nginx.
A quick overview of my application architecture is this:
nginx ingress on cluster -> nginx load balancer -> <many services on different endpoints>
This is a minimized nginx configuration that replicates the issue:
worker_processes 5; ## Default: 1
error_log /dev/stderr;
pid /tmp/nginx.pid;
worker_rlimit_nofile 8192;
events {}
http {
client_body_temp_path /tmp/client_temp;
proxy_temp_path /tmp/proxy_temp_path;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stderr main;
sendfile on;
tcp_nopush on;
resolver kube-dns.kube-system.svc.cluster.local;
server {
listen 8080;
proxy_set_header X-Scope-OrgID 0;
location = / {
return 200 'OK';
auth_basic off;
}
location /alertmanager {
proxy_pass http://mimir-distributed-alertmanager.mimir.svc.cluster.local:8080$request_uri;
}
}
}
I'm able to curl the Mimir endpoint for /alertmanager directly, but I can't reach /alertmanager through the proxy without getting a 404 error. I can get to /, and if I put the proxy_pass inside of / it does work.
Example of what I'm seeing:
/ $ curl localhost:8080/
OK
/ $ curl localhost:8080/alertmanager
the Alertmanager is not configured
Curling http://mimir-distributed-alertmanager.mimir.svc.cluster.local does in fact return the HTML of the page I'm expecting.
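For what it's worth, the "the Alertmanager is not configured" reply comes from the upstream itself, which suggests the proxying is reaching Mimir and the remaining issue is how the path and tenant are handed over. A sketch of one variation to try (the variable name is made up; the upstream host is taken from the config above), letting nginx forward the original URI instead of appending $request_uri:
location /alertmanager {
    # with a variable upstream, the resolver directive above is still required
    set $alertmanager_backend mimir-distributed-alertmanager.mimir.svc.cluster.local;
    proxy_set_header X-Scope-OrgID 0;
    # no URI part on proxy_pass: nginx passes the request URI unchanged,
    # so /alertmanager/... reaches the backend with its prefix intact
    proxy_pass http://$alertmanager_backend:8080;
}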
One of the pages on my website requires a long computation on the server (~2 minutes). If I run the website on localhost it works fine, but in production the request fails after ~30 seconds. Here's the http section of my nginx conf:
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 120;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
proxy_read_timeout 300;
proxy_connect_timeout 300;
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
I tried adding:
fastcgi_read_timeout 300;
proxy_read_timeout 300;
at the end (after "server"), but it didn't do anything.
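One thing that may explain why the added directives had no effect (an assumption, since the proxying block isn't shown): proxy_read_timeout and proxy_connect_timeout only apply in a context that actually proxies, and the location / block above contains no proxy_pass, so the real upstream is presumably configured in one of the included conf.d/default.d files. The timeouts need to sit next to that proxy_pass, roughly like this (the upstream address is a placeholder):
location / {
    proxy_connect_timeout 300;
    proxy_send_timeout 300;
    proxy_read_timeout 300;
    # hypothetical upstream; use whatever the included config actually proxies to
    proxy_pass http://127.0.0.1:8000;
}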
If you get a 502 Bad Gateway error, it means your application server (I guess it's Unicorn, according to your tags) is the one timing out, not Nginx; you should increase the timeout in the unicorn.rb file on your production server.
worker_processes 2
listen "/tmp/xxx.socket"
## equal to your proxy_read_timeout in the Nginx config
timeout 300
pid "/tmp/unicorn.xxx.pid"
In the case of Python Gunicorn (Green Unicorn), do the following:
NUM_WORKERS=3
TIMEOUT=300
exec gunicorn ${DJANGO_WSGI_MODULE}:application \
--name $NAME \
--workers $NUM_WORKERS \
--timeout $TIMEOUT \
--log-level=debug \
--bind=x.x.x.x \
--pid=$PIDFILE
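Since the rest of this thread runs uWSGI rather than Unicorn/Gunicorn, the equivalent knobs there would be harakiri (kills requests running longer than N seconds) and, when uWSGI serves HTTP itself, http-timeout. A sketch, not part of the answer above:
[uwsgi]
# keep these at or above nginx's proxy_read_timeout / uwsgi_read_timeout
harakiri = 300
http-timeout = 300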
I am trying to configure cgit with nginx through uwsgi. I managed to get the main page working on example.com/ and added my repos, but when I try to access a repo at example.com/somerepo I get a 502 error.
I know cgit is working fine because I can run cgit.cgi with and without the QUERY_STRING="url=somerepo" environment variable, and it generates the correct HTML for the main page and the somerepo page respectively.
I have been trying to debug the issue using the nginx error logs at debug level, strace and gdb on both nginx and cgit.cgi, and the output from uwsgi. This is what I've found so far:
When I click on a somerepo link on cgit's main page, uwsgi makes a GET request to /somerepo and nginx tries to open a directory at /htdocs/somerepo, which it can't find because it doesn't exist (I suppose cgit.cgi should generate this on the fly). I know this from strace: stat("/usr/share/webapps/cgit/1.2.1/htdocs/olisrepo/", 0x7ffdf4c817c0) = -1 ENOENT (No such file or directory)
When I click on a somerepo link I get read(8, 0x561749c8afa0, 65536) = -1 EAGAIN (Resource temporarily unavailable) from cgit.cgi's strace.
When I try to visit an invalid URL like somerepotypo, it correctly generates a 404 page saying 'no repositories found'.
These are my configuration files:
/etc/nginx/nginx.conf
user nginx nginx;
worker_processes 1;
error_log /var/log/nginx/error_log debug;
events {
worker_connections 1024;
use epoll;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main
'$remote_addr - $remote_user [$time_local] '
'"$request" $status $bytes_sent '
'"$http_referer" "$http_user_agent" '
'"$gzip_ratio"';
client_header_timeout 10m;
client_body_timeout 10m;
send_timeout 10m;
connection_pool_size 256;
client_header_buffer_size 1k;
large_client_header_buffers 4 2k;
request_pool_size 4k;
gzip off;
output_buffers 1 32k;
postpone_output 1460;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 75 20;
ignore_invalid_headers on;
# Cgit
server {
listen 80;
server_name example.com;
root /usr/share/webapps/cgit/1.2.1/htdocs;
access_log /var/log/nginx/access_log main;
error_log /var/log/nginx/error_log debug;
location ~* ^.+(cgit.(css|png)|favicon.ico|robots.txt) {
root /usr/share/webapps/cgit/1.2.1/htdocs;
expires 30d;
}
location / {
try_files $uri @cgit;
}
location @cgit {
include uwsgi_params;
uwsgi_modifier1 9;
uwsgi_pass unix:/run/uwsgi/cgit.sock;
}
}
}
cgit.ini (I load this using uwsgi --ini /etc/uwsgi.d/cgit.ini)
[uwsgi]
master = true
plugins = cgi
chmod-socket = 666
socket = /run/uwsgi/%n.sock
uid = nginx
gid = nginx
processes = 1
threads = 1
cgi = /usr/share/webapps/cgit/1.2.1/hostroot/cgi-bin/cgit.cgi
/etc/cgitrc
css=/cgit.css
logo=/cgit.png
mimetype-file=/etc/mime.types
virtual-root=/
remove-suffix=1
enable-git-config=1
scan-path=/usr/local/cgitrepos
Can you help me fix this? Thanks in advance
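A hedged diagnostic that may help narrow this down: drive the same CGI through uWSGI's own HTTP router, bypassing nginx entirely (this assumes the http plugin is installed alongside the cgi plugin). If repo URLs also fail there, the problem is in the uwsgi/cgit layer rather than in the nginx config:
# serve cgit.cgi directly over HTTP on port 9090 for testing
uwsgi --plugins http,cgi --http :9090 \
      --cgi /usr/share/webapps/cgit/1.2.1/hostroot/cgi-bin/cgit.cgi
# then compare:
curl http://localhost:9090/            # main page
curl http://localhost:9090/somerepo    # the failing repo URL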
Error:
2014/11/28 01:02:16 [error] 2501#0: *4 upstream prematurely closed connection while reading response header from upstream, client: 75.64.105.189, server: xxx.yyy.com.au, request: "GET / HTTP/1.1", upstream: "uwsgi://127.0.0.1:8080", host: "xxx.yyy.com.au"
Calling uwsgi in a virtual env using the following
cd /home/ec2-user/prod_demo && /home/ec2-user/venv/bin/uwsgi --socket :8080 --wsgi-file /home/ec2-user/prod_demo/manage.py --callable app --processes 4 --threads 2 --stats :18080 --protocol=http &
Nginx Config
/etc/nginx/nginx.conf
user ec2-user;
worker_processes 1;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80;
server_name xxx.yyy.com.au *.xxx.yyy.com.au;
access_log /var/log/prod_demo/access_log;
root /home/ec2-user/prod_demo;
location / {
uwsgi_pass 127.0.0.1:8080;
include uwsgi_params;
}
location /static {
alias /home/ec2-user/prod_demo/app/static;
}
location = /favicon.ico {
alias /home/ec2-user/prod_demo/app/static/images/favicon.ico;
}
}
}
It seems to be working now after changing to the uwsgi ini config format instead of the command line.
uwsgi.ini
[uwsgi]
socket = :8080
chdir = /home/ec2-user/prod_demo
master = True
venv = /home/ec2-user/venv
callable = app
wsgi-file = /home/ec2-user/prod_demo/manage.py
enable-threads = True
Calling uwsgi.ini from the shell / upstart (hopefully):
/home/ec2-user/venv/bin/uwsgi --ini uwsgi.ini
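A possible explanation for the original failure (hedged, since only the symptom is shown): the command line combined --socket :8080 with --protocol=http, so uwsgi was speaking plain HTTP on a port that nginx's uwsgi_pass was addressing with the binary uwsgi protocol, which is a classic way to get "upstream prematurely closed connection". The ini file drops the protocol override, which would explain why it now works. The equivalent command line would be roughly:
cd /home/ec2-user/prod_demo && /home/ec2-user/venv/bin/uwsgi \
    --socket :8080 \
    --wsgi-file /home/ec2-user/prod_demo/manage.py \
    --callable app --master --enable-threads &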