502 Bad Gateway from nginx on Shiny file upload

I have uploaded a big file (5 GB) using fileUpload. I have increased the file upload limit to 10 GB in the Shiny server code. The upload itself succeeds, but the following error is returned once it completes:
Error: <html><head><title>502 Bad Gateway</title></head>
Below is my config info. The Shiny option:
options(shiny.maxRequestSize = 10000 * 1024 ^ 2)
The nginx config /etc/nginx/nginx.conf has the following basic settings in the http block:
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
client_max_body_size 100G;
large_client_header_buffers 8 64k;
}
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
}
The /etc/nginx/sites-available/default configuration is as shown below:
server {
listen 80 default_server;
listen [::]:80 default_server;
root /var/www/html;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
server_name _;
location /shiny/ {
proxy_pass http://X.X.X.X:3838/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
rewrite ^(/shiny/[^/]+)$ $1/ permanent;
}
location /rstudio/ {
proxy_pass http://X.X.X:8787/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
rewrite ^(/rstudio/[^/]+)$ $1/ permanent;
client_max_body_size 100000M;
}
location / {
# First attempt to serve request as file, then
# as directory, then fall back to displaying a 404.
try_files $uri $uri/ =404;
proxy_buffer_size 16k;
proxy_buffers 8 32k;
proxy_busy_buffers_size 224K;
keepalive 64;
}
}
Any hints on what to try would be helpful.

Adding the following lines to the location / block in the sites-available/myApp.com config file fixed the issue:
location / {
proxy_http_version 1.1; # you need to set this in order to use the parameters below
proxy_pass http://XXXXXX.XX.XX:3838;
proxy_redirect http://XXXXXX.XX.XX:3838/ https://$host/;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_temp_file_write_size 64k;
proxy_connect_timeout 10080s;
proxy_send_timeout 10080;
proxy_read_timeout 10080;
proxy_buffer_size 64k;
proxy_buffers 16 32k;
proxy_busy_buffers_size 64k;
proxy_redirect off;
proxy_request_buffering off;
proxy_buffering off;
}
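For reference, the directives that actually matter for pushing a multi-gigabyte upload through the proxy can be boiled down to the minimal sketch below. This is a sketch only, assuming Shiny Server listens on 127.0.0.1:3838 and the app is mounted at /; adjust the upstream address, size limit, and timeouts to your setup, and remember that options(shiny.maxRequestSize = ...) on the Shiny side must also cover the file.
# Minimal sketch (assumptions: Shiny Server on 127.0.0.1:3838, app mounted at /).
server {
    listen 80;
    location / {
        proxy_pass http://127.0.0.1:3838;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        client_max_body_size 10G;       # must be at least the largest expected upload
        proxy_request_buffering off;    # stream the body to Shiny instead of buffering it to disk first
        proxy_read_timeout 3600;        # give the app time to finish processing the upload
        proxy_send_timeout 3600;
    }
}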

Related

uWSGI Flask app and nginx gives 404 Not Found

I'm stuck. I want to deploy a Python Dash app on my VPS. I started with a Flask app, following this tutorial:
https://www.digitalocean.com/community/tutorials/how-to-serve-flask-applications-with-uswgi-and-nginx-on-ubuntu-18-04
So I created the myproject service; checking its status, everything seems to be OK.
My nginx configuration:
server {
listen 443 ssl http2;
#listen [::]:443 ssl http2;
server_name www.mysite.com mysite.com;
root /var/www/wordpress;
index index.php;
access_log /var/log/nginx/wordpress.log;
error_log /var/log/nginx/wordpress_error.log error;
ssl_certificate_key /etc/nginx/ssl/www.XXX.key;
ssl_certificate /etc/nginx/ssl/www.XXX.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
add_header Strict-Transport-Security max-age=31536000;
location / {
try_files $uri $uri/ =404;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass unix:/var/run/php/php7.4-fpm.sock;
}
location /shiny/ {
rewrite ^/shiny/(.*)$ /$1 break;
proxy_pass http://127.0.0.1:3838;
proxy_redirect http://127.0.0.1:3838 $scheme://$host/shiny/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_temp_file_write_size 64k;
proxy_connect_timeout 10080s;
proxy_send_timeout 10080;
proxy_read_timeout 10080;
proxy_buffer_size 64k;
proxy_buffers 16 32k;
proxy_busy_buffers_size 64k;
proxy_redirect off;
proxy_request_buffering off;
proxy_buffering off;
}
location /rstudio/ {
rewrite ^/rstudio/(.*)$ /$1 break;
proxy_pass http://127.0.0.1:8787;
proxy_redirect http://127.0.0.1:8787 $scheme://$host/rstudio/;
#proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#proxy_set_header X-Real-IP $remote_addr;
#proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_http_version 1.1;
#proxy_redirect off;
proxy_buffering off;
#proxy_set_header Upgrade $http_upgrade;
#proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
location /myproject/ {
uwsgi_pass unix:/home/XXX/XXX/python_dash/myproject/myproject.sock;
include uwsgi_params;
}
}
But when I go to mysite.com/myproject/ I get a 404. However, it's different from the regular nginx 404 page (the one that appears when I misspell a URL), so something is getting through. It's also visible in the myproject service status: every time I try to access the page, a line like this shows up at the bottom:
[pid: 1696|app: 0|req: 1/1] 213.134.X.XX () {58 vars in 1334 bytes} [Fri Dec 3 13:22:08 2021] GET /myproject/ => generated 232 bytes in 40 msecs (HTTP/2.0 404) 2 headers in 87 bytes (2 switches on core 0)
my myproject.ini:
[uwsgi]
module = wsgi:app
master = true
processes = 5
socket = myproject.sock
chmod-socket = 666
vacuum = true
die-on-term = true
Can you give me a hint? I'm stuck on this and can't solve it.
Best regards,
Tomasz
Finally found it!
The @app.route path must match the location in the nginx config,
so in my case, with:
location /myproject/
the app route must be:
@app.route("/myproject/")
Remember the trailing "/": if you don't add it, requests with a trailing "/" in the address bar return Not Found (https://website.com/myproject/ will not work, only https://website.com/myproject).
It's similar with Python Dash, but there @app.route (or @server.route) doesn't work; instead you need to pass routes_pathname_prefix="/myproject/" to dash.Dash(...).
I don't understand why it works that way, but it works.
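For completeness, the prefix can also be handled on the nginx side so the app keeps its routes at /. The sketch below is an assumption-laden alternative, not the accepted fix: it relies on the stock uwsgi_params file (which sets PATH_INFO from $document_uri, the variable the rewrite changes) and reuses the socket path from the question.
location /myproject/ {
    # Strip the /myproject/ prefix before handing the request to uWSGI,
    # so the Flask app can keep its routes at "/".
    rewrite ^/myproject/(.*)$ /$1 break;
    include uwsgi_params;
    uwsgi_pass unix:/home/XXX/XXX/python_dash/myproject/myproject.sock;
}
Note that links generated by the app will still lack the /myproject/ prefix, which is why matching the prefix inside the app (as above) is usually the simpler fix.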

Why do I get 404 on nginx reverse proxy?

Below is my config and I'm getting 404 on all routes defined apart from the well-known route and I don't understand why.
If I make a request to http://example.tech/connect I get a 404 and if I make a request to http://api.example.tech I also get a 404.
I can't see where I've gone wrong as this looks like it should work!
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log warn;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
#REMOVE REFERENCE TO FILES THAT HAVE "server" and "location" blocks in them so we can do it all in this file
#include /etc/nginx/conf.d/*.conf;
# issue with ip and the nginx proxy
real_ip_header X-Forwarded-For;
set_real_ip_from 0.0.0.0/0;
server {
listen 80;
listen [::]:80;
server_name example.tech;
location /.well-known/openid-configuration {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
location /connect {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
location /auth {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
}
server {
listen 80;
listen [::]:80;
server_name api.example.tech;
location /auth/ {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
}
}
I needed a forward slash on the end of the proxy_pass for some reason.
You need a specific URI in the proxy_pass directive, not just a slash; but in your case the trailing slash acts as that URI. Nginx replaces '/auth' (for example) with the '/' you've added.
In fact, the answer you posted is right: turning proxy_pass http://myapp.net; into proxy_pass http://myapp.net/;.
The reason is that proxy_pass works in two different ways with and without a specific URI. More details about this directive are on nginx.org; below is some content quoted from that link.
If the proxy_pass directive is specified with a URI, then when a request is passed to the server, the part of a normalized request URI
matching the location is replaced by a URI specified in the directive:
location /name/ {
proxy_pass http://127.0.0.1/remote/;
}
If proxy_pass is specified without a URI, the request URI is passed to the server in the same form as sent by a client when the original
request is processed, or the full normalized request URI is passed
when processing the changed URI:
location /some/path/ {
proxy_pass http://127.0.0.1;
}
In your case there is no URI in the proxy_pass directive, so /auth is passed to the backend server unchanged. Unfortunately, your backend server does not have an /auth resource, so 404 is returned. If your backend server did handle /auth, you would never get a 404 error when requesting /auth.
Here are two examples, which hopefully clarify things.
location /some-path {
proxy_pass http://server:3000;
}
In this case the proxied server (target) must handle the route /some-path. If handling something else, like / only, it will return an error to Nginx.
One solution is to add a trailing / e.g.:
location /some-path {
proxy_pass http://server:3000/;
}
Now requests sent to /some-path can (and must) be handled by the route / on the proxied server side. However, this may cause issues with some servers. For example, with Express, curl localhost/some-path would be handled fine by Express, whereas curl localhost/some-path/ would cause Express to return Cannot GET //.
This might be different for your target server, but the principle is the same: if you specify the server only, the full path in location is passed to the server, so it must be handled accordingly.
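Putting the two behaviours side by side, using the myapp.net backend from the question, the URI seen by the upstream works out like this (illustrative sketch only):
# Without a URI on proxy_pass: the location prefix is preserved.
location /auth {
    proxy_pass https://myapp.net;     # GET /auth/login  ->  upstream receives /auth/login
}
# With a URI (here just "/"): the part matching the location is replaced.
location /auth/ {
    proxy_pass https://myapp.net/;    # GET /auth/login  ->  upstream receives /login
}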
Here is a case of mine where I got a 404 instead of a 502:
# let's define some proxy pass
location ~ /.well-known/acme-challenge {
proxy_pass http://127.0.0.1:5080; # this backend doesn't exist which leads to 502
proxy_set_header Host $host;
}
# these are the default directives
error_page 500 502 503 504 /50x.html; # this is why the 502 gets redirected to /50x.html
location = /50x.html { # but this file doesn't exist under the root, so we get 404 instead of 502
root /usr/share/nginx/html;
}
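The corresponding fix is simply to make /50x.html resolvable. A minimal sketch, assuming you place (or already have) a 50x.html under /usr/share/nginx/html:
error_page 500 502 503 504 /50x.html;
location = /50x.html {
    root /usr/share/nginx/html;   # this directory must actually contain 50x.html
    internal;                     # optional: only reachable via error_page, not directly
}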

NGINX: proxy_pass microservices

I have a Mesos cluster running in a test environment and I set up nginx + nixy for service discovery. It works fine, but when nginx proxies to a Docker container, I receive a 404 error.
For example: I have a Tomcat container listening on privateip:37130. If I configure proxy_pass for location /, it works; if I configure it for location /service, I get the 404 error code.
Does anyone have any idea?
My nginx.conf
worker_processes auto;
pid /run/nginx.pid;
events {
use epoll;
worker_connections 2048;
multi_accept on;
}
http {
add_header X-Proxy always;
access_log off;
error_log /var/log/nginx/error.log warn;
server_tokens off;
client_max_body_size 128m;
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
proxy_redirect off;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# time out settings
proxy_send_timeout 120;
proxy_read_timeout 120;
send_timeout 120;
keepalive_timeout 10;
upstream tomcat {
server 172.30.119.214:31816;
}
server {
listen 80;
server_name app.org;
location / {
return 503;
}
location /tomcat/ {
proxy_set_header HOST $host;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_connect_timeout 30;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_pass http://tomcat;
}
}
}
Change below
proxy_pass http://tomcat;
to
proxy_pass http://tomcat/;
Adding the trailing / makes sure that the /tomcat/ prefix from the location is not sent on to the Tomcat service.
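In other words, a sketch of the resulting URI mapping for the /tomcat/ location from the question:
# Without a URI: the location prefix is preserved.
#   GET /tomcat/app  ->  Tomcat receives /tomcat/app  (404 unless a /tomcat context exists)
proxy_pass http://tomcat;
# With the trailing slash acting as the URI: the /tomcat/ prefix is replaced by /.
#   GET /tomcat/app  ->  Tomcat receives /app
proxy_pass http://tomcat/;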

Direct IP access not allowed (Cloudflare) behind an nginx proxy server

I have a site on OpenShift (rhc), and I want to set up a reverse proxy with nginx on the OpenShift servers so that this server can reach many other servers. I configured my nginx server with the following configuration:
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
error_log {{OPENSHIFT_HOMEDIR}}/app-root/logs/nginx_error.log debug;
pid {{NGINX_DIR}}/logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
#access_log $OPENSHIFT_DIY_LOG_DIR/access.log main;
port_in_redirect off;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 165;
gzip on;
upstream frontends {
#server pr4ss.tk;
#server 222.66.115.233:80 weight=1;
server {{OPENSHIFT_INTERNAL_IP}}:8081 ;
}
upstream frontends2 {
server google.com;
#server 222.66.115.233:80 weight=1;
#server {{OPENSHIFT_INTERNAL_IP}}:8081 ;
}
upstream index {
server free-papers.elasa.ir weight=1;
server diy4tornado-tornado4ss.rhcloud.com weight=2;
}
upstream comment {
server vb2-fishsmarkets.rhcloud.com;
#server community.elasa.ir;
}
server {
listen {{OPENSHIFT_INTERNAL_IP}}:{{OPENSHIFT_INTERNAL_PORT}};
server_name {{OPENSHIFT_GEAR_DNS}} www.{{OPENSHIFT_GEAR_DNS}};
root {{OPENSHIFT_REPO_DIR}};
set_real_ip_from {{OPENSHIFT_INTERNAL_IP}};
real_ip_header X-Forwarded-For;
#charset koi8-r;
#access_log logs/host.access.log main;
location /main {
root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
try_files $uri $uri/ =404;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
}
location ~* ^/(.*) {
#proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
#proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ http://diy-elasa2.rhcloud.com/;
#proxy_pass http://comment/$1$is_args$args;
proxy_pass http://index/$1$is_args$args;
}
location ^~ /admincp {
if (!-f $request_filename) {
rewrite ^/admincp/(.*)$ /index.php?routestring=admincp/$1 last;
}
proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ http://diy-elasa2.rhcloud.com/;
proxy_pass http://comment/$1$is_args$args;
}
location /www {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends;
}
location /categories {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends2;
}
location /index {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
# an HTTP header important enough to have its own Wikipedia entry:
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
# enable this if you forward HTTPS traffic to unicorn,
# this helps Rack set the proper URL scheme for doing redirects:
# proxy_set_header X-Forwarded-Proto $scheme;
# pass the Host: header from the client right along so redirects
# can be set properly within the Rack application
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
# set "proxy_buffering off" *only* for Rainbows! when doing
# Comet/long-poll/streaming. It's also safe to set if you're using
# only serving fast clients with Unicorn + nginx, but not slow
# clients. You normally want nginx to buffer responses to slow
# clients, even with Rails 3.1 streaming because otherwise a slow
# client can become a bottleneck of Unicorn.
#
# The Rack application may also set "X-Accel-Buffering (yes|no)"
# in the response headers to disable/enable buffering on a
# per-response basis.
# proxy_buffering off;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 10;
proxy_send_timeout 5;
proxy_read_timeout 3600;
proxy_buffer_size 4k;
proxy_buffers 4 132k;
proxy_busy_buffers_size 264k;
proxy_temp_file_write_size 164k;
proxy_pass http://index;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
location ~ \.php$ {
root html;
fastcgi_pass {{OPENSHIFT_INTERNAL_IP}}:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
include fastcgi_params;
}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443;
# server_name localhost;
# ssl on;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_timeout 5m;
# ssl_protocols SSLv2 SSLv3 TLSv1;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
I used Upstream index:
upstream index {
server free-papers.elasa.ir weight=1;
server diy4tornado-tornado4ss.rhcloud.com weight=2;
}
but when I use this configuration (with Cloudflare or the rhc server) I get this error from Cloudflare:
Direct IP access not allowed
So what is the proper configuration for nginx (or Apache) for this kind of setup?
Thanks a lot for your attention.
I found the answer:
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain application/xml application/javascript text/javascript;
gzip_disable "msie6";
gzip_http_version 1.1;
upstream comment {
#server vb-fishsmarkets.rhcloud.com;
#server vb.elasa.ir;
#server vb-elasa3.rhcloud.com ;
#server vb2-fishsmarkets.rhcloud.com;
#server forums.fishsmarket.tk;
#server community.elasa.ir;
#server free-papers.elasa.ir;
#server diy4tornado-tornado4ss.rhcloud.com weight=1;
server free-papers.elasa.ir weight=1;
}
limit_req_zone $binary_remote_addr zone=one:10m rate=30r/m;
limit_req_zone $binary_remote_addr zone=one2:10m rate=1r/m;
limit_req_zone $http_x_forwarded_for zone=one3:10m rate=1r/m;
proxy_cache_path /tmp levels=1:2 keys_zone=RUBYGEMS:10m
inactive=24h max_size=1g;
server {
listen 127.6.145.1:8080;
server_name diy-elasa2.rhcloud.com community.elasa.ir ;
#charset koi8-r;
and :
location ~* ^/(.*) {
#root html;
#index index.html index.htm;
#proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
proxy_set_header Host free-papers.elasa.ir;
#proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ http://diy-elasa2.rhcloud.com/;
#proxy_redirect http://fm.elasa.ir/ http://community.elasa.ir/;
proxy_pass http://comment/$1$is_args$args;
proxy_set_header X-Real-IP $remote_addr;
#proxy_set_header X-Scheme $scheme;
sub_filter 'http://fm.elasa.ir/' 'http://community.elasa.ir/';
sub_filter_once on;
proxy_set_header X-outside-url $scheme://$host;
#proxy_set_header X-Real-IP $remote_addr;
proxy_cache RUBYGEMS;
proxy_cache_valid 200 1d;
proxy_cache_use_stale error timeout invalid_header updating
http_500 http_502 http_503 http_504;
proxy_http_version 1.1;
proxy_cache RUBYGEMS;
proxy_cache_valid 200 1d;
proxy_cache_use_stale error timeout invalid_header updating
http_500 http_502 http_503 http_504;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_cache_bypass $http_upgrade;
proxy_set_header X-NginX-Proxy true;
proxy_redirect off;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 100M;
client_body_buffer_size 1m;
proxy_intercept_errors on;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 256 16k;
proxy_busy_buffers_size 256k;
proxy_temp_file_write_size 256k;
#proxy_max_temp_file_size 0;
proxy_read_timeout 300;
}
The important thing is the proxy Host header:
proxy_set_header Host free-papers.elasa.ir;
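Stripped down to the part that matters, the idea is that a backend fronted by Cloudflare must receive a Host header naming a site Cloudflare knows about, not the bare address of the proxying machine. A minimal sketch, reusing the comment upstream and hostname from this answer:
location / {
    proxy_set_header Host free-papers.elasa.ir;          # hostname known to Cloudflare/the upstream
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass http://comment;                           # upstream block pointing at free-papers.elasa.ir
}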

Cannot Access Glassfish4 Admin console via nginx location and proxy pass

Folks,
We have a java application running under Glassfish4. I wanted to disable direct access to the Glassfish admin server by closing 4848 at the firewall level and accessing it via a location directive in nginx (also offloading the SSL to nginx).
With asadmin enable-secure-admin turned on, I can get into the admin server via https://foo.domain.com:4848 and administer it normally.
However, when I disable secure admin via asadmin disable-secure-admin and access it with the following location block
# Reverse proxy to access Glassfish Admin server
location /Glassfish {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_max_temp_file_size 0;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_pass http://127.0.0.1:4848;
}
at https://foo.domain.com/Glassfish, I get a blank screen, and the only reference I can find in the nginx error logs is:
2015/10/05 09:13:57 [error] 29429#0: *157 open() "/usr/share/nginx/html/resource/community-theme/images/login-product_name_open.png" failed (2: No such file or directory), client: 104.17.0.4, server: foo.domain.com, request: "GET /resource/community-theme/images/login-product_name_open.png HTTP/1.1", host: "foo.domain.com", referrer: "https://foo.domain.com/Glassfish"
Reading the docs and around the net, I do see that:
Secure Admin must be enabled to access the DAS remotely
Is what I'm trying to do simply impossible?
Edit: As requested, below is the full nginx configuration.
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
#sendfile off;
tcp_nopush on;
tcp_nodelay off;
#keepalive_timeout 65;
types_hash_max_size 2048;
# Default HTTP server on 80 port
server {
listen 192.168.1.10:80 default_server;
#listen [::]:80 default_server;
server_name foo-dev.domain.com;
return 301 https://$host$request_uri;
}
# Default HTTPS server on 443 port
server {
listen 443;
server_name foo-dev.domain.com;
ssl_certificate /etc/ssl/certs/foo-dev.domain.com.crt;
ssl_certificate_key /etc/ssl/certs/foo-dev.domain.com.key;
ssl on;
ssl_session_cache builtin:1000 shared:SSL:10m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
ssl_prefer_server_ciphers on;
access_log /var/log/nginx/foo-dev.domain.com.access.ssl.log;
# Reverse proxy access to foo hospitality service implementation at BC back-end
location /AppEndPoint {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_max_temp_file_size 0;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_pass http://foo-dev.domain.com:8080;
}
# Reverse proxy to access Glassfish Admin server
location /Glassfish {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_max_temp_file_size 0;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_pass http://127.0.0.1:4848;
}
# Reverse proxy access to all processed servers by both client and server component
location /messages {
alias /integration/archive/app-messages/;
autoindex on;
#auth_basic "Integration Team Login";
#auth_basic_user_file /integration/archive/app-messages/requests/.htpasswd;
}
}
}
The /AppEndPoint location block proxies to the Glassfish application server and works properly; it's only the /Glassfish location block that's giving me trouble.
OK, thanks for your edit.
Try with:
listen 443 ssl;
By the way, a good configuration helper is offered by Mozilla's SSL Configuration Generator.
Also, if you forward requests to location /Glassfish, you will have to trim the request URL to remove the /Glassfish prefix; this can be done with a rewrite (see the sketch below).
Incidentally, does the rest of your config work over SSL?
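A sketch of that trimming, under the assumption that the Glassfish admin listener is on 127.0.0.1:4848 as in the question; either form should do:
# Option 1: let proxy_pass do the trimming via its trailing slash.
location /Glassfish/ {
    proxy_pass http://127.0.0.1:4848/;     # /Glassfish/foo -> /foo on the admin listener
}
# Option 2: strip the prefix explicitly with a rewrite.
location /Glassfish {
    rewrite ^/Glassfish/?(.*)$ /$1 break;
    proxy_pass http://127.0.0.1:4848;
}
Note the error log in the question, though: the console requests paths like /resource/... without the /Glassfish prefix, so trimming alone won't route those assets back through the proxy; the longer answer below works around that with a separate regex location for static resources.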
The only change needed is http to https in the proxy_pass:
location / {
proxy_pass https://localhost:4848;
#proxy_http_version 1.1;
#proxy_set_header Upgrade $http_upgrade;
#proxy_set_header Connection 'upgrade';
#proxy_set_header Host $host;
#proxy_cache_bypass $http_upgrade;
}
From what you ask, I suppose you are having problems accessing the Glassfish Admin Console through nginx. I'll share an example of a complete nginx configuration for a Glassfish server.
Note that the proxy_pass directive for location /admin must use https, because Glassfish requires the Admin Console to be accessed over https.
One reason you may not be able to see the Admin Console is that its resources aren't loaded properly. You can inspect the loaded resources with your browser's developer tools and look at the generated URLs, which can show you part of the solution.
With this configuration you should be able to access both parts of Glassfish, the main and Admin Console pages.
If you don't have a DNS server, you can access it using the server IP.
The SSL certificates used here are self-signed, for test purposes only; consider using a valid SSL certificate, e.g. from Let's Encrypt or another valid CA.
Ex:
http://192.168.1.15/glassfish
http://192.168.1.15/admin
The https redirection should work, and you will finally be redirected to:
https://192.168.1.15/glassfish
https://192.168.1.15/admin
glassfish-ngix.conf
upstream glassfish {
server 127.0.0.1:8080;
}
upstream glassfishadmin {
server 127.0.0.1:4848;
}
server {
listen 80;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl http2;
set $glassfish_server glassfish;
set $glassfish_admin glassfishadmin;
server_name mydomain.com;
# sample site certificates
ssl_certificate /etc/nginx/server.crt;
ssl_certificate_key /etc/nginx/server.key;
ssl_trusted_certificate /etc/nginx/server.crt;
location /glassfish {
charset utf-8;
# limits
client_max_body_size 100m;
proxy_read_timeout 600s;
# buffers
proxy_buffers 16 64k;
proxy_buffer_size 128k;
# gzip
gzip on;
gzip_min_length 1100;
gzip_buffers 4 32k;
gzip_types text/css text/less text/plain text/xml application/xml application/json application/javascript;
gzip_vary on;
proxy_redirect off;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://$glassfish_server/;
}
location ~* \.(png|ico|gif|jpg|jpeg|css|js)$ {
proxy_pass https://$glassfish_admin/$request_uri;
}
location /admin {
proxy_connect_timeout 300;
proxy_send_timeout 300;
proxy_read_timeout 300;
send_timeout 300;
proxy_pass_request_headers on;
proxy_no_cache $cookie_nocache $arg_nocache$arg_comment;
proxy_no_cache $http_pragma $http_authorization;
proxy_cache_bypass $cookie_nocache $arg_nocache $arg_comment;
proxy_cache_bypass $http_pragma $http_authorization;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host:$server_port; # very important to add :$server_port here
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
add_header Access-Control-Allow-Origin *;
proxy_set_header Access-Control-Allow-Origin *;
proxy_pass https://$glassfish_admin/;
}
}
