I'm using nginx to host multiple Node applications. I am working on linking localhost:80 to the forever applications I have running.
I have multiple applications that initially work as shown below, where I just go to each port directly:
 ____________       ___________
|            |     |    my     |
| localhost  | --> |  running  |
|  (port #)  |     |  app(s)   |
|____________|     |___________|
But since we have about four apps on different ports, we want it to work like this:
 ____________        ____________        ___________
|            |      |            |      |    my     |
|   nginx    | ---> |   proxy    | ---> |  running  |
|localhost:80|      |    pass    |      |  app(s)   |
|____________|      |____________|      |___________|
nginx should be routing to a page; however, I get a "404 Not Found", which means nginx is running but has no idea what it's looking for.
Here is my configuration for one sample server application that I am trying to run. It's a forever application (a .js file) that is listening on a certain port.
#Server proxy for haiyan_dashboard
server {
listen 80; #
server_name localhost/haiyan;
location / {
proxy_pass http://localhost:8080/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
Also, here is my entire config file:
user nobody;
worker_processes 5;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
#root html;
#root /var/www/webviz.redcross.org/public_html;
proxy_pass https://webviz.redcross.org/services/tables;
#index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
#Server proxy for haiyan_dashboard
server {
listen 8080; #
server_name localhosthaiyan;
location / {
proxy_pass http://localhost:8080/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
#Proxy for Haiti_Baseline
server {
listen 3001;
server_name localhaiti_baseline;
location / {
proxy_pass http://localhost:3001/secure/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
#ServerProxy for RFL Border
server {
listen 80; #is this also 3002???
server_name rfl_border;
location / {
proxy_pass http://localhost:3002/secure/; #Change the URL!!!
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
#ServerProxy for Basic Borders
server {
listen 80;
server_name basic_border;
location / {
proxy_pass http://localhost:9001/secure/; #This URL is Different
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 80;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
Am I using proxy_pass properly? If not, how does it work in layman's terms? Additionally, all four server blocks sit in the nginx.conf file. Is this also good practice?
(Note: I have already read the documentation on the nginx website)
If you need me to provide more clarification, I'll be happy to add it.
Here is the solution:
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
# Default Route
location / {
root html;
}
# Route to haiyan
location /haiyan_dashboard {
#root html;
#root /var/www/webviz.redcross.org/public_html;
#proxy_pass https://webviz.redcross.org/services/tables;
proxy_pass http://localhost:8080/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
#index index.html index.htm;
}
# Route to Haiti Baseline
location /Haiti_baseline {
proxy_pass http://localhost:3001/secure/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
# Route to RFL_Border
location /RFL_border-master {
proxy_pass http://localhost:3002/secure/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
# Route to Basic Borders
location /basic-borders {
proxy_pass http://localhost:9001/secure/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
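To answer the proxy_pass question in plain terms: proxy_pass forwards any request that matches the surrounding location to the given upstream, and when the proxy_pass URL ends with a URI (even just "/"), nginx replaces the matched location prefix with that URI before forwarding. Below is a small sketch of that substitution, loosely based on the routes above (the lower-case names and trailing slashes are only for illustration):

# Sketch: how proxy_pass rewrites the request URI when it carries a trailing path.
location /haiyan_dashboard/ {
    # Browser asks for:  /haiyan_dashboard/index.html
    # Upstream receives: /index.html           (prefix replaced by "/")
    proxy_pass http://localhost:8080/;
}
location /haiti_baseline/ {
    # Browser asks for:  /haiti_baseline/map
    # Upstream receives: /secure/map           (prefix replaced by "/secure/")
    proxy_pass http://localhost:3001/secure/;
}

As for keeping all four server blocks in nginx.conf: it works, but a common practice is to put each virtual host in its own file under conf.d/ or sites-enabled/ and include them, which keeps the main file short.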
How do I fix net::ERR_SSL_PROTOCOL_ERROR error when I put nginx in front of Tomcat?
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
upstream tomcat {
server 127.0.0.1:8330;
}
charset utf-8;
#gzip on;
server {
listen 8331;
server_name localhost;
# return 301 https://$host$request_uri;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
# root html;
# index index.html index.htm;
# proxy_pass http://sgmng.pluggolf.com;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Origin "";
# new
# Copy the upstream path set above
proxy_pass https://tomcat;
# proxy_redirect off;
charset utf-8;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
# server {
# listen 8080;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
# }
# HTTPS server
server {
listen 8332 ssl;
server_name localhost;
charset utf-8;
ssl_certificate D:/service/cbdc_dt/nginx/cert/plusity.crt;
ssl_certificate_key D:/service/cbdc_dt/nginx/cert/plusity.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location / {
# root html;
# index index.html index.htm;
proxy_pass http://tomcat;
proxy_set_header Host $http_host;
proxy_set_header X-Real_IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $scheme;
# proxy_set_header X-NginX-Proxy true;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection '';
proxy_set_header Origin "";
proxy_http_version 1.1;
chunked_transfer_encoding off;
proxy_buffering off;
proxy_cache off;
proxy_read_timeout 7d;
proxy_redirect off;
charset utf-8;
}
}
}
After applying SSL to all requests coming in on ports 8331 and 8332 as shown above, I wrote configuration that forwards them to Tomcat on port 8330.
However, the problem is that when I go to 8330 through nginx like this, GET https://serverIP:8330/itf/subscribe fails with a net::ERR_SSL_PROTOCOL_ERROR error.
/itf/subscribe works normally when nginx is not present, but there seems to be a problem when calling it from an https address.
I am wondering how I can solve that problem.
Best regards!
However, the problem is that when I go to 8330 through nginx like this, GET https://serverIP:8330/itf/subscribe fails with a net::ERR_SSL_PROTOCOL_ERROR error.
Port 8330 is served by Tomcat, not Nginx. Hence the protocol error. You currently have Nginx listening on:
http://localhost:8331 -> http://127.0.0.1:8330
https://localhost:8332 -> http://127.0.0.1:8330
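A minimal sketch of the intended flow, assuming Tomcat speaks plain HTTP on 8330 and reusing the ports and certificate paths from the question: the browser should always talk to the nginx HTTPS listener (8332), never to Tomcat's port directly.

# Sketch only: terminate TLS in nginx, proxy to Tomcat over plain HTTP.
server {
    listen 8332 ssl;
    server_name localhost;
    ssl_certificate D:/service/cbdc_dt/nginx/cert/plusity.crt;
    ssl_certificate_key D:/service/cbdc_dt/nginx/cert/plusity.key;
    location / {
        # Tomcat on 8330 is plain HTTP, so proxy with http://, not https://
        proxy_pass http://127.0.0.1:8330;
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

With that in place the client would call https://serverIP:8332/itf/subscribe; https://serverIP:8330/... will always fail with ERR_SSL_PROTOCOL_ERROR because Tomcat on 8330 is not doing TLS.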
I got stuck. I want to deploy a Python Dash app on my VPS. I started with a Flask app following this tutorial:
https://www.digitalocean.com/community/tutorials/how-to-serve-flask-applications-with-uswgi-and-nginx-on-ubuntu-18-04
So I created the myproject service. Checking its status, everything seems to be OK.
My nginx configuration:
server {
listen 443 ssl http2;
#listen [::]:443 ssl http2;
server_name www.mysite.com mysite.com;
root /var/www/wordpress;
index index.php;
access_log /var/log/nginx/wordpress.log;
error_log /var/log/nginx/wordpress_error.log error;
ssl_certificate_key /etc/nginx/ssl/www.XXX.key;
ssl_certificate /etc/nginx/ssl/www.XXX.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
add_header Strict-Transport-Security max-age=31536000;
location / {
try_files $uri $uri/ =404;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass unix:/var/run/php/php7.4-fpm.sock;
}
location /shiny/ {
rewrite ^/shiny/(.*)$ /$1 break;
proxy_pass http://127.0.0.1:3838;
proxy_redirect http://127.0.0.1:3838 $scheme://$host/shiny/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_temp_file_write_size 64k;
proxy_connect_timeout 10080s;
proxy_send_timeout 10080;
proxy_read_timeout 10080;
proxy_buffer_size 64k;
proxy_buffers 16 32k;
proxy_busy_buffers_size 64k;
proxy_redirect off;
proxy_request_buffering off;
proxy_buffering off;
}
location /rstudio/ {
rewrite ^/rstudio/(.*)$ /$1 break;
proxy_pass http://127.0.0.1:8787;
proxy_redirect http://127.0.0.1:8787 $scheme://$host/rstudio/;
#proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#proxy_set_header X-Real-IP $remote_addr;
#proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_http_version 1.1;
#proxy_redirect off;
proxy_buffering off;
#proxy_set_header Upgrade $http_upgrade;
#proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
location /myproject/ {
uwsgi_pass unix:/home/XXX/XXX/python_dash/myproject/myproject.sock;
include uwsgi_params;
}
}
But when I go to mysite.com/myproject/ I get a "404 Not Found" page. However, it's different from the regular nginx 404 (the one that appears in response to a misspelled address), so something is going on. It's also visible in the myproject service status: every time I try to access the page, it shows a line like this at the bottom:
[pid: 1696|app: 0|req: 1/1] 213.134.X.XX () {58 vars in 1334 bytes} [Fri Dec 3 13:22:08 2021] GET /myproject/ => generated 232 bytes in 40 msecs (HTTP/2.0 404) 2 headers in 87 bytes (2 switches on core 0)
My myproject.ini:
[uwsgi]
module = wsgi:app
master = true
processes = 5
socket = myproject.sock
chmod-socket = 666
vacuum = true
die-on-term = true
Can you give me a hint? I got stuck on this and can't solve it...
Best regards,
Tomasz
Finally found it!
@app.route must match the location in the nginx config,
so in my case when:
location /myproject/
app route must be:
@app.route("/myproject/")
Remember the trailing "/": if you don't add it to the route, a URL typed with a trailing "/" in the address bar returns not found (so https://website.com/myproject/ will not work, only https://website.com/myproject).
A similar thing applies to Python Dash, but this time @app.route (or @server.route) doesn't work; you need to pass routes_pathname_prefix="/myproject/" to dash.Dash(...).
I don't understand why it works that way, but it works.
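For context on why the prefix has to match: with a plain location block, nginx passes the full request URI (including /myproject/) to uWSGI, so the app only matches routes that carry the same prefix. A hedged alternative sketch, if you would rather keep the app's routes at "/", is to strip the prefix in nginx the way the /shiny/ block above does (untested here; note that Dash also generates its own asset URLs, so for Dash the routes_pathname_prefix approach described above is usually simpler):

# Sketch only: strip the /myproject/ prefix before handing the request to uWSGI,
# so a plain Flask app can keep its routes at "/".
location /myproject/ {
    rewrite ^/myproject/(.*)$ /$1 break;
    include uwsgi_params;
    uwsgi_pass unix:/home/XXX/XXX/python_dash/myproject/myproject.sock;
}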
I have Alfresco 5.2 Community Edition installed on an Ubuntu machine with nginx as a proxy. I updated the SSL certificate recently for my new domain. It is working fine and the site is accessible through https://new.domain.com/share/.
I want to redirect all HTTP traffic to https://new.domain.com/share/. I tried changing the server config block, but I am getting a "too many redirects" error.
Currently new.domain.com redirects to https://new.domain.com/, and I need to change it so that new.domain.com goes to https://new.domain.com/share/.
nginx.conf
#user nginx;
#worker_processes auto;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
#events {
# worker_connections 1024;
#}
events {}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
include /etc/nginx/mime.types;
default_type application/octet-stream;
include /etc/nginx/conf.d/*.conf;
index index.html index.htm;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name new.domain.com;
return 301 https://new.domain.com$request_uri;
location / {
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 504 /50x.html;
location = /50x.html {
}
}
}
server {
listen 443 default ssl;
server_name new.domain.com;
access_log on;
ssl on;
ssl_certificate /etc/nginx/ssl/NEW.DOMAIN.COM.crt;
ssl_certificate_key /etc/nginx/ssl/new.domain.com.key;
location / {
client_max_body_size 4000M;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://127.0.0.1:8080;
sub_filter_types text/javascript;
sub_filter_once off;
}
}
}
alfresco.conf
# Set proxy cache path
proxy_cache_path /var/cache/nginx/alfresco levels=1 keys_zone=alfrescocache:256m max_size=512m inactive=1440m;
# Alfresco Repository Tomcat instance
upstream alfresco {
server localhost:8080;
}
# Share Web client Tomcat instance
upstream share {
server localhost:8080;
}
# Default server config. Update server name.
server {
listen 80 ;
listen [::]:80 ;
server_name example.com www.example.com;
root /opt/alfresco/www;
index index.html index.htm;
# Redirect root requests to Share. Do not do this if you use AOS
# rewrite ^/$ /share;
# redirect server error pages to the static page /50x.html
#
error_page 502 503 504 /maintenance.html;
location = /maintenance.html {
root /opt/alfresco/www;
}
# Access to old Alfresco web client. Remove this location if not needed.
location /alfresco {
# Allow for large file uploads
client_max_body_size 4000M;
# Proxy all the requests to Tomcat
proxy_http_version 1.1;
#proxy_buffering off;
proxy_pass http://alfresco;
proxy_set_header Proxy "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Server $host;
}
location /share {
# Allow for large file uploads
client_max_body_size 4000M;
# Proxy all the requests to Tomcat
proxy_http_version 1.1;
#proxy_buffering off;
proxy_pass http://share;
proxy_set_header Proxy "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Server $host;
}
location /share/proxy/alfresco {
# This section is for allowing to rewrite 50x response to 401 on Ajax req.
# This forces Share to reload page, and thus display maintenance page
# Allow for large file uploads
client_max_body_size 4000M;
# Proxy all the requests to Tomcat
proxy_http_version 1.1;
#proxy_buffering off;
proxy_pass http://share;
proxy_set_header Proxy "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Server $host;
proxy_intercept_errors on;
error_page 502 503 504 =401 /maintenance.html;
}
location /share/res/ {
# Proxy all the requests to Tomcat
proxy_http_version 1.1;
proxy_pass http://share;
proxy_set_header Host $http_host;
proxy_set_header Proxy "";
proxy_cache alfrescocache;
proxy_cache_min_uses 1;
proxy_cache_valid 200 302 1440m;
proxy_cache_valid 404 1m;
proxy_cache_use_stale updating error timeout invalid_header http_500 http_502 http_503 http_504;
}
location / {
# Allow for large file uploads
client_max_body_size 4000M;
# Proxy all the requests to Tomcat
proxy_http_version 1.1;
#proxy_buffering off;
proxy_pass http://alfresco;
proxy_set_header Proxy "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Server $host;
}
location /downtime.js {
# Allow the maintenance page to pick up downtime.js script
root /opt/alfresco/www;
}
}
Redirection code:
server {
if ($host = new.domain.com) {
return 301 https://$host$request_uri;
}
listen 80;
listen [::]:80;
server_name new.domain.com;
return 404;
}
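A minimal sketch of one way to get the desired behaviour without a loop, assuming the HTTPS block keeps proxying to Tomcat on 127.0.0.1:8080 as in the nginx.conf above: do the HTTP-to-HTTPS redirect only in the port-80 server, and redirect only the bare root to /share/ in the port-443 server, so no request is ever redirected twice.

# Sketch only; names and paths taken from the question.
server {
    listen 80 default_server;
    server_name new.domain.com;
    return 301 https://new.domain.com$request_uri;   # http -> https, exactly once
}
server {
    listen 443 ssl;
    server_name new.domain.com;
    ssl_certificate /etc/nginx/ssl/NEW.DOMAIN.COM.crt;
    ssl_certificate_key /etc/nginx/ssl/new.domain.com.key;
    # Send only the bare root to Share; everything else is proxied untouched.
    location = / {
        return 301 /share/;
    }
    location / {
        client_max_body_size 4000M;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Proto https;
        proxy_pass http://127.0.0.1:8080;
    }
}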
I have the below folder structure in my app:
/app/server - has content for the API
/app/static - has images, JS and CSS files in respective folders (/static/images, /static/js, etc.)
upstream pythonapp {
server 127.0.0.1:8080;
}
server {
listen 80;
server_name www.example.com;
error_log /home/LogFiles/docker/error.log warn;
root /app;
index index.html index.htm;
location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
expires 1d;
}
location /static {
autoindex on;
alias /app/static/;
}
location / {
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_pass http://pythonapp;
proxy_cache_bypass $http_upgrade;
}
}
Everything worked as expected, but today I realized that we were using
http://xyz.abc/images/abc.gif instead of
http://xyz.abc/static/images/abc.gif
As there is no /static in the URL, my /static rule won't be honored anymore.
I have read articles online about different approaches, but none of them covered a URL without /static in it. Has anyone had this issue before and is aware of a solution?
NEW EDITS:
I have changed the config to the below, as @sjdaws mentioned. I had to change root at the server level too to make it work.
server {
listen 80;
server_name www.example.com;
error_log /home/LogFiles/docker/error.log warn;
root /app/static;
index index.html index.htm;
location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
expires 1d;
}
location ^~ /(audio|build|images|style) {
autoindex on;
root /app/static;
}
location / {
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_pass http://pythonapp;
proxy_cache_bypass $http_upgrade;
}
}
Now it serves static content from the /app/static/images folder for the http://abc.xyz/images/test.jpg URL, but http://abc.xyz/images/d1/test.jpg doesn't work.
Edit: the above configuration does work, and it serves sub-folder content too. It looks like I just have some config issues with .mp3 files.
Change your static location to /images, assuming abc.gif is located at /app/static/abc.gif:
# Serve /images/file.ext from /app/static/file.ext
location ^~ /images {
autoindex on;
alias /app/static;
}
If you're trying to serve the file from /app/static/images/abc.gif, use root instead of alias; root appends the request URI to the configured path:
# Serve /images/file.ext from /app/static/images/file.ext
location ^~ /images {
autoindex on;
root /app/static;
}
You can also use a regex location to cover all the directories you want (note the ~ modifier rather than ^~, since ^~ is a prefix match, not a regex):
location ~ ^/(images|js|css) {
    root /app/static;
}
Thanks @sjdaws for helping with this issue. The below config works perfectly for my scenario.
worker_processes auto;
pid /var/run/nginx.pid;
daemon off;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /home/LogFiles/docker/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
gzip on;
upstream pythonapp {
server 127.0.0.1:8080;
}
server {
listen 80;
server_name www.example.com;
error_log /home/LogFiles/docker/error.log warn;
root /app/static;
index index.html index.htm;
location ~* \.(js|css|png|jpg|jpeg|gif|ico|mp3|html)$ {
expires 1d;
}
location / {
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_pass http://pythonapp;
proxy_cache_bypass $http_upgrade;
}
}
}
I have a site on OpenShift RHC, and I want to set up a reverse proxy with nginx on the OpenShift RHC servers so that, from this server, I have access to many other servers. So I configured my nginx server with this kind of configuration:
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
error_log {{OPENSHIFT_HOMEDIR}}/app-root/logs/nginx_error.log debug;
pid {{NGINX_DIR}}/logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
#access_log $OPENSHIFT_DIY_LOG_DIR/access.log main;
port_in_redirect off;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 165;
gzip on;
upstream frontends {
#server pr4ss.tk;
#server 222.66.115.233:80 weight=1;
server {{OPENSHIFT_INTERNAL_IP}}:8081 ;
}
upstream frontends2 {
server google.com;
#server 222.66.115.233:80 weight=1;
#server {{OPENSHIFT_INTERNAL_IP}}:8081 ;
}
upstream index {
server free-papers.elasa.ir weight=1;
server diy4tornado-tornado4ss.rhcloud.com weight=2;
}
upstream comment {
server vb2-fishsmarkets.rhcloud.com;
#server community.elasa.ir;
}
server {
listen {{OPENSHIFT_INTERNAL_IP}}:{{OPENSHIFT_INTERNAL_PORT}};
server_name {{OPENSHIFT_GEAR_DNS}} www.{{OPENSHIFT_GEAR_DNS}};
root {{OPENSHIFT_REPO_DIR}};
set_real_ip_from {{OPENSHIFT_INTERNAL_IP}};
real_ip_header X-Forwarded-For;
#charset koi8-r;
#access_log logs/host.access.log main;
location /main {
root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
try_files $uri $uri/ =404;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
}
location ~* ^/(.*) {
#proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
#proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ http://diy-elasa2.rhcloud.com/;
#proxy_pass http://comment/$1$is_args$args;
proxy_pass http://index/$1$is_args$args;
}
location ^~ /admincp {
if (!-f $request_filename) {
rewrite ^/admincp/(.*)$ /index.php?routestring=admincp/$1 last;
}
proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ http://diy-elasa2.rhcloud.com/;
proxy_pass http://comment/$1$is_args$args;
}
location /www {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends;
}
location /categories {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends2;
}
location /index {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
# an HTTP header important enough to have its own Wikipedia entry:
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
# enable this if you forward HTTPS traffic to unicorn,
# this helps Rack set the proper URL scheme for doing redirects:
# proxy_set_header X-Forwarded-Proto $scheme;
# pass the Host: header from the client right along so redirects
# can be set properly within the Rack application
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
# set "proxy_buffering off" *only* for Rainbows! when doing
# Comet/long-poll/streaming. It's also safe to set if you're using
# only serving fast clients with Unicorn + nginx, but not slow
# clients. You normally want nginx to buffer responses to slow
# clients, even with Rails 3.1 streaming because otherwise a slow
# client can become a bottleneck of Unicorn.
#
# The Rack application may also set "X-Accel-Buffering (yes|no)"
# in the response headers do disable/enable buffering on a
# per-response basis.
# proxy_buffering off;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 10;
proxy_send_timeout 5;
proxy_read_timeout 3600;
proxy_buffer_size 4k;
proxy_buffers 4 132k;
proxy_busy_buffers_size 264k;
proxy_temp_file_write_size 164k;
proxy_pass http://index;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
location ~ \.php$ {
root html;
fastcgi_pass {{OPENSHIFT_INTERNAL_IP}}:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
include fastcgi_params;
}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443;
# server_name localhost;
# ssl on;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_timeout 5m;
# ssl_protocols SSLv2 SSLv3 TLSv1;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
I used the upstream index:
upstream index {
server free-papers.elasa.ir weight=1;
server diy4tornado-tornado4ss.rhcloud.com weight=2;
}
But when I use this configuration (with Cloudflare or the RHC server) I get this error from Cloudflare:
Direct IP access not allowed
So what is the proper configuration for nginx (or Apache) for this kind of setup?
Thanks a lot for your attention.
I found the answer:
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain application/xml application/javascript text/javas$
gzip_disable "msie6";
gzip_http_version 1.1;
upstream comment {
#server vb-fishsmarkets.rhcloud.com;
#server vb.elasa.ir;
#server vb-elasa3.rhcloud.com ;
#server vb2-fishsmarkets.rhcloud.com;
#server forums.fishsmarket.tk;
#server community.elasa.ir;
#server free-papers.elasa.ir;
#server diy4tornado-tornado4ss.rhcloud.com weight=1;
server free-papers.elasa.ir weight=1;
}
limit_req_zone $binary_remote_addr zone=one:10m rate=30r/m;
limit_req_zone $binary_remote_addr zone=one2:10m rate=1r/m;
limit_req_zone $http_x_forwarded_for zone=one3:10m rate=1r/m;
proxy_cache_path /tmp levels=1:2 keys_zone=RUBYGEMS:10m
inactive=24h max_size=1g;
server {
listen 127.6.145.1:8080;
server_name diy-elasa2.rhcloud.com community.elasa.ir ;
#charset koi8-r;
and:
location ~* ^/(.*) {
#root html;
#index index.html index.htm;
#proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
proxy_set_header Host free-papers.elasa.ir;
#proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ h$
#proxy_redirect http://fm.elasa.ir/ http://community.e$
proxy_pass http://comment/$1$is_args$args;
proxy_set_header X-Real-IP $remote_addr;
#proxy_set_header X-Scheme $scheme;
sub_filter 'http://fm.elasa.ir/' 'http://community.elasa.ir/';
sub_filter_once on;
proxy_set_header X-outside-url $scheme://$host;
#proxy_set_header X-Real-IP $remote_addr;
proxy_cache RUBYGEMS;
proxy_cache_valid 200 1d;
proxy_cache_use_stale error timeout invalid_header updating
http_500 http_502 http_503 http_504;
proxy_http_version 1.1;
proxy_cache RUBYGEMS;
proxy_cache_valid 200 1d;
proxy_cache_use_stale error timeout invalid_header updating
http_500 http_502 http_503 http_504;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_cache_bypass $http_upgrade;
proxy_set_header X-NginX-Proxy true;
proxy_redirect off;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 100M;
client_body_buffer_size 1m;
proxy_intercept_errors on;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 256 16k;
proxy_busy_buffers_size 256k;
proxy_temp_file_write_size 256k;
#proxy_max_temp_file_size 0;
proxy_read_timeout 300;
}
The important thing is the proxied Host header:
proxy_set_header Host free-papers.elasa.ir;
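In other words, Cloudflare (and most shared hosts) choose the site by the Host header, so when nginx proxies to such an upstream it must send the upstream's own hostname rather than the client's. A minimal sketch reusing the hostnames from the answer above:

# Sketch only: proxy to a name-based / Cloudflare-fronted upstream.
upstream comment {
    server free-papers.elasa.ir;
}
server {
    listen 8080;
    location / {
        proxy_pass http://comment;
        # Send the upstream's hostname, not the client's, so the remote
        # site (or Cloudflare) can route the request to the right virtual host.
        proxy_set_header Host free-papers.elasa.ir;
        proxy_set_header X-Real-IP $remote_addr;
    }
}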