Nginx Caching for specific location with regular expression - nginx

I want to cache the content from a specific location only, but when I try to use a regular expression, nothing is cached.
proxy_cache_path /AINginxService/nginx-1.16.1/cache/ levels=1:2 keys_zone=one:10m max_size=8g inactive=5d use_temp_path=off;
proxy_cache one;
location ~* /v1/mydata/studies/[0-9.]+/series/[0-9.]+/instances/[0-9.]+/rendered {
#rewrite http://([^/]+)/rendered break;
proxy_cache_valid 200 120h;
proxy_pass http://127.0.0.1:9000/v1/mydata;
proxy_set_header Host $host;
proxy_pass_request_headers on;
proxy_http_version 1.1;
proxy_set_header Origin "";
proxy_connect_timeout 1d;
proxy_send_timeout 1d;
proxy_read_timeout 1d;
send_timeout 1d;
}
# Rest api entry point
location /v1/mydata {
#proxy_cache_valid 200 120h;
proxy_pass http://127.0.0.1:9000/v1/mydata;
proxy_set_header Host $host;
proxy_pass_request_headers on;
proxy_http_version 1.1;
proxy_set_header Origin "";
proxy_connect_timeout 1d;
proxy_send_timeout 1d;
proxy_read_timeout 1d;
send_timeout 1d;
}
}
PS: If I uncomment proxy_cache_valid in /v1/mydata, it caches everything.
NOTE: Possible URL patterns
1. /v1/mydata/studies/{study_id}/ # Not to cache
2. /v1/mydata/studies/{study_id}/series/{series_id}/ # Not to cache
3. /v1/mydata/studies/{study_id}/series/{series_id}/instances/{instance_id}/ # Not to cache
4. /v1/mydata/studies/{study_id}/series/{series_id}/instances/{instance_id}/rendered # Cache this
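A minimal sketch of one direction that is consistent with the above (an assumption, not a verified fix): nginx rejects a proxy_pass that carries a URI part inside a location defined by a regular expression, so the regex block has to pass the request URI through unchanged and keep the cache directives scoped to itself.
location ~* /v1/mydata/studies/[0-9.]+/series/[0-9.]+/instances/[0-9.]+/rendered {
    proxy_cache one;                     # scope the cache to this block only
    proxy_cache_valid 200 120h;
    proxy_pass http://127.0.0.1:9000;    # no URI part: the request URI is passed unchanged
    proxy_set_header Host $host;
}
Moving proxy_cache into this block (rather than the server level shown above) keeps the other /v1/mydata responses out of the cache zone entirely.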

Related

Scaling Flask-SocketIO with Redis connecting to only one server port

I have a real-time chat application and I am unable to scale it.
I have a frontend nginx that proxy-passes to a backend nginx; the backend nginx then upstreams to multiple Flask-SocketIO instances.
server {
listen 443 ssl;
#server_name xx.xxx.xx.xxx;
client_max_body_size 300M;
ssl_certificate /etc/nginx/ssl/newssl/ssl_bundle.crt;
ssl_certificate_key /etc/nginx/ssl/newssl/example.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers 'xxx+xxxx+xxxx';
set_real_ip_from 0.0.0.0/0;
real_ip_header X-Real-IP;
real_ip_recursive on;
add_header X-Frame-Options "SAMEORIGIN" ;
add_header Access-Control-Allow-Origin *;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_redirect off;
proxy_read_timeout 120s;
proxy_connect_timeout 120s;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 65s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 100 128k;
client_max_body_size 0M;
proxy_pass http://live_chatnodes;
}
}
Backend nginx:
server {
listen 80;
server_name 10.0.2.9;
client_max_body_size 300M;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_redirect off;
proxy_read_timeout 120s;
proxy_connect_timeout 120s;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 65s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 100 128k;
client_max_body_size 0M;
proxy_pass http://live_nodes;
}
}
This now passes requests to the upstream, and the upstream has 15 nodes; however, all the requests are going to one Flask-SocketIO instance, which is causing my system to go down. One last piece of info: the upstream uses ip_hash, as per the Flask-SocketIO documentation.
All my clients are using a VPN, so they have static IPs as well.
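One direction worth sketching (an assumption based on the symptoms, not a confirmed fix): with ip_hash, the backend nginx hashes on the address of whichever peer connects to it, which here is the frontend proxy (or a shared VPN egress), so every request lands in the same bucket. The generic hash directive can key on the client address the frontend already forwards in X-Real-IP. Only the live_nodes name comes from the config above; the server entries are placeholders.
upstream live_nodes {
    hash $http_x_real_ip consistent;   # sticky per real client, which Socket.IO needs
    server 10.0.2.11:5000;             # placeholder addresses, not from the question
    server 10.0.2.12:5000;
    # ... the remaining Flask-SocketIO instances
}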

Nginx proxy_cache miss if URI has slash

My nginx location block is:
location ^~ /get/preview {
add_header X-Proxy-Cache $upstream_cache_status;
proxy_buffering on;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_ignore_headers Cache-Control Set-Cookie;
proxy_ssl_protocols TLSv1.3;
proxy_ssl_session_reuse on;
proxy_cache upstream;
proxy_cache_key $scheme$host$uri$is_args$args;
proxy_cache_methods GET HEAD;
proxy_cache_min_uses 0;
proxy_cache_valid 200 301 302 1h;
proxy_cache_use_stale updating;
proxy_cache_background_update on;
proxy_cache_lock on;
proxy_pass https://tar.backend.com;
}
This will be a HIT after the 1st request:
https://example.com/get/preview?fileId=17389&x=256&y=256&a=true&v=5fe320ede1bb5
This is always a MISS:
https://example.com/get/preview.png?file=/zedje/118812514_3358890630894241_5001264763560347393_n.jpg&c=5fe3256d45a8c&x=150&y=150
You should check the "Expires" header from your upstream. The documentation says that "parameters of caching may be set in the header fields 'Expires' or 'Cache-Control'."
Another option: maybe you have another location for \.(png|jpg|css|js)$ files with different options.
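One way to act on that, sketched under the assumption that the upstream marks the .png responses as already expired: ignore Expires the same way Cache-Control and Set-Cookie are already ignored, and expose the computed key while debugging.
proxy_ignore_headers Cache-Control Set-Cookie Expires;
# purely for debugging: shows the exact string the cache key is built from
add_header X-Cache-Key $scheme$host$uri$is_args$args always;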

Why do I get 404 on nginx reverse proxy?

Below is my config. I'm getting a 404 on all the routes defined apart from the well-known route, and I don't understand why.
If I make a request to http://example.tech/connect I get a 404 and if I make a request to http://api.example.tech I also get a 404.
I can't see where I've gone wrong as this looks like it should work!
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log warn;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
#REMOVE REFERENCE TO FILES THAT HAVE "server" and "location" blocks in them so we can do it all in this file
#include /etc/nginx/conf.d/*.conf;
# issue with ip and the nginx proxy
real_ip_header X-Forwarded-For;
set_real_ip_from 0.0.0.0/0;
server {
listen 80;
listen [::]:80;
server_name example.tech;
location /.well-known/openid-configuration {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
location /connect {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
location /auth {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
}
server {
listen 80;
listen [::]:80;
server_name api.example.tech;
location /auth/ {
proxy_pass https://myapp.net;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
#proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffers 32 4k;
}
}
}
Needed a forward slash on the end of the proxy_pass for some reason
You need a specific URI in the proxy_pass directive, not the slash itself; in your case, though, the trailing slash is acting as that specific URI. Nginx replaces '/auth' (for example) with the '/' you've added.
In fact, the answer you posted is right: turning proxy_pass http://myapp.net; into proxy_pass http://myapp.net/;.
The reason is that proxy_pass works in two different ways with and without a specific URI. More details about this directive are on nginx.org. Below is some content quoted from that link.
If the proxy_pass directive is specified with a URI, then when a request is passed to the server, the part of a normalized request URI
matching the location is replaced by a URI specified in the directive:
location /name/ {
proxy_pass http://127.0.0.1/remote/;
}
If proxy_pass is specified without a URI, the request URI is passed to the server in the same form as sent by a client when the original
request is processed, or the full normalized request URI is passed
when processing the changed URI:
location /some/path/ {
proxy_pass http://127.0.0.1;
}
In your case there is no URI in the proxy_pass directive, so /auth is passed to the backend server as-is. Unfortunately, your backend server does not have an /auth resource, so a 404 is returned. If your backend server did handle /auth, you would never get a 404 error while requesting the URI /auth.
Here are two examples, which hopefully clarify things.
location /some-path {
proxy_pass http://server:3000;
}
In this case the proxied server (target) must handle the route /some-path. If it handles something else, like / only, it will return an error to Nginx.
One solution is to add a trailing / e.g.:
location /some-path {
proxy_pass http://server:3000/;
}
Now requests sent to /some-path can (and must) be handled by the route / on the proxied server side. However, this may cause issues with some servers. For example, with Express, curl localhost/some-path would be handled fine by Express, whereas curl localhost/some-path/ would cause Express to return Cannot GET //.
This might be different for your target server, but the principle is the same: if you specify the server only, the full path in location is passed to the server, so it must be handled accordingly.
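To tie this back to the config in the question (myapp.net and /connect come from there; /connect/token is just an illustrative sub-path, and the two blocks are alternatives, not to be combined):
location /connect {
    # no URI part: the request URI is forwarded unchanged
    # /connect        -> https://myapp.net/connect
    # /connect/token  -> https://myapp.net/connect/token
    proxy_pass https://myapp.net;
}
location /connect {
    # URI part "/": the matched prefix "/connect" is replaced by "/"
    # /connect        -> https://myapp.net/
    # /connect/token  -> https://myapp.net//token  (the double-slash quirk mentioned above)
    proxy_pass https://myapp.net/;
}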
Here is how, in my case, I got a 404 instead of a 502:
# let's define some proxy pass
location ~ /.well-known/acme-challenge {
proxy_pass http://127.0.0.1:5080; # this backend doesn't exist which leads to 502
proxy_set_header Host $host;
}
# these are the default directives
error_page 500 502 503 504 /50x.html; # this is a reason to redirect 502 to 50x.html
location = /50x.html { # but this file doesn't exist in root so we get 404 instead of 502
root /usr/share/nginx/html;
}

Nginx - adding js css png jpg browser cache breaks the page

I am using WordPress, and when I add the following it breaks the page: it doesn't load any JS or CSS.
server {
listen 80;
server_name domain.com;
client_max_body_size 20M;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass http://domainserver.com;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
error_page 404 =301 /;
# proxy_cache STATIC;
# proxy_cache_valid 200 1d;
# proxy_cache_bypass $cookie_nocache $arg_nocache$arg_comment;
# proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
}
location /ads/ {
proxy_pass http://domainserver.com;
proxy_cache STATIC;
proxy_cache_key "$proxy_host$uri$is_args$args";
proxy_cache_valid 30d;
proxy_cache_valid any 10s;
proxy_cache_lock on;
proxy_cache_use_stale error invalid_header timeout updating;
proxy_http_version 1.1;
expires 30d;
}
# location ~* \.(ico|css|js|gif|jpe?g|png)$ {
# expires 30d;
# add_header Pragma public;
# add_header Cache-Control "public";
# }
}
You can see the commented-out location block where I am trying to match css|js and similar static files.
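A plausible explanation, with a sketch that follows from it (not a confirmed diagnosis): once that regex location matches, requests for .css/.js no longer fall through to location /, and since the block defines neither proxy_pass nor root, nginx serves them from its default root where the files don't exist. Proxying the assets as well keeps the page working while still setting the long expiry.
location ~* \.(ico|css|js|gif|jpe?g|png)$ {
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_pass http://domainserver.com;   # same upstream as "location /"
    expires 30d;
    add_header Cache-Control "public";
}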

Cache some API requests in Nginx

I'm seeking advice from the experts here.
We have the following scenario. We have a Java application running on Tomcat 7, which acts as the API server. User interface files (static HTML and CSS) are served by nginx. Nginx acts as a reverse proxy here: all API requests are passed to the API server and the rest are served by nginx directly.
What we want is to implement a caching mechanism here. That means we want to enable caching for everything, with a few exceptions: we want to exclude some API requests from being cached.
Our configuration is like as shown below
server {
listen 443 ssl;
server_name ~^(?<subdomain>.+)\.ourdomain\.com$;
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
if ($request_method !~ ^(GET|HEAD|POST)$ )
{
return 405;
}
open_file_cache max=1000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
location / {
root /var/www/html/userUI;
location ~* \.(?:css|js)$ {
expires 1M;
access_log off;
add_header Pragma public;
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
}
}
location /server {
proxy_pass http://upstream/server;
proxy_set_header Host $subdomain.ourdomain.com;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_temp_path /var/nginx/proxy_temp;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto https;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_redirect off;
proxy_cache sd6;
add_header X-Proxy-Cache $upstream_cache_status;
proxy_cache_bypass $http_cache_control;
}
ssl on;
ssl_certificate /etc/nginx/ssl/ourdomain.com.bundle.crt;
ssl_certificate_key /etc/nginx/ssl/ourdomain.com.key;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
#ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
ssl_ciphers "EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA384 EECDH+ECDSA+SHA256 EECDH+aRSA+SHA384 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EECDH EDH+aRSA HIGH !RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS";
ssl_dhparam /etc/nginx/ssl/dhparams.pem;
ssl_session_cache builtin:1000 shared:SSL:10m;
ssl_prefer_server_ciphers on;
ssl_session_timeout 24h;
keepalive_timeout 300;
}
As above, we use the cache only for static files located in /var/www/html/userUI.
We want to implement the same in location /server. This is our API server: nginx passes API requests to the Tomcat 7 (upstream) server. We want to enable caching for specific API requests only, and disable caching for all other requests.
We want to do the following:
Exclude all JSON requests from the cache, but enable caching for a few.
The request URL will be something like the one shown below:
Request URL:https://ourdomain.com/server/user/api/v7/userProfileImage/get?loginName=user1&_=1453442399073
What this URL does is fetch the profile image. We want to enable caching for this specific URL. So the condition we would like to use is: if the request URL contains "/userProfileImage/get", cache it; all other requests shouldn't be cached.
To achieve this we changed the settings to the following:
location /server {
set $no_cache 0;
if ($request_uri ~* "/server/user/api/v7/userProfileImage/get*")
{
set $no_cache 1;
}
proxy_pass http://upstream/server;
proxy_set_header Host $subdomain.ourdomain.com;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_temp_path /var/nginx/proxy_temp;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto https;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_redirect off;
proxy_cache sd6;
add_header X-Proxy-Cache $upstream_cache_status;
proxy_no_cache $no_cache;
proxy_cache_bypass $no_cache;
}
Below are the details of the HTTP response:
General:
Request URL:https://ourdomain.com/server/common/api/v7/userProfileImage/get?loginName=user1
Request Method:GET
Status Code:200 OK
Remote Address:131.212.98.12:443
Response Headers :
Cache-Control:no-cache, no-store, must-revalidate
Connection:keep-alive
Content-Type:image/png;charset=UTF-8
Date:Fri, 22 Jan 2016 07:36:56 GMT
Expires:Thu, 01 Jan 1970 00:00:00 GMT
Pragma:no-cache
Server:nginx
Transfer-Encoding:chunked
X-Proxy-Cache:MISS
Please advise us on a solution.
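Two things stand out in the config and response above, and the sketch below assumes both (it is an illustration, not a verified fix): the flag is set to 1, i.e. skip the cache, for exactly the URL that should be cached, and the upstream sends Cache-Control: no-cache, no-store plus an expired Expires header, which nginx honors unless told to ignore them. Inverting the flag and ignoring those headers would look roughly like this (the 10m lifetime is illustrative):
location /server {
    set $no_cache 1;                               # default: do not cache
    if ($request_uri ~* "/userProfileImage/get") {
        set $no_cache 0;                           # cache only the profile-image endpoint
    }
    proxy_ignore_headers Cache-Control Expires;    # upstream marks everything uncacheable
    proxy_cache sd6;
    proxy_cache_valid 200 10m;                     # illustrative lifetime
    proxy_no_cache $no_cache;
    proxy_cache_bypass $no_cache;
    add_header X-Proxy-Cache $upstream_cache_status;
    proxy_pass http://upstream/server;
    # ... remaining proxy_set_header / timeout directives as in the original
}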
