How to serve part of a file with Nginx? - nginx

I'm using X-Accel to serve protected content, using X-Accel-Redirect.
Is it possible to serve only a part of the file? for example, bytes range 0-x, or first 5 minutes of a video (my final goal)
It's important to do that on the server-side, so the client will not have access to the rest of the file.
Currently this is how I send the whole file:
X-Accel-Cache-Control: no-store, no-cache, must-revalidate
Pragma: no-cache
Content-Type: application/octet-stream
Content-Length: {file_size}
Content-Disposition: attachment; filename="myfile.mp4"
Accept-Ranges: bytes
X-Accel-Buffering: yes
X-Accel-Redirect: /protected/myfile.mp4
Nginx conf:
location /protected {
internal;
alias /dir/of/protected/files/;
if_modified_since off;
output_buffers 2 1m;
open_file_cache max=50000 inactive=10m;
open_file_cache_valid 15m;
open_file_cache_min_uses 1;
open_file_cache_errors off;
}

The massive hack would be to use nginx to proxy to itself with a Range header that would limit the request to a range of bytes
something like this (not tested, so this probably won't work as-is, but the idea should work):
{
... snip config ...
server {
listen 80 default_server;
listen [::]:80 default_server;
root /html;
index index.html;
location / {
proxy_pass http://localhost/content;
proxy_set_header Range "bytes=0-100000";
}
location /content {
try_files $uri $uri/ =404;
}
}
}

I haven't tested Slice and X-Accel together. If each file can have a different limit defined by the backend you might configure Slice in the location and send the limit with the X-Accel-Redirect URL as below:
X-Accel-Cache-Control: no-store, no-cache, must-revalidate
Pragma: no-cache
Content-Type: application/octet-stream
Content-Length: {file_size}
Content-Disposition: attachment; filename="myfile.mp4"
Accept-Ranges: bytes
X-Accel-Buffering: yes
X-Accel-Redirect: /protected/myfile.mp4?s=0&e=$PHP_VAR
Nginx.conf
location /protected {
slice; # enable slicing
slice_start_arg s;
slice_end_arg e;
internal;
alias /dir/of/protected/files/;
if_modified_since off;
output_buffers 2 1m;
open_file_cache max=50000 inactive=10m;
open_file_cache_valid 15m;
open_file_cache_min_uses 1;
open_file_cache_errors off;
}
A global file limit
You would need to redirect the original request including the Slice parameters to truncate the file being served.
Nginx conf:
location /sample {
slice; # enable slicing
slice_start_arg s;
slice_end_arg e;
internal;
alias /dir/of/protected/files/;
if_modified_since off;
output_buffers 2 1m;
open_file_cache max=50000 inactive=10m;
open_file_cache_valid 15m;
open_file_cache_min_uses 1;
open_file_cache_errors off;
}
location /protected {
rewrite ^ /sample?s=0&e=1024; # replace for the desired file limit in bytes
}
If the rewrite directive above doesn't work, I suggest the following option using proxy_pass.
location /protected {
set $file_limit 1024; # replace for the desired file limit in bytes
set $delimiter "";
if ($is_args) {
set $delimiter "&";
}
set $args $args${delimiter}s=0&e=$file_limit;
proxy_pass $scheme://127.0.0.1/sample;
}

Related

nginx Nextcloud config not working (ERR_TOO_MANY_REDIRECTS)

I wanted to switch from apache2 to nginx. Now I tried moving my nextcloud config to nginx (using following "template": https://docs.nextcloud.com/server/latest/admin_manual/installation/nginx.html) but I only get ERR_TOO_MANY_REDIRECTS (since it worked before switching to nginx, it should be a nginx configuration problem).
upstream php-handler {
server 127.0.0.1:9000;
#server unix:/var/run/php/php7.4-fpm.sock;
}
# Set the `immutable` cache control options only for assets with a cache busting `v` argument
map $arg_v $asset_immutable {
"" "";
default "immutable";
}
server {
listen 80;
listen [::]:80;
server_name cloud.link.com;
# Prevent nginx HTTP Server Detection
server_tokens off;
# Enforce HTTPS
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name cloud.link.com;
# Path to the root of your installation
root /var/www/cloud.link.com;
# Use Mozilla's guidelines for SSL/TLS settings
# https://mozilla.github.io/server-side-tls/ssl-config-generator/
ssl_certificate /root/cloudflare/file.pem;
ssl_certificate_key /root/cloudflare/file.key;
# Prevent nginx HTTP Server Detection
server_tokens off;
# HSTS settings
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload" always;
# set max upload size and increase upload timeout:
client_max_body_size 512M;
client_body_timeout 300s;
fastcgi_buffers 64 4K;
# Enable gzip but do not remove ETag headers
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/wasm application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
# Pagespeed is not supported by Nextcloud, so if your server is built
# with the `ngx_pagespeed` module, uncomment this line to disable it.
#pagespeed off;
# These settings allow you to optimize the HTTP/2 bandwidth.
# See https://blog.cloudflare.com/delivering-http-2-upload-speed-improvements/
# for tuning hints
client_body_buffer_size 512k;
# HTTP response headers borrowed from Nextcloud `.htaccess`
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
# Remove X-Powered-By, which is an information leak
fastcgi_hide_header X-Powered-By;
# Specify how to handle directories -- specifying `/index.php$request_uri`
# here as the fallback means that Nginx always exhibits the desired behaviour
# when a client requests a path that corresponds to a directory that exists
# on the server. In particular, if that directory contains an index.php file,
# that file is correctly served; if it doesn't, then the request is passed to
# the front-end controller. This consistent behaviour means that we don't need
# to specify custom rules for certain paths (e.g. images and other assets,
# `/updater`, `/ocm-provider`, `/ocs-provider`), and thus
# `try_files $uri $uri/ /index.php$request_uri`
# always provides the desired behaviour.
index index.php index.html /index.php$request_uri;
# Rule borrowed from `.htaccess` to handle Microsoft DAV clients
location = / {
if ( $http_user_agent ~ ^DavClnt ) {
return 302 /remote.php/webdav/$is_args$args;
}
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# Make a regex exception for `/.well-known` so that clients can still
# access it despite the existence of the regex rule
# `location ~ /(\.|autotest|...)` which would otherwise handle requests
# for `/.well-known`.
location ^~ /.well-known {
# The rules in this block are an adaptation of the rules
# in `.htaccess` that concern `/.well-known`.
location = /.well-known/carddav { return 301 /remote.php/dav/; }
location = /.well-known/caldav { return 301 /remote.php/dav/; }
location /.well-known/acme-challenge { try_files $uri $uri/ =404; }
location /.well-known/pki-validation { try_files $uri $uri/ =404; }
# Let Nextcloud's API for `/.well-known` URIs handle all other
# requests by passing them to the front-end controller.
return 301 /index.php$request_uri;
}
# Rules borrowed from `.htaccess` to hide certain paths from clients
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/) { return 404; }
location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) { return 404; }
# Ensure this block, which passes PHP files to the PHP process, is above the blocks
# which handle static assets (as seen below). If this block is not declared first,
# then Nginx will encounter an infinite rewriting loop when it prepends `/index.php`
# to the URI, resulting in a HTTP 500 error response.
location ~ \.php(?:$|/) {
# Required for legacy support
rewrite ^/(?!index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+|.+\/richdocumentscode\/proxy) /index.php$request_uri;
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
set $path_info $fastcgi_path_info;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $path_info;
fastcgi_param HTTPS on;
fastcgi_param modHeadersAvailable true; # Avoid sending the security headers twice
fastcgi_param front_controller_active true; # Enable pretty urls
fastcgi_pass php-handler;
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
fastcgi_max_temp_file_size 0;
}
location ~ \.(?:css|js|svg|gif|png|jpg|ico|wasm|tflite|map)$ {
try_files $uri /index.php$request_uri;
add_header Cache-Control "public, max-age=15778463, $asset_immutable";
access_log off; # Optional: Don't log access to assets
location ~ \.wasm$ {
default_type application/wasm;
}
}
location ~ \.woff2?$ {
try_files $uri /index.php$request_uri;
expires 7d; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
}
# Rule borrowed from `.htaccess`
location /remote {
return 301 /remote.php$request_uri;
}
location / {
try_files $uri $uri/ /index.php$request_uri;
}
}
Does anyone have a solution for this problem?

NGINX Reverse Proxy - Bad Gateway

I searched a lot and tried a lot but on this point I dont get it... so here is my question:
I have a newly setup Proxmox and I wanna run a nginx reverse proxy and some VMs behind it. It is the first time with nginx and reverse proxy for me. I only used Apache before and never a reverse proxy.
So my reverse proxy basically has three files: headers.conf, ssl.conf and my.domain.com.conf.
In headers.conf is the following:
#
# Add headers to serve security related headers
#
# HSTS (ngx_http_headers_module is required)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload;" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Robots-Tag none always;
add_header X-Download-Options noopen always;
add_header X-Permitted-Cross-Domain-Policies none always;
add_header Referrer-Policy no-referrer always;
add_header X-Frame-Options "SAMEORIGIN" always;
# Remove X-Powered-By, which is an information leak
fastcgi_hide_header X-Powered-By;
#prox headers
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
Then there is the ssl.conf:
GNU nano 5.4 /etc/nginx/snippets/ssl.conf
#
# Configure SSL
#
# Diffie-Hellman parameter for DHE ciphersuites, recommended 4096 bits
ssl_dhparam /etc/nginx/dhparams/dhparams.pem;
# Not using TLSv1 will break:
# Android <= 4.4.40 IE <= 10 IE mobile <=10
# Removing TLSv1.1 breaks nothing else!
ssl_protocols TLSv1.2 TLSv1.3;
# SSL ciphers: RSA + ECDSA
# Two certificate types (ECDSA, RSA) are needed.
ssl_ciphers 'TLS-CHACHA20-POLY1305-SHA256:TLS-AES-256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-A>
# Use multiple curves.
ssl_ecdh_curve secp521r1:secp384r1;
# Server should determine the ciphers, not the client
ssl_prefer_server_ciphers on;
# SSL session handling
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# SSL stapling has to be done separately, because it will not work with self-signed certs
# OCSP Stapling fetch OCSP records from URL in ssl_certificate and cache them
ssl_stapling on;
ssl_stapling_verify on;
# DNS resolver
resolver 192.168.xxx.xx;
and the my.domain.com.conf:
server {
listen 80;
listen [::]:80;
server_name my.domain.com;
location ~ \.* {
return 301 https://$host$request_uri;
}
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name my.domain.com;
# SSL configuration
# RSA certificates
ssl_certificate /etc/letsencrypt/my.domain.com/rsa/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/my.domain.com/rsa/key.pem;
# ECC certificates
ssl_certificate /etc/letsencrypt/my.domain.com/ecc/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/my.domain.com/ecc/key.pem;
# This should be ca.pem (certificate with the additional intermediate certificate)
# See here: https://certbot.eff.org/docs/using.html
# ECC
ssl_trusted_certificate /etc/letsencrypt/my.domain.com/ecc/ca.pem;
# Include SSL configuration
include /etc/nginx/snippets/ssl.conf;
# Include headers
include /etc/nginx/snippets/headers.conf;
# Set the access log location
access_log /var/log/nginx/my.domain.access.log;
location ~ \.* {
proxy_pass http://192.168.xxx.xxx:80;
proxy_read_timeout 90;
proxy_redirect http://192.168.xxx.xxx:80 https://my.domain.com;
}
}
That is the reverse proxy side.
The VM has nginx as well and the following file, my.domain.com.conf:
upstream php-handler {
server unix:/run/php/php7.4-fpm.sock;
}
server {
listen 80;
listen [::]:80;
server_name my.domain.com;
#root /var/www;
# location / {
# return 301 https://$host$request_uri;
# }
# Path to the root of your installation
root /var/www/nextcloud;
# Specify how to handle directories -- specifying `/index.php$request_uri`
# here as the fallback means that Nginx always exhibits the desired behaviour
# when a client requests a path that corresponds to a directory that exists
# on the server. In particular, if that directory contains an index.php file,
# that file is correctly served; if it doesn't, then the request is passed to
# the front-end controller. This consistent behaviour means that we don't need
# to specify custom rules for certain paths (e.g. images and other assets,
# `/updater`, `/ocm-provider`, `/ocs-provider`), and thus
# `try_files $uri $uri/ /index.php$request_uri`
# always provides the desired behaviour.
index index.php index.html /index.php$request_uri;
# set max upload size and increase upload timeout:
client_max_body_size 512M;
client_body_timeout 300s;
fastcgi_buffers 64 4K;
# Enable gzip but do not remove ETag headers
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
# Rule borrowed from `.htaccess` to handle Microsoft DAV clients
location = / {
if ( $http_user_agent ~ ^DavClnt ) {
return 302 /remote.php/webdav/$is_args$args;
}
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# Make a regex exception for `/.well-known` so that clients can still
# access it despite the existence of the regex rule
# `location ~ /(\.|autotest|...)` which would otherwise handle requests
# for `/.well-known`.
location ^~ /.well-known {
# The rules in this block are an adaptation of the rules
# in `.htaccess` that concern `/.well-known`.
location = /.well-known/carddav { return 301 /remote.php/dav/; }
location = /.well-known/caldav { return 301 /remote.php/dav/; }
#location /.well-known/acme-challenge { try_files $uri $uri/ =404; }
#location /.well-known/pki-validation { try_files $uri $uri/ =404; }
# Let Nextcloud's API for `/.well-known` URIs handle all other
# requests by passing them to the front-end controller.
return 301 /index.php$request_uri;
}
# Rules borrowed from `.htaccess` to hide certain paths from clients
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/) { return 404; }
location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) { return 404; }
# Ensure this block, which passes PHP files to the PHP process, is above the blocks
# which handle static assets (as seen below). If this block is not declared first,
# then Nginx will encounter an infinite rewriting loop when it prepends `/index.php`
# to the URI, resulting in a HTTP 500 error response.
location ~ \.php(?:$|/) {
# Required for legacy support
rewrite ^/(?!index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+|.+\/richdocumentscode\/proxy) /index.php$request_uri;
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
set $path_info $fastcgi_path_info;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $path_info;
fastcgi_param HTTPS on;
fastcgi_param modHeadersAvailable true; # Avoid sending the security headers twice
fastcgi_param front_controller_active true; # Enable pretty urls
fastcgi_pass php-handler;
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
fastcgi_read_timeout 600;
fastcgi_send_timeout 600;
fastcgi_connect_timeout 600;
fastcgi_param PHP_VALUE "upload_max_filesize = 10G
post_max_size = 10G
max_execution_time = 3600
output_buffering = off";
}
location ~ \.(?:css|js|svg|gif|png|jpg|ico|wasm|tflite)$ {
try_files $uri /index.php$request_uri;
expires 6M; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
location ~ \.wasm$ {
default_type application/wasm;
}
}
location ~ \.woff2?$ {
try_files $uri /index.php$request_uri;
expires 7d; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
}
# Rule borrowed from `.htaccess`
location /remote {
return 301 /remote.php$request_uri;
}
}
I did all that from reading different tutorials and the manuals themselves. But I totally don't get my mistake... is there anything obvious you can see? I'd be very happy for any hint you guys have for me!
Thank you a lot and good christmas time :)
This is the output:
root#HL-Reverse-Proxy:/var/www# telnet 192.168.178.100 80
Trying 192.168.178.100...
Connected to 192.168.178.100.
Escape character is '^]'.
GET /
<html>
<head><title>502 Bad Gateway</title></head>
<body>
<center><h1>502 Bad Gateway</h1></center>
<hr><center>nginx/1.21.4</center>
</body>
</html>
Connection closed by foreign host.
So connection is possible.

Nginx microcaching WordPress — why is it returning BYPASS?

recently I had to setup AWS ASG environment for Wordpress application. This wordpress is a very common one.
The environment in pre and pro is equal and also the behavior of both. My infrastructure have:
1 ASG
1 ALB
m3.medium EC2 instances
Memcached cluster
RDS MySQL
S3 for static content
Stack is a common Nginx / Php 5.6 FPM with microcaching enabled and W3TC to setup cache for database and other stuff.
My nginx.conf is like (I will hide the domain name)
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
events {
worker_connections 65536;
use epoll;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
set_real_ip_from 172.16.0.0/16;
real_ip_header X-Forwarded-For;
log_format main '$host $remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent"';
access_log /var/log/nginx/access.log main;
server_names_hash_max_size 1024;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
server_tokens off;
keepalive_requests 100000;
keepalive_timeout 65 20;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_min_length 512;
gzip_buffers 256 8k;
gzip_comp_level 6;
gzip_proxied any;
gzip_types text/plain text/html text/xml text/css image/x-icon image/bmp application/atom+xml text/javascript application/x-javascript application/pdf application/postscript application/rtf application/vnd.ms-powerpoint application/msword application/vnd.ms-excel application/vnd.wap.xhtml+xml;
fastcgi_cache_path /var/run/cache/nginx levels=1:2 keys_zone=microcache:100m inactive=60m;
add_header X-Cache $upstream_cache_status;
#limit_req_zone $binary_remote_addr zone=one:10m rate=5r/m;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/conf.pre.d/*.conf;
}
An example of virtual host will be:
server{
listen 80;
server_name domain.org.dev;
return 301 https://www.domain.org.dev$request_uri;
}
server {
listen 80;
root /var/www/domain;
index index.html index.htm index index.php;
server_name www.domain.org;
error_page 503 #maintenance;
location #maintenance {
rewrite ^(.*)$ /503.html break;
}
if ($http_x_forwarded_proto != "https") {
rewrite ^(.*)$ https://$server_name$1 permanent;
}
location / {
if (-f $document_root/503.html) {
return 503;
}
try_files $uri/ $uri /index.php?q=$uri&$args;
port_in_redirect off;
add_header 'Access-Control-Allow-Origin' '*';
}
location ~ .php$ {
#include global/wp-security.conf;
include global/php.conf;
include global/wp-microcache.conf;
}
location ~ /.ht(access|passwd) {
deny all;
}
location ~ /.git {
deny all;
}
location ~ /.svn {
deny all;
}
include global/static-content.conf;
}
the most important part is the wp-microcache.conf. Honestly, I took a look on this website to setup my configuration because was very well explained (https://github.com/A5hleyRich/wordpress-nginx/blob/master/global/server/fastcgi-cache.conf)
fastcgi_cache_key "$scheme$request_method$host$request_uri";
fastcgi_cache_use_stale error timeout invalid_header http_500;
fastcgi_ignore_headers Cache-Control Expires Set-Cookie;
set $skip_cache 0;
if ($request_method = POST) {
set $skip_cache 1;
}
if ($query_string != "") {
set $skip_cache 1;
}
if ($request_uri ~* "/wp-admin/|/wp-json/|/xmlrpc.php|wp-.*.php|/feed/|index.php|sitemap(_index)?.xml") {
set $skip_cache 1;
}
if ($http_cookie ~* "comment_author|wordpress_[a-f0-9]+|wp-postpass|wordpress_no_cache|wordpress_logged_in") {
set $skip_cache 1;
}
fastcgi_no_cache $skip_cache;
fastcgi_cache_bypass $skip_cache;
fastcgi_cache microcache;
fastcgi_cache_valid 60m;
So I ran a BlazeMeter test on WordPress, just GETs, so I was expecting mostly HITs. However, after analyzing the results I noticed that many URLs from WordPress were BYPASSING the cache. Then I understood why I saw so many php-fpm processes doing work on the EC2 instance while the test was running.
Anyway, the point is, If I test the Wordpress home (no matter if by curl or navigator):
$ curl -I https://www.domain.org.dev
HTTP/1.1 200 OK
Date: Thu, 30 Nov 2017 14:29:19 GMT
Content-Type: text/html; charset=UTF-8
Connection: keep-alive
Server: nginx
Vary: Accept-Encoding
Link: <https://www.domain.org.dev/wp-json/>; rel="https://api.w.org/"
Link: <https://www.domain.org.dev/>; rel=shortlink
Last-Modified: Thu, 30 Nov 2017 13:53:05 GMT
Link: <https://www.domain.org.dev/wp-json/>; rel="https://api.w.org/"
Link: <https://www.domain.org.dev/>; rel=shortlink
X-Cache: HIT
Nevertheless, if I request another URL from the same WordPress:
$ curl -I https://www.domain.org.dev/about-us
HTTP/1.1 301 Moved Permanently
Date: Thu, 30 Nov 2017 14:29:27 GMT
Content-Type: text/html; charset=UTF-8
Connection: keep-alive
Server: nginx
Last-Modified: Thu, 30 Nov 2017 14:18:58 GMT
Location: https://www.domain.org.dev/about-us/
X-Cache: BYPASS
Why this different behavior? What things can I inspect in order to fix this? I'm kind of lost here. I suspect there is nothing related with plugins or other stuff.
Any ideas guys?
Thanks!
This is most likely due to the statement try_files $uri $uri/ /index.php?q=$uri&$args; which takes a page path (other than the home page) and converts it into a query string, i.e. q=my-non-home-page. Then you have the following block to bypass caching for query strings,
if ($query_string != "") {
set $skip_cache 1;
}
I will suggest you update the statement to try_files $uri $uri/ /index.php?$args;
You can add a debugging mechanism like the following, which can help you highlight the root cause of the problem.
set $cache_bypass_reason "NONE";
set $skip_cache 0;
# POST requests and URLs with a query string should always skip cache
if ($request_method = POST) {
set $skip_cache 1;
set $cache_bypass_reason "POST";
}
if ($query_string != "") {
set $skip_cache 1;
set $cache_bypass_reason "QUERY-STRING";
}
etc
Do any path rewrite if needed, i.e.
rewrite ^/sitemap(-+([a-zA-Z0-9_-]+))?\.xml$ "/index.php?xml_sitemap=params=$2" last;

nginx location header rewrite using proxy_redirect directive

Running nginx on windows as reverse proxy with the below nginx.conf
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
server {
listen 8082;
server_name localhost;
location / {
proxy_pass http://192.168.12.211:8082;
proxy_redirect http://192.168.12.211/ http://localhost:8080/;
proxy_set_header Host $host;
}
}
}
Here is the curl output.
c:\curl>curl -I http://localhost:8082
HTTP/1.1 303 See Other
Server: nginx/1.9.9
Date: Wed, 20 Jan 2016 10:30:38 GMT
Content-Type: text/html
Connection: keep-alive
Access-Control-Allow-Origin: *
location: http://192.168.12.211:8080/test.htm?Id=12345678
I want the "location" header received in the response to be rewritten as shown in the proxy_redirect directive in the nginx.conf file. Basically
location: http://192.168.12.211:8080/test.htm?Id=12345678
must be rewritten as
location: http://localhost:8080/test.htm?Id=12345678
What am I missing here in the nginx configuration? Any hints appreciated.

Rails 2 and Nginx: https pages can't load css or js (but will load graphics)

I'm adding some https pages to my rails site. In order to test it locally, i'm running my site under one mongrel_rails instance (on 3000) and nginx.
I've managed to get my nginx config to the point where i can actually go to the https pages, and they load. Except, the javascript and css files all fail to load: looking in the Network tab in chrome web tools, i can see that it is trying to load them via an https url. Eg, one of the non-working file urls is
https://cmw-local.co.uk/stylesheets/cmw-logged-out.css?1383759216
I have these set up (or at least think i do) in my nginx config to redirect to the http versions of the static files. This seems to be working for graphics, but not for css and js files.
If i click on this in the Network tab, it takes me to the above url, which redirects to the http version. So, the redirect seems to be working in some sense, but not when they're loaded by an https page. Like i say, i thought i had this covered in the second try_files directive in my config below, but maybe not.
Can anyone see what i'm doing wrong? thanks, Max
Here's my nginx config - sorry it's a bit lengthy! I think the error is likely to be in the first (ssl) server block:
NOTE: the urls in here (elearning.dev, cmw-dev.co.uk, etc) are all just local host names, ie they're all just aliases for 127.0.0.1.
server {
listen 443 ssl;
keepalive_timeout 70;
ssl_certificate /home/max/work/charanga/elearn_container/elearn/config/nginx/certs/max-local-server.crt;
ssl_certificate_key /home/max/work/charanga/elearn_container/elearn/config/nginx/certs/max-local-server.key;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
ssl_protocols SSLv3 TLSv1;
ssl_ciphers RC4:HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
server_name elearning.dev cmw-dev.co.uk cmw-dev.com cmw-nginx.co.uk cmw-local.co.uk;
root /home/max/work/charanga/elearn_container/elearn;
# ensure that we serve css, js, other statics when requested
# as SSL, but if the files don't exist (i.e. any non /basket controller)
# then redirect to the non-https version
location / {
try_files $uri #non-ssl-redirect;
}
# securely serve everything under /basket (/basket/checkout etc)
# we need general too, because of the email/username checking
location ~ ^/(basket|general|cmw/account/check_username_availability) {
# make sure cached copies are revalidated once they're stale
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
# this serves Rails static files that exist without running
# other rewrite tests
try_files $uri #rails-ssl;
expires 1h;
}
location #non-ssl-redirect {
return 301 http://$host$request_uri;
}
location #rails-ssl {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_read_timeout 180;
proxy_next_upstream off;
proxy_pass http://127.0.0.1:3000;
expires 0d;
}
}
#upstream elrs {
# server 127.0.0.1:3000;
#}
server {
listen 80;
server_name elearning.dev cmw-dev.co.uk cmw-dev.com cmw-nginx.co.uk cmw-local.co.uk;
root /home/max/work/charanga/elearn_container/elearn;
access_log /home/max/work/charanga/elearn_container/elearn/log/access.log;
error_log /home/max/work/charanga/elearn_container/elearn/log/error.log debug;
client_max_body_size 50M;
index index.html index.htm;
# gzip html, css & javascript, but don't gzip javascript for pre-SP2 MSIE6 (i.e. those *without* SV1 in their user-agent string)
gzip on;
gzip_http_version 1.1;
gzip_vary on;
gzip_comp_level 6;
gzip_proxied any;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; #text/html
# make sure gzip does not lose large gzipped js or css files
# see http://blog.leetsoft.com/2007/7/25/nginx-gzip-ssl
gzip_buffers 16 8k;
# Disable gzip for certain browsers.
#gzip_disable "MSIE [1-6].(?!.*SV1)";
gzip_disable "MSIE [1-6]";
# blank gif like it's 1995
location = /images/blank.gif {
empty_gif;
}
# don't serve files beginning with dots
location ~ /\. { access_log off; log_not_found off; deny all; }
# we don't care if these are missing
location = /robots.txt { log_not_found off; }
location = /favicon.ico { log_not_found off; }
location ~ affiliate.xml { log_not_found off; }
location ~ copyright.xml { log_not_found off; }
# convert urls with multiple slashes to a single /
if ($request ~ /+ ) {
rewrite ^(/)+(.*) /$2 break;
}
# X-Accel-Redirect
# Don't tie up mongrels with serving the lesson zips or exes, let Nginx do it instead
location /zips {
internal;
root /var/www/apps/e_learning_resource/shared/assets;
}
location /tmp {
internal;
root /;
}
location /mnt{
root /;
}
# resource library thumbnails should be served as usual
location ~ ^/resource_library/.*/*thumbnail.jpg$ {
if (!-f $request_filename) {
rewrite ^(.*)$ /images/no-thumb.png
break;
}
expires 1m;
}
# don't make Rails generate the dynamic routes to the dcr and swf, we'll do it here
location ~ "lesson viewer.dcr" {
rewrite ^(.*)$ "/assets/players/lesson viewer.dcr" break;
}
# we need this rule so we don't serve the older lessonviewer when the rule below is matched
location = /assets/players/virgin_lesson_viewer/_cha5513/lessonViewer.swf {
rewrite ^(.*)$ /assets/players/virgin_lesson_viewer/_cha5513/lessonViewer.swf break;
}
location ~ v6lessonViewer.swf {
rewrite ^(.*)$ /assets/players/v6lessonViewer.swf break;
}
location ~ lessonViewer.swf {
rewrite ^(.*)$ /assets/players/lessonViewer.swf break;
}
location ~ lgn111.dat {
empty_gif;
}
# try to get autocomplete school names from memcache first, then
# fallback to rails when we can't
location /schools/autocomplete {
set $memcached_key $uri?q=$arg_q;
memcached_pass 127.0.0.1:11211;
default_type text/html;
error_page 404 =200 #rails; # 404 not really! Hand off to rails
}
location / {
# make sure cached copies are revalidated once they're stale
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
# this serves Rails static files that exist without running other rewrite tests
try_files $uri #rails;
expires 1h;
}
location #rails {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_read_timeout 180;
proxy_next_upstream off;
proxy_pass http://127.0.0.1:3000;
expires 0d;
}
}
EDIT: It just occurred to me that this might be better on superuser or serverfault, or perhaps both. I'm not sure what the cross-site posting rules are.

Resources