How to restrict access to a site by IP through nginx? - nginx

I have a website that can be accessed by entering its IP address. I want to make it accessible only through the domain. There is little suitable material on the Internet, and no good explanation of what to replace in the code of the nginx.conf file.
My file already has two server blocks.
server {
listen 80;
server_name avoe.com;
rewrite ^ https://avoe.com$request_uri? permanent;
}
server {
listen 443 ssl;
server_name avoe.com;
ssl_certificate /etc/ssl/__reksoft_ru.crt;
ssl_certificate_key /etc/ssl/private.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
add_header X-Frame-Options "SAMEORIGIN";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
client_body_buffer_size 8k;
client_max_body_size 20m;
client_body_in_single_buffer on;
client_header_buffer_size 1m;
large_client_header_buffers 4 8k;
location /Intra/api/thumbor/ {
proxy_pass http://thumbor/;
}
location /solr {
proxy_pass http://solr;
}
location /minio {
proxy_pass http://minio;
}
location /activemq {
proxy_pass http://activemq;
}
location / {
proxy_pass http://wildfly/;
proxy_buffer_size 16k;
proxy_buffers 16 16k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_read_timeout 180s;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
if ($request_method !~ ^(GET|HEAD|POST|DELETE|PUT)$ ) {
return 405;
}
}
}
What do I need to add or replace so that access is ONLY by domain?

You could ensure the HTTP Host header is set to avoe.com like this:
if ($http_host != 'avoe.com') {
return 301 https://avoe.com$request_uri;
}

Use this config as the server that listens on port 80 (note that default_server belongs on the listen directive, not on server_name, so that this block also catches requests that arrive with the bare IP as the Host):
server {
listen 80 default_server;
server_name avoe.com;
if ($host = avoe.com) {
return 301 https://$host$request_uri;
}
return 404;
}
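Requests made directly to https://<server-IP> will still be answered by the avoe.com block, because it is the only (and therefore default) server listening on 443. A minimal sketch of a catch-all block that drops those as well, reusing the certificate paths from the question (nginx needs some certificate to complete the TLS handshake before it can reject the request):
server {
listen 443 ssl default_server;
# "_" is just a placeholder name that matches nothing; this block only
# handles requests whose Host header matches no other server block
server_name _;
ssl_certificate /etc/ssl/__reksoft_ru.crt;
ssl_certificate_key /etc/ssl/private.key;
# 444 tells nginx to close the connection without sending a response
return 444;
}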

Related

Reverse proxy to port 8069 on Engintron issues while it works on standard NGINX setup

I have an Odoo app running on port 8069. This setup worked fine on my old server, but the new server uses Engintron, which seems to handle vhosts differently. The standout issue is that the location / block below in common_http.conf becomes a duplicate of the one needed to run the app, and it sits in the automatically generated config that gets overwritten whenever a cPanel account is created or deleted, or when Engintron is updated.
What would be the correct way to set this up within Engintron?
common_http.conf
location / {
try_files $uri $uri/ @backend;
}
# This location / ends up getting included in the custom
# vhost which is needed for all of the sites except this Odoo app.
custom_vhost.com.conf
upstream example {
server 127.0.0.1:8069 weight=1 fail_timeout=0;
}
upstream example-chat {
server 127.0.0.1:8072 weight=1 fail_timeout=0;
}
server {
listen [::]:80;
server_name delegates.example.com;
return 301 https://delegates.example.com$request_uri;
}
server {
listen [::]:80;
server_name vendors.example.com;
return 301 https://vendors.example.com$request_uri;
}
server {
listen [::]:80;
server_name example.com;
return 301 https://example.com;
}
server {
listen [::]:80;
server_name *.example.com;
return 301 https://example.com;
}
server {
listen [::]:443 ssl;
server_name pgadmin.example.com;
# well-known_start
location ^~ /.well-known {
add_header Host-Header 192fc2e7e50945beb8231a492d6a8024;
root /home/example/public_html;
}
# well-known_end
ssl_certificate /var/cpanel/ssl/apache_tls/*.example.com/combined;
ssl_certificate_key /var/cpanel/ssl/apache_tls/*.example.com/combined;
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains";
add_header X-Content-Type-Options nosniff;
add_header Cache-Control public;
location / {
deny all;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_pass http://127.0.0.1:5050;
}
}
server {
listen [::]:443 ssl;
server_name example.com www.example.com;
return 301 https://example.com;
}
server {
listen [::]:443 ssl http2;
server_name vendors.example.com delegates.example.com;
client_max_body_size 200m;
proxy_read_timeout 720s;
proxy_connect_timeout 720s;
proxy_send_timeout 720s;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-NginX-Proxy true;
#proxy_set_header X-Odoo-dbfilter ^%d\Z;
proxy_redirect off;
proxy_buffering off;
# well-known_start
location ^~ /.well-known {
add_header Host-Header 192fc2e7e50945beb8231a492d6a8024;
root /home/example/public_html;
}
# well-known_end
ssl_certificate /var/cpanel/ssl/apache_tls/*.example.com/combined;
ssl_certificate_key /var/cpanel/ssl/apache_tls/*.example.com/combined;
access_log /var/log/nginx/odoo.access.log;
error_log /var/log/nginx/odoo.error.log;
# adds gzip options
gzip on;
gzip_types text/css text/plain text/xml application/xml application/javascript application/x-javascript text/javascript application/json text/x-json;
gzip_proxied no-store no-cache private expired auth;
#gzip_min_length 1000;
gzip_disable "MSIE [1-6]\.";
location /longpolling {
proxy_pass http://example-chat;
}
location ~* /web/static/ {
gzip_static on;
proxy_cache_valid 200 90m;
proxy_buffering on;
expires 864000;
add_header Cache-Control public;
proxy_pass http://example;
}
location / {
error_page 403 = https://example.com;
proxy_pass http://example;
proxy_redirect off;
gzip_static on;
}
# The above location / becomes a duplicate of the default location / from common_http.conf, which in turn makes the configuration fail validation.
location ~* /web/content/ {
gzip_static on;
proxy_cache_valid 200 90m;
proxy_buffering on;
expires 864000;
add_header Cache-Control public;
proxy_pass http://example;
}
location /web/database/manager {
deny all;
error_page 403 https://example.com;
proxy_pass http://example;
}
}
Since the conf files are included in alphabetical order and any conflicting or duplicate settings are ignored, I ended up renaming the file so that it is included before the others. I also made the file immutable with the following command:
chattr +ai 1_custom_vhost.com.conf
I'm quite sure this is not a graceful solution, but it does the job for now.

The redirect from HTTP to HTTPS does not work in nginx

I am trying to redirect all the http traffic to https and my nginx conf looks like this:
upstream upstreamServer {
server upstream_serv:80;
}
server {
listen 80;
server_name ~^(([a-zA-Z0-9]+)|)test\.xy\.abc\.io$ ;
access_log /var/log/nginx/access.log backend;
location / {
return 301 https://$host$request_uri;
}
}
server {
listen 443 ssl;
server_name ~^(([a-zA-Z0-9]+)|)test\.xy\.abc\.io$ ;
ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
ssl_certificate /path/to/cert_chain.pem;
ssl_certificate_key /path/to/cert_key.pem;
ssl_trusted_certificate /path/to/cert_chain.pem;
access_log /var/log/nginx/access.log backend;
# Redirect all traffic in /.well-known/ to lets encrypt
location /.well-known/acme-challenge/ {
root /var/tmp;
index index.html index.htm;
}
location / {
proxy_pass http://upstreamServer;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_buffering off;
if ($uri ~* ".(js|png|jpg|jpeg|svg|gif|avi|mp3|mp4)$" ){
expires 1d;
add_header Cache-Control public;
}
proxy_pass_request_headers on;
}
}
But for some reason it doesn't work. I read about how nginx chooses the server block and the location block. The setup looks correct to me as far as I understand it, but the site still loads over http when I hit the URL http://test.xy.abc.io instead of redirecting me to https.
I also tried using only
return 301 https://$host$request_uri;
instead of
location / {
return 301 https://$host$request_uri;
}
but it doesn't work either.
Did I get it right that your page is still loading the unencrypted http version? Did you reload the service to pick up the changed config file? (Sorry to ask that stupid question back.)
nginx -t && nginx -s reload
In all the nginx instances I maintain, I personally use something like this:
server {
listen 80 default_server;
# no server_name means all
# For let's encrypt domains: .well-known/acme-challenge
location '/.well-known/acme-challenge' {
default_type "text/plain";
root /var/www/certbot;
}
# Redirect http -> https.
location / {
return 301 https://$host$request_uri$is_args$args;
}
}
The problem was that there is a GCP load balancer in front of my nginx proxy, which forwarded all requests to my nginx proxy over HTTPS regardless of whether the original request was HTTP or HTTPS. After searching the internet, I found that the load balancer cannot force HTTPS on clients. So this is what I had to do in my nginx location block:
if ($http_x_forwarded_proto = http) {
return 301 https://$host$request_uri;
}
and the complete solution looks like this:
server {
listen 80;
listen 443 ssl;
server_name ~^(([a-zA-Z0-9]+)|)test\.xy\.abc\.io$ ;
ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
ssl_certificate /path/to/cert_chain.pem;
ssl_certificate_key /path/to/cert_key.pem;
ssl_trusted_certificate /path/to/cert_chain.pem;
access_log /var/log/nginx/access.log backend;
# Redirect all traffic in /.well-known/ to lets encrypt
location /.well-known/acme-challenge/ {
root /var/tmp;
index index.html index.htm;
}
location / {
if ($http_x_forwarded_proto = http) {
return 301 https://$host$request_uri;
}
proxy_pass http://upstreamServer;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_buffering off;
if ($uri ~* ".(js|png|jpg|jpeg|svg|gif|avi|mp3|mp4)$" ){
expires 1d;
add_header Cache-Control public;
}
proxy_pass_request_headers on;
}
}

Nginx config static website show 404 Not found

I'm configuring a static website in nginx, but the browser shows 404 Not Found.
nginx version: nginx/1.10.3 (Ubuntu)
nginx configuration:
upstream client {
server 127.0.0.1:8080;
}
upstream admin {
server 127.0.0.1:8090;
}
server {
listen 443;
server_name mp.example.com;
ssl on;
ssl_certificate /etc/nginx/conf.d/certificate/mp.example.com/1_mp.example.com_bundle.crt;
ssl_certificate_key /etc/nginx/conf.d/certificate/mp.example.com/2_mp.example.com.key;
ssl_session_timeout 4m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:HIGH:!aNULL:!MD5:!RC4:!DHE;
ssl_prefer_server_ciphers on;
location / {
proxy_pass http://client;
#Proxy Settings
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
location /admin {
proxy_pass http://admin;
#Proxy Settings
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
location /zp {
root /root/admin_ui/1.1.0;
index index.html;
}
location ^~ /assets/ {
expires 90d;
alias /root/www/assets/;
}
}
server{
listen 80;
server_name mp.example.com;
location / {
rewrite ^(.*) https://$host$1 permanent;
}
}
Browser screenshot:
My static website path:
I want https://xx.xxxx.com/zp to show my website.
I think the problem is in the HTTPS configuration, but I'm not sure.

nginx internal reverse proxy

I want the following scenario:
The client makes a browser request to http://my-domain.com.
Nginx A intercepts that request and forwards it to Nginx B, which hosts my website.
I have the configuration below, but I am getting ERR_TOO_MANY_REDIRECTS.
Nginx A (landing host proxy)
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
server_tokens off;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl default_server;
listen [::]:443 ssl default_server;
ssl_certificate /my/ssl/my-domain.com.crt;
ssl_certificate_key /my/ssl/my-domain.com.key;
ssl_dhparam /my/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
ssl_ciphers
'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK';
return 404;
}
upstream client_proxy {
server my_internal_server:80;
}
server {
server_name my-domain.com;
listen 443 ssl;
ssl_certificate /my/ssl/my-domain.com.crt;
ssl_certificate_key /my/ssl/my-domain.com.key;
ssl_dhparam /my/ssl/dhparam.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK';
location / {
rewrite ^ http://my-domain.com/;
index index.html index.htm;
charset utf-8;
auth_basic off;
allow all;
proxy_pass http://client_proxy/;
proxy_ignore_headers Set-Cookie Cache-Control Expires;
proxy_hide_header "Set-Cookie";
proxy_redirect off;
proxy_set_header Host my-domain.com;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_connect_timeout 90s;
proxy_send_timeout 90s;
proxy_read_timeout 90s;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
send_timeout 90s;
}
}
Nginx B (Web server)
server {
listen 80;
root /my/www;
index index.html;
try_files $uri $uri/ /index.html;
# ... other location blocks which are irrelevant here
}
As stated by Richard Smith in the comments, removing the rewrite rule fixed it.
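A minimal sketch of what that leaves in the location block on Nginx A; all the remaining proxy_* and timeout directives stay exactly as in the question:
location / {
# the "rewrite ^ http://my-domain.com/;" line is gone: because its
# replacement is an absolute http:// URL, nginx answered every request
# with a redirect back to http, the port-80 server then redirected to
# https again, and the browser looped until ERR_TOO_MANY_REDIRECTS
index index.html index.htm;
charset utf-8;
proxy_pass http://client_proxy/;
proxy_set_header Host my-domain.com;
# ... the other proxy_set_header, buffer, and timeout settings unchanged
}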

How to DRY nginx configuration

I have nginx config for the current and the legacy application, where the only differences between the two blocks are the DNS-specific entries and the root path. How can I put the shared parts of the config in a variable or something and then reference that variable in both server blocks?
server {
listen 0.0.0.0:443 ssl;
server_name mysite.com;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers RC4:HIGH:!aNULL:!MD5;
ssl_session_cache shared:SSL:15m;
ssl_session_timeout 15m;
root /home/deployer/apps/myapp/current/public;
if ($request_method !~ ^(GET|HEAD|POST)$ ) {
return 444;
}
if ($http_user_agent ~* LWP::Simple|BBBike|wget) {
return 403;
}
if ($http_user_agent ~* (spider|AcoiRobot|msnbot|scrapbot|catall|wget) ) {
return 403;
}
location ^~ /assets/ {
gzip_static on;
gzip_vary on;
expires max;
add_header Cache-Control public;
}
location ~ \.(gif|png|jpe?g|JPE?G|GIF|PNG)$ {
valid_referers none blocked mysite.com *.mysite.com;
if ($invalid_referer) {
return 403;
}
}
location /evil/ {
valid_referers none blocked mysite.com *.mysite.com;
if ($invalid_referer) {
return 403;
}
}
try_files $uri/index.html $uri @puma;
location @puma {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://puma;
}
error_page 500 502 503 504 /500.html;
client_max_body_size 4G;
keepalive_timeout 10;
}
How can I DRY up everything below the root line?
Time has proven Alexey Ten's comment about using include to be the right way to go.
We use this in production:
File structure in /etc/nginx
nginx.conf
sites-enabled/app_config
modules/shared_serve
modules/shared_ssl_settings
In /etc/nginx/sites-enabled/app_config:
upstream puma {
server unix:/tmp/puma.socket fail_timeout=1;
}
server {
server_name example.com;
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
include modules/shared_ssl_settings;
include modules/shared_serve;
}
In /etc/nginx/modules/shared_ssl_settings:
listen 443 ssl;
listen [::]:443;
ssl on;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers On;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:30m;
ssl_stapling on;
ssl_stapling_verify on;
add_header Strict-Transport-Security max-age=15768000;
In /etc/nginx/modules/shared_serve:
location ~ \.(php|aspx|asp|myadmin)$ { return 444; log_not_found off; }
root /home/deployer/apps/example_app/current/public;
try_files $uri/index.html $uri @puma;
location @puma {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://puma;
}
error_page 500 502 503 504 /500.html;
client_max_body_size 4G;
keepalive_timeout 10;
The only gotcha is that your deploy script has to ensure the file structure in /etc/nginx. Naturally, you can name your module directory anything else. You might even keep the includable files right in /etc/nginx without a subdirectory.
You could use a map to define which app root to use based on $host:
map $host $app_root {
default /home/deployer/apps/myapp/current/public;
legacy.mysite.lv /home/deployer/apps/myapp/legacy/public;
}
Add the legacy hostname as an additional server_name (use the same name as in the map). Then use the variable in your root directive:
root $app_root;
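Putting the two together, a minimal sketch; the hostnames come from the map above, and the certificate paths and all locations stay as in the original server block:
map $host $app_root {
default /home/deployer/apps/myapp/current/public;
legacy.mysite.lv /home/deployer/apps/myapp/legacy/public;
}
server {
listen 0.0.0.0:443 ssl;
server_name mysite.com legacy.mysite.lv;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
# the map resolves $app_root per request from the Host header
root $app_root;
# ... all locations and the @puma handler as in the question
}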
