I hope somebody can help me solve or at least understand this issue, as I am totally stuck.
I have an EC2 instance running nginx. This EC2 instance is behind a load balancer.
My nginx configuration reverse-proxies the traffic to an external service hosted on Azure.
For demonstration purposes, let's assume this external service is example.com.
All my infrastructure is working, and my nginx configuration file looks like this:
# This is the new file
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
server_names_hash_bucket_size 128;
server {
    listen 80; ## listen for ipv4; this line is default and implied
    root /usr/share/nginx/html;
    index index.html;
    server_tokens off; # disable the Server nginx header

    # enable gzip
    gzip on;
    gzip_disable "msie6";
    gzip_comp_level 6;
    gzip_min_length 1100;
    gzip_buffers 16 8k;
    gzip_proxied any;
    gzip_types
        text/plain
        text/css
        text/js
        text/xml
        text/javascript
        application/javascript
        application/x-javascript
        application/json
        application/xml
        application/rss+xml
        image/svg+xml;

    location / {
        # try_files $uri /index.html; # redirect all request to index.html
        proxy_pass https://example.com;
    }
}
server {
    listen 443; ## listen for ipv4; this line is default and implied
    root /usr/share/nginx/html;
    index index.html;
    server_tokens off; # disable the Server nginx header

    # enable gzip
    gzip on;
    gzip_disable "msie6";
    gzip_comp_level 6;
    gzip_min_length 1100;
    gzip_buffers 16 8k;
    gzip_proxied any;
    gzip_types
        text/plain
        text/css
        text/js
        text/xml
        text/javascript
        application/javascript
        application/x-javascript
        application/json
        application/xml
        application/rss+xml
        image/svg+xml;

    location / {
        # try_files $uri /index.html; # redirect all request to index.html
        proxy_pass https://example.com;
    }
}
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}
With this configuration everything works just fine. But if I remove the server block on port 80, keep only the one on 443, and reload nginx, the browser shows a Bad Gateway error.
Is the port 80 server block required by nginx? I checked the documentation and couldn't find anything.
Can anyone enlighten me on this issue, or just point me in the right direction? Thank you so much for any help or hint you can provide.
And if my question is not 100% clear, please don't hesitate to ask for details.
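For reference, from what I have read, a server block that terminates TLS itself needs the ssl parameter on the listen directive plus a certificate and key, which my 443 block above does not have; I am not sure whether that matters here, since the load balancer sits in front. Something roughly like this (the certificate paths are placeholders, not from my setup):
server {
    listen 443 ssl;
    ssl_certificate     /etc/nginx/ssl/server.crt;   # placeholder path
    ssl_certificate_key /etc/nginx/ssl/server.key;   # placeholder path

    location / {
        proxy_pass https://example.com;
        proxy_ssl_server_name on;   # send SNI when proxying to an external HTTPS host
    }
}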
Related
When downloading a larger file from my server with curl, the speed sometimes drops to 200 kilobytes per second. Why is that? When downloading through the Chrome browser, the speed is very high in most cases.
'''
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 1024;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nodelay on;
tcp_nopush on;
types_hash_max_size 2048;
directio 10m;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
'''
Here is my nginx.conf file; there is no server block.
How do I change the nginx root? I want to change the root to /home/workspace/www.
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}
There is no server block inside http... how can I change the root? If I add a server block like this:
server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    root /home/workspace/www;
}
it shows this error:
nginx: [emerg] a duplicate default server for 0.0.0.0:80 in /etc/nginx/sites-enabled/default:22
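If I understand the error correctly, /etc/nginx/sites-enabled/default already declares a default_server for 0.0.0.0:80, and nginx allows only one default server per listen socket. So I guess I could change the root inside that existing default site instead of adding a second block, roughly like this (a sketch of the stock file with only the root changed):
# /etc/nginx/sites-enabled/default (existing file, root changed)
server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    root /home/workspace/www;   # was /var/www/html
}
Is that the right approach?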
I developed an application using Django (REST API) and ReactJS with webpacker. It works locally. Now I need to deploy it on an nginx server.
I have SSH access to the nginx server (let's say server name: vardhan.com and username: vishnu). Can someone help me with how to do this?
I am following this tutorial: https://tonyteaches.tech/django-nginx-uwsgi-tutorial/
But I have a few questions.
There is already a survey application running on the same server, and it can be accessed at the URL vardhan.com/survey. Is the website address (URL) the same as the server name?
How do I host two applications on the same server?
This is the .conf of the survey app:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
server {
    listen 443 ssl;
    server_name vardhan.com;

    # SSL parameters
    ssl_certificate /etc/ssl/certs/asurc.pem;
    ssl_certificate_key /etc/ssl/certs/private.key;

    location /survey {
        #proxy_pass http://localhost:3000/;
        proxy_pass http://localhost:3010/;
    }
    location /survey {
        #proxy_pass http://localhost:3000/survey;
        proxy_pass http://localhost:3010/survey;
    }
    location /result {
        #proxy_pass http://localhost:3000/result;
        proxy_pass http://localhost:3010/result;
    }
    location /feedback {
        proxy_pass http://localhost:3010/feedback;
    }
    location /public/stylesheets/ {
        autoindex on;
        #alias /home/*****/AppSurvey/public/stylesheets/;
        alias /home/*****/AppSurvey/public/stylesheets/;
    }
    location /public/javascripts/ {
        autoindex on;
        #alias /home/*****/AppSurvey/public/javascripts/;
        alias /home/*****/AppSurvey/public/javascripts/;
        #proxy_pass http://localhost:3000/public/javascripts/index.js;
        proxy_pass http://localhost:3010/public/javascripts/index.js;
    }
}
server {
    listen 80;
    server_name vardhan.com;
    return 302 https://$server_name$request_uri;
}
}
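From what I can tell, the server_name (vardhan.com) is the host part of the URL, and paths like /survey come from the location blocks. So my guess is that I would add another location under the same server block and route it by path to the Django backend, something like this sketch (the /djangoapp prefix and port 8000 are just assumptions on my part, not values from the existing setup):
location /djangoapp/ {
    proxy_pass http://localhost:8000/;           # assumed local port where the Django app would run (e.g. behind gunicorn or uwsgi's HTTP listener)
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-Proto $scheme;
}
Is that the right idea?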
I am using nginx as a proxy server to serve two web apps on a single server that are running on separate ports (for local development purposes). Below is the full nginx.conf file:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
server {
    server_name news.mysite;

    location / {
        proxy_pass http://localhost:3001;
    }
}
server {
    server_name blog.mysite;

    location / {
        proxy_pass http://localhost:3002;
    }
}
# include /etc/nginx/conf.d/*.conf;
# include /etc/nginx/sites-enabled/*;
}
When accessing the subdomains from the browser I see the expected content from the web apps that are running. However, when I access the main domain through the browser (http://mysite), it displays the content from the first proxy_pass (news.mysite at localhost:3001). I would have expected one of the following two scenarios:
Serve content from the default directory at /var/www/html
The typical "This site can’t be reached" error in the browser.
Why does nginx use the first proxy_pass it finds by default, and how can I change it?
The first server that nginx encounters for a socket will be considered the default unless you create another one that you explicitly mark as the default.
So for your case, you would want to add an additional server block as a catch-all:
server {
    listen 80 default_server;
    root /var/www/html;
}
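Alternatively, if you would rather not serve anything at all for unmatched hosts, a sketch of a catch-all that just closes the connection:
server {
    listen 80 default_server;
    server_name _;
    return 444;   # nginx-specific code: close the connection without sending a response
}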
I have Docker and nginx version nginx/1.10.0 (Ubuntu 16.04).
My nginx.conf:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 1024;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
include /etc/nginx/tcpconf.d/*;
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}
This is the default nginx.conf, and I added include /etc/nginx/tcpconf.d/*;
tcpconf.d contains one file:
stream {
    upstream docker {
        server localhost:8182;
        server localhost:8183;
    }
    server {
        listen 443;
        proxy_pass docker;
    }
}
So basically I have a GlassFish 4 server on Docker, and when I start a container on port 8182 I want nginx to balance requests over to port 8183 (if 8182 is not responding), and vice versa.
And this works perfectly, except for one thing. When I start a container, the GlassFish server starts and the web application on it starts too. GlassFish is up after 1-5 seconds, but the web application takes 30 seconds to 1 minute. So when GlassFish is up (for example on port 8182), nginx sends the request to that port and I get a 404, because GlassFish is up but the web application is not. In that case I want the request to go to port 8183 instead, because a 404 is not what I want to see.
So my question is: how do I tell nginx not to show me the 404 and to try the other port instead?
Is there any reason you are using the stream module for this? If NGINX is proxying to a regular HTTP server, then use a regular http{} configuration and proxy_next_upstream to define the behavior on a 404 error:
http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream
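Roughly, it could look like this inside the http block (the listen port and the plain-HTTP assumption are guesses, since your stream block just forwarded raw TCP on 443):
upstream docker {
    server localhost:8182;
    server localhost:8183;
}
server {
    listen 443;                 # add "ssl" plus certificates here if clients expect HTTPS
    location / {
        proxy_pass http://docker;
        # retry the other upstream server on connection errors, timeouts, or a 404 response
        proxy_next_upstream error timeout http_404;
    }
}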