I'm trying to deploy Tomcat and Nginx together on a single AWS EC2 instance. I have 3 instances, and on each instance I want to deploy both Nginx and Tomcat. Below is my configuration file:
/etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
}"
/etc/nginx/conf.d/application.conf
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name localhost;
root /var/lib/tomcat9/webapps/ROOT;
index deploy.html;
location /admin {
try_files $uri $uri/ /deploy.html;
}
location /admin/admin-portal {
alias /opt/tomcat/webapps/ROOT/;
rewrite /admin-portal/(.*) /$1 break;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://localhost:8080/;
}
location ~ \.css {
add_header Content-Type text/css;
}
location ~ \.js {
add_header Content-Type application/x-javascript;
}
}
My goal is: when I hit http://IP/ or http://IP/admin, it should serve deploy.html, and when I hit http://IP/admin/admin-portal, it should open the Tomcat server.
NOTE: Both conditions work, except that when I hit http://IP/admin/admin-portal only the HTML page opens; the CSS/PNG/JS files all return a 404 Not Found error.
/opt/tomcat/webapps/ROOT/ is the path that holds all of Tomcat's static files (CSS/JS/PNG etc.).
Can anyone help me with this?
Try hitting the complete URL of your EC2 instance:
<instanceip>:8080/admin/admin-portal/
Also, you can add a trailing "/" to the location:
location /admin/admin-portal/
and then hit the URL as
<instance-ip>:8080/admin/admin-portal
Now you don't need to add the "/" at the end.
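For reference, here is a minimal sketch of that suggestion applied to the block from the question, assuming Tomcat still listens on localhost:8080 as in the config above. The alias and rewrite lines are dropped because proxy_pass with a trailing slash already strips the /admin/admin-portal/ prefix; the css/app.css path in the comment is only an illustration.
location /admin/admin-portal/ {
    proxy_set_header X-Forwarded-Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-Server $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    # With a trailing slash on both the location and the proxy_pass URL,
    # nginx strips the /admin/admin-portal/ prefix, so a request such as
    # /admin/admin-portal/css/app.css is forwarded to Tomcat as /css/app.css
    # and the CSS/JS/PNG assets are served by Tomcat instead of being looked
    # up under the nginx root and returning 404.
    proxy_pass http://localhost:8080/;
}
Whether this clears the 404s also depends on how the Tomcat pages reference their assets: relative paths will pass through this location, while absolute paths such as /css/... will still fall back to the nginx root. Treat it as a starting point rather than a guaranteed fix.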
Related
I made a webpage using R (Shiny) and deployed it on Shiny Server, and I tried to use NGINX to achieve something like multi-threading. I found in some posts that NGINX can also help achieve concurrency, but I don't know how to do it. Could you please help me do that?
In case I misunderstand the definition of concurrency, my desired result is that when different users access the webpage and use some function at the same time, they do not have to wait in a queue; my server should be able to handle those requests at the same time.
Below is the configuration:
user www-data;
worker_processes 4;
worker_rlimit_nofile 20960;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
use epoll;
worker_connections 1024;
accept_mutex on;
accept_mutex_delay 500ms;
multi_accept on;
}
http {
underscores_in_headers on;
aio threads;
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
upstream shiny-server {
ip_hash;
server 127.0.0.1:3838;
}
map $http_app_version $app1_url {
"1.0" http://35.78.39.174:3838;
}
server {
aio threads;
listen 80;
listen [::]:80;
server_name 35.78.39.174:3838;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
if ($http_user_agent !~* "MicroMessenger"){
set $app1_url http://35.78.39.174:3838;
}
aio threads;
proxy_pass http://localhost:3838;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real_IP $remote_addr;
proxy_set_header User-Agent $http_user_agent;
proxy_set_header Accept-Encoding '';
proxy_buffering off;
}
location ^~ /mathjax/ {
alias /usr/share/mathjax2/;
}
}
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*.*;
server_names_hash_bucket_size 128;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
}
I have also edited the shiny-server configuration:
# Instruct Shiny Server to run applications as the user "shiny"
run_as shiny;
sanitize_errors false;
preserve_logs true;
# Define a server that listens on port 3838
server {
listen 3838;
# Define a location at the base URL
location / {
# Host the directory of Shiny Apps stored in this directory
site_dir /home/rstudio/;
# Log all Shiny output to files in this directory
log_dir /var/log/shiny-server/port_3838;
# When a user visits the base URL rather than a particular application,
# an index of the applications available in this directory will be shown.
directory_index on;
app_init_timeout 1800;
app_idle_timeout 1800;
}
}
Could you please show how to set up the configuration to achieve that? I really appreciate your help. Thanks a lot.
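Not a complete answer, but one thing that may help frame it: the open-source Shiny Server runs each app in a single R process, and R itself is single-threaded, so nginx directives alone (worker_processes, aio threads, and so on) will not make one app answer several long-running requests at once. What nginx can do is balance users across several Shiny instances. Below is a sketch built on the upstream block already defined above; the extra instances on ports 3839 and 3840 are hypothetical and would have to be started separately.
# Sketch only: ports 3839 and 3840 are hypothetical extra Shiny Server
# instances; start them however fits your setup before pointing nginx at them.
upstream shiny-server {
    ip_hash;                                     # keep a user's WebSocket traffic on one instance
    server 127.0.0.1:3838;
    server 127.0.0.1:3839;
    server 127.0.0.1:3840;
}

server {
    listen 80;
    location / {
        proxy_pass http://shiny-server;          # balance across the upstream instead of one port
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;   # uses the map already in the http block
        proxy_read_timeout 600s;
        proxy_buffering off;
    }
}
The per-app side of concurrency (multiple instances of the same app, or asynchronous programming with the promises/future packages inside the app) has to be solved on the Shiny side; nginx only distributes the connections.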
I developed an application using Django (REST API) and ReactJS with webpacker. It works locally. Now I need to deploy it on an nginx server.
I have SSH access to the nginx server (let's say the server name is vardhan.com and the username is vishnu). Can someone help with how to do it?
I am following this tutorial: https://tonyteaches.tech/django-nginx-uwsgi-tutorial/
But I have a few questions.
There is already a survey application running on the same server, and it can be accessed at the URL vardhan.com/survey. Is the website address (URL) the same as the server name?
How do I host two applications on the same server?
This is the .conf of the survey app:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
server {
listen 443 ssl;
server_name vardhan.com;
# SSL parameters
ssl_certificate /etc/ssl/certs/asurc.pem;
ssl_certificate_key /etc/ssl/certs/private.key;
location /survey {
#proxy_pass http://localhost:3000/;
proxy_pass http://localhost:3010/;
}
location /survey {
#proxy_pass http://localhost:3000/survey;
proxy_pass http://localhost:3010/survey;
}
location /result {
#proxy_pass http://localhost:3000/result;
proxy_pass http://localhost:3010/result;
}
location /feedback {
proxy_pass http://localhost:3010/feedback;
}
location /public/stylesheets/ {
autoindex on;
#alias /home/*****/AppSurvey/public/stylesheets/;
alias /home/*****/AppSurvey/public/stylesheets/;
}
location /public/javascripts/ {
autoindex on;
#alias /home/*****/AppSurvey/public/javascripts/;
alias /home/*****/AppSurvey/public/javascripts/;
#proxy_pass http://localhost:3000/public/javascripts/index.js;
proxy_pass http://localhost:3010/public/javascripts/index.js;
}
}
server {
listen 80;
server_name vardhan.com;
return 302 https://$server_name$request_uri;
}
}
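Regarding the two questions above: vardhan.com is the server_name in this config, and the usual way to host a second application on the same server is to add another location block (or blocks) inside the existing listen 443 server, each proxying to its own local port or socket. Here is a rough sketch for the Django/React app, where the /myapp path, port 8000 and the static directory are all placeholders to adapt:
# Sketch only: /myapp, port 8000 and the static path are placeholders.
location /myapp/ {
    proxy_pass http://localhost:8000/;           # Django backend (gunicorn or uWSGI in HTTP mode)
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-Proto $scheme;
}

location /myapp/static/ {
    alias /home/vishnu/myapp/static/;            # collectstatic output / webpack build directory
}
If you follow the uWSGI tutorial you linked, the proxy_pass line would instead become an include uwsgi_params; plus uwsgi_pass unix:/path/to/your-app.sock; pair, but the one-location-per-app idea is the same.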
I have an application that consists of a back end and a front end. Because of restrictions with the hosting provider, I need to serve the back end from a different server than the front end.
My back end handles authentication and serves the content to the front end. It also sends emails to users via nodemailer. Because I am not allowed to open outgoing TCP sockets on the server where the front end is hosted, this feature failed, which made me relocate the back end.
Now I have the back end running on a different server. It consists of a Loopback instance listening on a certain port, with nginx proxying requests to it.
After a while of setup, I had the configuration working. It first failed because of a wrong CORS header, a problem that emerged because Loopback added Access-Control-Allow-Origin *;, which I also had in my nginx config. That resulted in Firefox throwing an error like CORS header does not match Origin (*, *), which made me think that the headers were stacked on top of each other, thus negating the wildcard *.
So I removed the add_header part from my nginx configuration. It worked fine when I tested it, but when I came back, Firefox threw the error Cross-Origin Request Blocked: The Same Origin Policy disallows reading the remote resource at https://{{my_nice_api}}/lang. (Reason: CORS header ‘Access-Control-Allow-Origin’ missing). Status code: 404. This baffled me because I hadn't changed the setup at all.
Now I have fiddled even more but am still not able to find the error. I have add_header Access-Control-Allow-Origin *; set (for testing purposes, obviously), but I keep getting the error that no such header is present. This post had me thinking that I needed to add another header, Access-Control-Allow-Credentials true;, but to no avail. Can anybody give me a pointer as to what I am missing?
My nginx.conf:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
#ssl_certificate /etc/nginx/certs/cert.pem;
#ssl_certificate_key /etc/nginx/certs/key.pem;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
My site.conf (mounted into sites-available):
# Virtual Host configuration for {{my_nice_api}}
#
server {
listen 80;
listen [::]:80;
server_name {{my_nice_api}};
return 301 https://{{my_nice_api}}$uri;
#location / {
# rewrite ^ https://{{my_nice_api}}$request_uri permanent;
#}
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_certificate /etc/nginx/certs/cert.pem;
ssl_certificate_key /etc/nginx/certs/key.pem;
#server_name {{my_nice_api}};
location / {
proxy_pass http://localhost:3001/api/;
#proxy_pass_request_headers on;
#proxy_http_version 1.1;
#proxy_cache_bypass $http_upgrade;
#proxy_set_header Upgrade $http_upgrade;
#proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
#proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#proxy_set_header X-Forwarded-Proto $scheme;
#proxy_set_header X-Forwarded-Host $host;
#proxy_set_header X-Forwarded-Port $server_port;
add_header Access-Control-Allow-Origin *;
add_header Access-Control-Allow-Credentials true;
#add_header X-Frame-Options SAMEORIGIN;
}
}
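In case it helps to narrow things down, one way to stop the two layers from fighting over the header is to strip the copy that Loopback sets before nginx adds its own, and to use add_header ... always so the header is also attached to error responses such as the 404 in the message above (by default add_header only applies to success and redirect responses). A sketch of the location block with those two changes, keeping the wildcard origin as test-only:
location / {
    proxy_pass http://localhost:3001/api/;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;

    # Hide the Access-Control-Allow-Origin header coming from Loopback so the
    # browser only ever sees the single copy added below.
    proxy_hide_header Access-Control-Allow-Origin;

    # "always" makes nginx attach these headers to 4xx/5xx responses as well.
    add_header Access-Control-Allow-Origin * always;
    add_header Access-Control-Allow-Credentials true always;
}
Also note that browsers reject credentialed requests when the allowed origin is the wildcard *, so once testing is done the origin should be echoed explicitly rather than left as *.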
I have the nginx config below, which runs into this error when I try to start nginx:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
server_names_hash_bucket_size 164;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
upstream qe {
server qe.domain.com:443;
}
upstream staging {
server staging.domain.com:443;
}
upstream beta {
server mydomain.com:443;
server mydomain-beta.com:443;
}
# map to different upstream backends based on header
map $http_x_server_select $pool {
default "staging";
qe "qe";
beta "beta";
}
server {
listen 80;
server_name 100.0.0.0 ec2.instance.compute-1.amazonaws.com;
location / {
proxy_pass https://$pool;
#standard proxy settings
proxy_set_header X-Real-IP $remote_addr;
proxy_redirect off;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
}
}
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
Everything seems to be in place, but I am still seeing this error. Am I missing something? It is definitely not the curly braces causing it, as I have the braces in place, but I am not sure what is causing this error.
It reports the error on the last line of the config file, where I have some commented-out config that came by default when I installed nginx. But I don't think that is the reason, as I tried removing it as well.
Update: I removed everything from my config file and am still getting the same error. I am confused about what is going on now.
This is probably obvious, but I can't figure out why my nginx gives me a 404 error when I try to view it under www.mywebsite.com/phpmyadmin. www.mywebsite.com/phpmyadmin/index.php works, however I can't log in with it; it just redirects to www.mywebsite.com/index.php when I click "Go". I'm using Ubuntu 16.04.
My nginx.conf:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}
My default site file:
##
# You should look at the following URL's to grasp a solid understanding
# of Nginx configuration files to fully unleash the power of Nginx.
# https://www.nginx.com/resources/wiki/start/
# https://www.nginx.com/resources/wiki/start/topics/tutorials/config_pitfalls/
# https://wiki.debian.org/Nginx/DirectoryStructure
#
# In most cases, administrators will remove this file from sites-enabled/ and
# leave it as reference inside of sites-available where it will continue to be
# updated by the nginx packaging team.
#
# This file will automatically load configuration files provided by other
# applications, such as Drupal or Wordpress. These applications will be made
# available underneath a path with that package name, such as /drupal8.
#
# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples.
##
# Default server configuration
#
server {
listen 80;
listen 443 ssl;
ssl_certificate /root/Stuffmaker/Website/certificates/www.stuffmaker.website_ssl_certificate.cer;
ssl_certificate_key /root/Stuffmaker/Website/certificates/www.stuffmaker.website_private_key.key;
root /usr/share/nginx/html/;
index index.php index.html index.htm;
server_name my.website;
location /phpmyadmin {
root /usr/share/phpmyadmin;
index index.php index.html index.htm;
}
location /images {
try_files $uri =404;
}
location / {
proxy_pass http://localhost:5050;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
location /api {
proxy_pass http://localhost:5000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass unix:/run/php/php7.0-fpm.sock;
}
location ~ /\.ht {
deny all;
}
}
# Virtual Host configuration for example.com
#
# You can move that to a different file under sites-available/ and symlink that
# to sites-enabled/ to enable it.
#
#server {
# listen 80;
# listen [::]:80;
#
# server_name example.com;
#
# root /var/www/example.com;
# index index.html;
#
# location / {
# try_files $uri $uri/ =404;
# }
#}
I greatly appreciate any help.
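One thing worth checking in the config above: inside location /phpmyadmin, the directive root /usr/share/phpmyadmin makes nginx append the full request URI, so /phpmyadmin (and everything under it) is looked up under /usr/share/phpmyadmin/phpmyadmin/..., which does not exist and is the classic cause of this 404. Here is a sketch of the usual layout, assuming phpMyAdmin really lives in /usr/share/phpmyadmin and PHP-FPM is on the same socket used elsewhere in this file:
location /phpmyadmin {
    root /usr/share;                 # /phpmyadmin/... now resolves to /usr/share/phpmyadmin/...
    index index.php index.html index.htm;

    # Handle PHP here so SCRIPT_FILENAME is built from /usr/share instead of
    # the server-level root (/usr/share/nginx/html/).
    location ~ ^/phpmyadmin/.+\.php$ {
        include snippets/fastcgi-php.conf;
        fastcgi_pass unix:/run/php/php7.0-fpm.sock;
    }
}
Whether that also fixes the login redirect is hard to say from here, but getting the paths to resolve is the first step.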