How to achieve concurrency using NGINX

I made a webpage using R (Shiny) and deployed it on Shiny Server, and I am trying to use NGINX to get something like multi-threaded behaviour. I found in some posts that NGINX can also help achieve concurrency, but I don't know how to do it. Could you please help me with that?
In case I misunderstand the definition of concurrency, my desired result is that when different users access the webpage and use some function at the same time, they don't have to wait in a queue; my server should handle those requests at the same time.
Below is the configuration:
user www-data;
worker_processes 4;
worker_rlimit_nofile 20960;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
use epoll;
worker_connections 1024;
accept_mutex on;
accept_mutex_delay 500ms;
multi_accept on;
}
http {
underscores_in_headers on;
aio threads;
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
upstream shiny-server {
ip_hash;
server 127.0.0.1:3838;
}
map $http_app_version $app1_url {
"1.0" http://35.78.39.174:3838;
}
server {
aio threads;
listen 80;
listen [::]:80;
server_name 35.78.39.174:3838;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
if ($http_user_agent !~* "MicroMessenger"){
set $app1_url http://35.78.39.174:3838;
}
aio threads;
proxy_pass http://localhost:3838;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header User-Agent $http_user_agent;
proxy_set_header Accept-Encoding '';
proxy_buffering off;
}
location ^~ /mathjax/ {
alias /usr/share/mathjax2/;
}
}
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*.*;
server_names_hash_bucket_size 128;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
}
I have also edited the shiny-server configuration:
# Instruct Shiny Server to run applications as the user "shiny"
run_as shiny;
sanitize_errors false;
preserve_logs true;
# Define a server that listens on port 3838
server {
listen 3838;
# Define a location at the base URL
location / {
# Host the directory of Shiny Apps stored in this directory
site_dir /home/rstudio/;
# Log all Shiny output to files in this directory
log_dir /var/log/shiny-server/port_3838;
# When a user visits the base URL rather than a particular application,
# an index of the applications available in this directory will be shown.
directory_index on;
app_init_timeout 1800;
app_idle_timeout 1800;
}
}
Really appreciate your help. Thanks a lot.
Could you please show me how to set the configuration to achieve that?
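For what it's worth, NGINX by itself only proxies requests; Shiny Server (open source) runs each app in a single R process, and R is single-threaded, so two users calling a slow function still queue on that one process no matter how NGINX is tuned. A common workaround is to run several Shiny Server instances (or several copies of the app) on different ports and let NGINX load-balance across them. Below is a minimal sketch of that idea; the ports 3839 and 3840 are assumptions for extra instances you would have to start yourself, and the proxy settings mirror what is already in your config:

upstream shiny_backends {
    least_conn;                 # pick the backend with the fewest active connections
    server 127.0.0.1:3838;      # your existing Shiny Server
    server 127.0.0.1:3839;      # assumed extra instance
    server 127.0.0.1:3840;      # assumed extra instance
}

server {
    listen 80;

    location / {
        proxy_pass http://shiny_backends;
        proxy_http_version 1.1;
        # Shiny apps use WebSockets; $connection_upgrade comes from the
        # map $http_upgrade block already present in your http section
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_read_timeout 600s;
    }
}

Note that once a Shiny session is established it must keep talking to the same backend; if you see session errors with least_conn, swapping it for ip_hash (as your current upstream block already does) keeps each client on one instance.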

Related

Can't deploy two apps in Nginx

I developed an application using Django (REST API) and ReactJS with webpacker. It works locally. Now I need to deploy it on an nginx server.
I have SSH access to the nginx server (let's say the server name is vardhan.com and the username is vishnu). Can someone help with how to do it?
I am following this tutorial: https://tonyteaches.tech/django-nginx-uwsgi-tutorial/
But I have a few questions.
There is already a survey application running on the same server, and it can be accessed at vardhan.com/survey. Is the website address (URL) the same as the server name?
How do I host two applications on the same server?
This is .conf of survey app
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
server {
listen 443 ssl;
server_name vardhan.com;
# SSL parameters
ssl_certificate /etc/ssl/certs/asurc.pem;
ssl_certificate_key /etc/ssl/certs/private.key;
location / {
#proxy_pass http://localhost:3000/;
proxy_pass http://localhost:3010/;
}
location /survey {
#proxy_pass http://localhost:3000/survey;
proxy_pass http://localhost:3010/survey;
}
location /result {
#proxy_pass http://localhost:3000/result;
proxy_pass http://localhost:3010/result;
}
location /feedback {
proxy_pass http://localhost:3010/feedback;
}
location /public/stylesheets/ {
autoindex on;
#alias /home/*****/AppSurvey/public/stylesheets/;
alias /home/*****/AppSurvey/public/stylesheets/;
}
location /public/javascripts/ {
autoindex on;
#alias /home/*****/AppSurvey/public/javascripts/;
alias /home/*****/AppSurvey/public/javascripts/;
#proxy_pass http://localhost:3000/public/javascripts/index.js;
proxy_pass http://localhost:3010/public/javascripts/index.js;
}
}
server {
listen 80;
server_name vardhan.com;
return 302 https://$server_name$request_uri;
}
}
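On hosting a second application on the same server: one common pattern is to keep the existing server block and add another location prefix that proxies to the new app's backend. The sketch below assumes the Django/React app ends up listening on 127.0.0.1:8000 (for example via uwsgi or gunicorn); the port and the /newapp prefix are placeholders, not anything taken from the tutorial:

server {
    listen 443 ssl;
    server_name vardhan.com;
    ssl_certificate /etc/ssl/certs/asurc.pem;
    ssl_certificate_key /etc/ssl/certs/private.key;

    # existing survey app
    location /survey {
        proxy_pass http://localhost:3010/survey;
    }

    # new Django/React app, assumed to listen on port 8000
    location /newapp/ {
        proxy_pass http://127.0.0.1:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

The URL people type (vardhan.com) is the server_name; each application is then distinguished by its location prefix. Alternatively, each app can get its own sub-domain and its own server block.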

Nginx is failing to load css, js and png of tomcat server

I'm trying to deploy Tomcat and Nginx on a single AWS EC2 instance. I have 3 instances, and on each instance I want to deploy Nginx and a Tomcat server. Below is my configuration file:
/etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
}"
/etc/nginx/conf.d/application.conf
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name localhost;
root /var/lib/tomcat9/webapps/ROOT;
index deploy.html;
location /admin {
try_files $uri $uri/ /deploy.html;
}
location /admin/admin-portal {
alias /opt/tomcat/webapps/ROOT/;
rewrite /admin-portal/(.*) /$1 break;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://localhost:8080/;
}
location ~ \.css {
add_header Content-Type text/css;
}
location ~ \.js {
add_header Content-Type application/x-javascript;
}
}
My goal is: when I hit http://IP/ or http://IP/admin it should serve deploy.html, and when I hit http://IP/admin/admin-portal it should open the Tomcat server.
NOTE: Both conditions work, except that when I hit http://IP/admin/admin-portal only the HTML page opens, and the CSS/png/js files get a 404 Not Found error.
/opt/tomcat/webapps/ROOT/ is the file path for all the Tomcat static files (CSS/js/png etc.).
Can anyone help me with this?
Try hitting the complete URL of your EC2 instance:
<instance-ip>:8080/admin/admin-portal/
Also, you can add a "/" at the end:
location /admin/admin-portal/
and then try hitting the URL with
<instance-ip>:8080/admin/admin-portal
Now you don't need to add the "/" at the end.
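On the 404s for CSS/js/png specifically: those requests are matched by the location ~ \.css and location ~ \.js blocks, which only add a header and never say where the files come from, so nginx looks for them under root /var/lib/tomcat9/webapps/ROOT and fails. One possible fix, assuming the pages reference the assets by paths that Tomcat itself can serve (worth checking the exact URLs that 404 in the browser's network tab), is to proxy those extensions to Tomcat instead. This is a sketch of that idea, not a drop-in replacement:

# proxy static assets to Tomcat instead of serving them from nginx's root
location ~ \.(css|js|png)$ {
    proxy_set_header Host $host;
    proxy_pass http://localhost:8080;
}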

nginx config error: "[emerg] unexpected end of file, expecting ";" or "}"

I have the nginx config below, which runs into this error when trying to start nginx:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
server_names_hash_bucket_size 164;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript
text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
upstream qe {
server qe.domain.com:443;
}
upstream staging {
server staging.domain.com:443;
}
upstream beta {
server mydomain.com:443;
server mydomain-beta.com:443;
}
# map to different upstream backends based on header
map $http_x_server_select $pool {
default "staging";
qe "qe";
beta "beta";
}
server {
listen 80;
server_name 100.0.0.0 ec2.instance.compute-1.amazonaws.com;
location / {
proxy_pass https://$pool;
#standard proxy settings
proxy_set_header X-Real-IP $remote_addr;
proxy_redirect off;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
}
}
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
Everything seems to be in place, but I am still seeing this error. Am I missing something? It is definitely not the curly braces causing it, as I have the braces in place, but I am not sure what is causing this error.
It reports the error on the last line of the config file, where I have some commented config that came by default when I installed nginx. I don't think that is the reason, though, as I tried removing it as well.
Update: I removed everything from my config file and am still getting the same error. I am confused about what is going on now.

Sub-domain hitting Nginx index page and not the API

I have an Amazon EC2 box, and I am trying to host my Node.js REST API on it. I run the application as a background process using PM2. To expose the locally running background application to the web, I created an Nginx web server. When I access the public DNS of the EC2 box, I can reach the API at ec2-bla-bla.amazonaws.com/api and have it consumed by the front-end applications. All the communication is over HTTP for now.
But I can't let people use this DNS to consume my API, as the DNS is owned by Amazon and not by us. So we created a separate sub-domain called api.our-company.com and pointed it at the EC2 public IP. When I go to api.our-company.com, it displays the Nginx index page, so it is hitting the EC2 box. But when I call api.our-company.com/api, it gives me an Nginx 404 error and won't let me access my API.
I need to be able to use api.our-company.com/api to consume my API.
I tried changing the root in my application's config file in sites-enabled, and tried removing index.html completely from the file, but it still displays the index page. I am following this DigitalOcean article: https://www.digitalocean.com/community/tutorials/how-to-set-up-a-node-js-application-for-production-on-ubuntu-14-04
Here is my project config file in sites-enabled.
upstream app_nodejs {
server 127.0.0.1:8080;
keepalive 8;
}
server {
listen 80;
listen [::]:80 default_server ipv6only=on;
listen 443 default ssl;
root /var/www/express-api/public/api;
index index.html index.html;
# Make site accessible from http://localhost/
server_name api.our-company.com;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://localhost:8080/;
proxy_redirect off;
}
}
Here is the nginx.conf file
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}
I don't have much experience hosting web servers. Any suggestions are greatly appreciated.
I resolved my problem. It was not a configuration setting. The CNAME of the sub-domain (api.our-company.com) was simply not pointing to Amazon's EC2 public DNS. We changed the CNAME, and now I can access my API at api.our-company.com/api.
As you are defining an upstream, try proxying to it:
proxy_pass http://app_nodejs;
and
server {
listen 80 default_server;
Use the references below, as you are new to the NGINX web server.
Reference:
https://www.nginx.com/resources/wiki/start/topics/examples/full/
https://nginxconfig.io/
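Putting those suggestions together, a minimal server block for the sub-domain could look like the sketch below. The upstream block is repeated only so the snippet is self-contained, and it assumes the API should stay reachable under /api exactly as the Node app exposes it (plain HTTP, since the question says there is no SSL yet):

upstream app_nodejs {
    server 127.0.0.1:8080;
    keepalive 8;
}

server {
    listen 80 default_server;
    server_name api.our-company.com;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_pass http://app_nodejs;
    }
}

Because proxy_pass points at the upstream with no URI part, the original request path (including /api) is forwarded to the Node process unchanged.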

Why does nginx default to my proxypass?

I am using nginx as a proxy server to serve two web apps on a single server that are running on separate ports (for local development purposes). Below is the full nginx.conf file:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
server {
server_name news.mysite;
location / {
proxy_pass http://localhost:3001;
}
}
server {
server_name blog.mysite;
location / {
proxy_pass http://localhost:3002;
}
}
# include /etc/nginx/conf.d/*.conf;
# include /etc/nginx/sites-enabled/*;
}
When accessing the subdomains from the browser I see the expected content from the web apps that are running. However, when I access the main domain through the browser (http://mysite), it displays the content from the first proxy_pass (news.mysite at localhost:3001). I would have expected one of the following two scenarios:
Content served from the default directory at /var/www/html
The typical "This site can’t be reached" error in the browser.
Why does nginx proxy to the first proxy_pass it finds by default, and how can I change it?
The first server block that nginx encounters for a given listen socket is treated as the default, unless you create another one that you explicitly mark as the default.
So in your case, you would want to add an additional server block as a catch-all:
server {
listen 80 default_server;
root /var/www/html;
}
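If the goal is scenario 2 instead (unmatched hosts simply fail in the browser), a variation of the same idea is to make the catch-all close the connection rather than serve files; this is just a sketch of that option:

server {
    listen 80 default_server;
    server_name _;       # matches nothing explicitly; only reached as the default
    return 444;          # nginx-specific code: close the connection without a response
}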
