I have a strange issue when running a Nuxt application with PM2 behind nginx.
When I start it with "pm2 start", the application works, but I get an Error 500 when I try to get data from the database. When I start the application with just "npm start", everything works fine.
I use "ecosystem.config.js":
module.exports = {
  apps: [
    {
      name: 'MyAppName',
      exec_mode: 'cluster',
      instances: 'max',
      script: './node_modules/nuxt/bin/nuxt.js',
      args: 'start'
    }
  ]
}
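One difference between the two launch methods worth checking: PM2 may start the app with a different working directory and without the shell environment that "npm start" had, so database credentials read from the environment can end up undefined under PM2 only. A minimal sketch of supplying them explicitly through PM2's env block (the variable names below are hypothetical placeholders, not anything from the question):

module.exports = {
  apps: [
    {
      name: 'MyAppName',
      exec_mode: 'cluster',
      instances: 'max',
      script: './node_modules/nuxt/bin/nuxt.js',
      args: 'start',
      // Hypothetical: whatever variables your database layer actually reads
      env: {
        NODE_ENV: 'production',
        DB_HOST: '127.0.0.1',
        DB_USER: 'appuser',
        DB_PASSWORD: 'secret'
      }
    }
  ]
}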
nginx setup (sites-available/default):
server {
    listen 80;
    listen [::]:80;

    index index.html;
    server_name my-domain.com www.my-domain.com;

    location ~* ^.+\.(jpg|jpeg|png|gif)$ {
        rewrite ^/_nuxt(/.*) $1 break;
        root /var/www/myApplicationPath;
    }

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
Request URL: http://my-domain.com/api/cars/?limit=5
The error:
{status: 500, message: "Cannot read properties of undefined (reading 'limit')", name: "TypeError"}
message: "Cannot read properties of undefined (reading 'limit')"
name: "TypeError"
status: 500
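For what it's worth, this TypeError reads like the API handler dereferencing .limit on an object that is undefined when running under PM2 (a query or config object, for example). A defensive sketch of such a handler; the Express-style route, db object, and names are hypothetical, for illustration only:

// Hypothetical handler; guards the object whose .limit access is failing.
app.get('/api/cars', async (req, res) => {
  const query = req.query || {};                  // avoid reading .limit of undefined
  const limit = parseInt(query.limit, 10) || 5;   // default when missing or NaN
  try {
    const cars = await db.collection('cars').find().limit(limit).toArray();
    res.json(cars);
  } catch (err) {
    res.status(500).json({ status: 500, message: err.message, name: err.name });
  }
});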
Related
I'm trying to connect from an API user to my Hyperledger Fabric network via an nginx server. I have the following settings in my *.conf files:
First file:
upstream rca-org1 {
    server XXXX:7054;
}

upstream couchdb {
    server XXXX:5984;
}

server {
    listen XXXX:80 default_server;
    listen [::]:80;
    server_name XXXX;
    access_log path/to/nginx/access.log;

    location / {
        root /app/build;
        index index.html;
        try_files $uri /index.html;
    }

    location /api {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        proxy_redirect off;
        proxy_pass http://rca-org1;
    }

    location /wallet {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        proxy_redirect off;
        proxy_pass http://couchdb;
    }
}
Second file:
upstream network {
    server XXXX:7051;
}

server {
    listen 80 http2;
    listen [::]:80;
    server_name XXXX;
    access_log /path/to/nginx/access.log;

    location /channels {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        proxy_redirect off;
        grpc_pass grpc://network;
    }

    location ~ /(static|media)/ {
        root /app/build/;
    }
}
When I try to execute a connect request from the k8s cluster (my node.js API and my HF network both live in the k8s cluster) to nginx, I receive this output:
Error: 13 INTERNAL: Received RST_STREAM with code 2 triggered by internal client error: Protocol error
    at Object.callErrorFromStatus (/path/to/node_modules/fabric-protos/node_modules/@grpc/grpc-js/build/src/call.js:31:19)
    at Object.onReceiveStatus (/path/to/node_modules/fabric-protos/node_modules/@grpc/grpc-js/build/src/client.js:190:52)
    at Object.onReceiveStatus (/path/to/node_modules/fabric-protos/node_modules/@grpc/grpc-js/build/src/client-interceptors.js:365:141)
    at Object.onReceiveStatus (/path/to/node_modules/fabric-protos/node_modules/@grpc/grpc-js/build/src/client-interceptors.js:328:181)
    at /path/to/node_modules/fabric-protos/node_modules/@grpc/grpc-js/build/src/call-stream.js:188:78
    at processTicksAndRejections (internal/process/task_queues.js:77:11)
for call at
    at ServiceClientImpl.makeUnaryRequest (/path/to/node_modules/fabric-protos/node_modules/@grpc/grpc-js/build/src/client.js:160:30)
    at ServiceClientImpl.<anonymous> (/path/to/node_modules/fabric-protos/node_modules/@grpc/grpc-js/build/src/make-client.js:105:19)
    at /path/to/node_modules/fabric-common/lib/Discoverer.js:73:17
    at new Promise (<anonymous>)
    at Discoverer.sendDiscovery (/path/to/node_modules/fabric-common/lib/Discoverer.js:54:10)
    at DiscoveryService.send (/path/to/node_modules/fabric-common/lib/DiscoveryService.js:318:30)
    at processTicksAndRejections (internal/process/task_queues.js:95:5)
    at async NetworkImpl._initializeInternalChannel (/path/to/node_modules/fabric-network/lib/network.js:300:13)
    at async NetworkImpl._initialize (/path/to/node_modules/fabric-network/lib/network.js:250:9)
    at async Gateway.getNetwork (/path/to/node_modules/fabric-network/lib/gateway.js:350:9)
    at async fabricConnectorForUser (/path/to/custom_policies/fabricConnectorForUser/index.js:28:23)
My access.log:
10.39.22.45 - - [29/Sep/2022:16:57:52 +0300] "PRI * HTTP/2.0" 400 157 "-" "-"
My error.log:
2022/09/29 16:57:21 [warn] 5810#5810: conflicting server name "XXXX" on [::]:80, ignored
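Two clues in these logs: the "PRI * HTTP/2.0" request answered with 400 suggests the gRPC (HTTP/2) traffic is reaching an HTTP/1.1 listener, and the "conflicting server name" warning means the second server block is being ignored on [::]:80. A sketch of isolating the gRPC proxy on a dedicated port so the two protocols never share a listener (port 7080 is an arbitrary choice; it reuses the "network" upstream from the second file):

# Sketch: a dedicated h2c (plaintext HTTP/2) listener used only for gRPC,
# so it cannot collide with the HTTP/1.1 server blocks on port 80.
server {
    listen 7080 http2;
    server_name XXXX;

    location / {
        grpc_pass grpc://network;
    }
}

The Fabric connection profile would then need to point the peer endpoint at port 7080 instead of 80.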
I'm trying to set up Strapi on my DO droplet.
I have a UI for my web app running on a subdomain (from here onwards: https://blah.mywebsite.com) - it's running a Next.js instance - but I believe that is irrelevant.
I also have Strapi running as a separate node process (via PM2). Both the Next.js and Strapi node instances are managed by pm2 using the following ecosystem.config.js file:
module.exports = {
  apps: [
    {
      name: "webapp",
      cwd: "/root/webapp",
      script: "npm",
      args: "start",
      env: { NODE_ENV: "production" },
    },
    {
      name: "strapi",
      cwd: "/root/webappstrapi",
      script: "yarn",
      args: "start",
      env: {
        NODE_ENV: "production",
        APP_KEYS: "STRINGGOESHERE,STRINGGOESHERE",
        ADMIN_JWT_SECRET: "STRINGGOESHERE",
        JWT_SECRET: "STRINGGOESHERE",
        API_TOKEN_SALT: "STRINGGGOESHERE",
        DATABASE_NAME: "DBNAMEHERE",
        DATABASE_PASSWORD: "PASSWORDHERE"
      },
    },
  ],
};
From what I can see there isn't an issue with either node process and both are running just fine.
I then follow the tutorial here ("Subfolder unified"): https://docs.strapi.io/developer-docs/latest/setup-deployment-guides/deployment/optional-software/nginx-proxy.html#nginx-virtual-host
My Strapi config/server.js file looks like this:
module.exports = ({ env }) => ({
  host: env("HOST", "0.0.0.0"),
  port: env.int("PORT", 1337),
  url: "https://blah.mywebsite.com/strapi",
  app: {
    keys: env.array("APP_KEYS"),
  },
});
I have run yarn build and serve the build files via the aforementioned pm2 config above, to set up the following URL structure:
https://blah.mywebsite.com/strapi/admin
https://blah.mywebsite.com/strapi/api
My Nginx config for the subdomain looks like this (following the Strapi docs):
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name blah.mywebsite.com;
    root /var/www/blah.mywebsite.com/public;

    # SSL
    ssl_certificate /etc/letsencrypt/live/blah.mywebsite.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/blah.mywebsite.com/privkey.pem;
    ssl_trusted_certificate /etc/letsencrypt/live/blah.mywebsite.com/chain.pem;

    # security
    include nginxconfig.io/security.conf;

    location / {
        proxy_pass http://127.0.0.1:3000; # next.js
        include nginxconfig.io/proxy.conf;
    }

    location /strapi/ {
        rewrite ^/strapi/?(.*)$ /$1 break;
        proxy_pass http://127.0.0.1:1337;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_pass_request_headers on;
    }

    # additional config
    include nginxconfig.io/general.conf;
}

# subdomains redirect
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name *.blah.mywebsite.com;

    # SSL
    ssl_certificate /etc/letsencrypt/live/blah.mywebsite.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/blah.mywebsite.com/privkey.pem;
    ssl_trusted_certificate /etc/letsencrypt/live/blah.mywebsite.com/chain.pem;

    return 301 https://blah.mywebsite.com$request_uri;
}

# HTTP redirect
server {
    listen 80;
    listen [::]:80;
    server_name .blah.mywebsite.com;

    include nginxconfig.io/letsencrypt.conf;

    location / {
        return 301 https://blah.mywebsite.com$request_uri;
    }
}
Now when I navigate to https://blah.mywebsite.com/strapi/admin, the HTML resolves, but I get a blank page. Looking at the browser console I see:
GET blah.mywebsite.com/strapi/admin/runtime~main.67ca8ce7.js net::ERR_ABORTED 404
GET blah.mywebsite.com/strapi/admin/main.57d09928.js net::ERR_ABORTED 404
So it looks like the build JS bundle files aren't being served by my server.
Looking at the pm2 logs for my strapi node instance I only see:
[2022-09-11 18:45:03.145] http: GET /admin/ (3 ms) 200
So it looks like the requests for the JS files aren't hitting the Strapi node process - which leads me to believe Nginx isn't passing on the GET requests for the JS files...
How do I solve this?
I mentioned it in https://stackoverflow.com/a/75129704/4300071 a while ago.
You must add a slash at the end of the url in config/server.js:
module.exports = ({ env }) => ({
  host: env("HOST", "0.0.0.0"),
  port: env.int("PORT", 1337),
  url: "https://blah.mywebsite.com/strapi/",
  app: {
    keys: env.array("APP_KEYS"),
  },
});
After that, run npm run build, and it should work.
Svelte config:
import { sveltekit } from '@sveltejs/kit/vite';
import host from 'vite-plugin-host';

/** @type {import('vite').UserConfig} */
const config = {
  plugins: [sveltekit(), host()]
};

export default config;
NGINX config:
server {
    listen 80;
    listen [::]:80;
    server_name customdomain.com www.customdomain.com;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
Every time I open the webpage at customdomain.com it starts reloading every second.
I have no idea what the problem is.
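For reference, a reload loop like this is what you would expect if what PM2 is serving on port 3000 is the Vite dev server, whose HMR websocket keeps failing through the proxy and forces reconnect reloads, rather than a production build. A minimal production sketch, assuming @sveltejs/adapter-node is installed:

// svelte.config.js -- a sketch assuming @sveltejs/adapter-node is installed
import adapter from '@sveltejs/adapter-node';

/** @type {import('@sveltejs/kit').Config} */
const config = {
  kit: {
    adapter: adapter()
  }
};

export default config;

After npm run build, running node build starts a plain Node server on port 3000 by default, which matches the proxy_pass above and needs no HMR socket.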
I'm trying to implement a system comprising nginx, daphne and gunicorn. So far I have gotten gunicorn to work; however, I'm facing an issue connecting the websockets for daphne. The issue does not arise in development mode, only when I use nginx. This is the error from my console:
reconnecting-websocket.js:199 WebSocket connection to 'ws://192.168.8.31/' failed: Error during WebSocket handshake: Unexpected response code: 200
This is my nginx config file:
upstream crm_server {
    server unix:/home/user/project/venv/run/gunicorn.sock fail_timeout=0;
}

upstream channels-backend {
    server localhost:8001;
}

server {
    listen 80;

    # add here the ip address of your server
    # or a domain pointing to that ip (like example.com or www.example.com)
    server_name 192.168.8.31;

    keepalive_timeout 5;
    client_max_body_size 4G;

    access_log /home/user/project/venv/logs/nginx-access.log;
    error_log /home/user/project/venv/logs/nginx-error.log;

    location /staticfiles/ {
        root /home/user/project/crm/staticfiles/;
    }

    # checks for static file, if not found proxy to app
    location / {
        try_files $uri @proxy_to_app;
    }

    location /ws/ {
        try_files $uri @proxy_to_ws;
    }

    location @proxy_to_ws {
        include proxy_params;
        proxy_pass http://channels-backend;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $server_name;
    }

    location @proxy_to_app {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_buffering off;
        proxy_redirect off;
        proxy_pass http://crm_server;
    }
}
This is my front end that's generating the socket:
<script type="text/javascript">
    var loc = window.location
    var wsStart = 'ws://'
    // location.protocol includes the trailing colon, e.g. 'https:'
    if (loc.protocol == 'https:') {
        wsStart = 'wss://'
    }
    var endpoint = wsStart + loc.host + loc.pathname
    var socket = new ReconnectingWebSocket(endpoint)
</script>
This is my routing:
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import path
from rnd.consumers import NotificationConsumer
from django.conf.urls import url
from channels.auth import AuthMiddlewareStack
from channels.security.websocket import AllowedHostsOriginValidator, OriginValidator

application = ProtocolTypeRouter({
    "websocket": AllowedHostsOriginValidator(
        AuthMiddlewareStack(
            URLRouter(
                [
                    path('', NotificationConsumer),
                ]
            )
        )
    )
})
I would greatly appreciate any form of feedback!
When you proxy pass to open a WebSocket connection you need to proxy all of the WebSocket-specific HTTP headers (https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism#WebSocket-specific_headers). You will also need to pass the Origin header, since you are using AllowedHostsOriginValidator.
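As a sketch, grafted onto the @proxy_to_ws block from the question (the upgrade headers are hop-by-hop and must be re-added explicitly; Origin is set here so the validator sees what the browser sent):

location @proxy_to_ws {
    proxy_pass http://channels-backend;
    proxy_http_version 1.1;
    # hop-by-hop websocket upgrade headers, re-added explicitly
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    # forward the browser's Origin so AllowedHostsOriginValidator can accept it
    proxy_set_header Origin $http_origin;
    proxy_set_header Host $host;
}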
--
Also, to debug these I suggest using an API client directly rather than the browser. Paw or Insomnia can be used to hit the API and check that your Nginx config is routing to Channels and not to Gunicorn.
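A small Node script using the ws package works for this too; a sketch (the URL and Origin values are placeholders for this setup):

// Sketch: exercise the websocket handshake through nginx with the ws package.
const WebSocket = require('ws');

const socket = new WebSocket('ws://192.168.8.31/', {
  // send an Origin that AllowedHostsOriginValidator will accept
  headers: { Origin: 'http://192.168.8.31' }
});

socket.on('open', () => {
  console.log('handshake succeeded');
  socket.close();
});

// fires when the server answers with something other than 101;
// e.g. a 200 here means nginx routed the request to Gunicorn, not Daphne
socket.on('unexpected-response', (req, res) => {
  console.error('handshake failed with status', res.statusCode);
});

socket.on('error', (err) => console.error(err.message));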
I have installed Parse Server and Parse Dashboard on an AWS EC2 instance (Ubuntu 14.04). I am using the pm2 process manager to configure and run both applications, and nginx to serve as a proxy server.
Both applications are working fine and I can access them from the client. The problem, however, is that when I access Parse Dashboard (which internally performs calls to the local Parse Server), the majority of the POST commands are returned with a 502 Bad Gateway error.
After some investigation I suspect pm2 is the problem, as it keeps restarting the applications after some time, since all the POST commands are executed at the same time. I set the max_memory_restart parameter to 500M and killed & restarted the apps, but it made no difference.
I must mention I am using pm2 for the first time. So did I configure pm2 wrong, or am I missing something here? The pm2 error logs are empty.
Nginx error log shows the following:
2016/08/12 08:39:56 [error] 7792#0: *23 connect() failed (111: Connection refused) while connecting to upstream, client: xxx, server: xxx, request: "POST /parse/classes/AccountingDrawer HTTP/1.1", upstream: "http://127.0.0.1:1337/parse/classes/AccountingDrawer", host: "xxx", referrer: "https://xxx/dashboard/apps/myapp/browser/_Role"
Nginx default config:
server {
    listen 443;
    server_name xxx;

    root /usr/share/nginx/html;
    index index.html index.htm;

    # log files
    access_log /var/log/nginx/parse.access.log;
    error_log /var/log/nginx/parse.error.log;

    ssl on;
    # Use certificate and key provided by Let's Encrypt:
    ssl_certificate /etc/letsencrypt/live/xxx/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/xxx/privkey.pem;
    ssl_session_timeout 5m;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';

    # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
    ssl_dhparam /etc/nginx/ssl/dhparam.pem;

    # Pass requests for /parse/ to Parse Server instance at localhost:1337
    location /parse/ {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://localhost:1337/parse/;
        proxy_ssl_session_reuse off;
        proxy_set_header Host $http_host;
        proxy_redirect off;
    }

    # Pass requests for /dashboard/ to Parse Dashboard instance at localhost:4040
    location /dashboard/ {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://localhost:4040/dashboard/;
        proxy_ssl_session_reuse off;
        proxy_set_header Host $http_host;
        proxy_redirect off;
    }

    location / {
        try_files $uri $uri/ =404;
    }
}
pm2 ecosystem.json:
{
  "apps": [{
    "name": "parse-server-wrapper",
    "script": "/usr/bin/parse-server",
    "watch": true,
    "merge_logs": true,
    "cwd": "/home/parse",
    "env": {
      ...
    },
    "max_memory_restart": "500M",
    "instances": 2,
    "exec_interpreter": "node",
    "exec_mode": "cluster"
  },
  {
    "name": "parse-dashboard-wrapper",
    "script": "/usr/bin/parse-dashboard",
    "watch": true,
    "merge_logs": true,
    "cwd": "/home/parse",
    "max_memory_restart": "500M",
    "env": {
      "HOST": "localhost",
      "PORT": "4040",
      "MOUNT_PATH": "/dashboard",
      "PARSE_DASHBOARD_ALLOW_INSECURE_HTTP": 1,
      ...
    }
  }]
}
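One detail in this file that fits the restart symptom: "watch": true makes PM2 restart an app whenever anything under its cwd changes, so a process writing logs or other files into /home/parse can trigger periodic restarts, and every restart window produces exactly these 502s from nginx. A sketch of the same server entry with watching disabled (a hypothetical adaptation, otherwise unchanged):

{
  "name": "parse-server-wrapper",
  "script": "/usr/bin/parse-server",
  "watch": false,
  "merge_logs": true,
  "cwd": "/home/parse",
  "max_memory_restart": "500M",
  "instances": 2,
  "exec_interpreter": "node",
  "exec_mode": "cluster"
}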