Socket.io + Nginx + Redis - client not receiving messages from server

I'm quite confused. I was testing this application on localhost with MAMP and everything was working fine, but when I moved it to the development server the client stopped receiving messages from the server. I'm using it inside a Vue.js component.
On the client I've logged socket.on('connect') and the second check below returns true.
This is my code:
Server
var server = require('http').Server();
var io = require('socket.io')(server);
var Redis = require('ioredis');
var redis = new Redis();

redis.subscribe('chat');

redis.on('message', (channel, message) => {
    message = JSON.parse(message);
    // channel:event:to_id:to_type - message.data
    io.emit(channel + ':' + message.event + ':' + message.to_id + ':' + message.to_type, message.data);
    console.log(message + ' ' + channel);
});

server.listen('6001');
Client
var io = require('socket.io-client')
var socket = io.connect('http://localhost:6001', {reconnect: true});

...

mounted() {
    console.log('check 1', socket.connected);

    socket.on('connect', function() {
        console.log('check 2', socket.connected);
    });

    socket.on('chat:newMessage:' + this.fromid + ':' + this.fromtype, (data) => {
        console.log('new message');
        var message = {
            'msg': data.message,
            'type': 'received',
            'color': 'green',
            'pos': 'justify-content-start',
        }
        this.messages.push(message);
    });
}
Nginx conf
upstream node1 {
    server 127.0.0.1:6001;
}

server {
    listen 80;
    server_name localhost;

    location / {
        # Configure proxy to pass data to upstream node1
        proxy_pass http://node1/socket.io/;
        # HTTP version 1.1 is needed for sockets
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
thanks a lot!

OK, I've found a solution: I was connecting the client to localhost.
I've replaced:
var socket = io.connect('http://localhost:6001', {reconnect: true});
With:
var socket = io.connect('http://'+ window.location.hostname +':6001', {reconnect: true});
And now everything is working fine.
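For context, the server above only relays whatever is published on the Redis `chat` channel, so the publishing side has to send a JSON payload with `event`, `to_id`, `to_type` and `data` fields. A minimal sketch of such a publisher in Node with ioredis (the field values are assumptions chosen to match the event name the client listens on):

var Redis = require('ioredis');
var pub = new Redis();

// Publish a message that the socket.io bridge will re-emit as
// 'chat:newMessage:<to_id>:<to_type>' with `data` as the payload.
pub.publish('chat', JSON.stringify({
    event: 'newMessage',
    to_id: 42,           // hypothetical recipient id
    to_type: 'user',     // hypothetical recipient type
    data: { message: 'hello' }
}));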

Related

Socket.io scaling issue

I have 3 Node.js apps running on a GCP Compute Engine instance (2 vCPUs, 2 GB RAM, Ubuntu 20.04) behind an Nginx reverse proxy. One of them is a socket.io chat server. The socket.io app uses @socket.io/cluster-adapter to utilize all available CPU cores.
I followed this tutorial to update the Linux settings to get the maximum number of connections. Here is the output of the ulimit command:
core file size (blocks, -c) 0
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 7856
max locked memory (kbytes, -l) 65536
max memory size (kbytes, -m) unlimited
open files (-n) 500000
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) 8192
cpu time (seconds, -t) unlimited
max user processes (-u) 7856
virtual memory (kbytes, -v) unlimited
cat /proc/sys/fs/file-max
2097152
/etc/nginx/nginx.conf
user www-data;
worker_processes auto;
worker_rlimit_nofile 65535;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 30000;
    # multi_accept on;
}
...
/etc/nginx/sites-available/default
...
# socket.io part
location /socket.io/ {
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_set_header X-NginX-Proxy false;

    proxy_pass http://localhost:3001/socket.io/;
    proxy_redirect off;

    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
}
...
My chat server code,
const os = require("os");
const cluster = require("cluster");
const http = require("http");
const { Server } = require("socket.io");
const { setupMaster, setupWorker } = require("@socket.io/sticky");
const { createAdapter, setupPrimary } = require("@socket.io/cluster-adapter");
const { response } = require("express");

const PORT = process.env.PORT || 3001;
const numberOfCPUs = os.cpus().length || 2;

if (cluster.isPrimary) {
    const httpServer = http.createServer();

    // setup sticky sessions
    setupMaster(httpServer, {
        loadBalancingMethod: "least-connection", // either "random", "round-robin" or "least-connection"
    });

    // setup connections between the workers
    setupPrimary();

    cluster.setupPrimary({
        serialization: "advanced",
    });

    httpServer.listen(PORT);

    for (let i = 0; i < numberOfCPUs; i++) {
        cluster.fork();
    }

    cluster.on("exit", (worker) => {
        console.log(`Worker ${worker.process.pid} died`);
        cluster.fork();
    });
}
// worker process
else {
    const express = require("express");
    const app = express();
    const Chat = require("./models/chat");
    const mongoose = require("mongoose");
    const request = require("request"); // todo remove
    var admin = require("firebase-admin");
    var serviceAccount = require("./serviceAccountKey.json");

    const httpServer = http.createServer(app);
    const io = require("socket.io")(httpServer, {
        cors: {
            origin: "*",
            methods: ["GET", "POST"],
        },
        transports: "websocket",
    });

    mongoose.connect(process.env.DB_URL, {
        authSource: "admin",
        user: process.env.DB_USERNAME,
        pass: process.env.DB_PASSWORD,
    });

    app.use(express.json());

    app.get("/", (req, res) => {
        res
            .status(200)
            .json({ status: "success", message: "Hello, I'm your chat server.." });
    });

    // use the cluster adapter
    io.adapter(createAdapter());

    // setup connection with the primary process
    setupWorker(io);

    io.on("connection", (socket) => {
        activityLog(
            "Num of connected users: " + io.engine.clientsCount + " (per CPU)"
        );
        ...
        // chat implementations
    });
}
Load test client code,
const { io } = require("socket.io-client");

const URL = //"https://myserver.com/";
const MAX_CLIENTS = 6000;
const CLIENT_CREATION_INTERVAL_IN_MS = 100;
const EMIT_INTERVAL_IN_MS = 300; //1000;

let clientCount = 0;
let lastReport = new Date().getTime();
let packetsSinceLastReport = 0;

const createClient = () => {
    const transports = ["websocket"];

    const socket = io(URL, {
        transports,
    });

    setInterval(() => {
        socket.emit("chat_event", {});
    }, EMIT_INTERVAL_IN_MS);

    socket.on("chat_event", (e) => {
        packetsSinceLastReport++;
    });

    socket.on("disconnect", (reason) => {
        console.log(`disconnect due to ${reason}`);
    });

    if (++clientCount < MAX_CLIENTS) {
        setTimeout(createClient, CLIENT_CREATION_INTERVAL_IN_MS);
    }
};

createClient();

const printReport = () => {
    const now = new Date().getTime();
    const durationSinceLastReport = (now - lastReport) / 1000;
    const packetsPerSeconds = (
        packetsSinceLastReport / durationSinceLastReport
    ).toFixed(2);

    console.log(
        `client count: ${clientCount} ; average packets received per second: ${packetsPerSeconds}`
    );

    packetsSinceLastReport = 0;
    lastReport = now;
};

setInterval(printReport, 5000);
As you can see from the code, I'm only using the websocket transport, so it should be able to serve up to 8000 connections as per this StackOverflow answer. But when I run the load test, the server becomes unstable after 1600 connections, CPU usage goes up to 90%, and memory usage up to 70%. I couldn't find anything in the Nginx error log. How can I increase the number of connections to at least 8000? Should I upgrade the instance or change any Linux settings? Any help would be appreciated.
UPDATE
I removed everything related to clustering and ran it again as a regular single-threaded Node.js app. This time the result was a little better: 2800 stable connections (CPU usage 40%, memory usage 50%). Please note that I'm not performing any disk I/O during the test.
You are using the cluster adapter, which is not meant to be used with sticky sessions. You should use the Redis adapter instead. Each worker will connect to Redis and will be able to communicate with each other. You can also use the Redis adapter with sticky sessions, but you will need to use the Redis adapter on the primary process as well.
To answer your other question:
"if I remove sticky session and use only websocket, will the workers be able to communicate to each other?"
Yes, the workers will be able to communicate with each other. I don't think it's a good idea to use sticky sessions for a chat application. You should use a pub/sub system like Redis or NATS to communicate between the workers. For example, you can use Redis to publish a message to a channel, and the other workers will receive the message and send it to the client.
When you use sticky sessions, each worker will be connected to a single client. So, if you have 4 workers, you will be able to serve 4 clients at the same time. If you use the cluster adapter, each worker will be connected to all the clients. So, if you have 4 workers, you will be able to serve 4 * 4 clients at the same time. So, you will be able to serve more clients with the cluster adapter.
Example of using the Redis adapter:
const { createAdapter } = require("socket.io-redis");
const io = require("socket.io")(httpServer, {
cors: {
origin: "*",
methods: ["GET", "POST"],
},
transports: "websocket",
});
io.adapter(createAdapter("redis://localhost:6379"));
Example of using the NATS adapter:
const { createAdapter } = require("socket.io-nats");
const io = require("socket.io")(httpServer, {
cors: {
origin: "*",
methods: ["GET", "POST"],
},
transports: "websocket",
});
io.adapter(createAdapter("nats://localhost:4222"));
Try both options and see which works best for you.
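Either way, once an adapter is in place the workers don't need any extra wiring to exchange chat messages: a broadcast from one worker is forwarded to the clients connected to the other workers. A minimal sketch (the "chat_event" name is taken from the load-test client above; the handler body is an assumption):

io.on("connection", (socket) => {
    socket.on("chat_event", (msg) => {
        // io.emit() goes through the adapter, so clients attached
        // to every worker receive the event, not just this one.
        io.emit("chat_event", msg);
    });
});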

CORS issue with Server sent event with Fastify-sse

I'm using a Fastify server to send SSE events to a React front-end.
While everything worked well locally, I'm having issues once deployed behind Nginx. The front-end and the server aren't on the same domain, and although I set the CORS origin to "*" on the server and the other calls resolve without issue, for the server-sent-events endpoint only I get:
Cross-Origin Request Blocked: The Same Origin Policy disallows reading the remote resource at https://example.com/events. (Reason: CORS request did not succeed). Status code: (null).
Here's how Fastify is configured, using @fastify/cors and fastify-sse-v2:
import fastifyCors from "@fastify/cors";
import FastifySSEPlugin from "fastify-sse-v2";

// ...
await this.instance.register(FastifySSEPlugin, { logLevel: "debug" });
await this.instance.after();
await this.instance.register(fastifyCors, { origin: "*" });
Then sending events based on postgres pubsub with:
await pubsub.addChannel(TxUpdateChannel);

reply.sse(
    (async function* () {
        for await (const [event] of on(pubsub, TxUpdateChannel)) {
            yield {
                event: event.name,
                data: JSON.stringify(event.data),
            };
        }
    })()
);
On the front-end I use eventsource so that I can add Authorization headers:
import EventSource from "eventsource";

// ...
const source = new EventSource(`${SERVER_URL}/transaction/events`, {
    headers: {
        'Authorization': `Bearer ${jwt}`,
    },
});

source.onmessage = (event) => {
    console.log('got message', event)
    getUserData()
}
source.onopen = (event) => {
    console.log('---> open', event)
}
source.onerror = (event) => {
    console.error('Event error', event)
}
The problem was in the nginx configuration. Thanks to EventSource / Server-Sent Events through Nginx, I solved it by placing the following into the relevant location block of the nginx conf:
proxy_set_header Connection '';
proxy_http_version 1.1;
chunked_transfer_encoding off;
proxy_buffering off;
proxy_cache off;
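For completeness, here is roughly what the resulting location block might look like (the /transaction/events path and the upstream port are assumptions based on the front-end code above):

location /transaction/events {
    proxy_pass http://localhost:3000;   # hypothetical Fastify upstream
    proxy_set_header Connection '';
    proxy_http_version 1.1;
    chunked_transfer_encoding off;
    proxy_buffering off;
    proxy_cache off;
}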

Why is Nginx truncating the gRPC streaming response?

I've asked this question before but decided to delete that old question and reformulate it along with a minimum reproducible example. The issue is that when I deploy my gunicorn web server behind nginx, the responses streamed from my Go server via gRPC get truncated. All details can be found in the repository. My nginx configuration for this site looks like this:
server {
    listen 80 default_server;
    server_name example.com;

    location / {
        #include proxy_params;
        proxy_pass http://localhost:5000;
        proxy_buffering off;
        chunked_transfer_encoding off;
    }
}
The code receiving and parsing the response on the front end looks like this:
<script>
    (async function(){
        const response = await fetch("{{ url_for('realtimedata') }}");
        const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
        while (true) {
            const {done, value} = await reader.read();
            if (done) break;
            try {
                console.log('Received', value);
                const rtd = JSON.parse(value);
                console.log('Parsed', rtd);
            } catch(err) {
                console.log(err);
            }
        }
    })()
</script>
Something to note regarding the data from the Go server: one service provides a data object with 96 fields and another provides data with 200 fields, which makes the incoming streamed responses variable in length (in terms of bytes).
I want to use gunicorn because I may have multiple listeners at the same time. Using gunicorn solved an issue where all the responses were making it to the web server but were being distributed among the active clients, so each client would get a different response but not all of them.
EDIT:
I've tried changing the response object size on the Go server to be the same for both services, but the truncation still happened, so variable length doesn't seem to be the issue. I've also tried uWSGI instead of gunicorn and the issue persists, even with uwsgi_buffering off;.
UPDATE:
I ran the minimum reproducible example with Apache2 instead of Nginx and I'm getting the same issue, so maybe the problem is something else.
Looking at your Python code, it seems like pushing the data from the backend to the frontend would be better done with websockets. I've rewritten your backend to use FastAPI instead of Flask and modified the nginx configuration.
main.py
import asyncio
import dependencies.rpc_pb2 as r
import dependencies.rpc_pb2_grpc as rpc
from fastapi import FastAPI, WebSocket, Request
from fastapi.templating import Jinja2Templates
import grpc
import json
import os

os.environ["GRPC_SSL_CIPHER_SUITES"] = 'HIGH+ECDSA'

app = FastAPI()
templates = Jinja2Templates(directory="templates")

server_addr = "localhost"
server_port = 3567

@app.get("/")
def read_root(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})

def parseRtd(rtd):
    rtdDict = {}
    rtdDict["source"] = rtd.source
    rtdDict["is_scanning"] = rtd.is_scanning
    rtdDict["timestamp"] = int(rtd.timestamp)
    rtdDict["data"] = {}
    for key, v in rtd.data.items():
        rtdDict["data"][int(key)] = {"name": v.name, "value": v.value}
    return rtdDict

def get_rtd():
    channel = grpc.insecure_channel(f"{server_addr}:{server_port}")
    stub = rpc.RpcServiceStub(channel)
    for rtd in stub.SubscribeDataStream(r.SubscribeDataRequest()):
        yield parseRtd(rtd)

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    await websocket.send_json({"test": "this is a test"})
    it = get_rtd()
    while True:
        await asyncio.sleep(0.1)
        payload = next(it)
        await websocket.send_json(payload)
index.html
<html>
    <head>
        <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.4.0/socket.io.js" integrity="sha512-nYuHvSAhY5lFZ4ixSViOwsEKFvlxHMU2NHts1ILuJgOS6ptUmAGt/0i5czIgMOahKZ6JN84YFDA+mCdky7dD8A==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
    </head>
    <body>
        <script>
            var ws = new WebSocket("ws://localhost:5000/ws");
            ws.onopen = function () {
                console.log("websocket was open");
            };
            ws.onclose = () => {
                console.log("Websocket was closed!");
            }
            ws.onerror = (error) => {
                console.error("Websocket error: " + JSON.stringify(error));
            };
            ws.onmessage = (message) => {
                console.log("MSG: " + message.data);
            };
        </script>
    </body>
</html>
webserver.conf
server {
    listen 80 default_server;
    server_name example.com;

    location / {
        include proxy_params;
        proxy_pass http://localhost:5000;
    }

    location /ws {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_pass http://localhost:5000;
    }
}
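One assumption worth spelling out, since it isn't shown above: FastAPI is an ASGI application, so behind this nginx config it would be served on port 5000 by an ASGI server such as uvicorn (or gunicorn with uvicorn workers), for example:

uvicorn main:app --host 127.0.0.1 --port 5000
# or, keeping gunicorn as the process manager:
gunicorn main:app -k uvicorn.workers.UvicornWorker --bind 127.0.0.1:5000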

getting net::ERR_CONNECTION_REFUSED error on production

I have deployed my React app, which also has a backend. I have started both the front end and the backend on my server using the npm start command. The front end is working correctly but the backend is not (it works perfectly on localhost).
The front end is served on port 3000 and the backend on port 9000.
This is my nginx config
server {
    server_name myservername.com www.myservername.com;

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}

server {
    if ($host = www.myservername.com) {
        return 301 https://$host$request_uri;
    }

    if ($host = myservername.com) {
        return 301 https://$host$request_uri;
    }

    listen 80 default_server;
    listen [::]:80 default_server;
    server_name myservername.com www.myservername.com;
    return 404;
}
Ports that are in use: (screenshot not included)
This is the error I'm getting in the console:
POST http://localhost:9000/bot net::ERR_CONNECTION_REFUSED
How can I fix this issue?
The issue is that you are trying to do this fetch:
fetch("http://localhost:9000/bot", {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify(data)
}).then(res => res.text())
.then(res => {
res = JSON.parse(res);
this.setState({ apiResponse: res.response, apiIntent: res.intent}, () => {this.setBotMsg()});
console.log(`This is the response: ${res.response}, ${res.intent}`);
;
});
This issues a call to the local loopback adapter on the visitor's own computer, port 9000, which is not what you want. Use your server IP or domain name instead of localhost:
fetch("http://myservername.com:9000/bot", {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify(data)
}).then(res => res.text())
.then(res => {
res = JSON.parse(res);
this.setState({ apiResponse: res.response, apiIntent: res.intent}, () => {this.setBotMsg()});
console.log(`This is the response: ${res.response}, ${res.intent}`);
;
});
I would use NGINX to proxy the request to your backend and not expose port 9000 directly.
fetch("https://myservername.com/bot", {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify(data)
}).then(res => res.text())
.then(res => {
res = JSON.parse(res);
this.setState({ apiResponse: res.response, apiIntent: res.intent}, () => {this.setBotMsg()});
console.log(`This is the response: ${res.response}, ${res.intent}`);
;
});
The nginx configuration depends on what you want to do: subdomain or location? A subdomain could be something like bot.myserver.com, a location something like myserver.com/bot.
server {
    listen 443 ssl;
    .....
    location /bot {
        proxy_pass http://localhost:9000;
        proxy_set_header Host $host;
        ....
    }
}
Additional note: I have updated the configuration to use https from the frontend.
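Related to not exposing port 9000: a small sketch of how the Express backend could be started (an assumption, since that code isn't shown in the question). Binding it to the loopback interface keeps the port unreachable from outside while nginx can still proxy to it:

const express = require("express");
const app = express();
app.use(express.json());

// hypothetical handler matching the fields the front-end reads
app.post("/bot", (req, res) => {
    res.json({ response: "some reply", intent: "some_intent" });
});

// listen only on the loopback interface so port 9000 is not exposed publicly
app.listen(9000, "127.0.0.1");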

How to turn off buffering on Nginx Server for Server sent event

Problem: Nginx is buffering the server-sent events (SSE).
Setup: Node v12.13.1, Nginx 1.16.1, Chrome v80
Scenario:
I tried to turn off buffering with proxy_buffering off; and even added an "X-Accel-Buffering": "no" header to the server response, however nginx is still buffering all SSE messages. If I stop the Node server or restart nginx, all the SSE messages are delivered to the client in bulk. I've tried a lot of things but don't know what I'm missing.
Nginx Config file :
events {
    worker_connections 1024;
}

http {
    include mime.types;
    sendfile on;
    keepalive_timeout 65;

    server {
        listen 4200;
        server_name localhost;

        location / {
            proxy_set_header Connection '';
            proxy_http_version 1.1;
            chunked_transfer_encoding off;
            proxy_buffering off;
            proxy_cache off;
            proxy_pass http://localhost:8700;
        }
    }
}
Node Server :
var express = require('express');
var app = express();

var template =
    `<!DOCTYPE html> <html> <body>
        <script type="text/javascript">
            var source = new EventSource("/events/");
            source.onmessage = function(e) {
                document.body.innerHTML += e.data + "<br>";
            };
        </script>
    </body> </html>`;

app.get('/', function (req, res) {
    res.send(template); // <- Return the static template above
});

var clientId = 0;
var clients = {}; // <- Keep a map of attached clients

// Called once for each new client. Note, this response is left open!
app.get('/events/', function (req, res) {
    req.socket.setTimeout(Number.MAX_VALUE);
    res.writeHead(200, {
        'Content-Type': 'text/event-stream', // <- Important headers
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'X-Accel-Buffering': 'no'
    });
    res.write('\n');
    (function (clientId) {
        clients[clientId] = res; // <- Add this client to those we consider "attached"
        req.on("close", function () {
            delete clients[clientId]
        }); // <- Remove this client when he disconnects
    })(++clientId)
});

setInterval(function () {
    var msg = Math.random();
    console.log("Clients: " + Object.keys(clients) + " <- " + msg);
    for (clientId in clients) {
        clients[clientId].write("data: " + msg + "\n\n"); // <- Push a message to a single attached client
    }
}, 2000);

app.listen(process.env.PORT || 8700);
