Configuring Secure WebSockets on AWS Elastic Beanstalk - nginx

I've deployed two Node.js apps on AWS Elastic Beanstalk: one is the frontend, built with React and run with serve; the other is an MQTT broker with a WebSocket handler.
The load balancer is nginx 1.12.1, with this configuration (placed inside the .ebextensions folder of the backend project):
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream websocket {
server 127.0.0.1:5000;
}
server {
listen 8080;
if ($time_iso8601 ~ "^(\d{4})-(\d{2})-(\d{2})T(\d{2})") {
set $year $1;
set $month $2;
set $day $3;
set $hour $4;
}
access_log /var/log/nginx/healthd/application.log.$year-$month-$day-$hour healthd;
access_log /var/log/nginx/access.log main;
large_client_header_buffers 8 32k;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass "http://127.0.0.1:3003";
proxy_redirect off;
# Socket.IO Support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass_request_headers on;
}
location /subscriptions {
proxy_pass http://websocket;
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
gzip on;
gzip_comp_level 4;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
}
The frontend should be able to connect to wss://mqtt_url/subscriptions, but whatever configuration I use, I always get "WebSocket is closed before the connection is established." The same configuration works for non-secure (ws://) WebSocket connections.
The server waiting for connections is just a simple HTTP server like this:
// Assumed imports: createServer from Node's http module, SubscriptionServer from
// subscriptions-transport-ws, and execute/subscribe from graphql.
const { createServer } = require('http');
const { SubscriptionServer } = require('subscriptions-transport-ws');
const { execute, subscribe } = require('graphql');

const server = createServer();
server.listen(5000, '0.0.0.0', () =>
  new SubscriptionServer({
    execute,
    subscribe,
    schema, // the GraphQL schema, defined elsewhere
    onConnect: async (connectionParams) => {
    },
  }, {
    server,
    path: '/subscriptions',
  }));
The Beanstalk load balancer is configured as follows:
Port: TCP on port 80
Secure port: SSL on port 443
Health check: TCP pings on port 80
Cross-zone load balancing is enabled
Connection draining is enabled with a 200-second timeout
Searching around, all I could find is that it's good to select TCP/SSL as the protocol, but apart from that it's not clear how to configure WSS here.
Any suggestion would be appreciated!
Thanks.

Try adding this to your /subscriptions location block:
proxy_read_timeout 86400;
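nginx's default proxy_read_timeout is 60 seconds, so a WebSocket that stays idle for longer than that is closed by the proxy; raising it keeps long-lived connections open. Applied to the /subscriptions block from the question, the change would look roughly like this (a sketch, not a full config):
location /subscriptions {
    proxy_pass http://websocket;
    proxy_http_version 1.1;
    proxy_set_header Host $http_host;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    # keep idle WebSocket connections open for up to 24 hours
    proxy_read_timeout 86400;
}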

Related

Gateway timeout when adding username and password

I'm trying to add basic authentication to an nginx reverse proxy that sits in front of a Nuxt.js app.
I've configured nginx like so:
server {
server_name <url>;
auth_basic "Restricted Content";
auth_basic_user_file /etc/nginx/.htpasswd;
gzip on;
gzip_types text/plain application/xml text/css application/javascript;
gzip_min_length 1000;
location / {
proxy_pass http://127.0.0.1:3222;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
listen 443 ssl; # managed by Certbot
But it hangs. I've also tried putting the auth directives in the location section, but that hangs too. What am I missing?
The .htpasswd file exists and contains the correct details.
I have also tried changing
upstream backend {
server backend1.example.com weight=5;
server backend2.example.com:8080;
server unix:/tmp/backend3;
server backup1.example.com:8080 backup;
server backup2.example.com:8080 backup;
}
server {
location / {
proxy_pass http://backend;
}
}
to something similar to this, but still no luck.
EDIT: I have gone through various posts and adjusted a number of the nginx proxy_pass timeout settings, but I have had no success.
upstream backend {
server 127.0.0.1:3222;
keepalive 7000;
}
and
auth_basic "Restricted Content";
auth_basic_user_file conf.d/.htpasswd;
proxy_pass http://backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 7200s;
proxy_connect_timeout 75s;
proxy_buffer_size 8k;
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
}
The only error I can see is in my pm2 logs
ERROR Request failed with status code 401
at createError (node_modules/axios/lib/core/createError.js:16:15)
at settle (node_modules/axios/lib/core/settle.js:17:12)
at IncomingMessage.handleStreamEnd (node_modules/axios/lib/adapters/http.js:269:11)
at IncomingMessage.emit (events.js:327:22)
at endReadableNT (_stream_readable.js:1220:12)
at processTicksAndRejections (internal/process/task_queues.js:84:21)
Thanks to all that have helped so far.

NGINX shows "bad gateway" when upstream server restart and not back to normal

Every time I restart the upstream server, NGINX shows "bad gateway", which is fine, but once the upstream server is back up, nginx does not recover automatically and I need to restart it (nginx) manually.
Is there an option to make nginx check every few seconds whether the upstream is back to normal?
upstream core {
server core:3001;
}
server {
server_name core.mydomain.com corestg.mydomain.com www.core.mydomain.com;
#listen 80;
#listen [::]:80;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript;
gzip_proxied any;
#gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
listen 443 ssl http2;
listen [::]:443 ssl http2;
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 5s;
server_tokens off;
ssl_certificate /etc/ssl/domain.crt;
ssl_certificate_key /etc/ssl/domain.rsa;
error_log /var/log/nginx/error.log;
access_log /var/log/nginx/access.log;
location / {
proxy_ssl_session_reuse off;
proxy_pass http://core;
proxy_buffers 8 24k;
proxy_buffer_size 2k;
proxy_http_version 1.1;
proxy_ignore_headers X-Accel-Expires Expires Cache-Control;
proxy_ignore_headers Set-Cookie;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-NginX-Proxy true;
# proxy_set_header Host $http_host;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_cache_bypass $http_upgrade;
proxy_redirect off;
}
}
It seems that NGINX does not do this auto-recovery by default.
Changing the config part from:
upstream core {
server core:3001;
}
to:
upstream core {
server core:3001 max_fails=1 fail_timeout=1s;
server core:3001 max_fails=1 fail_timeout=1s;
}
did the trick. The duplication is not a mistake: nginx tries the first entry and, if it fails, tries the second one (and so on, in a circle).
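For context, max_fails is the number of failed attempts within fail_timeout that marks an upstream entry as unavailable, and fail_timeout also controls how long the entry stays marked down before nginx tries it again (the nginx defaults are 1 attempt and 10 seconds). A commented sketch of the block above:
upstream core {
    # one failed attempt marks this entry down for 1 second, then it is retried
    server core:3001 max_fails=1 fail_timeout=1s;
    # duplicate entry so requests can fall over to it while the other is marked down
    server core:3001 max_fails=1 fail_timeout=1s;
}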
My setup to test NGINX:
A Docker container simulating the backend, exposing port 9002:
afd9551abc54 nginx "/docker-entrypoint.…" About a minute ago Up 11 seconds 0.0.0.0:9002->80/tcp laughing_pike
NGINX configuration
# Defined upstream block.
upstream backend {
server 127.0.0.1:9002;
}
#Main Server block
server {
listen 80;
location / {
proxy_pass http://backend;
proxy_set_header Host $host;
}
}
Stopping the container results in 502 Bad Gateway. Starting the container again, without restarting or reloading NGINX, sends traffic to the upstream server again. So basically this should just work!

Google App Engine Docker Container 502 Bad Gateway

I am trying to deploy my Docker image to Google App Engine. I successfully managed to build the image, push it to GCR, and deploy it using gcloud app deploy --image 'link-to-image-on-gcr'.
But when accessing the application I get a 502 Bad Gateway. I SSHed into the server, checked the logs of the nginx container, and found the log entry below:
2020/05/04 00:52:50 [error] 33#33: *127 connect() failed (111: Connection refused) while connecting to upstream, client: 74.125.24.153, server: , request: "GET /wp-login.php HTTP/1.1", upstream: "http://172.17.0.1:8080/wp-login.php", host: "myappengineservice-myrepo.ue.r.appspot.com"
My Docker image is just a single container (it's a WordPress image). When deployed to App Engine, I suppose App Engine starts my container in Docker and exposes the frontend via an Nginx proxy, so all requests are routed through the Nginx proxy.
After playing around for a while, I edited the Nginx configuration file and came across this line
location / {
proxy_pass http://app_server;
I edited this and replaced it with my WordPress container's internal IP address (proxy_pass http://172.17.0.6;).
And voilà, it seemed to work: requests are now being routed to my Docker container.
This is obviously a temporary fix, though. How can I make it permanent, and does anyone have an idea why this is happening?
app.yaml
runtime: custom
service: my-wordpress
env: flex
nginx.conf (inside the Nginx container)
daemon off;
worker_processes auto;
events {
worker_connections 4096;
multi_accept on;
}
http {
include mime.types;
server_tokens off;
variables_hash_max_size 2048;
# set max body size to 32m as appengine supports.
client_max_body_size 32m;
tcp_nodelay on;
tcp_nopush on;
underscores_in_headers on;
# GCLB uses a 10 minutes keep-alive timeout. Setting it to a bit more here
# to avoid a race condition between the two timeouts.
keepalive_timeout 650;
# Effectively unlimited number of keepalive requests in the case of GAE flex.
keepalive_requests 4294967295;
upstream app_server {
keepalive 192;
server gaeapp:8080;
}
geo $source_type {
default ext;
127.0.0.0/8 lo;
169.254.0.0/16 sb;
35.191.0.0/16 lb;
130.211.0.0/22 lb;
172.16.0.0/12 do;
}
map $http_upgrade $ws_connection_header_value {
default "";
websocket upgrade;
}
# ngx_http_realip_module gets the second IP address from the last of the X-Forwarded-For header
# X-Forwarded-For: [USER REQUEST PROVIDED X-F-F.]USER-IP.GCLB_IP
set_real_ip_from 0.0.0.0/0;
set_real_ip_from 0::/0;
real_ip_header X-Forwarded-For;
iap_jwt_verify off;
iap_jwt_verify_project_number 96882395728;
iap_jwt_verify_app_id my-project-id;
iap_jwt_verify_key_file /iap_watcher/iap_verify_keys.txt;
iap_jwt_verify_iap_state_file /iap_watcher/iap_state;
iap_jwt_verify_state_cache_time_sec 300;
iap_jwt_verify_key_cache_time_sec 43200;
iap_jwt_verify_logs_only on;
server {
iap_jwt_verify on;
# self signed ssl for load balancer traffic
listen 8443 default_server ssl;
ssl_certificate /etc/ssl/localcerts/lb.crt;
ssl_certificate_key /etc/ssl/localcerts/lb.key;
ssl_protocols TLSv1.2;
ssl_ciphers EECDH+AES256:!SHA1;
ssl_prefer_server_ciphers on;
ssl_session_timeout 3h;
proxy_pass_header Server;
gzip on;
gzip_proxied any;
gzip_types text/html text/plain text/css text/xml text/javascript application/json application/javascript application/xml application/xml+rss application/protobuf application/x-protobuf;
gzip_vary on;
# Allow more space for request headers.
large_client_header_buffers 4 32k;
# Allow more space for response headers. These settings apply for response
# only, not requests which buffering is disabled below.
proxy_buffer_size 64k;
proxy_buffers 32 4k;
proxy_busy_buffers_size 72k;
# Explicitly set client buffer size matching nginx default.
client_body_buffer_size 16k;
# If version header present, make sure it's correct.
if ($http_x_appengine_version !~ '(?:^$)|(?:^my-wordpress:20200504t053100(?:\..*)?$)') {
return 444;
}
set $x_forwarded_for_test "";
# If request comes from sb, lo, or do, do not care about x-forwarded-for header.
if ($source_type !~ sb|lo|do) {
set $x_forwarded_for_test $http_x_forwarded_for;
}
# For local health checks only.
if ($http_x_google_vme_health_check = 1) {
set $x_forwarded_for_test "";
}
location / {
proxy_pass http://app_server;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $server_name;
proxy_send_timeout 3600s;
proxy_read_timeout 3600s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $ws_connection_header_value;
proxy_set_header X-AppEngine-Api-Ticket $http_x_appengine_api_ticket;
proxy_set_header X-AppEngine-Auth-Domain $http_x_appengine_auth_domain;
proxy_set_header X-AppEngine-BlobChunkSize $http_x_appengine_blobchunksize;
proxy_set_header X-AppEngine-BlobSize $http_x_appengine_blobsize;
proxy_set_header X-AppEngine-BlobUpload $http_x_appengine_blobupload;
proxy_set_header X-AppEngine-Cron $http_x_appengine_cron;
proxy_set_header X-AppEngine-Current-Namespace $http_x_appengine_current_namespace;
proxy_set_header X-AppEngine-Datacenter $http_x_appengine_datacenter;
proxy_set_header X-AppEngine-Default-Namespace $http_x_appengine_default_namespace;
proxy_set_header X-AppEngine-Default-Version-Hostname $http_x_appengine_default_version_hostname;
proxy_set_header X-AppEngine-Federated-Identity $http_x_appengine_federated_identity;
proxy_set_header X-AppEngine-Federated-Provider $http_x_appengine_federated_provider;
proxy_set_header X-AppEngine-Https $http_x_appengine_https;
proxy_set_header X-AppEngine-Inbound-AppId $http_x_appengine_inbound_appid;
proxy_set_header X-AppEngine-Inbound-User-Email $http_x_appengine_inbound_user_email;
proxy_set_header X-AppEngine-Inbound-User-Id $http_x_appengine_inbound_user_id;
proxy_set_header X-AppEngine-Inbound-User-Is-Admin $http_x_appengine_inbound_user_is_admin;
proxy_set_header X-AppEngine-QueueName $http_x_appengine_queuename;
proxy_set_header X-AppEngine-Request-Id-Hash $http_x_appengine_request_id_hash;
proxy_set_header X-AppEngine-Request-Log-Id $http_x_appengine_request_log_id;
proxy_set_header X-AppEngine-TaskETA $http_x_appengine_tasketa;
proxy_set_header X-AppEngine-TaskExecutionCount $http_x_appengine_taskexecutioncount;
proxy_set_header X-AppEngine-TaskName $http_x_appengine_taskname;
proxy_set_header X-AppEngine-TaskRetryCount $http_x_appengine_taskretrycount;
proxy_set_header X-AppEngine-TaskRetryReason $http_x_appengine_taskretryreason;
proxy_set_header X-AppEngine-Upload-Creation $http_x_appengine_upload_creation;
proxy_set_header X-AppEngine-User-Email $http_x_appengine_user_email;
proxy_set_header X-AppEngine-User-Id $http_x_appengine_user_id;
proxy_set_header X-AppEngine-User-Is-Admin $http_x_appengine_user_is_admin;
proxy_set_header X-AppEngine-User-Nickname $http_x_appengine_user_nickname;
proxy_set_header X-AppEngine-User-Organization $http_x_appengine_user_organization;
proxy_set_header X-AppEngine-Version "";
add_header X-AppEngine-Flex-AppLatency $request_time always;
}
include /var/lib/nginx/extra/*.conf;
}
server {
# expose /nginx_status but on a different port (8090) to avoid
# external visibility / conflicts with the app.
listen 8090;
location /nginx_status {
stub_status on;
access_log off;
}
location / {
root /dev/null;
}
}
server {
# expose health checks on a different port to avoid
# external visibility / conflicts with the app.
listen 10402 ssl;
ssl_certificate /etc/ssl/localcerts/lb.crt;
ssl_certificate_key /etc/ssl/localcerts/lb.key;
ssl_protocols TLSv1.2;
ssl_ciphers EECDH+AES256:!SHA1;
ssl_prefer_server_ciphers on;
ssl_session_timeout 3h;
location = /liveness_check {
if ( -f /tmp/nginx/lameducked ) {
return 503 'lameducked';
}
if ( -f /var/lib/google/ae/unhealthy/sidecars ) {
return 503 'unhealthy sidecars';
}
if ( !-f /var/lib/google/ae/disk_not_full ) {
return 503 'disk full';
}
if ( -f /tmp/nginx/app_lameducked ) {
return 200 'ok';
}
return 200 'ok';
}
location = /readiness_check {
if ( -f /tmp/nginx/lameducked ) {
return 503 'lameducked';
}
if ( -f /var/lib/google/ae/unhealthy/sidecars ) {
return 503 'unhealthy sidecars';
}
if ( !-f /var/lib/google/ae/disk_not_full ) {
return 503 'disk full';
}
if ( -f /tmp/nginx/app_lameducked ) {
return 503 'app lameducked';
}
return 200 'ok';
}
}
# Add session affinity entry to log_format line i.i.f. the GCLB cookie
# is present.
map $cookie_gclb $session_affinity_log_entry {
'' '';
default sessionAffinity=$cookie_gclb;
}
# Output nginx access logs in the standard format, plus additional custom
# fields containing "X-Cloud-Trace-Context" header, the current epoch
# timestamp, the request latency, and "X-Forwarded-For" at the end.
# If you make changes to the log format below, you MUST validate this against
# the parsing regex at:
# GoogleCloudPlatform/appengine-sidecars-docker/fluentd_logger/managed_vms.conf
# (In general, adding to the end of the list does not require a change if the
# field does not need to be logged.)
log_format custom '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'tracecontext="$http_x_cloud_trace_context" '
'timestampSeconds="${msec}000000" '
'latencySeconds="$request_time" '
'x-forwarded-for="$http_x_forwarded_for" '
'uri="$uri" '
'appLatencySeconds="$upstream_response_time" '
'appStatusCode="$upstream_status" '
'upgrade="$http_upgrade" '
'iap_jwt_action="$iap_jwt_action" '
'$session_affinity_log_entry';
access_log /var/log/nginx/access.log custom;
error_log /var/log/nginx/error.log warn;
}
/etc/hosts (inside Nginx container)
root@f9c9cb5df8e2:/etc/nginx# cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.1 gaeapp
172.17.0.5 f9c9cb5df8e2
docker ps result
I was able to solve the issue by exposing my WordPress site on port 8080 from my Docker container; it was exposed on port 80 before. It does not make much sense to me, but if anyone knows the root cause, please go ahead and explain.
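A likely explanation, judging from the sidecar configuration above: the App Engine nginx sidecar proxies to an upstream defined as gaeapp:8080, and /etc/hosts maps gaeapp to the Docker bridge address, so the application container has to listen on port 8080 for the proxy to reach it. The relevant excerpt from the nginx.conf above:
upstream app_server {
    keepalive 192;
    # gaeapp resolves to 172.17.0.1 (the Docker bridge) via /etc/hosts,
    # and the proxy expects the app on port 8080
    server gaeapp:8080;
}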

How to redirect websocket session with an Nginx proxy to an IBM Bluemix webapp?

Here is my nginx.conf:
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log debug;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream websocket {
server myapp.onbluemix.net:80;
}
server {
listen 80;
location / {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_pass http://websocket;
}
}
}
I can ping my web app on Bluemix from the nginx proxy server.
Here are nginx's logs:
HTTP/1.1 500 Error
Server: nginx/1.13.3
Content-Type: text/xml
Transfer-Encoding: chunked
Connection: keep-alive
X-Backside-Transport: FAIL FAIL
I added the web app's IP address to nginx's hosts file just in case; same error.
The nginx proxy works if I run the web app on my local machine.
I don't understand the X-Backside-Transport: FAIL FAIL header.
What's wrong?
It seems the internal IBM proxy is involved; you have to add the host you want to reach as a header (proxy_set_header host myapp.onbluemix.net;), like this:
location / {
proxy_http_version 1.1;
proxy_set_header host myapp.onbluemix.net;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_pass http://websocket;
}

Nginx Reverse Proxy Websocket Authentication - HTTP 403

I'm using Nginx as a reverse proxy in front of a Spring Boot application. I also use WebSockets with SockJS and STOMP messages.
Here is the context configuration:
<websocket:message-broker application-destination-prefix="/app">
<websocket:stomp-endpoint path="/localization" >
<websocket:sockjs/>
</websocket:stomp-endpoint>
<websocket:simple-broker prefix="/topic" />
</websocket:message-broker>
Here is the client code:
var socket = new SockJS(entryPointUrl);
var stompClient = Stomp.over(socket);
var _this = this;
stompClient.connect({}, function () {
stompClient.subscribe('/app/some-url', function (message) {
// do some stuff
});
});
I also use Spring Security to protect some content.
@Configuration
@Order(4)
public static class FrontendSecurityConfig extends WebSecurityConfigurerAdapter {
@Override
protected void configure(HttpSecurity http) throws Exception {
http.authorizeRequests()
.antMatchers("/js/**", "/css/**", "/webjars/**").permitAll()
.anyRequest().authenticated()
.and()
.formLogin().loginPage("/login").permitAll()
.and()
.logout().permitAll();
}
}
Everything works great, except when I run this application behind an Nginx reverse proxy. Here is the reverse proxy configuration:
proxy_pass http://testsysten:8080;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# WebSocket support (nginx 1.4)
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
# Max body size
client_max_body_size 10M;
The connection always fails with an HTTP 403 code.
I'm using version 1.9.7.
Do you have any idea why the client does not get authenticated?
I know there are similar questions, like this one, but the solutions do not work at all.
Update
I managed to get the application running over HTTP. I needed to pass the CSRF token in the Nginx configuration. The new configuration is:
proxy_pass http://testsysten:8080;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Pass the csrf token (see https://de.wikipedia.org/wiki/Cross-Site-Request-Forgery)
# Default in Spring Boot
proxy_pass_header X-XSRF-TOKEN;
# WebSocket support (nginx 1.4)
proxy_http_version 1.1;
The only thing missing is the redirect over HTTPS. In the Spring logs I see the following entry:
o.s.w.s.s.t.h.DefaultSockJsService - Processing transport request: GET http://testsystem:80/localization/226/3mbmu212/websocket
It seems like the Nginx proxy needs to rewrite the request to the right port.
I solved the problem myself. Basically, Nginx needs to pass some additional header values if you want to use WebSockets and Spring Security. The following lines need to be added to the location section of your Nginx config:
# Pass the csrf token (see https://de.wikipedia.org/wiki/Cross-Site-Request-Forgery)
# Default in Spring Boot and required. Without it nginx suppresses the value
proxy_pass_header X-XSRF-TOKEN;
# Set Origin to the real instance, otherwise a Spring Security check will fail
# Same value as defined in proxy_pass
proxy_set_header Origin "http://testsysten:8080";
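Put together with the proxy settings from the question, the whole location block might look roughly like this (a sketch combining the question's directives with the two additions above; testsysten:8080 is the backend from the question):
location / {
    proxy_pass http://testsysten:8080;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    # Pass the CSRF token that Spring Boot sends by default
    proxy_pass_header X-XSRF-TOKEN;
    # Match the Origin that the Spring Security check expects (same value as proxy_pass)
    proxy_set_header Origin "http://testsysten:8080";
    # WebSocket support
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection $http_connection;
}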
The accepted solution did not work for me, even though I was using a very classic HTTPS configuration:
server {
listen 443 ssl;
location /ws {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
proxy_pass http://127.0.0.1:8888;
}
...
The problem is that Spring checks the origin, and this code specifically was causing me trouble:
// in org.springframework.web.util.UriComponentsBuilder.adaptFromForwardedHeaders(HttpHeaders):
if ((this.scheme.equals("http") && "80".equals(this.port)) ||
(this.scheme.equals("https") && "443".equals(this.port))) {
this.port = null;
}
In that code the scheme is 'http' and the port is 8888, which is not discarded because it is not the standard port.
The browser however hits https://myserver/ and the 443 port is omitted because it is the default HTTPS one.
Therefore the ports do not match (empty != 8888) and the origin check fails.
Either you can disable origin checks in Spring WebSockets:
registry.addHandler( registerHandler(), "/ws" ).setAllowedOrigins( "*" );
or (probably safer) you can add the scheme and port to the NGINX proxy configuration:
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
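In nginx terms, that means adding the two forwarding headers to the same location block that proxies the WebSocket endpoint, roughly like this (a sketch based on the /ws block shown earlier):
location /ws {
    proxy_pass http://127.0.0.1:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $http_host;
    # Let Spring see the original scheme and port (https/443) so its origin comparison succeeds
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Forwarded-Port $server_port;
}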
If you are interested, those headers are read in
org.springframework.web.util.UriComponentsBuilder.adaptFromForwardedHeaders(HttpHeaders)
For Spring Boot 2.2.2+
Starting with Spring Boot version 2.2.2, you should add the following setting for these X-Forwarded-* headers to be taken into account:
server.forward-headers-strategy=native
(in application.properties for instance)
I faced a similar problem: I was unable to use basic Spring Security authentication behind NGINX. Apart from setting proxy_pass_header X-XSRF-TOKEN;, I also had to set underscores_in_headers on;, since NGINX by default does not allow headers with underscores and the CSRF token is named _csrf.
So my final configuration file looked like this:
server {
underscores_in_headers on;
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;
root /usr/share/nginx/html;
index index.html index.htm;
# Make site accessible from http://localhost/
server_name localhost;
location / {
# First attempt to serve request as file, then
# as directory, then fall back to displaying a 404.
try_files $uri $uri/ =404;
# Uncomment to enable naxsi on this location
# include /etc/nginx/naxsi.rules
}
location /example/ {
proxy_pass_header X-XSRF-TOKEN;
proxy_pass http://localhost:8080/;
}
}
I solved this problem without the CSRF header in the NGINX proxy.
My stack: Spring Boot, Spring Security (with a Redis session store), spring-boot-websocket with the default STOMP implementation, and NGINX serving the frontend and proxying the other services the frontend consumes.
At first I used the default configuration shown on the NGINX blog here and here (copied and pasted for posterity):
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream websocket {
server 192.168.100.10:8010;
}
server {
listen 8020;
location / {
proxy_pass http://websocket;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
}
But it didn't work; still 403 Forbidden.
I fixed the issue with the configuration below (the really important part for fixing the WebSocket is the # WebSocket Proxy section):
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
server {
listen 30010;
server_name localhost;
client_max_body_size 10M;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
# Backend API Proxy
location /api {
proxy_pass http://192.168.0.100:30080;
proxy_set_header Host $http_host;
proxy_set_header Access-Control-Allow-Origin 192.168.0.100;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
rewrite ^/api/?(.*) /$1 break;
proxy_redirect off;
}
# CDN Proxy
location ~ ^/cdn/(.*) {
proxy_pass http://192.168.0.110:9000;
rewrite ^/cdn/(.*) /$1 break;
}
# This is the configuration that fixes the problem with WebSocket
# WebSocket Proxy
location /ws {
proxy_pass http://192.168.0.120:30090;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
proxy_set_header Access-Control-Allow-Origin 192.168.0.120;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
}
}
}
In my case (Spring Boot app), in addition to setting the Origin header as specified in the accepted answer, I had to set the Host header to match the ip:port of the Origin header, or to get rid of it altogether.
This is my working vhost config:
server {
listen 443 ssl;
listen [::]:443 ssl;
ssl_certificate /etc/ssl/certs/<your-cert-file>.pem;
ssl_certificate_key /etc/ssl/private/<your-key-file>.key;
server_name <your-server-fqdn>;
access_log /var/log/nginx/<your-server-fqdn>.access.log;
error_log /var/log/nginx/<your-server-fqdn>.error.log error;
root /srv/www/<your-server-fqdn>;
index index.html index.html;
location / {
try_files $uri $uri/ /index.html;
}
location /api {
proxy_pass http://127.0.0.1:8080/v1;
}
location /async-api {
proxy_pass http://127.0.0.1:8080/stomp;
proxy_http_version 1.1;
# either set Host header as follows or get rid of the directive altogether
#proxy_set_header Host "127.0.0.1:8080";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
# Set Origin to the real instance, otherwise a Spring Security check will fail
# Same value as defined in proxy_pass
proxy_set_header Origin "http://127.0.0.1:8080";
proxy_set_header X-Forwarded-Host $host:$server_port;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location /admin-api {
proxy_pass http://127.0.0.1:8080/api;
}
}
