Fastly error when reverse proxying - nginx

I am trying to set up a caching reverse proxy for the Contentful CDN using Nginx.
My configuration is as follows:
http {
proxy_cache_path /my/cache levels=1:2 keys_zone=STATIC:10m inactive=24h max_size=1g;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 443 ssl;
server_name my.domain.com;
server_tokens off;
root /my/www/client;
index index.html;
try_files $uri $uri/ /index.html;
auth_basic "Private Property";
auth_basic_user_file /pass/.htpasswd;
ssl_certificate /my/ssl/cert.crt;
ssl_certificate_key /my/ssl/key.key;
ssl_dhparam /etc/ssl/certs/LS-dhparam.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECD$
location /spaces {
index index.html index.htm;
charset utf-8;
proxy_pass http://cdn.contentful.com/spaces/spaceId/entries?access_token=accessTokenValue;
proxy_ignore_headers Cache-Control Expires;
proxy_cache STATIC;
proxy_cache_valid 200 10m;
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_connect_timeout 90s;
proxy_send_timeout 90s;
proxy_read_timeout 90s;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
send_timeout 90s;
}
}
}
where, of course, spaceId and accessTokenValue are the appropriate values.
I then trigger a request to https://my.domain.com/spaces/&content_type=myContent&locale=en
which returns this error: Fastly error: unknown domain: my.domain.com. Please check that this domain has been added to a service.
It works when I use the full URL directly: http://cdn.contentful.com/spaces/spaceId/entries?access_token=accessTokenValue&content_type=myContent&locale=en
Is there something wrong with my nginx configuration, or is the problem on Contentful's side?

You need to remove
proxy_set_header Host $host;
for this to work. When you proxy_pass to http://cdn.contentful.com/spaces/spaceId/entries?access_token=accessTokenValue&$query_string, you want the Host header to remain cdn.contentful.com rather than being overwritten with your own domain.
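For illustration, a minimal sketch of the adjusted location block (keeping the spaceId and accessTokenValue placeholders; by default nginx sends the upstream host from proxy_pass as the Host header when no Host override is configured, so the explicit line below is optional):
location /spaces {
    proxy_pass        http://cdn.contentful.com/spaces/spaceId/entries?access_token=accessTokenValue&$query_string;
    proxy_set_header  Host cdn.contentful.com;   # or simply remove the Host override entirely
    proxy_cache       STATIC;
    proxy_cache_valid 200 10m;
}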

Related

Nginx errors on some browsers

I have a problem calling an API endpoint on my server.
I receive a 500 error, and the page content says:
The page you are looking for is temporarily unavailable.
Please try again later.
The problem occurs only on some browsers/computers; it works normally on others.
For example, I have the problem on Firefox while it works on Chrome,
yet one of my customers gets the error on Chrome.
On the server, I found this error log entry (/nginx/localhost):
94.xxx.xxx.xxx:- - - [05/Oct/2022:11:10:33 +0000] "POST /api/auth/login HTTP/3" 500 383 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:104.0) Gecko/20100101 Firefox/104.0" "-" "www.mydomain.com"
This server is hosted on a Jelastic platform and is used as a load balancer that forwards traffic to other servers.
My nginx config file is as follows (I have masked IPs and domains):
######## HTTP SECTION PROTOTYPE ########
http {
server_tokens off ;
include /etc/nginx/mime.types;
default_type application/octet-stream;
set_real_ip_from 192.xxx.xxx.xxx/16;
set_real_ip_from 10.xxx.xxx.xxx/8;
set_real_ip_from 172.xxx.xxx.xxx/16;
real_ip_header X-Forwarded-For;
real_ip_recursive on;
log_format main '$remote_addr:$http_x_remote_port - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" '
'"$host" sn="$server_name" '
'rt=$request_time '
'ua="$upstream_addr" us="$upstream_status" '
'ut="$upstream_response_time" ul="$upstream_response_length" '
'cs=$upstream_cache_status' ;
client_header_timeout 10m;
client_body_timeout 10m;
send_timeout 10m;
client_max_body_size 100m;
proxy_read_timeout 300s;
connection_pool_size 256;
client_header_buffer_size 1k;
large_client_header_buffers 4 32k; # 8k to 32k
request_pool_size 4k;
#Allow large token
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
#proxy_buffering off;
# gzip on;
gzip_min_length 1100;
gzip_buffers 4 8k;
gzip_types text/plain;
output_buffers 1 32k;
postpone_output 1460;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 75 20;
ignore_invalid_headers on;
### UPSTREAMS LIST FOLLOWS HERE ###
#upstream nodes{ server XXX.XXX.XXX.XXX; server 127.0.0.1:8001 backup # UPSTREAMPROTO # This is upstream prototype line, do not remove this! }
#This config is auto-generated. DO NOT modify the weight property. If changing the rest of settings, please, remember that you are doing this at your own risk.
upstream common { check interval=30000 rise=2 fall=5 timeout=10000 default_down=false type=http; check_http_send "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n"; keepalive 100; server mydomain.jcloud-ver-jpe.ik-server.com weight=100 ; } ### UPSTREAMPROTO for common ###
#GFADMIN
server {
listen *:80;
listen [::]:80;
server_name _;
access_log /var/log/nginx/localhost.access_log main;
error_log /var/log/nginx/localhost.error_log info;
proxy_temp_path /var/nginx/tmp/;
proxy_connect_timeout 5s;
error_page 500 502 503 504 /50x.html;
proxy_next_upstream error timeout http_500;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Host $http_host;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Remote-Port $http_x_remote_port;
proxy_set_header X-URI $request_uri;
proxy_set_header X-ARGS $args;
proxy_set_header Refer $http_refer;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
if ($http_x_remote_port = '' ) {
set $http_x_remote_port $remote_port;
}
location = /50x.html {
root html;
}
location / {
proxy_pass http://common;
}
#USERLOCATIONS
}
# server {
# listen *:8001;
# server_name backup.local;
#
# location / {
# proxy_pass http://default_upstream;
# add_header Set-Cookie "SRVGROUP=$group; path=/; HttpOnly";
# proxy_http_version 1.1;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Host $http_host;
# proxy_set_header X-Forwarded-For $http_x_forwarded_for;
# proxy_set_header X-URI $request_uri;
# proxy_set_header X-ARGS $args;
# proxy_set_header Refer $http_refer;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection "upgrade";
# }
# }
include /etc/nginx/conf.d/*.conf;
}
######## TCP SECTION PROTOTYPE ########
This is the first time I've looked at an nginx config file; any advice or ideas are welcome :)

NGINX: How do I remove a port when performing a reverse proxy?

I have an Nginx reverse proxy set up as an SSL offload for several servers, such as Confluence. I've got it working for http://confluence and https://confluence, but when I try to redirect http://confluence:8090, it goes to https://confluence:8090 and fails.
How can I remove the port from the URL?
The config below is a bit trimmed but may be helpful. Is the $server_port bit in the headers causing the problem?
server {
listen 8090;
server_name confluence;
return 301 https://confluence$request_uri;
}
server {
listen 443 ssl http2;
server_name confluence;
location / {
proxy_http_version 1.1;
proxy_pass http://confbackend:8091;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $server_name:$server_port;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Upgrade $http_upgrade; #WebSocket Support
proxy_set_header Connection $connection_upgrade; #WebSocket Support
}
}
It seems like a lot of answers here involve http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect, but I find no solace in that confusing mess.
I also would have thought a single server block would do, but I was trying the advice from https://serverfault.com/questions/815797/nginx-rewrite-to-new-protocol-and-port
I tried messing with the port_in_redirect off; option, but maybe I was using it wrong?
EDIT 1: Add conf files
The files below are modifications of the Artifactory nginx setup; I used that setup initially and added additional conf files (in ./conf.d/) for other reverse-proxy endpoints.
Confluence.conf
server {
listen 8090 ssl http2;
server_name confluence.domain.com confluence;
## return 301 https://confluence.domain.com$request_uri;
proxy_redirect https://confluence.domain.com:8090 https://confluence.domain.com;
}
server {
## add ssl entries when https has been set in config
ssl_certificate /data/rpssl/confluence.pem;
ssl_certificate_key /data/rpssl/confluence_unencrypted.key;
## server configuration
listen 443 ssl http2;
server_name confluence.domain.com confluence;
add_header Strict-Transport-Security max-age=31536000;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/confluence-access.log timing;
error_log /var/log/nginx/confluence-error.log;
client_max_body_size 0;
proxy_read_timeout 1200;
proxy_connect_timeout 240;
location / {
proxy_http_version 1.1;
proxy_pass http://backendconfluence.domain.com:8091;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $server_name:$server_port;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Upgrade $http_upgrade; # WebSocket Support
proxy_set_header Connection $connection_upgrade; # WebSocket support
}
}
nginx.conf
# Main Nginx configuration file
worker_processes 4;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
worker_rlimit_nofile 4096;
events {
worker_connections 2048;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_max_size 4096;
server_names_hash_bucket_size 128;
types_hash_max_size 2048;
types_hash_bucket_size 64;
proxy_read_timeout 2400s;
client_header_timeout 2400s;
client_body_timeout 2400s;
proxy_connect_timeout 75s;
proxy_send_timeout 2400s;
proxy_buffer_size 32k;
proxy_buffers 40 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 250m;
proxy_http_version 1.1;
client_body_buffer_size 128k;
map $http_upgrade $connection_upgrade { #WebSocket support
default upgrade;
'' '';
}
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format timing 'ip = $remote_addr '
'user = \"$remote_user\" '
'local_time = \"$time_local\" '
'host = $host '
'request = \"$request\" '
'status = $status '
'bytes = $body_bytes_sent '
'upstream = \"$upstream_addr\" '
'upstream_time = $upstream_response_time '
'request_time = $request_time '
'referer = \"$http_referer\" '
'UA = \"$http_user_agent\"';
access_log /var/log/nginx/access.log timing;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
Your problem is the STS header:
add_header Strict-Transport-Security max-age=31536000;
When you add the STS header, the first request to http://example.com:8090 generates a redirect to https://example.com.
That https://example.com response returns the STS header, and the browser remembers that example.com must always be served over https, no matter what; the port makes no difference.
Now when you make another request to http://example.com:8090, HSTS kicks in and converts it to https://example.com:8090, which is your problem here.
Because a single port can only serve http or https, you can't use 8090 both to redirect http to https and to redirect https on 8090 to https on 443.
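Under that constraint, the most the 8090 listener can do is stay plain HTTP and bounce clients to the canonical https URL without the port, roughly like the sketch below (hostnames as in the config above). Note that a browser which has already cached the HSTS policy will rewrite the request to https://confluence:8090 before it ever reaches nginx, and a plain-HTTP listener cannot answer that TLS handshake.
server {
    listen      8090;                                        # plain http, no ssl on this port
    server_name confluence.domain.com confluence;
    return 301  https://confluence.domain.com$request_uri;   # redirect drops the port
}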

Direct IP access not allowed cloudflare in nginx proxy server

I have a site on OpenShift RHC, so I want to build a reverse proxy with nginx on the OpenShift RHC servers, so that from this server I have access to many other servers. I configured my nginx server with this kind of configuration:
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
error_log {{OPENSHIFT_HOMEDIR}}/app-root/logs/nginx_error.log debug;
pid {{NGINX_DIR}}/logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
#access_log $OPENSHIFT_DIY_LOG_DIR/access.log main;
port_in_redirect off;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 165;
gzip on;
upstream frontends {
#server pr4ss.tk;
#server 222.66.115.233:80 weight=1;
server {{OPENSHIFT_INTERNAL_IP}}:8081 ;
}
upstream frontends2 {
server google.com;
#server 222.66.115.233:80 weight=1;
#server {{OPENSHIFT_INTERNAL_IP}}:8081 ;
}
upstream index {
server free-papers.elasa.ir weight=1;
server diy4tornado-tornado4ss.rhcloud.com weight=2;
}
upstream comment {
server vb2-fishsmarkets.rhcloud.com;
#server community.elasa.ir;
}
server {
listen {{OPENSHIFT_INTERNAL_IP}}:{{OPENSHIFT_INTERNAL_PORT}};
server_name {{OPENSHIFT_GEAR_DNS}} www.{{OPENSHIFT_GEAR_DNS}};
root {{OPENSHIFT_REPO_DIR}};
set_real_ip_from {{OPENSHIFT_INTERNAL_IP}};
real_ip_header X-Forwarded-For;
#charset koi8-r;
#access_log logs/host.access.log main;
location /main {
root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
try_files $uri $uri/ =404;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
}
location ~* ^/(.*) {
#proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
#proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ http://diy-elasa2.rhcloud.com/;
#proxy_pass http://comment/$1$is_args$args;
proxy_pass http://index/$1$is_args$args;
}
location ^~ /admincp {
if (!-f $request_filename) {
rewrite ^/admincp/(.*)$ /index.php?routestring=admincp/$1 last;
}
proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ http://diy-elasa2.rhcloud.com/;
proxy_pass http://comment/$1$is_args$args;
}
location /www {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends;
}
location /categories {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends2;
}
location /index {
#root {{OPENSHIFT_REPO_DIR}};
index index.html index.htm;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
# an HTTP header important enough to have its own Wikipedia entry:
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
# enable this if you forward HTTPS traffic to unicorn,
# this helps Rack set the proper URL scheme for doing redirects:
# proxy_set_header X-Forwarded-Proto $scheme;
# pass the Host: header from the client right along so redirects
# can be set properly within the Rack application
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
# set "proxy_buffering off" *only* for Rainbows! when doing
# Comet/long-poll/streaming. It's also safe to set if you're using
# only serving fast clients with Unicorn + nginx, but not slow
# clients. You normally want nginx to buffer responses to slow
# clients, even with Rails 3.1 streaming because otherwise a slow
# client can become a bottleneck of Unicorn.
#
# The Rack application may also set "X-Accel-Buffering (yes|no)"
# in the response headers do disable/enable buffering on a
# per-response basis.
# proxy_buffering off;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 10;
proxy_send_timeout 5;
proxy_read_timeout 3600;
proxy_buffer_size 4k;
proxy_buffers 4 132k;
proxy_busy_buffers_size 264k;
proxy_temp_file_write_size 164k;
proxy_pass http://index;
#proxy_set_header Authorization base64_encoding_of_"user:password";
#proxy_pass_header Server;
proxy_set_header Host $http_host;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
location ~ \.php$ {
root html;
fastcgi_pass {{OPENSHIFT_INTERNAL_IP}}:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
include fastcgi_params;
}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443;
# server_name localhost;
# ssl on;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_timeout 5m;
# ssl_protocols SSLv2 SSLv3 TLSv1;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
I used the upstream index:
upstream index {
server free-papers.elasa.ir weight=1;
server diy4tornado-tornado4ss.rhcloud.com weight=2;
}
but when I use this configuration (with Cloudflare or the RHC server) I get this error from Cloudflare:
Direct IP access not allowed cloudflare
So what is the proper configuration for nginx (or Apache) for this kind of setup?
Thanks a lot for your attention.
I found the answer:
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain application/xml application/javascript text/javas$
gzip_disable "msie6";
gzip_http_version 1.1;
upstream comment {
#server vb-fishsmarkets.rhcloud.com;
#server vb.elasa.ir;
#server vb-elasa3.rhcloud.com ;
#server vb2-fishsmarkets.rhcloud.com;
#server forums.fishsmarket.tk;
#server community.elasa.ir;
#server free-papers.elasa.ir;
#server diy4tornado-tornado4ss.rhcloud.com weight=1;
server free-papers.elasa.ir weight=1;
}
limit_req_zone $binary_remote_addr zone=one:10m rate=30r/m;
limit_req_zone $binary_remote_addr zone=one2:10m rate=1r/m;
limit_req_zone $http_x_forwarded_for zone=one3:10m rate=1r/m;
proxy_cache_path /tmp levels=1:2 keys_zone=RUBYGEMS:10m
inactive=24h max_size=1g;
server {
listen 127.6.145.1:8080;
server_name diy-elasa2.rhcloud.com community.elasa.ir ;
#charset koi8-r;
and :
location ~* ^/(.*) {
#root html;
#index index.html index.htm;
#proxy_set_header Host vb2-fishsmarkets.rhcloud.com;
proxy_set_header Host free-papers.elasa.ir;
#proxy_redirect http://vb2-fishsmarkets.rhcloud.com/ h$
#proxy_redirect http://fm.elasa.ir/ http://community.e$
proxy_pass http://comment/$1$is_args$args;
proxy_set_header X-Real-IP $remote_addr;
#proxy_set_header X-Scheme $scheme;
sub_filter 'http://fm.elasa.ir/' 'http://community.elasa.ir/';
sub_filter_once on;
proxy_set_header X-outside-url $scheme://$host;
#proxy_set_header X-Real-IP $remote_addr;
proxy_cache RUBYGEMS;
proxy_cache_valid 200 1d;
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_cache_bypass $http_upgrade;
proxy_set_header X-NginX-Proxy true;
proxy_redirect off;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 100M;
client_body_buffer_size 1m;
proxy_intercept_errors on;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 256 16k;
proxy_busy_buffers_size 256k;
proxy_temp_file_write_size 256k;
#proxy_max_temp_file_size 0;
proxy_read_timeout 300;
}
The important thing is the proxy Host header:
proxy_set_header Host free-papers.elasa.ir;
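Distilled to a minimal sketch (hostnames as in the config above): when the upstream sits behind Cloudflare, the Host header sent upstream must name a domain that Cloudflare serves, not the proxy's own hostname or a bare IP; otherwise Cloudflare typically answers with the "Direct IP access not allowed" page.
location / {
    proxy_set_header Host free-papers.elasa.ir;   # the Cloudflare-served origin domain
    proxy_pass       http://comment;              # upstream group defined above
}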

Status 405 - docker - artifactory

I get the following error when pushing images to docker artifactory:
v1:
Error: Status 405 trying to push repository abc-docker-local: "{\n \"errors\" : [ {\n \"status\" : 405,\n \"message\" : \"Method Not Allowed\"\n } ]\n}"
I use nginx. My config file looks like:
server_name localserver;
rewrite ^ remoteserver/artifactory/api/docker/abc-docker-local/v1 redirect;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
location / {
proxy_read_timeout 900;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass http://remoteserver:9081/artifactory/api/docker/abc-docker-local/v1;
proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
In Artifactory I set the repo as v1. I also tested with v2, but no endpoints are returned. The exact error for v2 is:
Index response didn't contain any endpoints
Thanks in advance.
If the result is blank, you must not have added all of the required configuration; here's an example from my sandbox.
This is the config that Artifactory generated and that works for me with Docker 1.10 (it also worked with Docker 1.8 and 1.9) installed on a remote Ubuntu machine, while my Artifactory sandbox is my own machine.
Note that my certificate location is not a common one (certs\myCert.cert), so change it to wherever your SSL certs are, and that I'm using ports 4441 and 4442 for V1 and V2 respectively:
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
## add ssl entries when https has been set in config
ssl_certificate certs\myCert.cert;
ssl_certificate_key certs\myCert.cert;
ssl_session_cache shared:SSL:1m;
ssl_prefer_server_ciphers on;
## server configuration
server {
listen 443 ssl;
listen 9091 ;
server_name localhost;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
## access_log /var/log/nginx/localhost-access.log timing;
## error_log /var/log/nginx/localhost-error.log;
rewrite ^/$ /artifactory/webapp/ redirect;
rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect;
location /artifactory/ {
proxy_read_timeout 900;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass http://localhost:8080/artifactory/;
proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
## server configuration
server {
listen 4441 ssl;
server_name localhost;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
## access_log /var/log/nginx/localhost-access.log timing;
## error_log /var/log/nginx/localhost-error.log;
rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/docker-local-v1/$1/$2;
client_max_body_size 0;
chunked_transfer_encoding on;
location /artifactory/ {
proxy_read_timeout 900;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass http://localhost:8080/artifactory/;
proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
## server configuration
server {
listen 4442 ssl;
server_name localhost;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
## access_log /var/log/nginx/localhost-access.log timing;
## error_log /var/log/nginx/localhost-error.log;
rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/docker-local-v2/$1/$2;
client_max_body_size 0;
chunked_transfer_encoding on;
location /artifactory/ {
proxy_read_timeout 900;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass http://localhost:8080/artifactory/;
proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
}
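Applied to the repository in the question, the line doing the real work is the per-port rewrite that maps Docker's /v1 and /v2 request paths onto the repository's API path. A sketch, assuming abc-docker-local is the repo served from the V1 port and keeping the rest of the server block as in the sandbox config above:
rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/abc-docker-local/$1/$2;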

502 Bad Gateway Error Nginx connect() to unix:/tmp/unicorn.sock failed

This is my first question on Stack Overflow. I could not find a solution to this while searching the web.
I am working on an OpenBSD server running Nginx, which I did not set up, that is throwing a 502 Bad Gateway error in the browser. The Nginx error log shows this error:
2014/04/29 09:43:49 [error] 5236#0: *263 connect() to unix:/tmp/unicorn.sock failed (61: Connection refused) while connecting to upstream, client: ###.###.###.###, server: , request: "GET / HTTP/1.1", upstream: "http://unix:/tmp/unicorn.sock:/", host: "sub.domain.com"
Here are the contents of nginx.conf:
user _nginx;
worker_processes 2;
pid /var/run/nginx.pid;
error_log /var/log/httpd.err debug;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format access '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent $bytes_sent '
'"$http_referer" "$http_user_agent" "$sent_http_content_type"';
access_log /var/log/httpd.log access;
upload_progress proxied 1m;
sendfile on;
server_name_in_redirect off;
client_body_timeout 120;
client_header_timeout 120;
keepalive_timeout 20;
send_timeout 120;
upstream mongrel {
server unix:/tmp/unicorn.sock;
}
gzip on;
gzip_vary on;
gzip_min_length 0;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_proxied any;
gzip_types text/plain text/javascript text/css text/stylesheet application/x-javascript application/javascript;
gzip_disable "MSIE [1-6]\.";
client_max_body_size 128000M;
client_body_buffer_size 512k;
ssl_session_timeout 60m;
ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:1m;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/private/server.key;
server {
listen 80;
listen [::]:80;
listen 443 default ssl;
listen [::]:443 default ssl;
charset utf-8;
root /var/sfta/current/public;
location ~* ^/(message\/create|dropbox\/create|attachment\/create|attachments) {
upload_pass @internal_upload;
upload_resumable on;
upload_pass_args on;
upload_store /var/data/tmp;
upload_state_store /var/data/tmp/resume;
upload_store_access user:rw group:rw all:rw;
chunked_transfer_encoding on;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X_Forwarded_Proto $scheme;
if ($request_method != POST) {
proxy_pass http://mongrel;
break;
}
# Set specified fields in request body
upload_set_form_field $upload_field_name.name "$upload_file_name";
upload_set_form_field $upload_field_name.content_type "$upload_content_type";
upload_set_form_field $upload_field_name.path "$upload_tmp_path";
upload_aggregate_form_field "$upload_field_name.sha1" "$upload_file_sha1";
upload_aggregate_form_field "$upload_field_name.size" "$upload_file_size";
upload_max_output_body_len 0;
upload_pass_form_field "^authenticity_token$|message|dropbox";
upload_cleanup 400 404 499 500-505;
# track uploads in the 'proxied' zone
# remember connections for 30s after they finished
track_uploads proxied 120s;
}
location ~* ^/(send|messages) {
rewrite ^(.*)$ /message redirect;
}
location / {
proxy_pass http://mongrel;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X_Forwarded_Proto $scheme;
proxy_read_timeout 120;
proxy_connect_timeout 120;
# track uploads in the 'proxied' zone
# remember connections for 30s after they finished
track_uploads proxied 120s;
}
location @internal_upload {
proxy_pass http://mongrel;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X_Forwarded_Proto $scheme;
proxy_read_timeout 120;
proxy_connect_timeout 120;
}
location ~* ^/send {
rewrite ^(.*)$ /message redirect;
}
location ^~ /files/ {
alias /var/data/files/;
chunked_transfer_encoding on;
post_action @protected_done;
if_modified_since off;
gzip off;
internal;
}
location @protected_done {
internal;
proxy_pass http://mongrel;
proxy_set_header RateBytes $body_bytes_sent;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass_request_body off;
proxy_pass_request_headers off;
}
location ^~ /images/custom {
alias /var/data/assets/images;
}
location ~* /(javascripts|stylesheets|images)/.*\.(ico|css|gif|js|jp?g|png)(\?[0-9]+)?$ {
access_log off;
expires 1w;
break;
}
location = /favicon.ico {
alias /var/data/assets/images/favicon.ico;
access_log off;
}
location = /alive {
access_log off;
return 200;
}
location ^~ /progress {
access_log off;
report_uploads proxied;
upload_progress_json_output;
}
error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /500.html;
}
}
I have very little experience with Nginx. Any help is greatly appreciated.
