Nginx on Kubernetes not serving static content

I have Nginx Open Source running on AKS. Everything works except that it will not serve static content such as index.html or favicon.ico.
When I open http:// it does not serve index.html by default (I get a 404), and if I try to open any static content directly I also get a 404.
The nginx configuration is passed in as a ConfigMap; below is the part of the config file that deals with serving static content.
server {
    listen 80;
    server_name localhost;
    root /opt/abc/html;        # also tried root /opt/abc/html/

    location / {
        root /opt/abc/html;    # also tried root /opt/abc/html/
        index index.html;
        try_files $uri $uri/ /index.html?$args;
        ...
        ...
        ..
        proxy_pass http://tomcat;
    }
}
Setup:
Kubernetes on AKS
Nginx Open Source [no ingress]
configMaps to mount config.d
the static content (/opt/abc/html) was copied into the pod with the kubectl cp command [will this work?]
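For reference, a hedged sketch of that copy step and its main caveat (pod name is illustrative):
# Copy the static files into the running container.
kubectl cp ./html default/nginx-pod-abc123:/opt/abc/html
# Files copied this way live only in that container instance and are lost when
# the pod is recreated; baking them into the image or mounting them from a
# volume is more durable.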

ref: https://github.com/RammusXu/toolkit/tree/master/k8s/echoserver
Here's an example of mounting nginx.conf from a ConfigMap.
Make sure you run kubectl rollout restart deployment echoserver after updating the ConfigMap. The pod only picks up the ConfigMap content when it is created; because the file is mounted with subPath, it is not synced or auto-updated afterwards.
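For example, the update flow looks roughly like this (the ConfigMap file name is illustrative):
kubectl apply -f nginx-config.yaml                # update the ConfigMap
kubectl rollout restart deployment echoserver     # recreate the pods so they pick it up
kubectl rollout status deployment echoserver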
apiVersion: apps/v1
kind: Deployment
metadata:
  name: echoserver
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: echoserver
  template:
    metadata:
      labels:
        app: echoserver
    spec:
      volumes:
      - name: config
        configMap:
          name: nginx-config
      containers:
      - name: echoserver
        # image: gcr.io/google_containers/echoserver:1.10
        image: openresty/openresty:1.15.8.2-1-alpine
        ports:
        - containerPort: 8080
          name: http
        # nginx.conf override
        volumeMounts:
        - name: config
          subPath: nginx.conf
          # mountPath: /etc/nginx/nginx.conf
          mountPath: /usr/local/openresty/nginx/conf/nginx.conf
          readOnly: true
---
apiVersion: v1
kind: Service
metadata:
  name: echoserver
  namespace: default
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app: echoserver
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
  namespace: default
data:
  nginx.conf: |-
    events {
      worker_connections 1024;
    }
    env HOSTNAME;
    env NODE_NAME;
    env POD_NAME;
    env POD_NAMESPACE;
    env POD_IP;
    http {
      default_type 'text/plain';
      # maximum allowed size of the client request body. By default this is 1m.
      # For requests with bigger bodies nginx will return error code 413.
      # http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
      client_max_body_size 10m;
      # https://blog.percy.io/tuning-nginx-behind-google-cloud-platform-http-s-load-balancer-305982ddb340
      keepalive_timeout 650;
      keepalive_requests 10000;
      # GZIP support
      gzip on;
      gzip_min_length 128;
      gzip_proxied any;
      gzip_comp_level 6;
      gzip_types text/css
                 text/plain
                 text/javascript
                 application/javascript
                 application/json
                 application/x-javascript
                 application/xml
                 application/xml+rss
                 application/xhtml+xml
                 application/x-font-ttf
                 application/x-font-opentype
                 application/vnd.ms-fontobject
                 image/svg+xml
                 image/x-icon
                 application/rss+xml
                 application/atom_xml
                 application/vnd.apple.mpegURL
                 application/x-mpegurl
                 vnd.apple.mpegURL
                 application/dash+xml;
      init_by_lua_block {
        local template = require("template")
        -- template syntax documented here:
        -- https://github.com/bungle/lua-resty-template/blob/master/README.md
        tmpl = template.compile([[
          Hostname: {{os.getenv("HOSTNAME") or "N/A"}}
          Pod Information:
          {% if os.getenv("POD_NAME") then %}
            node name:     {{os.getenv("NODE_NAME") or "N/A"}}
            pod name:      {{os.getenv("POD_NAME") or "N/A"}}
            pod namespace: {{os.getenv("POD_NAMESPACE") or "N/A"}}
            pod IP:        {{os.getenv("POD_IP") or "N/A"}}
          {% else %}
            -no pod information available-
          {% end %}
          Server values:
            server_version=nginx: {{ngx.var.nginx_version}} - lua: {{ngx.config.ngx_lua_version}}
          Request Information:
            client_address={{ngx.var.remote_addr}}
            method={{ngx.req.get_method()}}
            real path={{ngx.var.request_uri}}
            query={{ngx.var.query_string or ""}}
            request_version={{ngx.req.http_version()}}
            request_scheme={{ngx.var.scheme}}
            request_uri={{ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri}}
          Request Headers:
          {% for i, key in ipairs(keys) do %}
            {{key}}={{headers[key]}}
          {% end %}
          Request Body:
          {{ngx.var.request_body or " -no body in request-"}}
        ]])
      }
      server {
        # please check the benefits of reuseport https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1
        # basically it instructs nginx to create an individual listening socket for each worker process
        # (using the SO_REUSEPORT socket option), allowing the kernel to distribute incoming connections
        # between worker processes.
        listen 8080 default_server reuseport;
        listen 8443 default_server ssl http2 reuseport;
        ssl_certificate /certs/certificate.crt;
        ssl_certificate_key /certs/privateKey.key;
        # Replace '_' with your hostname.
        server_name _;
        location / {
          lua_need_request_body on;
          content_by_lua_block {
            ngx.header["Server"] = "echoserver"
            local headers = ngx.req.get_headers()
            local keys = {}
            for key, val in pairs(headers) do
              table.insert(keys, key)
            end
            table.sort(keys)
            ngx.say(tmpl({os=os, ngx=ngx, keys=keys, headers=headers}))
          }
        }
      }
    }
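To sanity-check the example after applying the manifests, a rough sequence like this should work (the manifest file name is illustrative):
kubectl apply -f echoserver.yaml
kubectl port-forward svc/echoserver 8080:80
# in a second terminal:
curl -s http://localhost:8080/    # should print the templated echo response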

Related

Nginx Ingress session-cookie-expires doesn't work in kubernetes

The application is deployed on Azure; the Kubernetes version is 1.19.6 and the nginx-ingress-controller version is 0.27.1.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  namespace: qas
  name: ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
    kubernetes.io/ingress.allow-http: "false"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/http2-push-preload: "true"
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
    nginx.ingress.kubernetes.io/upstream-fail-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
    # Legacy: for compatibility with older browsers: https://kubernetes.github.io/ingress-nginx/examples/affinity/cookie/
    nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE"
    nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
    nginx.ingress.kubernetes.io/session-cookie-max-age: "3600"
    #-----project specific-----#
    nginx.ingress.kubernetes.io/app-root: "/welcome"
    #----No ip whitelist for storefront, we fully depend on NSG rules in both D/Q/P-----#
    nginx.ingress.kubernetes.io/server-snippet: |
      # maintenance page
      #rewrite ^(.*)$ https://www.maintenance.bosch.com/ redirect;
      ####################################
      # NOTE for storefront we strictly don't allow access to admin urls
      #################################
      if ( $uri ~* "^/(smartedit|backoffice|hac|hmc|mcc|cmssmart*|maintenance|boschfoundationemployee|embeddedserver|groovynature|acceleratorservices|authorizationserver|permission*|previewwebservices|tomcatembeddedserver|.*cockpit)" ) {
        return 403;
      }
    nginx.ingress.kubernetes.io/configuration-snippet: |
      server_name_in_redirect on;
      chunked_transfer_encoding off;
      proxy_ignore_client_abort on;
      gzip on;
      gzip_vary on;
      gzip_min_length 1;
      gzip_proxied expired no-cache no-store private auth;
      gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/javascript application/json application/xml image/png image/svg+xml;
      gzip_disable "MSIE [1-6]\.";
      set $redirection_target "/";
      #------project specific-----#
      # TODO: Change to quality.aacentral.bosch.tech once migration is completed
      set $best_http_host rb-q-aa-central.westeurope.cloudapp.azure.com;
      # only if we did not redirect, apply headers for caching
      if ($uri ~* \.(js|css|gif|jpe?g|png|woff2)) {
        # for older browsers
        expires 5h;
        add_header Cache-Control "private, max-age=1800, stale-while-revalidate";
      }
spec:
  tls:
  - hosts:
    - domain.com
    secretName: waf
  rules:
  - host: domain.com
    http:
      paths:
      - backend:
          serviceName: svc
          servicePort: 443
        path: /
The Ingress works fine, but the annotations
nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE"
nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
nginx.ingress.kubernetes.io/session-cookie-max-age: "3600"
have no effect: no matter how I change the timeout value, it stays at 300s.
I also cannot find the session-affinity configuration in nginx.conf after deploying the nginx-ingress-controller, and the official Nginx documentation has no chapter describing how this annotation works.
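A quick way to verify what the controller actually emits is to inspect the Set-Cookie response header directly; host and path below are illustrative:
# Dump only the response headers and look at the affinity cookie.
curl -ksS -D - -o /dev/null https://domain.com/welcome | grep -i set-cookie
# When session-cookie-expires / session-cookie-max-age are applied, the
# INGRESSCOOKIE entry should carry matching Expires= and Max-Age= attributes.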
I hope someone can point me to material explaining how this works and why the timeout doesn't change.
Thanks

Rewrite with nginx-ingress controller

I am running Celery Flower on port 5555 inside Kubernetes with the nginx-ingress controller.
I want to do a rewrite where a request to /flower/(.*) goes to /$1, as described in their documentation:
https://flower.readthedocs.io/en/latest/config.html?highlight=nginx#url-prefix
server {
    listen 80;
    server_name example.com;

    location /flower/ {
        rewrite ^/flower/(.*)$ /$1 break;
        proxy_pass http://example.com:5555;
        proxy_set_header Host $host;
    }
}
I have come up with the following ingress.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: backend-airflow-ingress
  namespace: edna
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/use-regex: "true"
    ingress.kubernetes.io/rewrite-target: /$2
    # nginx.ingress.kubernetes.io/app-root: /flower
spec:
  rules:
  - host:
    http:
      paths:
      - path: /flower(/|$)(.*)
        backend:
          serviceName: airflow-flower-service
          servicePort: 5555
Inside the pod running Flower, this succeeds:
curl localhost:5555/dashboard
However, if I exec into the pod running the Nginx controller, it fails:
curl localhost/flower/dashboard
I get this response from Flower:
<div class="span12">
  <p>
    Error, page not found
  </p>
</div>
This is what I see inside nginx.conf in the nginx-controller pod:
server {
    server_name _ ;

    listen 80 default_server reuseport backlog=511 ;
    listen 443 default_server reuseport backlog=511 ssl http2 ;

    set $proxy_upstream_name "-";

    ssl_certificate_by_lua_block {
        certificate.call()
    }

    location ~* "^/flower(/|$)(.*)" {
        set $namespace      "edna";
        set $ingress_name   "backend-airflow-ingress";
        set $service_name   "";
        set $service_port   "";
        set $location_path  "/flower(/|${literal_dollar})(.*)";

        rewrite_by_lua_block {
            lua_ingress.rewrite({
                force_ssl_redirect = false,
                ssl_redirect = true,
                force_no_ssl_redirect = false,
                use_port_in_redirects = false,
            })
            balancer.rewrite()
            plugins.run()
        }

        # be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
        # will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
        # other authentication method such as basic auth or external auth useless - all requests will be allowed.
        #access_by_lua_block {
        #}

        header_filter_by_lua_block {
            lua_ingress.header()
            plugins.run()
        }

        body_filter_by_lua_block {
        }
OK, figured this out. The annotation
ingress.kubernetes.io/rewrite-target: /$2
should in my case be a different annotation:
nginx.ingress.kubernetes.io/rewrite-target: /$2
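With that fix applied, the ingress would look roughly like this (only the annotation prefix changes from the manifest above):
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: backend-airflow-ingress
  namespace: edna
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  rules:
  - host:
    http:
      paths:
      - path: /flower(/|$)(.*)
        backend:
          serviceName: airflow-flower-service
          servicePort: 5555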

Reverse proxy not routing to APIs using nginx and kubernetes

I am trying to get a reverse proxy working on kubernetes using nginx and a .net core API.
When I request http://localhost:9000/api/message I want something like the following to happen:
[Request] --> [nginx](localhost:9000) --> [.net API](internal port 9001)
but what appears to be happening is:
[Request] --> [nginx](localhost:9000)!
It fails because /usr/share/nginx/html/api/message is not found.
Obviously nginx is failing to route the request to the upstream servers. The same config works correctly when I run it under docker-compose, but it fails here in Kubernetes (running locally in Docker).
I am using the following configmap for nginx:
error_log /dev/stdout info;

events {
    worker_connections 2048;
}

http {
    access_log /dev/stdout;

    upstream web_tier {
        server webapi:9001;
    }

    server {
        listen 80;
        access_log /dev/stdout;

        location / {
            proxy_pass http://web_tier;
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host $server_name;
        }

        location /nginx_status {
            stub_status on;
            access_log off;
            allow all;
        }
    }
}
The load-balancer yaml is:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: load-balancer
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: load-balancer
    spec:
      containers:
      - args:
        - nginx
        - -g
        - daemon off;
        env:
        - name: NGINX_HOST
          value: example.com
        - name: NGINX_PORT
          value: "80"
        image: nginx:1.15.9
        name: iac-load-balancer
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /var/lib/nginx
          readOnly: true
          name: vol-config
        - mountPath: /tmp/share/nginx/html
          readOnly: true
          name: vol-html
      volumes:
      - name: vol-config
        configMap:
          name: load-balancer-configmap
          items:
          - key: nginx.conf
            path: nginx.conf
      - name: vol-html
        configMap:
          name: load-balancer-configmap
          items:
          - key: index.html
            path: index.html
status: {}
---
apiVersion: v1
kind: Service
metadata:
  name: load-balancer
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 9000
    targetPort: 80
  selector:
    app: load-balancer
status:
  loadBalancer: {}
Finally the error messages are:
2019/04/10 18:47:26 [error] 7#7: *1 open() "/usr/share/nginx/html/api/message" failed (2: No such file or directory), client: 192.168.65.3, server: localhost, request: "GET /api/message HTTP/1.1", host: "localhost:9000",
192.168.65.3 - - [10/Apr/2019:18:47:26 +0000] "GET /api/message HTTP/1.1" 404 555 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36" "-",
It seems like nginx is either not reading the config correctly for some reason, or it is failing to communicate with the webapi servers and is defaulting back to trying to serve static local content (nothing in the log indicates a comms issue, though).
Edit 1: I should have included that /nginx_status is also not routing correctly and fails with the same "/usr/share/nginx/html/nginx_status" not found error.
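One way to check which configuration nginx actually loaded is to dump it from inside the container; this is a hedged sketch using the deployment name from the manifests above:
# Print the full configuration nginx is running with.
kubectl exec deploy/load-balancer -- nginx -T | head -n 40
# If this shows the stock /etc/nginx/nginx.conf (whose default server root is
# /usr/share/nginx/html) rather than the ConfigMap content, the custom file is
# being mounted somewhere nginx never reads it, such as /var/lib/nginx.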
What I understand is that you are requesting an API, which returns a 404:
http://localhost:9000/api/message
I solved this issue by creating the backend services as NodePort, and I access the API from my Angular app.
Here is my configure.conf file, which replaces the original nginx configuration file:
server {
    listen 80;
    server_name localhost;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
    }
}

server {
    listen 5555;
    location / {
        proxy_pass http://login:5555;
    }
}

server {
    listen 5554;
    location / {
        proxy_pass http://dashboard:5554;
    }
}
Here I route external traffic arriving on ports 5554/5555 to the corresponding service [selector name].
Here login and dashboard are my services, both with type NodePort.
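As a minimal sketch of one of those services (the name and port follow the proxy_pass directives above; the selector label is illustrative):
apiVersion: v1
kind: Service
metadata:
  name: login              # must match the proxy_pass host above
spec:
  type: NodePort
  selector:
    app: login             # illustrative pod label
  ports:
  - port: 5555             # port targeted by proxy_pass http://login:5555
    targetPort: 5555       # container port of the login pod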
Here is my Dockerfile:
FROM nginx:1.11-alpine
COPY configure.conf /etc/nginx/conf.d/default.conf
COPY dockerpoc /usr/share/nginx/html
EXPOSE 80
EXPOSE 5555
EXPOSE 5554
CMD ["nginx", "-g", "daemon off;"]
I kept my frontend service's type as LoadBalancer, which exposes a public endpoint, and
I call my backend API from the frontend as:
http://loadbalancer-endpoint:5555/login
Hope this helps.
Can you share how you created the ConfigMap? Verify that the ConfigMap has a data entry named nginx.conf. It might be related to the readOnly flag; you could also try removing it, or change the mount path to /etc/nginx/ as stated in the Docker image documentation.
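A hedged sketch of that suggestion, assuming the ConfigMap is built from the files shown earlier (local file names are illustrative):
# Create the ConfigMap with the data keys the Deployment expects.
kubectl create configmap load-balancer-configmap \
  --from-file=nginx.conf=./nginx.conf \
  --from-file=index.html=./index.html

# In the Deployment, mount the file over the path nginx actually reads,
# instead of /var/lib/nginx:
#   volumeMounts:
#   - name: vol-config
#     mountPath: /etc/nginx/nginx.conf
#     subPath: nginx.conf
#     readOnly: true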

NGINX Container Not Loading Static Files using Traefik / Kubernetes

I am running the Traefik Ingress Controller on Kubernetes (AKS). I've successfully deployed my Django application using the Traefik Ingress but it's not currently loading any static files (and therefore the styling isn't working).
Static files are served from a custom NGINX container under /static. So if my domain name is xyz.com, static content is served from xyz.com/static.
apiVersion: v1
kind: Service
metadata:
  name: nginxstaticservice
  labels:
    app: nginxstatic
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: nginxstatic
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginxstatic-ingress
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.frontend.rule.type: PathPrefixStrip
    # traefik.ingress.kubernetes.io/frontend-entry-points: http,https
spec:
  rules:
  - host: xyz.com
    http:
      paths:
      - path: /static
        backend:
          serviceName: nginxstaticservice
          servicePort: http
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginxstatic-deployment
  labels:
    app: nginxstatic
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginxstatic
  template:
    metadata:
      labels:
        app: nginxstatic
    spec:
      containers:
      - name: nginxstatic
        image: nginxstatic:latest
        ports:
        - containerPort: 80
      imagePullSecrets:
This is the default.conf running on the NGINX container (this was previously working in a website configuration):
server {
    listen 80;
    server_name _;

    client_max_body_size 200M;
    set $cache_uri $request_uri;

    location = /favicon.ico { log_not_found off; access_log off; }
    location = /robots.txt  { log_not_found off; access_log off; }

    ignore_invalid_headers on;
    add_header Access-Control-Allow_Origin *;

    location /static {
        autoindex on;
        alias /static;
    }

    location /media {
        autoindex on;
        alias /media;
    }

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
}
Resolved in the comments: PathPrefixStrip was used incorrectly, which caused Nginx to see different paths than it expected.
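To illustrate the mismatch (a sketch assuming Traefik 1.x semantics): with PathPrefixStrip, a request for xyz.com/static/css/app.css reaches nginx as /css/app.css, so the location /static block never matches. Either switch the rule type to PathPrefix, or serve the stripped path in nginx, roughly:
# With PathPrefixStrip the /static prefix is removed by Traefik before the
# request reaches nginx, so serve files relative to the bare path.
location / {
    autoindex on;
    alias /static/;
}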

GKE gRPC Ingress Health Check with mTLS

I am trying to implement a gRPC service on GKE (v1.11.2-gke.18) with mutual TLS auth.
When not enforcing client auth, the HTTP2 health check that GKE automatically creates responds, and everything connects without issue.
When I turn on mutual auth, the health check fails - presumably because it cannot complete a connection since it lacks a client certificate and key.
As always, documentation is light and conflicting. I require a solution that is fully programmatic (i.e. no console tweaking), but I have not been able to find one, other than manually changing the health check to TCP.
From what I can see, I am guessing that I will need to either:
implement a custom mTLS health check that will prevent GKE automatically creating a HTTP2 check
find an alternative way to do SSL termination at the container that doesn't use the service.alpha.kubernetes.io/app-protocols: '{"grpc":"HTTP2"}' proprietary annotation
find some way to provide the health check with the credentials it needs
alter my Go implementation to somehow serve a health check without requiring mTLS, while enforcing mTLS on all other endpoints
Or perhaps there is something else that I have not considered? The config below works perfectly for REST and gRPC with TLS but breaks with mTLS.
service.yaml
apiVersion: v1
kind: Service
metadata:
  name: grpc-srv
  labels:
    type: grpc-srv
  annotations:
    service.alpha.kubernetes.io/app-protocols: '{"grpc":"HTTP2"}'
spec:
  type: NodePort
  ports:
  - name: grpc
    port: 9999
    protocol: TCP
    targetPort: 9999
  - name: http
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: myapp
ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: io-ingress
  annotations:
    kubernetes.io/ingress.global-static-ip-name: "grpc-ingress"
    kubernetes.io/ingress.allow-http: "true"
spec:
  tls:
  - secretName: io-grpc
  - secretName: io-api
  rules:
  - host: grpc.xxx.com
    http:
      paths:
      - path: /*
        backend:
          serviceName: grpc-srv
          servicePort: 9999
  - host: rest.xxx.com
    http:
      paths:
      - path: /*
        backend:
          serviceName: grpc-srv
          servicePort: 8080
It seems that there is currently no way to achieve this using the GKE L7 ingress, but I have been successful in deploying an NGINX Ingress Controller instead. Google has a decent tutorial on how to deploy one here.
This installs an L4 TCP load balancer with no health checks on the services, leaving NGINX to handle the L7 termination and routing. This gives you a lot more flexibility, but the devil is in the detail, and the detail isn't easy to come by. Most of what I found was learned by trawling through GitHub issues.
What I have managed to achieve is for NGINX to handle the TLS termination, and still pass through the certificate to the back end, so you can handle things such as user auth via the CN, or check the certificate serial against a CRL.
Below is my ingress file. The annotations are the minimum required to achieve mTLS authentication, and still have access to the certificate in the back end.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: grpc-ingress
  namespace: master
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/auth-tls-verify-client: "on"
    nginx.ingress.kubernetes.io/auth-tls-secret: "master/auth-tls-chain"
    nginx.ingress.kubernetes.io/auth-tls-verify-depth: "2"
    nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "GRPCS"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/grpc-backend: "true"
spec:
  tls:
  - hosts:
    - grpc.example.com
    secretName: auth-tls-chain
  rules:
  - host: grpc.example.com
    http:
      paths:
      - path: /grpc.AwesomeService
        backend:
          serviceName: awesome-srv
          servicePort: 9999
      - path: /grpc.FantasticService
        backend:
          serviceName: fantastic-srv
          servicePort: 9999
A few things to note:
The auth-tls-chain secret contains three files (a sketch of creating it follows these notes). ca.crt is the certificate chain and should include any intermediate certificates. tls.crt contains your server certificate, and tls.key contains your private key.
If this secret lives in a namespace different from the NGINX ingress, then you should give the namespace-qualified name (namespace/name) in the annotation.
My verify-depth is 2, but that is because I am using intermediate certificates. If you are using self-signed certificates, then you will only need a depth of 1.
backend-protocol: "GRPCS" is required to prevent NGINX terminating the TLS. If you want to have NGINX terminate the TLS and run your services without encryption, use GRPC as the protocol.
grpc-backend: "true" is required to let NGINX know to use HTTP2 for the backend requests.
You can list multiple paths and direct to multiple services. Unlike with the GKE ingress, these paths should not have a forward slash or asterisk suffix.
The best part is that if you have multiple namespaces, or if you are running a REST service as well (e.g. gRPC Gateway), NGINX will reuse the same load balancer. This provides some savings over the GKE ingress, which would use a separate LB for each ingress.
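As a hedged sketch, the auth-tls-chain secret described above could be created along these lines (certificate file paths are illustrative):
kubectl create secret generic auth-tls-chain \
  --namespace master \
  --from-file=ca.crt=./ca-chain.crt \
  --from-file=tls.crt=./server.crt \
  --from-file=tls.key=./server.key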
The above is from the master namespace and below is a REST ingress from the staging namespace.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  namespace: staging
  annotations:
    kubernetes.io/ingress.class: nginx
    kubernetes.io/tls-acme: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  tls:
  - hosts:
    - api-stage.example.com
    secretName: letsencrypt-staging
  rules:
  - host: api-stage.example.com
    http:
      paths:
      - path: /awesome
        backend:
          serviceName: awesom-srv
          servicePort: 8080
      - path: /fantastic
        backend:
          serviceName: fantastic-srv
          servicePort: 8080
For HTTP, I am using LetsEncrypt, but there's plenty of information available on how to set that up.
If you exec into the ingress-nginx pod, you will be able to see how NGINX has been configured:
...
server {
    server_name grpc.example.com ;

    listen 80;

    set $proxy_upstream_name "-";
    set $pass_access_scheme $scheme;
    set $pass_server_port $server_port;
    set $best_http_host $http_host;
    set $pass_port $pass_server_port;

    listen 442 proxy_protocol ssl http2;

    # PEM sha: 142600b0866df5ed9b8a363294b5fd2490c8619d
    ssl_certificate /etc/ingress-controller/ssl/default-fake-certificate.pem;
    ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;

    ssl_certificate_by_lua_block {
        certificate.call()
    }

    # PEM sha: 142600b0866df5ed9b8a363294b5fd2490c8619d
    ssl_client_certificate /etc/ingress-controller/ssl/master-auth-tls-chain.pem;
    ssl_verify_client on;
    ssl_verify_depth 2;

    error_page 495 496 = https://help.example.com/auth;

    location /grpc.AwesomeService {
        set $namespace      "master";
        set $ingress_name   "grpc-ingress";
        set $service_name   "awesome-srv";
        set $service_port   "9999";
        set $location_path  "/grpc.AwesomeServices";

        rewrite_by_lua_block {
            lua_ingress.rewrite({
                force_ssl_redirect = true,
                use_port_in_redirects = false,
            })
            balancer.rewrite()
            plugins.run()
        }

        header_filter_by_lua_block {
            plugins.run()
        }

        body_filter_by_lua_block {
        }

        log_by_lua_block {
            balancer.log()
            monitor.call()
            plugins.run()
        }

        if ($scheme = https) {
            more_set_headers "Strict-Transport-Security: max-age=15724800; includeSubDomains";
        }

        port_in_redirect off;

        set $proxy_upstream_name "master-analytics-srv-9999";
        set $proxy_host          $proxy_upstream_name;

        client_max_body_size 1m;

        grpc_set_header Host $best_http_host;

        # Pass the extracted client certificate to the backend
        grpc_set_header ssl-client-cert $ssl_client_escaped_cert;
        grpc_set_header ssl-client-verify $ssl_client_verify;
        grpc_set_header ssl-client-subject-dn $ssl_client_s_dn;
        grpc_set_header ssl-client-issuer-dn $ssl_client_i_dn;

        # Allow websocket connections
        grpc_set_header Upgrade $http_upgrade;
        grpc_set_header Connection $connection_upgrade;

        grpc_set_header X-Request-ID $req_id;
        grpc_set_header X-Real-IP $the_real_ip;
        grpc_set_header X-Forwarded-For $the_real_ip;
        grpc_set_header X-Forwarded-Host $best_http_host;
        grpc_set_header X-Forwarded-Port $pass_port;
        grpc_set_header X-Forwarded-Proto $pass_access_scheme;
        grpc_set_header X-Original-URI $request_uri;
        grpc_set_header X-Scheme $pass_access_scheme;

        # Pass the original X-Forwarded-For
        grpc_set_header X-Original-Forwarded-For $http_x_forwarded_for;

        # mitigate HTTPoxy Vulnerability
        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
        grpc_set_header Proxy "";

        # Custom headers to proxied server
        proxy_connect_timeout 5s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 4k;
        proxy_request_buffering on;
        proxy_http_version 1.1;
        proxy_cookie_domain off;
        proxy_cookie_path off;

        # In case of errors try the next upstream server before returning an error
        proxy_next_upstream error timeout;
        proxy_next_upstream_tries 3;

        grpc_pass grpcs://upstream_balancer;
        proxy_redirect off;
    }

    location /grpc.FantasticService {
        set $namespace      "master";
        set $ingress_name   "grpc-ingress";
        set $service_name   "fantastic-srv";
        set $service_port   "9999";
        set $location_path  "/grpc.FantasticService";
...
This is just an extract of the generated nginx.conf. But you should be able to see how a single configuration could handle multiple services across multiple namespaces.
The last piece is a Go snippet showing how we get hold of the certificate via the context. As you can see from the config above, NGINX adds the authenticated certificate and other details to the gRPC metadata.
meta, ok := metadata.FromIncomingContext(*ctx)
if !ok {
    return status.Error(codes.Unauthenticated, "missing metadata")
}
// Check if SSL has been handled upstream
if len(meta.Get("ssl-client-verify")) == 1 && meta.Get("ssl-client-verify")[0] == "SUCCESS" {
    if len(meta.Get("ssl-client-cert")) > 0 {
        certPEM, err := url.QueryUnescape(meta.Get("ssl-client-cert")[0])
        if err != nil {
            return status.Errorf(codes.Unauthenticated, "bad or corrupt certificate")
        }
        block, _ := pem.Decode([]byte(certPEM))
        if block == nil {
            return status.Error(codes.Unauthenticated, "failed to parse certificate PEM")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            return status.Error(codes.Unauthenticated, "failed to parse certificate PEM")
        }
        return authUserFromCertificate(ctx, cert)
    }
}
// If we fall through to here, try to authenticate via the peer object for gRPCS,
// or via a JWT in the metadata for gRPC Gateway.
HTTP/2 and gRPC support on the GKE ingress is not available yet; please see the documented limitation. There is already a feature request in the works to address the issue.
