Can anybody provide a complete example of how to run an insecure (without TLS) nginx ingress controller and Ingress resource, so that I can get remote access to services running inside a Kubernetes cluster? I have not found anything useful.
PS: my Kubernetes cluster is running on bare metal, not on a cloud provider.
The following may be useful information about what I did:
$kubectl get svc
NAME                CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
attachmentservice   10.254.111.232   <none>        80/TCP     3d
financeservice      10.254.38.228    <none>        80/TCP     3d
gatewayservice      10.254.38.182    nodes         80/TCP     3d
hrservice           10.254.61.196    <none>        80/TCP     3d
kubernetes          10.254.0.1       <none>        443/TCP    31d
messageservice      10.254.149.125   <none>        80/TCP     3d
redis-service       10.254.201.241   <none>        6379/TCP   15d
settingservice      10.254.157.155   <none>        80/TCP     3d
trainingservice     10.254.166.92    <none>        80/TCP     3d
nginx-ingress-rc.yml
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-ingress-rc
  labels:
    app: nginx-ingress
spec:
  replicas: 1
  selector:
    app: nginx-ingress
  template:
    metadata:
      labels:
        app: nginx-ingress
    spec:
      containers:
      - image: nginxdemos/nginx-ingress:0.6.0
        imagePullPolicy: Always
        name: nginx-ingress
        ports:
        - containerPort: 80
          hostPort: 80
services-ingress.yml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: services-ingress
spec:
  rules:
  - host: ctc-cicd2
    http:
      paths:
      - path: /gateway
        backend:
          serviceName: gatewayservice
          servicePort: 80
      - path: /training
        backend:
          serviceName: trainingservice
          servicePort: 80
      - path: /attachment
        backend:
          serviceName: attachmentservice
          servicePort: 80
      - path: /hr
        backend:
          serviceName: hrservice
          servicePort: 80
      - path: /message
        backend:
          serviceName: messageservice
          servicePort: 80
      - path: /settings
        backend:
          serviceName: settingservice
          servicePort: 80
      - path: /finance
        backend:
          serviceName: financeservice
          servicePort: 80
The newly generated nginx.conf content:
upstream default-services-ingress-ctc-cicd2-trainingservice {
    server 12.16.64.5:8190;
    server 12.16.65.6:8190;
}
upstream default-services-ingress-ctc-cicd2-attachmentservice {
    server 12.16.64.2:8095;
}
upstream default-services-ingress-ctc-cicd2-hrservice {
    server 12.16.64.7:8077;
}
upstream default-services-ingress-ctc-cicd2-messageservice {
    server 12.16.64.9:8065;
}
upstream default-services-ingress-ctc-cicd2-settingservice {
    server 12.16.64.10:8098;
    server 12.16.65.4:8098;
}
upstream default-services-ingress-ctc-cicd2-financeservice {
    server 12.16.64.4:8092;
}
upstream default-services-ingress-ctc-cicd2-gatewayservice {
    server 12.16.64.6:8090;
    server 12.16.65.7:8090;
}
server {
    listen 80;
    server_name ctc-cicd2;

    location /gateway {
        proxy_http_version 1.1;
        proxy_connect_timeout 60s;
        proxy_read_timeout 60s;
        client_max_body_size 1m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_pass http://default-services-ingress-ctc-cicd2-gatewayservice;
    }
    location /training {
        proxy_http_version 1.1;
        proxy_connect_timeout 60s;
        proxy_read_timeout 60s;
        client_max_body_size 1m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_pass http://default-services-ingress-ctc-cicd2-trainingservice;
    }
    location /attachment {
        proxy_http_version 1.1;
        proxy_connect_timeout 60s;
        proxy_read_timeout 60s;
        client_max_body_size 1m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_pass http://default-services-ingress-ctc-cicd2-attachmentservice;
    }
    location /hr {
        proxy_http_version 1.1;
        proxy_connect_timeout 60s;
        proxy_read_timeout 60s;
        client_max_body_size 1m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_pass http://default-services-ingress-ctc-cicd2-hrservice;
    }
    location /message {
        proxy_http_version 1.1;
        proxy_connect_timeout 60s;
        proxy_read_timeout 60s;
        client_max_body_size 1m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_pass http://default-services-ingress-ctc-cicd2-messageservice;
    }
    location /settings {
        proxy_http_version 1.1;
        proxy_connect_timeout 60s;
        proxy_read_timeout 60s;
        client_max_body_size 1m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_pass http://default-services-ingress-ctc-cicd2-settingservice;
    }
    location /finance {
        proxy_http_version 1.1;
        proxy_connect_timeout 60s;
        proxy_read_timeout 60s;
        client_max_body_size 1m;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_buffering on;
        proxy_pass http://default-services-ingress-ctc-cicd2-financeservice;
    }
}
According to the Kubernetes Ingress documentation, an Ingress is a collection of rules that allow inbound connections to reach the cluster services. This, of course, requires that you have an ingress controller deployed in your cluster. While there are many ways to implement an ingress controller, a simple one that will help you understand the concept can be found here. It is written in Go and basically listens to the kube API for new Ingress resources. When a new Ingress resource comes in, it regenerates the nginx conf from that resource and reloads the nginx container that makes up your ingress controller:
const (
	nginxConf = `
events {
  worker_connections 1024;
}
http {
  # http://nginx.org/en/docs/http/ngx_http_core_module.html
  types_hash_max_size 2048;
  server_names_hash_max_size 512;
  server_names_hash_bucket_size 64;
  {{range $ing := .Items}}
  {{range $rule := $ing.Spec.Rules}}
  server {
    listen 80;
    server_name {{$rule.Host}};
    {{ range $path := $rule.HTTP.Paths }}
    location {{$path.Path}} {
      proxy_set_header Host $host;
      proxy_pass http://{{$path.Backend.ServiceName}}.{{$ing.Namespace}}.svc.cluster.local:{{$path.Backend.ServicePort}};
    }{{end}}
  }{{end}}{{end}}
}`
)
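For illustration (assuming a hypothetical Ingress in the default namespace with host foo.example.com and a single path /app backed by a service named foo on port 80), the template above would render a server block along these lines:
server {
    listen 80;
    server_name foo.example.com;

    location /app {
        proxy_set_header Host $host;
        proxy_pass http://foo.default.svc.cluster.local:80;
    }
}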
This gives you a single entry point into your cluster that proxies traffic to all of the services inside your Kubernetes cluster.
Say you have a service named foo inside the namespace bar. Kube-DNS lets us reach that service from inside the Kubernetes cluster at the DNS name foo.bar.svc.cluster.local. This is essentially what Ingress does for us: we specify the path we want to use to reach the service, and the ingress controller proxies that path to the service foo in your cluster.
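As a quick sanity check (a sketch only, using a throwaway busybox pod; substitute your own service and namespace, for example trainingservice in default), you can verify that Kube-DNS resolves such a name from inside the cluster:
$kubectl run dns-test --image=busybox --rm -it --restart=Never -- nslookup trainingservice.default.svc.cluster.local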
Related
I have a script that deploys nginx configuration to some reverse proxies. My nginx configuration looks like this:
server {
    listen 443 ssl;
    server_name SOME_NAME;
    ssl_certificate MYCERT.crt;
    ssl_certificate_key MYKEY.key;

    location /health-check {
        add_header Content-Type text/plain;
        return 200 'healthy\n';
    }
    location /prod/ {
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_pass https://${PROD_URL}/;
        proxy_read_timeout 90;
    }
    location /integration/ {
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_pass https://${INTEGRATION_URL}/;
        proxy_read_timeout 90;
    }
    location /dev/ {
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_pass https://${DEV_URL}/;
        proxy_read_timeout 90;
    }
}
My problem is that my requests are passed as follows:
10.211.194.44 - - [08/Mar/2022:11:08:42 +0000] "GET /devSometihing/test HTTP/1.1" 404 169 "-" "curl/7.76.1" "10.211.194.71" "-"
What I'd like is for anything that goes to the /dev location to be proxy-passed as /whateverIsPassedHere instead of /devwhateverIsPassedHere.
How can I manipulate the incoming request so that only whatever comes after /dev/, /prod/, or /integration/ is passed to the backend servers?
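One common approach (a sketch only, with backend.example.com standing in for the real upstream) relies on the fact that when proxy_pass is given a URI part, nginx replaces the matched location prefix with that URI before forwarding; an explicit rewrite achieves the same effect:
location /dev/ {
    # /dev/foo/bar is forwarded upstream as /foo/bar
    proxy_pass https://backend.example.com/;
}

# equivalent, using an explicit rewrite
location /prod/ {
    rewrite ^/prod/(.*)$ /$1 break;
    proxy_pass https://backend.example.com;
}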
I have two Node.js apps that I want to put behind nginx.
I can access application 1, which has "/" as its base, and its pages live under /be/. But when I try to go to application 2, I am automatically sent back to "/".
Summary:
App1:
  base: "/"
  url: localhost:3003
  the application pages can be found under /be/
App2:
  base: "/be/"
  url: localhost:3000/be/login
  the application pages can also be found under /be/
Here is my nginx config:
server {
    listen 80;
    server_name 192.168.1.64;
    access_log /var/log/nginx/portalerr.logs;
    error_log /var/log/nginx/portalaccess.logs;

    location / {
        proxy_pass http://192.168.1.64:3003/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        proxy_cache_bypass $http_upgrade;
    }
    location /login/ {
        proxy_pass http://192.168.1.64:3000/be/login;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        rewrite /be/login(.*) /$1 break;
        proxy_cache_bypass $http_upgrade;
    }
}
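A minimal sketch of one possible layout (an assumption on my part: App2 really does serve everything under /be/ on port 3000) is to match the /be/ prefix directly and pass the URI through unchanged, i.e. use proxy_pass without a URI part so /be/login reaches the app as /be/login:
location /be/ {
    proxy_pass http://192.168.1.64:3000;
    proxy_http_version 1.1;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}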
I have a VPS running several services, with Nginx reverse proxying between them.
I want to run Poste.io on this server, but I get a Bad Gateway error.
The Nginx config file looks like this:
server {
    listen 443 ssl;
    server_name mail.domain.com;

    proxy_buffering off;
    proxy_http_version 1.1;
    proxy_cache_bypass $http_upgrade;
    proxy_set_header Host $host;
    proxy_set_header Connection "upgrade";
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-Server $host;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Forwarded-Port $server_port;
    proxy_set_header X-Forwarded-Host $host:$server_port;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    location / {
        proxy_pass https://127.0.0.1:1443/;
        # proxy_redirect default;
    }

    ssl_certificate /etc/letsencrypt/live/mail.domain.com/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/mail.domain.com/privkey.pem; # managed by Certbot
}
The docker-compose file looks like this:
version: "3"
services:
  poste.io:
    image: analogic/poste.io:latest
    hostname: mail.domain.com
    environment:
      - VIRTUAL_HOST=mail.domain.com
      - HTTPS=OFF
    ports:
      - "1080:80"   # HTTP
      - "1443:443"  # HTTPS
Replace the proxy_pass directive with proxy_pass http://127.0.0.1:1080/;
Since the container is started with HTTPS=OFF, it only serves plain HTTP, so nginx has to proxy to the published HTTP port (1080) rather than the HTTPS port (1443).
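Applied to the location block from the config above, the relevant part would look roughly like this:
location / {
    # the container runs with HTTPS=OFF, so talk to its HTTP port
    proxy_pass http://127.0.0.1:1080/;
}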
I want to print the request_body and response_body from NGINX.
I tried to implement a few of the solutions that I learned from here, but they did not work in my case. Are there additional changes I need to make in my NGINX.conf file?
Here is my conf file:
worker_processes 4;

events { worker_connections 1024; }

http {
    sendfile on;

    upstream consumer-portal {
        server xx.xx.xx.xx:9006;
    }
    upstream download-zip-service {
        server xx.xx.xx.xx:9012;
    }

    server {
        listen 8765;

        location / {
            proxy_pass http://download-zip-service/;
            proxy_redirect off;
            # proxy_set_header Host $host;
            # proxy_set_header X-Real-IP $remote_addr;
            # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            # proxy_set_header X-Forwarded-Host $server_name;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";
            #socket timeout setting added
            fastcgi_read_timeout 7200s;
            send_timeout 7200s;
            proxy_connect_timeout 7200s;
            proxy_send_timeout 7200s;
            proxy_read_timeout 7200s;
            #new property added
            proxy_request_buffering off;
            proxy_buffering off;
        }
        location /consumer-portal/ {
            proxy_pass http://consumer-portal/;
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host $server_name;
        }
    }
}
Below is the docker-compose.yml
version: '3'
services:
  nginx:
    restart: always
    build: ../../conf/sandbox/
    volumes:
      - ./mysite.template:/etc/nginx/conf.d/mysite.template
    ports:
      - "8765:8765"
    networks:
      - cloud
networks:
  cloud:
    driver: bridge
Please let me know what changes I need to make. Thanks in advance.
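For the request body, a common approach (sketched here; the log path and the format name bodylog are placeholders) is to log the $request_body variable with a custom log_format. That variable is only populated once nginx has read the body, which happens when the request is proxied upstream. The response body is not exposed through stock nginx variables, so logging it normally requires an extra module such as Lua/OpenResty.
http {
    # custom format that includes the (proxied) request body
    log_format bodylog '$remote_addr [$time_local] "$request" '
                       'request_body="$request_body"';

    upstream consumer-portal {
        server xx.xx.xx.xx:9006;
    }

    server {
        listen 8765;

        location /consumer-portal/ {
            access_log /var/log/nginx/body.log bodylog;
            proxy_pass http://consumer-portal/;
        }
    }
}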
http {
    server {
        listen 80;

        location / {
            proxy_pass http://127.0.0.1:3000;
            proxy_http_version 1.1;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $remote_addr;
            proxy_set_header Host $host;
            proxy_set_header X-Real-Port $server_port;
            proxy_set_header X-Real-Scheme $scheme;
        }
    }
}
I have a NodeJS/Express app listening on port 3000. How do I redirect requests from <ip-address>:3000 to <ip-address>:80?
The above configuration did not work: I get "404 Not Found, nginx/1.10.3 (Ubuntu)" when I go to port 80.
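One common cause of the stock 404 page is that the packaged default site is still the server answering on port 80. A minimal sketch of a standalone site file (server block only, since the main nginx.conf already supplies the http block; the file path is just an example) might look like this:
# /etc/nginx/sites-available/node-app (symlink into sites-enabled and remove the default site)
server {
    listen 80 default_server;
    server_name _;

    location / {
        # forward everything on port 80 to the Express app on port 3000
        proxy_pass http://127.0.0.1:3000;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
}
After enabling it, test the configuration with nginx -t and reload nginx.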