Terraform not starting the Nginx server on AWS

I have the Terraform file below, which is supposed to start an Nginx server. Terraform installs Nginx, but the Nginx service does not start; I have to log in to the EC2 machine manually and start it. Where am I going wrong?
terraform {
  required_version = ">=0.12, <0.13"
}

provider "aws" {
  region = "ap-south-1"
}

resource "aws_instance" "ec2-instance" {
  ami                    = "ami-04b2519c83e2a7ea5"
  instance_type          = "t2.micro"
  vpc_security_group_ids = [aws_security_group.web_security.id]
  key_name               = "kops-keypair"

  user_data = <<-EOF
    #!/bin/bash
    sudo yum update -y
    sudo yum install nginx -y
    sudo service start nginx
  EOF

  tags = {
    Name         = "nginx-instance",
    created-date = "22-04-2020"
  }
}
resource "aws_security_group" "web_security" {
name = "web-security"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

My bad. The command should be sudo service nginx start, not sudo service start nginx.
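For reference, a corrected user_data block would look like this (a minimal sketch; only the last command changes, the rest of the resource stays as above):

  user_data = <<-EOF
    #!/bin/bash
    sudo yum update -y
    sudo yum install nginx -y
    # "service" expects the service name before the action
    sudo service nginx start
  EOF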

Related

Dovecot IMAPS: port 993 telnet gives no IMAP handshake message but port 143 does

I am trying to set up Postfix/Dovecot on Ubuntu 20.x.
After the Dovecot configuration I can connect (via telnet, and from my iPhone using the Gmail client) to port 143, get the IMAP handshake message, log in, etc.
But I get an authentication failed error when connecting to port 993 (from the iPhone).
I then tried telnet 'mymailserver' 993 and get no IMAP handshake message.
Moreover, when I do openssl s_client -connect 'mailserver':993, I get the handshake message as expected.
Can someone help?
Here is my dovecot -n:
root@mail:/usr/share/dovecot/protocols.d# dovecot -n
# 2.3.7.2 (3c910f64b): /etc/dovecot/dovecot.conf
# Pigeonhole version 0.5.7.2 ()
# OS: Linux 5.4.0-131-generic x86_64 Ubuntu 20.04.5 LTS
# Hostname: ****************
auth_mechanisms = plain login
auth_username_format = %n
mail_location = mbox:~/mail:INBOX=/var/mail/%u
mail_privileged_group = mail
namespace inbox {
  inbox = yes
  location =
  mailbox Drafts {
    special_use = \Drafts
  }
  mailbox Junk {
    special_use = \Junk
  }
  mailbox Sent {
    special_use = \Sent
  }
  mailbox "Sent Messages" {
    special_use = \Sent
  }
  mailbox Trash {
    special_use = \Trash
  }
  prefix =
}
passdb {
  driver = pam
}
protocols = " imap lmtp pop3"
service auth {
  unix_listener /var/spool/postfix/private/auth {
    group = postfix
    mode = 0660
    user = postfix
  }
}
service imap-login {
  inet_listener imap {
    port = 143
  }
  inet_listener imaps {
    port = 993
    ssl = yes
  }
}
service pop3-login {
  inet_listener pop3 {
    port = 110
  }
  inet_listener pop3s {
    port = 995
    ssl = yes
  }
}
service submission-login {
  inet_listener submission {
    port = 587
  }
}
ssl = required
ssl_cert = </etc/..../fullchain.pem
ssl_client_ca_dir = /etc/ssl/certs
ssl_dh = # hidden, use -P to show it
ssl_key = # hidden, use -P to show it
userdb {
  driver = passwd
}

Kubernetes Nginx ingress - failed to ensure load balancer: could not find any suitable subnets for creating the ELB

I would like to deploy a minimal k8s cluster on AWS with Terraform and install an Nginx Ingress Controller with Helm.
The Terraform code:
provider "aws" {
region = "us-east-1"
}
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
variable "cluster_name" {
default = "my-cluster"
}
variable "instance_type" {
default = "t2.large"
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
load_config_file = false
version = "~> 1.11"
}
data "aws_availability_zones" "available" {
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.0.0"
name = "k8s-${var.cluster_name}-vpc"
cidr = "172.16.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
public_subnets = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "12.2.0"
cluster_name = "eks-${var.cluster_name}"
cluster_version = "1.18"
subnets = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
worker_groups = [
{
name = "worker-group-1"
instance_type = "t3.small"
additional_userdata = "echo foo bar"
asg_desired_capacity = 2
},
{
name = "worker-group-2"
instance_type = "t3.small"
additional_userdata = "echo foo bar"
asg_desired_capacity = 1
},
]
write_kubeconfig = true
config_output_path = "./"
workers_additional_policies = [aws_iam_policy.worker_policy.arn]
}
resource "aws_iam_policy" "worker_policy" {
name = "worker-policy-${var.cluster_name}"
description = "Worker policy for the ALB Ingress"
policy = file("iam-policy.json")
}
The installation performs correctly:
helm install my-release nginx-stable/nginx-ingress
NAME: my-release
LAST DEPLOYED: Sat Jun 26 22:17:28 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
The NGINX Ingress Controller has been installed.
The kubectl describe service my-release-nginx-ingress returns:
Error syncing load balancer: failed to ensure load balancer: could not find any suitable subnets for creating the ELB
The VPC is created and the public subnets seem to be correctly tagged; what is missing to make the Ingress aware of the public subnets?
In the eks module you are prefixing the cluster name with eks-:
cluster_name = "eks-${var.cluster_name}"
However, you do not use that prefix in your subnet tags:
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
Drop the prefix from cluster_name and add it to the cluster name variable (assuming you want the prefix at all). Alternatively, you could add the prefix to your tags to fix the issue, but that approach makes it easier to introduce inconsistencies.
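A minimal sketch of the consistent version (assuming you want to keep the eks- prefix, it moves into the variable so the module and the subnet tags agree):

variable "cluster_name" {
  default = "eks-my-cluster"
}

module "eks" {
  # ...other arguments unchanged...
  cluster_name = var.cluster_name
}

# The subnet tags in the vpc module then match the actual cluster name:
#   "kubernetes.io/cluster/${var.cluster_name}" = "shared"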

Nginx ingress rewrite target breaks link to .NET Core API

I have a setup with multiple services run via an Ingress / Terraform / Kubernetes setup. Currently, all my Vue.js applications are served via NGINX. However, when I added the line "nginx.ingress.kubernetes.io/rewrite-target": "/$1" to make my other routes such as /frontend and /backend work, it broke my API and I can no longer access it via /api/.
Terraform setup:
resource "kubernetes_ingress" "ingress" {
metadata {
name = "ingress"
namespace = var.namespace_name
annotations = {
"nginx.ingress.kubernetes.io/force-ssl-redirect" = true
"nginx.ingress.kubernetes.io/from-to-www-redirect" = true
"nginx.ingress.kubernetes.io/ssl-redirect": true
"nginx.ingress.kubernetes.io/add-base-url": false
"nginx.ingress.kubernetes.io/rewrite-target": "/$1"
"kubernetes.io/ingress.class": "nginx"
"ncp/use-regex": true
}
}
spec {
tls {
hosts = [var.domain_name, "*.${var.domain_name}"]
secret_name = "tls-secret"
}
rule {
host = var.domain_name
http {
path {
path = "/(.*)"
backend {
service_name = "frontend"
service_port = 80
}
}
path {
path = "/api(.*)"
backend {
service_name = "api"
service_port = 80
}
}
path {
path = "/backend(.*)"
backend {
service_name = "backend"
service_port = 80
}
}
path {
path = "/payment(.*)"
backend {
service_name = "payment"
service_port = 80
}
}
}
}
}
wait_for_load_balancer = true
}
My Startup.cs config
public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
{
    if (env.IsDevelopment())
    {
        app.UseDeveloperExceptionPage();
    }

    // Enable middleware to serve generated Swagger as a JSON endpoint.
    app.UseSwagger();

    // Enable middleware to serve swagger-ui (HTML, JS, CSS, etc.),
    // specifying the Swagger JSON endpoint.
    app.UseSwaggerUI(c =>
    {
        c.SwaggerEndpoint("/swagger/v1/swagger.json", "OLC API V1");
    });

    app.UseCors(AllowSpecificOrigins);
    app.UseRouting();
    app.UseEndpoints(endpoints =>
    {
        endpoints.MapControllers();
    });
}
The Terraform setup seems to work for all my Vue.js containers; the only problem is that the API is no longer accessible, not even via /api/api as in this question.
I've tried rewriting only specific routes and changing the app so that it serves on a different route, but I still get a 404 error, even when creating a controller that responds to the / route. When I remove the rewrite-target line the API works, but the Vue.js containers do not.
It turns out that this is simply a regex issue caused by me not setting the paths correctly. I changed the following:
path {
  path = "/api(.*)"
  backend {
    service_name = "api"
    service_port = 80
  }
}

into:

path {
  path = "/(api.*)"
  backend {
    service_name = "olc-api"
    service_port = 80
  }
}
With this, /api is matched to my .NET Core app instead of the ingress trying to find the URL within the Vue.js container(s).
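To spell out why this works (a sketch, assuming the rewrite-target annotation stays at "/$1"): the rewritten path is whatever the first capture group matched, so moving the api prefix inside the group preserves it.

# rewrite-target "/$1" replaces the request path with the first capture group
"nginx.ingress.kubernetes.io/rewrite-target" = "/$1"

# old: path = "/api(.*)" -> for /api/values the group captures "/values",
#      so the /api prefix is stripped before the request reaches the backend
# new: path = "/(api.*)" -> for /api/values the group captures "api/values",
#      so the backend still sees /api/values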

How to create an HTTP and HTTPS server in Node.js?

This is the code I used to create the HTTP server.
var app = require('http').createServer(require('express')),
    io = require('socket.io').listen(app),
    util = require('util'),
    connectionsArray = [], // maintain active connected client details
    connectionStatistics = {'summary': {'instance_count': 0, 'user_count': 0, 'customer_count': 0}, 'customers': {}}, // for debugging purpose
    server_port = 3000, // port on which nodejs engine to run
    POLLING_INTERVAL = 10 * 1000, // 10 sec
    pollingTimer = [], // timeouts for connected sockets
    fs = require('fs'), // lib for file related operations
    log_file = {
        'error': fs.createWriteStream(__dirname + '/debug.log', {flags: 'a'}), // file to log error messages
        'info': fs.createWriteStream(__dirname + '/info.log', {flags: 'a'}) // file to log info messages
    };

var server = app.listen(server_port, function () {
    var host = server.address().address;
    var port = server.address().port;
    console.log('Please use your browser to navigate to http://%s:%s', host, port);
});
I want to add an HTTPS connection to the above code.
I tried to set up HTTPS using SSLCertificateFile and SSLCertificateKeyFile, but it didn't work for me.
The https module is built into Node.js (no separate install is needed). Require it and change the options (SSL file paths) as below:
const options = {
    key: fs.readFileSync('./ssl/private.key'),
    cert: fs.readFileSync('./ssl/certificate.crt'),
    ca: fs.readFileSync('./ssl/ca_bundle.crt')
};

https.createServer(options, app).listen(port);
Try this snippet using Express and the https module instead of http:
let fs = require('fs');
let https = require('https');
let express = require('express');
let app = express();

let options = {
    key: fs.readFileSync('./file.pem'),
    cert: fs.readFileSync('./file.crt')
};

let serverPort = 3000;
let server = https.createServer(options, app);
let io = require('socket.io')(server);

io.on('connection', function(socket) {
    console.log('new connection');
});

server.listen(serverPort, function() {
    console.log('server up and running at %s port', serverPort);
});

Nomad: Health Check

I need help. I have this:
service {
  name = "nginx"
  tags = [ "nginx", "web", "urlprefix-/nginx" ]
  port = "http"

  check {
    type     = "tcp"
    interval = "10s"
    timeout  = "2s"
  }
}
How can I add a health check for a specific URI that passes if it returns a 200 response, like localhost:8080/test/index.html?
It is as simple as adding another check configured as an http check, like so, where /health is served by your Nginx on its published port:
service {
  name = "nginx"
  tags = [ "nginx", "web", "urlprefix-/nginx" ]
  port = "http"

  check {
    type     = "tcp"
    interval = "10s"
    timeout  = "2s"
  }

  check {
    type     = "http"
    path     = "/health"
    interval = "10s"
    timeout  = "2s"
  }
}
You can see a full example here:
https://www.nomadproject.io/docs/job-specification/index.html
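For the exact URI from the question, the http check can point at that path directly (a minimal sketch, assuming the service's "http" port is the one exposed as 8080):

check {
  type     = "http"
  path     = "/test/index.html"  # passes when Nginx returns a 2xx (e.g. 200) for this path
  interval = "10s"
  timeout  = "2s"
}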