I created these resources with Terraform 0.12.6. However, without making any changes to the code, running "terraform apply" wants to destroy the existing EC2 instances and rebuild them. I would like to know why it does this and what is incorrect below.
resource "aws_instance" "web_ui" {
count = 2
ami = data.aws_ami.ami.id
instance_type = var.type_m5lg
associate_public_ip_address = false
key_name = var.key_name
security_groups = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]
subnet_id = (data.aws_subnet.subnetid)[count.index].id
root_block_device {
delete_on_termination = true
}
ebs_block_device {
device_name = "/dev/sdb"
volume_size = "200"
volume_type = "gp2"
delete_on_termination = true
}
tags = "${merge(
local.common_tags,
map(
"Name", "${var.name}-${var.prog}-${var.env}${count.index + 1}-${var.ec2_name_web}-use1.xyz.com"
)
)}"
}
Changing the
security_groups = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]
to
vpc_security_group_ids = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]
fixed the issue. On aws_instance, security_groups is intended for EC2-Classic/default-VPC security group names and forces the instance to be replaced when it changes, whereas vpc_security_group_ids takes security group IDs and is updated in place.
Based on the comment from @stack72 in hashicorp/terraform#7853.
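For reference, a corrected version of the resource might look like the sketch below, based on the configuration above; only the security group argument changes, everything else stays as it was.

resource "aws_instance" "web_ui" {
  count = 2

  ami                         = data.aws_ami.ami.id
  instance_type               = var.type_m5lg
  associate_public_ip_address = false
  key_name                    = var.key_name

  # Security group IDs go here; this argument can be updated without replacing the instance
  vpc_security_group_ids = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]

  subnet_id = data.aws_subnet.subnetid[count.index].id

  # root_block_device, ebs_block_device and tags unchanged from the configuration above
  # ...
}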
Sorry, I am a beginner at Terraform and have found some useful modules.
I need to create a single-instance Aurora cluster for non-production, and I need to shut it down after business hours.
How can I create an Aurora cluster and schedule it to run only during business hours?
I can't get the scheduler to connect to the RDS cluster.
provider "aws" {
region = local.region
}
locals {
name = "example-aurora"
region = "us-east-1"
tags = {
Owner = "user"
Environment = "dev"
}
}
################################################################################
# Supporting Resources
################################################################################
resource "random_password" "master" {
length = 10
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
name = "aurora_vpc"
cidr = "10.99.0.0/18"
enable_dns_support = true
enable_dns_hostnames = true
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
public_subnets = ["10.99.0.0/24", "10.99.1.0/24", "10.99.2.0/24"]
private_subnets = ["10.99.3.0/24", "10.99.4.0/24", "10.99.5.0/24"]
database_subnets = ["10.99.7.0/24", "10.99.8.0/24", "10.99.9.0/24"]
tags = local.tags
}
resource "aws_db_parameter_group" "muffy-pg" {
family = "postgres13"
name = "peter-rds-param-group"
parameter {
apply_method = "immediate"
name = "autovacuum_naptime"
value = "30"
}
parameter {
apply_method = "pending-reboot"
name = "autovacuum_max_workers"
value = "15"
}
}
resource "aws_docdb_cluster_parameter_group" "muffy-cluster-pg" {
name = "peter-rds-param-group"
family = "postgres13"
}
module "cluster" {
source = "terraform-aws-modules/rds-aurora/aws"
name = "test-aurora-db-postgres96"
engine = "aurora-postgresql"
engine_version = "13.7"
instance_class = "db.t3.small"
instances = {
one = {}
two = {}
}
vpc_id = module.vpc.vpc_id
subnets = [module.vpc.database_subnets[0], module.vpc.database_subnets[1], module.vpc.database_subnets[2]]
# allowed_security_groups = ["sg-12345678"]
allowed_cidr_blocks = ["10.99.0.0/18"]
storage_encrypted = true
apply_immediately = true
monitoring_interval = 10
db_parameter_group_name = aws_db_parameter_group.muffy-pg.name
db_cluster_parameter_group_name = aws_docdb_cluster_parameter_group.muffy-cluster-pg.name
enabled_cloudwatch_logs_exports = ["postgresql"]
tags = {
Environment = "dev"
Terraform = "true"
}
}
variable "environment" {
default = "dev"
}
module "rds_schedule" {
depends_on = [module.cluster]
source = "github.com/barryw/terraform-aws-rds-scheduler"
# version = "~> 2.0.0"
/* Don't stop RDS in production! */
skip_execution = var.environment == "prod"
identifier = "peter-scheduler"
/* Start the RDS cluster at 6:50am EDT Monday - Friday */
up_schedule = "cron(50 10 ? * MON-FRI *)"
/* Stop the RDS cluster at 9pm EDT every night */
down_schedule = "cron(0 1 * * ? *)"
rds_identifier = module.cluster.identifier
is_cluster = true
}
I guess the issue is with the rds_identifier value used under module "rds_schedule":
rds_identifier = module.cluster.identifier
It should be:
rds_identifier = module.cluster.cluster_id
The terraform-aws-modules/rds-aurora/aws module used here for the Aurora cluster outputs the cluster identifier as cluster_id, not cluster_identifier.
Reference:
https://registry.terraform.io/modules/terraform-aws-modules/rds-aurora/aws/latest#outputs
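For reference, the scheduler module block with only that one line changed might look like this (same values as in the configuration above):

module "rds_schedule" {
  depends_on = [module.cluster]

  source = "github.com/barryw/terraform-aws-rds-scheduler"

  /* Don't stop RDS in production! */
  skip_execution = var.environment == "prod"

  identifier    = "peter-scheduler"
  up_schedule   = "cron(50 10 ? * MON-FRI *)"
  down_schedule = "cron(0 1 * * ? *)"

  # cluster_id is the module output that holds the Aurora cluster identifier
  rds_identifier = module.cluster.cluster_id
  is_cluster     = true
}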
I'm trying to get the new Terraform 0.12.x dynamic blocks feature to work with a nested map; the config is below.
As you can see (simplified for this post), I'm defining all the variables and adding a variable required_resource_access which contains a map.
I was hoping to use the new dynamic feature to read this map in a nested dynamic block.
variable "prefix" {
  description = "Prefix applied to all top level resources"
  default     = "abx"
}

variable "suffix" {
  description = "Suffix applied to all valid top level resources, usually a 2 letter region code such as we (westeurope) or ne (northeurope)."
  default     = "we"
}

variable "env" {
  description = "3 letter environment code applied to all top level resources"
  default     = "dev"
}
variable "location" {
  description = "Where to create all resources in Azure"
  default     = "westeurope"
}

variable "available_to_other_tenants" {
  default = false
}

variable "oauth2_allow_implicit_flow" {
  default = true
}

variable "public_client" {
  default = false
}

# other option is native
variable "application_type" {
  default = "webapp/api"
}

variable "required_resource_access" {
  type = list(object({
    resource_app_id = string
    resource_access = object({
      id   = string
      type = string
    })
  }))

  default = [{
    resource_app_id = "00000003-0000-0000-c000-000000000000"
    resource_access = {
      id   = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
      type = "Role"
    }
  }]
}

variable "reply_urls" {
  default = []
}

variable "group_membership_claims" {
  default = "All"
}
resource "azuread_application" "bootstrap" {
name = "${var.prefix}-${var.env}-spn"
homepage = "http://${var.prefix}-${var.env}-spn"
identifier_uris = ["http://${var.prefix}-${var.env}-spn"]
reply_urls = var.reply_urls
available_to_other_tenants = var.available_to_other_tenants
oauth2_allow_implicit_flow = var.oauth2_allow_implicit_flow
type = var.application_type
group_membership_claims = var.group_membership_claims
dynamic "required_resource_access" {
for_each = var.required_resource_access
content {
resource_app_id = required_resource_access.value["resource_app_id"]
dynamic "resource_access" {
for_each = required_resource_access.value["resource_access"]
content {
id = resource_access.value["id"]
type = resource_access.value["type"]
}
}
}
}
}
But for reasons beyond my knowledge it keeps giving me this error (notice it prints it twice as well). I've tried a few other options, but this is the closest I've managed to get where it at least gives me a meaningful error.
------------------------------------------------------------------------
Error: Invalid index
on pe_kubernetes.tf line 24, in resource "azuread_application" "bootstrap":
24: id = resource_access.value["id"]
|----------------
| resource_access.value is "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
This value does not have any indices.
Error: Invalid index
on pe_kubernetes.tf line 24, in resource "azuread_application" "bootstrap":
24: id = resource_access.value["id"]
|----------------
| resource_access.value is "Role"
This value does not have any indices.
Error: Invalid index
on pe_kubernetes.tf line 25, in resource "azuread_application" "bootstrap":
25: type = resource_access.value["type"]
|----------------
| resource_access.value is "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
This value does not have any indices.
Error: Invalid index
on pe_kubernetes.tf line 25, in resource "azuread_application" "bootstrap":
25: type = resource_access.value["type"]
|----------------
| resource_access.value is "Role"
This value does not have any indices.
Spent the best part of 2 days on this with no luck so any help or pointers would be much appreciated!
I had some time to test my comment...
If I change the resource_access to a list it works.
See code below:
variable "required_resource_access" {
  type = list(object({
    resource_app_id = string
    resource_access = list(object({
      id   = string
      type = string
    }))
  }))

  default = [{
    resource_app_id = "00000003-0000-0000-c000-000000000000"
    resource_access = [{
      id   = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
      type = "Role"
    }]
  }]
}
resource "azuread_application" "bootstrap" {
name = "test"
type = "webapp/api"
group_membership_claims = "All"
dynamic "required_resource_access" {
for_each = var.required_resource_access
content {
resource_app_id = required_resource_access.value["resource_app_id"]
dynamic "resource_access" {
for_each = required_resource_access.value["resource_access"]
content {
id = resource_access.value["id"]
type = resource_access.value["type"]
}
}
}
}
}
And the plan shows:
Terraform will perform the following actions:
  # azuread_application.bootstrap will be created
  + resource "azuread_application" "bootstrap" {
      + application_id             = (known after apply)
      + available_to_other_tenants = false
      + group_membership_claims    = "All"
      + homepage                   = (known after apply)
      + id                         = (known after apply)
      + identifier_uris            = (known after apply)
      + name                       = "test"
      + oauth2_allow_implicit_flow = true
      + object_id                  = (known after apply)
      + owners                     = (known after apply)
      + public_client              = (known after apply)
      + reply_urls                 = (known after apply)
      + type                       = "webapp/api"

      + oauth2_permissions {
          + admin_consent_description = (known after apply)
          ...
        }

      + required_resource_access {
          + resource_app_id = "00000003-0000-0000-c000-000000000000"

          + resource_access {
              + id   = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
              + type = "Role"
            }
        }
    }
Plan: 1 to add, 0 to change, 0 to destroy.
I removed a lot of your variables and some of the optional arguments for azuread_application to keep the code as small as possible, but the same principle applies to your code: use lists with for_each, or it will loop over the object's properties.
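To illustrate the difference, here is a minimal standalone sketch (not part of the original config; the local and output names are made up). Iterating an object yields its attribute values as bare strings, which is exactly why resource_access.value had no indices, while iterating a list of objects yields whole objects you can index into:

locals {
  access_as_object = {
    id   = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
    type = "Role"
  }

  access_as_list = [{
    id   = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
    type = "Role"
  }]
}

# Iterating the object yields its attribute values (plain strings):
output "object_iteration" {
  value = [for v in local.access_as_object : v]
  # => ["7ab1d382-f21e-4acd-a863-ba3e13f7da61", "Role"]
}

# Iterating the list yields whole objects, so indexing works:
output "list_iteration" {
  value = [for v in local.access_as_list : v["id"]]
  # => ["7ab1d382-f21e-4acd-a863-ba3e13f7da61"]
}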
I'm trying to import a couple of DynamoDB tables into Terraform. I'm stuck on how to dynamically handle global secondary indexes between environments.
I have a module and two state files, one for each environment.
How can I dynamically enter these variables, which change between environments, using count?
For example, in the example below there are five indexes, but for one particular index the read capacity and write capacity change in the prod account, while all the other values remain constant.
That is, last-index has different read and write capacity values in prod and nonprod.
How can this be implemented in Terraform?
Module:
locals {
  name           = ["xxx-index", "xxx-index", "xxx-index", "xxx-index", "last-index"]
  write_capacity = [5, 5, 5, 5, 5]
  read_capacity  = [5, 5, 5, 5, 5]
  range_key      = ["xxx", "xxx", "xxx", "xxx", "xxx"]
}

global_secondary_index {
  count           = "${length(local.name)}"
  name            = "${element(local.name, count.index)}"
  write_capacity  = "${element(local.write_capacity, count.index)}"
  read_capacity   = "${element(local.read_capacity, count.index)}"
  hash_key        = "userId"
  range_key       = "${element(local.range_key, count.index)}"
  projection_type = "ALL"
}
terraform -version:
Terraform v0.11.13
+ provider.aws v2.25.0
There is no reasonable answer to this question for Terraform 0.11. It lacks the primitives required to describe the transform you are looking for, and it doesn't support dynamically generating nested blocks.
The closest supported thing in Terraform 0.11 would be to fix the number of indices as constant but still vary the individual parts, like this:
resource "aws_dynamodb_table" "example" {
# ...
global_secondary_index {
name = "${local.name[0]}"
write_capacity = "${local.write_capacity[0]}"
read_capacity = "${local.read_capacity[0]}"
range_key = "${local.range_key[0]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[1]}"
write_capacity = "${local.write_capacity[1]}"
read_capacity = "${local.read_capacity[1]}"
range_key = "${local.range_key[1]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[2]}"
write_capacity = "${local.write_capacity[2]}"
read_capacity = "${local.read_capacity[2]}"
range_key = "${local.range_key[2]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[3]}"
write_capacity = "${local.write_capacity[3]}"
read_capacity = "${local.read_capacity[3]}"
range_key = "${local.range_key[3]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[4]}"
write_capacity = "${local.write_capacity[4]}"
read_capacity = "${local.read_capacity[4]}"
range_key = "${local.range_key[4]}"
hash_key = "userId"
projection_type = "ALL"
}
}
The new Terraform 0.12 feature that was added to deal with this use-case is dynamic blocks, which allow producing zero or more blocks of a particular type based on a collection value.
For example:
locals {
  indices = {
    "xxx-index" = {
      write_capacity = 5
      read_capacity  = 5
      range_key      = "xxx"
    },
    "last-index" = {
      write_capacity = 5
      read_capacity  = 5
      range_key      = "xxx"
    },
  }
}

resource "aws_dynamodb_table" "example" {
  # ...

  dynamic "global_secondary_index" {
    for_each = local.indices

    content {
      name            = global_secondary_index.key
      write_capacity  = global_secondary_index.value.write_capacity
      read_capacity   = global_secondary_index.value.read_capacity
      range_key       = global_secondary_index.value.range_key
      hash_key        = "userId"
      projection_type = "ALL"
    }
  }
}
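To make only last-index differ between environments, one option (a sketch under the assumption that you turn the map into a module variable; the variable name indices and the prod capacity values here are made up for illustration) is to pass the map in from each environment and override just the values that change:

variable "indices" {
  type = map(object({
    write_capacity = number
    read_capacity  = number
    range_key      = string
  }))
}

# prod.tfvars (hypothetical values) -- only last-index differs from nonprod
indices = {
  "xxx-index"  = { write_capacity = 5,  read_capacity = 5,  range_key = "xxx" }
  "last-index" = { write_capacity = 20, read_capacity = 20, range_key = "xxx" }
}

The dynamic "global_secondary_index" block then uses for_each = var.indices instead of local.indices.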
I am trying to create an AWS MSK cluster with Terraform. With the newly released version, the MSK cluster is not getting created.
Here is the code used:
resource "aws_msk_cluster" "msk_cluster" {
cluster_name = "Testing_Cluster"
kafka_version = "2.1.0"
number_of_broker_nodes = 3
broker_node_group_info {
instance_type = "kafka.m5.large"
client_subnets = [
"${aws_subnet.subnet_a.id}",
"${aws_subnet.subnet_b.id}",
"${aws_subnet.subnet_c.id}",
]
ebs_volume_size = 5
security_groups = [ "${aws_security_group.MSK_Sg.id}" ]
}
tags = {
Name = "Cluster_MSK"
}
}
But I am getting the error:
BadRequestException: The parameter value contains one or more characters that are not valid.
status code: 400, request id: e2589e6a-8161-11e9-8f31-6f8877605e30
MSK doesn't support underscores in the aws_msk_cluster.cluster_name and aws_msk_configuration.name parameters.
In your case, simply use cluster_name = "Testing-Cluster".
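For example, only the name value needs to change; the rest of the resource can stay exactly as in your configuration above:

resource "aws_msk_cluster" "msk_cluster" {
  # Hyphens are accepted where underscores are not
  cluster_name           = "Testing-Cluster"
  kafka_version          = "2.1.0"
  number_of_broker_nodes = 3

  # broker_node_group_info and tags unchanged from the configuration above
  # ...
}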
So, I'm doing translations for a game, and I have different dictionaries. If a translation doesn't exist in one language, I want to use the English translation instead. Every method I've tried for combining the dictionaries has ended up incredibly inefficient.
Here are some cut-down examples:
local translation_sr = {
  Buttons = {
    Confirm = "Потврди";
    Submit = "Унеси";
  };
  Countries = {
    Bloxell = "Блоксел";
    USA = "Сједињене Америчке Државе";
  };
  Firearms = {
    Manufacturers = {
      GenMot = "Џенерални Мотори";
      Intratec = "Интратек";
      TF = "ТФ Оружје";
    };
  };
};
local translation_en = {
  Buttons = {
    Confirm = "Confirm";
    Purchase = "Purchase";
    Submit = "Submit";
  };
  Countries = {
    Bloxell = "Bloxell";
    USA = "United States";
  };
  Firearms = {
    Manufacturers = {
      GenMot = "General Motors";
      Intratec = "Intratec";
      TF = "TF Armaments";
    };
  };
  Languages = {
    Belarusian = "Belarusian";
    English = "English";
    French = "French";
    German = "German";
    Italian = "Italian";
    Russian = "Russian";
    Serbian = "Serbian";
    Spanish = "Spanish";
  };
};
I guess you want to do something like this
setmetatable(translation_sr.Buttons, { __index = translation_en.Buttons })
for all leaf subtables. You can do this by hand if there are only a few subtables, or with a small recursive helper like the sketch below.
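A minimal sketch of such a helper (the function name add_fallback is made up here); it walks both tables in parallel and makes every subtable of the target fall back to the matching English subtable:

-- Make `target` (and its nested tables) fall back to `fallback` for missing keys.
local function add_fallback(target, fallback)
  setmetatable(target, { __index = fallback })
  for key, value in pairs(target) do
    if type(value) == "table" and type(fallback[key]) == "table" then
      add_fallback(value, fallback[key])
    end
  end
end

add_fallback(translation_sr, translation_en)

print(translation_sr.Buttons.Purchase)  --> "Purchase" (missing in Serbian, taken from English)
print(translation_sr.Languages.French)  --> "French" (whole subtable missing in Serbian)

Because the top-level table also gets a fallback, even a subtable that is completely missing from a translation (like Languages above) resolves through the English table.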
I believe you should use a metatable to accomplish what you need.
I assume that you will always index by the English default word. If that is the case, you can do the following.
local function default(t, k)
  return k
end

local translation_sr = {
  Buttons = setmetatable({
    Confirm = "Потврди",
    Submit = "Унеси",
  },
  { __index = default }),

  Countries = setmetatable({
    ["Bloxell"] = "Блоксел",
    ["United States"] = "Сједињене Америчке Државе",
  },
  { __index = default }),

  Firearms = {
    Manufacturers = setmetatable({
      ["General Motors"] = "Џенерални Мотори",
      ["Intratec"] = "Интратек",
      ["TF Armaments"] = "ТФ Оружје",
    },
    { __index = default }),
  },
}
This function simply returns the key that was not present in the table:

local function default(t, k)
  return k
end

Since the key is assumed to be the English word you would use as the default, looking up "Purchase" in translation_sr gives you "Purchase" back. This method requires no translation_en table.
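For example, with the table above (the lookups below are just an illustration of the fallback behaviour):

print(translation_sr.Countries["United States"])  --> "Сједињене Америчке Државе"
print(translation_sr.Buttons["Purchase"])         --> "Purchase" (missing key, default returns it)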