I'm trying to import a couple of DynamoDB tables into Terraform. I'm stuck on how to dynamically handle global secondary indexes between environments.
I have a module and two state files, one for each environment.
How can I dynamically set these variables, which change between environments, using count?
For example, in the snippet below there are four placeholder indexes plus last-index; for that last index the read and write capacity change in the prod account, whereas all other values remain constant.
i.e. last-index has different read and write capacity values in prod and nonprod.
How can this be implemented in Terraform?
Module:
locals {
name = ["xxx-index","xxx-index","xxx-index","xxx-index","last-index"]
write_capacity = [ 5,5,5,5,5]
read_capacity = [ 5,5,5,5,5]
range_key = ["xxx","xxx","xxx","xxx","xxx"]
}
global_secondary_index {
count = "${length(local.name)}"
name = "${element(local.name, count.index)}"
write_capacity = "${element(local.write_capacity, count.index)"
read_capacity = "${element(local.read_capacity, count.index)"
hash_key = "userId"
range_key = "${element(local.range_key,count.index)}"
projection_type = "ALL"
}
terraform -version output:
Terraform v0.11.13
+ provider.aws v2.25.0
There is no reasonable way to do this in Terraform 0.11. It lacks the primitives required to describe the transformation you are looking for, and it doesn't support dynamically generating nested blocks.
The closest supported approach in Terraform 0.11 is to fix the number of indexes as a constant but still vary the individual values, like this:
resource "aws_dynamodb_table" "example" {
# ...
global_secondary_index {
name = "${local.name[0]}"
write_capacity = "${local.write_capacity[0]}"
read_capacity = "${local.read_capacity[0]}"
range_key = "${local.range_key[0]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[1]}"
write_capacity = "${local.write_capacity[1]}"
read_capacity = "${local.read_capacity[1]}"
range_key = "${local.range_key[1]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[2]}"
write_capacity = "${local.write_capacity[2]}"
read_capacity = "${local.read_capacity[2]}"
range_key = "${local.range_key[2]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[3]}"
write_capacity = "${local.write_capacity[3]}"
read_capacity = "${local.read_capacity[3]}"
range_key = "${local.range_key[3]}"
hash_key = "userId"
projection_type = "ALL"
}
global_secondary_index {
name = "${local.name[4]}"
write_capacity = "${local.write_capacity[4]}"
read_capacity = "${local.read_capacity[4]}"
range_key = "${local.range_key[4]}"
hash_key = "userId"
projection_type = "ALL"
}
}
The new Terraform 0.12 feature that was added to deal with this use case is dynamic blocks, which allow producing zero or more blocks of a particular type based on a collection value.
For example:
locals {
indices = {
"xxx-index" = {
write_capacity = 5
read_capacity = 5
range_key = "xxx"
},
"last-index" = {
write_capacity = 5
read_capacity = 5
range_key = "xxx"
},
}
}
resource "aws_dynamodb_table" "example" {
# ...
dynamic "global_secondary_index" {
for_each = local.indices
content {
name = global_secondary_index.key
write_capacity = global_secondary_index.value.write_capacity
read_capacity = global_secondary_index.value.read_capacity
range_key = global_secondary_index.value.range_key
hash_key = "userId"
projection_type = "ALL"
}
}
}
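To cover the prod/nonprod difference described in the question, the capacity values in that map can be driven by an environment flag passed into the module. A minimal sketch, assuming an environment variable exists and using 10 as a placeholder for the prod capacities:
variable "environment" {
  default = "nonprod"
}

locals {
  indices = {
    "xxx-index" = {
      write_capacity = 5
      read_capacity  = 5
      range_key      = "xxx"
    },
    "last-index" = {
      # Only this index differs between environments; 10 is a placeholder value.
      write_capacity = var.environment == "prod" ? 10 : 5
      read_capacity  = var.environment == "prod" ? 10 : 5
      range_key      = "xxx"
    },
  }
}
The dynamic "global_secondary_index" block above then stays exactly as written; only the data it iterates over changes per environment.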
Related
Sorry, I am a beginner at Terraform and found some useful modules.
I need to create a single-instance Aurora cluster for non-production, and I need to shut it down after business hours.
How can I create an Aurora cluster and schedule it to run only during business hours?
I can't get the scheduler to connect to the RDS cluster.
provider "aws" {
region = local.region
}
locals {
name = "example-aurora"
region = "us-east-1"
tags = {
Owner = "user"
Environment = "dev"
}
}
################################################################################
# Supporting Resources
################################################################################
resource "random_password" "master" {
length = 10
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
name = "aurora_vpc"
cidr = "10.99.0.0/18"
enable_dns_support = true
enable_dns_hostnames = true
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
public_subnets = ["10.99.0.0/24", "10.99.1.0/24", "10.99.2.0/24"]
private_subnets = ["10.99.3.0/24", "10.99.4.0/24", "10.99.5.0/24"]
database_subnets = ["10.99.7.0/24", "10.99.8.0/24", "10.99.9.0/24"]
tags = local.tags
}
resource "aws_db_parameter_group" "muffy-pg" {
family = "postgres13"
name = "peter-rds-param-group"
parameter {
apply_method = "immediate"
name = "autovacuum_naptime"
value = "30"
}
parameter {
apply_method = "pending-reboot"
name = "autovacuum_max_workers"
value = "15"
}
}
resource "aws_docdb_cluster_parameter_group" "muffy-cluster-pg" {
name = "peter-rds-param-group"
family = "postgres13"
}
module "cluster" {
source = "terraform-aws-modules/rds-aurora/aws"
name = "test-aurora-db-postgres96"
engine = "aurora-postgresql"
engine_version = "13.7"
instance_class = "db.t3.small"
instances = {
one = {}
two = {}
}
vpc_id = module.vpc.vpc_id
subnets = [module.vpc.database_subnets[0], module.vpc.database_subnets[1], module.vpc.database_subnets[2]]
# allowed_security_groups = ["sg-12345678"]
allowed_cidr_blocks = ["10.99.0.0/18"]
storage_encrypted = true
apply_immediately = true
monitoring_interval = 10
db_parameter_group_name = aws_db_parameter_group.muffy-pg.name
db_cluster_parameter_group_name = aws_docdb_cluster_parameter_group.muffy-cluster-pg.name
enabled_cloudwatch_logs_exports = ["postgresql"]
tags = {
Environment = "dev"
Terraform = "true"
}
}
variable "environment" {
default = "dev"
}
module "rds_schedule" {
depends_on = [module.cluster]
source = "github.com/barryw/terraform-aws-rds-scheduler"
# version = "~> 2.0.0"
/* Don't stop RDS in production! */
skip_execution = var.environment == "prod"
identifier = "peter-scheduler"
/* Start the RDS cluster at 6:50am EDT Monday - Friday */
up_schedule = "cron(50 10 ? * MON-FRI *)"
/* Stop the RDS cluster at 9pm EDT every night */
down_schedule = "cron(0 1 * * ? *)"
rds_identifier = module.cluster.identifier
is_cluster = true
}
I guess the issue is with the rds_identifier value used under module "rds_schedule":
rds_identifier = module.cluster.identifier
It should be:
rds_identifier = module.cluster.cluster_id
The source module used here for the Aurora cluster, "terraform-aws-modules/rds-aurora/aws", outputs the cluster identifier as cluster_id, not identifier.
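With that one change, the scheduler block from the question would look like this (a sketch based on the configuration above):
module "rds_schedule" {
  depends_on = [module.cluster]
  source     = "github.com/barryw/terraform-aws-rds-scheduler"

  /* Don't stop RDS in production! */
  skip_execution = var.environment == "prod"
  identifier     = "peter-scheduler"

  /* Start the RDS cluster at 6:50am EDT Monday - Friday */
  up_schedule = "cron(50 10 ? * MON-FRI *)"
  /* Stop the RDS cluster at 9pm EDT every night */
  down_schedule = "cron(0 1 * * ? *)"

  # cluster_id is the identifier output exposed by terraform-aws-modules/rds-aurora/aws
  rds_identifier = module.cluster.cluster_id
  is_cluster     = true
}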
Reference (Terraform Registry):
https://registry.terraform.io/modules/terraform-aws-modules/rds-aurora/aws/latest#outputs
I'm using Elasticsearch 7.7 & NEST 7.7, and on my web page I'm showing 9 search result documents per page. Even though I'm only showing the first 9 results on the page, I need to return some property values from all of the results for side filtering on the web page.
E.g. if I'm searching for "LapTop", my page will show 9 results on the first page. I also need to show all the "Manufacturers" from all of the search results, not only the manufacturers in the first-page results, so that customers can filter by manufacturer rather than only by what is displayed on the first page.
I have tried GlobalAggregation, but it returns categories and manufacturers only for the items on the selected page.
public SearchResult Search(SearchType searchType, string searchQuery, int storeId, int pageNumber = 1, int pageSize = 12, IList<SearchFilter> requestFilter = null, decimal? priceFrom = 0, decimal? priceTo = 100000000, string sortBy = null, int totalCount = 0)
{
var queryContainer = new QueryContainer();
var sorts = new List<ISort>();
sorts.Add(new FieldSort { Field = "_score", Order = SortOrder.Descending });
switch (sortBy)
{
case "z-a":
sorts.Add(new FieldSort { Field = Field<ElasticIndexGroupProduct>(p => p.SortValue), Order = SortOrder.Descending });
break;
case "a-z":
sorts.Add(new FieldSort { Field = Field<ElasticIndexGroupProduct>(p => p.SortValue), Order = SortOrder.Ascending });
break;
}
var aggrigations = new AggregationDictionary
{
{"average_per_child", new
AverageAggregation("average_per_child",Field<ElasticIndexGroupProduct>(d => d.Price))},
{"max_per_child", new MaxAggregation("max_per_child",Field<ElasticIndexGroupProduct>(d => d.Price))},
{"min_per_child", new MinAggregation("min_per_child", Field<ElasticIndexGroupProduct>(d => d.Price))},
{
"globle_filter_aggrigation", new GlobalAggregation("globle_filter_aggrigation")
{
Aggregations =new AggregationDictionary
{
{"category_flow", new TermsAggregation("category_flow"){Field = Field<ElasticIndexGroupProduct>(p => p.CategoryFlow)} },
{"manufacturers", new TermsAggregation("manufacturers"){Field = Field<ElasticIndexGroupProduct>(p => p.Manufacturer)} }
}
}
}
};
var searchRequest = new SearchRequest<ElasticIndexGroupProduct>()
{
Profile = true,
From = (pageNumber - 1) * pageSize,
Size = pageSize,
Version = true,
Sort = sorts,
//Scroll = Time.MinusOne,
Aggregations = aggrigations
};
var multiMatch = new QueryStringQuery
{
Query = searchQuery,
Fields = GetSearchFields(searchType),
Boost = 1.1,
Name = "named_query",
DefaultOperator = Operator.Or,
Analyzer = "standard",
QuoteAnalyzer = "keyword",
AllowLeadingWildcard = true,
MaximumDeterminizedStates = 2,
Escape = true,
FuzzyPrefixLength = 2,
FuzzyMaxExpansions = 3,
FuzzyRewrite = MultiTermQueryRewrite.ConstantScore,
Rewrite = MultiTermQueryRewrite.ConstantScore,
Fuzziness = Fuzziness.Auto,
TieBreaker = 1,
AnalyzeWildcard = true,
MinimumShouldMatch = 2,
QuoteFieldSuffix = "'",
Lenient = true,
AutoGenerateSynonymsPhraseQuery = false
};
searchRequest.Query = new BoolQuery
{
Must = new QueryContainer[] { multiMatch },
Filter = new QueryContainer[] { queryContainer }
};
var searchResponse = _client.Search<ElasticIndexGroupProduct>(searchRequest);
var categoryFlowsGlobe = new List<string>();
var allAggregations = searchResponse.Aggregations.Global("globle_filter_aggrigation");
var categories = allAggregations.Terms("category_flow");
foreach (var aggItem in categories.Buckets)
{
if (!categoryFlowsGlobe.Any(x => x == aggItem.Key))
{
categoryFlowsGlobe.Add(aggItem.Key);
}
}
}
This is the exact use case for post_filter: run a search request that returns hits and aggregations, then apply filtering to the hits after the aggregations have been calculated.
For Manufacturers, these can be retrieved with a terms aggregation in the search request; you can adjust the size on the aggregation if you need to return all manufacturers, otherwise you might decide to return only the top x.
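As a rough sketch against the request built in the question (assuming queryContainer holds the user-selected filters and that a bucket size of 1000 is large enough to cover every manufacturer):
// Keep the full-text query as the main query so aggregations are calculated over
// every matching document, and move the user-selected filters into the post filter
// so they only narrow the hits returned for the current page.
searchRequest.Query = new BoolQuery
{
    Must = new QueryContainer[] { multiMatch }
};
searchRequest.PostFilter = queryContainer;

// With a post filter in place the global aggregation wrapper is not needed; a terms
// aggregation on the request already sees all query matches. Size controls how many
// buckets come back (the default is 10); 1000 here is a placeholder.
searchRequest.Aggregations = new AggregationDictionary
{
    { "manufacturers", new TermsAggregation("manufacturers")
        {
            Field = Field<ElasticIndexGroupProduct>(p => p.Manufacturer),
            Size = 1000
        }
    }
};

var searchResponse = _client.Search<ElasticIndexGroupProduct>(searchRequest);
var allManufacturers = searchResponse.Aggregations
    .Terms("manufacturers")
    .Buckets
    .Select(b => b.Key)
    .ToList();
The price aggregations from the question can be added back into the same dictionary alongside the terms aggregation if they are still needed.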
I am trying to create a Terraform module with which I can make an entry in an existing DynamoDB table.
I have this code, which creates a DynamoDB table:
resource "aws_dynamodb_table" "basic-dynamodb-table" {
name = "GameScores"
billing_mode = "PROVISIONED"
read_capacity = 20
write_capacity = 20
hash_key = "UserId"
range_key = "GameTitle"
attribute {
name = "UserId"
type = "S"
}
attribute {
name = "GameTitle"
type = "S"
}
attribute {
name = "TopScore"
type = "N"
}
ttl {
attribute_name = "TimeToExist"
enabled = false
}
global_secondary_index {
name = "GameTitleIndex"
hash_key = "GameTitle"
range_key = "TopScore"
write_capacity = 10
read_capacity = 10
projection_type = "INCLUDE"
non_key_attributes = ["UserId"]
}
tags = {
Name = "dynamodb-table-1"
Environment = "production"
}
}
Is there any way I can make changes to an existing DynamoDB table?
For adding entries to a table you can take a look at the aws_dynamodb_table_item resource. Here is an example that you can use to add an entry to your table:
resource "aws_dynamodb_table_item" "item1" {
table_name = aws_dynamodb_table.basic-dynamodb-table.name
hash_key = aws_dynamodb_table.basic-dynamodb-table.hash_key
range_key = aws_dynamodb_table.basic-dynamodb-table.range_key
item = <<ITEM
{
"UserId": {"S": "user"},
"GameTitle": {"S": "gamex"},
"TopScore": {"N": "42"}
}
ITEM
}
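If the table already exists outside of this configuration, one possible approach (a sketch, assuming the existing table is also called GameScores) is to look it up with the aws_dynamodb_table data source and reference that instead of the resource:
# Look up the table that already exists in the account.
data "aws_dynamodb_table" "existing" {
  name = "GameScores"
}

resource "aws_dynamodb_table_item" "item1" {
  table_name = data.aws_dynamodb_table.existing.name
  hash_key   = data.aws_dynamodb_table.existing.hash_key
  range_key  = data.aws_dynamodb_table.existing.range_key

  item = <<ITEM
{
  "UserId": {"S": "user"},
  "GameTitle": {"S": "gamex"},
  "TopScore": {"N": "42"}
}
ITEM
}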
I'm trying to get the new dynamic block feature in Terraform 0.12.x to work with a nested map; the config is below.
As you can see below (simplified for this question), I'm defining all the variables and adding a variable required_resource_access, which contains a map.
I was hoping to use the new dynamic feature to read this map in a nested dynamic block.
variable prefix {
description = "Prefix to applied to all top level resources"
default = "abx"
}
variable suffix {
description = "Suffix to applied to all valid top level resources, usually this is 2 letter region code such as we (westeurope), ne (northeurope)."
default = "we"
}
variable env {
description = "3 letter environment code appied to all top level resources"
default = "dev"
}
variable location {
description = "Where to create all resources in Azure"
default = "westeurope"
}
variable available_to_other_tenants {
default = false
}
variable oauth2_allow_implicit_flow {
default = true
}
variable public_client {
default = false
}
# other option is native
variable application_type {
default = "webapp/api"
}
variable required_resource_access {
type = list(object({
resource_app_id = string
resource_access = object({
id = string
type = string
})
}))
default = [{
resource_app_id = "00000003-0000-0000-c000-000000000000"
resource_access = {
id = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
type = "Role"
}
}]
}
variable reply_urls {
default = []
}
variable group_membership_claims {
default = "All"
}
resource "azuread_application" "bootstrap" {
name = "${var.prefix}-${var.env}-spn"
homepage = "http://${var.prefix}-${var.env}-spn"
identifier_uris = ["http://${var.prefix}-${var.env}-spn"]
reply_urls = var.reply_urls
available_to_other_tenants = var.available_to_other_tenants
oauth2_allow_implicit_flow = var.oauth2_allow_implicit_flow
type = var.application_type
group_membership_claims = var.group_membership_claims
dynamic "required_resource_access" {
for_each = var.required_resource_access
content {
resource_app_id = required_resource_access.value["resource_app_id"]
dynamic "resource_access" {
for_each = required_resource_access.value["resource_access"]
content {
id = resource_access.value["id"]
type = resource_access.value["type"]
}
}
}
}
}
But for reasons beyond my knowledge it keeps giving me this error (notice it prints it twice as well). I've tried a few other options, but this is the closest I managed to get where it would at least give me a meaningful error.
------------------------------------------------------------------------
Error: Invalid index
on pe_kubernetes.tf line 24, in resource "azuread_application" "bootstrap":
24: id = resource_access.value["id"]
|----------------
| resource_access.value is "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
This value does not have any indices.
Error: Invalid index
on pe_kubernetes.tf line 24, in resource "azuread_application" "bootstrap":
24: id = resource_access.value["id"]
|----------------
| resource_access.value is "Role"
This value does not have any indices.
Error: Invalid index
on pe_kubernetes.tf line 25, in resource "azuread_application" "bootstrap":
25: type = resource_access.value["type"]
|----------------
| resource_access.value is "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
This value does not have any indices.
Error: Invalid index
on pe_kubernetes.tf line 25, in resource "azuread_application" "bootstrap":
25: type = resource_access.value["type"]
|----------------
| resource_access.value is "Role"
This value does not have any indices.
Spent the best part of 2 days on this with no luck so any help or pointers would be much appreciated!
I had some time to test my comment...
If I change resource_access to a list, it works.
See code below:
variable required_resource_access {
type = list(object({
resource_app_id = string
resource_access = list(object({
id = string
type = string
}))
}))
default = [{
resource_app_id = "00000003-0000-0000-c000-000000000000"
resource_access = [{
id = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
type = "Role"
}]
}]
}
resource "azuread_application" "bootstrap" {
name = "test"
type = "webapp/api"
group_membership_claims = "All"
dynamic "required_resource_access" {
for_each = var.required_resource_access
content {
resource_app_id = required_resource_access.value["resource_app_id"]
dynamic "resource_access" {
for_each = required_resource_access.value["resource_access"]
content {
id = resource_access.value["id"]
type = resource_access.value["type"]
}
}
}
}
}
And the plan shows:
Terraform will perform the following actions:
# azuread_application.bootstrap will be created
+ resource "azuread_application" "bootstrap" {
+ application_id = (known after apply)
+ available_to_other_tenants = false
+ group_membership_claims = "All"
+ homepage = (known after apply)
+ id = (known after apply)
+ identifier_uris = (known after apply)
+ name = "test"
+ oauth2_allow_implicit_flow = true
+ object_id = (known after apply)
+ owners = (known after apply)
+ public_client = (known after apply)
+ reply_urls = (known after apply)
+ type = "webapp/api"
+ oauth2_permissions {
+ admin_consent_description = (known after apply)
...
}
+ required_resource_access {
+ resource_app_id = "00000003-0000-0000-c000-000000000000"
+ resource_access {
+ id = "7ab1d382-f21e-4acd-a863-ba3e13f7da61"
+ type = "Role"
}
}
}
Plan: 1 to add, 0 to change, 0 to destroy.
I removed a lot of your variables and some of the optional arguments for azuread_application to keep the code as small as possible, but the same principle applies to your code: use lists with for_each, or it will loop over the object's properties.
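If you would rather keep resource_access as a single object in the variable type, another option (a sketch, not tested against your original variable definition) is to wrap that object in a list at the for_each, so the inner dynamic block iterates once over the whole object instead of over its attribute values:
dynamic "resource_access" {
  # Wrapping the single object in a list gives for_each exactly one element,
  # so resource_access.value is the whole object rather than one of its attributes.
  for_each = [required_resource_access.value["resource_access"]]
  content {
    id   = resource_access.value["id"]
    type = resource_access.value["type"]
  }
}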
So, I'm doing translations for a game, and I have different dictionaries. If a translation doesn't exist in one language, I want it to fall back to the English translation. Every method I've tried for combining the dictionaries has ended up being incredibly inefficient.
Here are some cut-down examples:
local translation_sr = {
Buttons = {
Confirm = "Потврди";
Submit = "Унеси";
};
Countries = {
Bloxell = "Блоксел";
USA = "Сједињене Америчке Државе";
};
Firearms = {
Manufacturers = {
GenMot = "Џенерални Мотори";
Intratec = "Интратек";
TF = "ТФ Оружје";
};
};
};
local translation_en = {
Buttons = {
Confirm = "Confirm";
Purchase = "Purchase";
Submit = "Submit";
};
Countries = {
Bloxell = "Bloxell";
USA = "United States";
};
Firearms = {
Manufacturers = {
GenMot = "General Motors";
Intratec = "Intratec ";
TF = "TF Armaments";
};
};
Languages = {
Belarusian = "Belarusian";
English = "English";
French = "French";
German = "German";
Italian = "Italian";
Russian = "Russian";
Serbian = "Serbian";
Spanish = "Spanish";
};
};
I guess you want to do something like this:
setmetatable(translation_sr.Buttons, { __index = translation_en.Buttons })
for all leaf subtables. You can do this by hand if there are only a few subtables.
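For a larger set of nested tables, a small recursive helper (a sketch, assuming translation_sr and translation_en share the same nested shape) can attach the fallbacks automatically:
-- Walk the English table and attach each of its subtables as the __index
-- fallback for the corresponding subtable in the target translation.
local function addFallback(target, fallback)
  for key, value in pairs(fallback) do
    if type(value) == "table" and type(target[key]) == "table" then
      addFallback(target[key], value)
    end
  end
  setmetatable(target, { __index = fallback })
end

addFallback(translation_sr, translation_en)
-- translation_sr.Buttons.Purchase now resolves to "Purchase" via the English table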
I believe you should use a metatable to accomplish what you need.
I assume that you will always index by the English default word. With that being true, you can do the following:
local function default(t,k)
return k
end
local translation_sr = {
Buttons = setmetatable({
Confirm = "Потврди",
Submit = "Унеси",
},
{ __index = default }),
Countries = setmetatable({
["Bloxell"] = "Блоксел",
["United States"]= "Сједињене Америчке Државе",
},
{ __index = default }),
Firearms = {
Manufacturers = setmetatable({
["General Motors"] = "Џенерални Мотори",
["Intratec"] = "Интратек",
["TF Armaments"] = "ТФ Оружје",
},
{ __index = default }),
},
}
This function simply returns your key that was not present in the table.
local function default(t,k)
return k
end
Since this key is assumed to be the English word you would use as the default, looking up "Purchase" gives you "Purchase" back from translation_sr. This method requires no translation_en table.
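For example, a quick usage sketch with the table above:
print(translation_sr.Countries["United States"])         --> "Сједињене Америчке Државе"
print(translation_sr.Firearms.Manufacturers["Intratec"]) --> "Интратек"
print(translation_sr.Countries["Germany"])               --> "Germany" (missing key, returned by the default function)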