Google-beta seems to be using non-existent project with google_firebase_project. What should I do? - firebase

Objective
I am trying to fix a Firebase deployment managed in Terraform. My module looks something like this...
data "google_client_config" "default_project" {
provider = google-beta
}
data "google_project" "project" {
provider = google-beta
project_id = var.gcp_project
}
resource "google_firebase_project" "default" {
provider = google-beta
project = var.gcp_project
}
# enable billing API
resource "google_project_service" "cloud_billing" {
provider = google-beta
project = google_firebase_project.default.id
service = "cloudbilling.googleapis.com"
}
# enable firebase
resource "google_project_service" "firebase" {
provider = google-beta
project = google_firebase_project.default.id
service = "firebase.googleapis.com"
}
# enable access context manage api
resource "google_project_service" "access_context" {
provider = google-beta
project = google_firebase_project.default.id
service = "accesscontextmanager.googleapis.com"
}
resource "google_firebase_web_app" "app" {
provider = google-beta
project = data.google_project.project.project_id
display_name = "firestore-controller-${google_firebase_project.default.display_name}"
depends_on = [
google_firebase_project.default,
google_project_service.firebase,
google_project_service.access_context,
google_project_service.cloud_billing
]
}
data "google_firebase_web_app_config" "app" {
provider = google-beta
web_app_id = google_firebase_web_app.app.app_id
}
resource "google_storage_bucket" "storage" {
provider = google-beta
name = "firestore-controller-${google_firebase_project.default.display_name}"
location = "US"
}
locals {
firebase_config = jsonencode({
appId = google_firebase_web_app.app.app_id
apiKey = data.google_firebase_web_app_config.app.api_key
authDomain = data.google_firebase_web_app_config.app.auth_domain
databaseURL = lookup(data.google_firebase_web_app_config.app, "database_url", "")
storageBucket = lookup(data.google_firebase_web_app_config.app, "storage_bucket", "")
messagingSenderId = lookup(data.google_firebase_web_app_config.app, "message_sender_id", "")
measurementId = lookup(data.google_firebase_web_app_config.app, "measurement_id", "")
})
}
resource "google_storage_bucket_object" "firebase_config" {
provider = google-beta
bucket = google_storage_bucket.storage.name
name = "firebase-config.json"
content = local.firebase_config
}
Issue
Unfortunately, this fails at google_firebase_project.default with the following message:
{
│ "#type": "type.googleapis.com/google.rpc.ErrorInfo",
│ "domain": "googleapis.com",
│ "metadata": {
│ "consumer": "projects/764086051850",
│ "service": "firebase.googleapis.com"
│ },
│ "reason": "SERVICE_DISABLED"
│ }
This is strange because a project with that number does not exist (unless it's some kind of root project that I'm having trouble finding). If this is the project number for some child of the project I am providing to google_firebase_project.default, that is also strange; var.gcp_project certainly has this service enabled.
What I've tried thus far
Removing tfstate.
Refactoring back and forth from legacy modules.
I have double-checked and confirmed that the google-beta provider does indeed recognize the correct project in its configuration when using data.google_project without specifying a project_id.
Where is this mysterious projects/764086051850 coming from?
cross-post

Related

Terraform aws_s3_bucket_website_configuration keeps creating website block of aws_s3_bucket resource

I'm using ~3.0 as AWS provider version on Terraform and last terraform init downloaded 3.75.1. When I ran terraform plan, a WARNING came up;
Warning: Argument is deprecated
on main.tf line 14, in resource "aws_s3_bucket" "xxx":
14: resource "aws_s3_bucket" "xxx" {
Use the aws_s3_bucket_website_configuration resource instead
My bucket resource was like this;
resource "aws_s3_bucket" "bucket" {
bucket = "bucket"
acl = "public-read"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::bucket/*"
}
]
}
EOF
website {
index_document = "index.html"
error_document = "index.html"
}
}
And due to latest changes on provider configuration and Deprecation warning I got because of changes, I divided my bucket resource to 3 like below;
resource "aws_s3_bucket" "bucket" {
bucket = "bucket"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::bucket/*"
}
]
}
EOF
}
resource "aws_s3_bucket_acl" "bucket-acl" {
bucket = aws_s3_bucket.bucket.id
acl = "public-read"
}
resource "aws_s3_bucket_website_configuration" "bucket-website-config" {
bucket = aws_s3_bucket.bucket.id
index_document {
suffix = "index.html"
}
error_document {
key = "index.html"
}
}
I ran terraform plan, Output was like below;
# aws_s3_bucket.bucket will be updated in-place
~ resource "aws_s3_bucket" "bucket" {
~ acl = "public-read" -> "private"
id = "bucket"
tags = {}
# (13 unchanged attributes hidden)
- website {
- error_document = "index.html" -> null
- index_document = "index.html" -> null
}
# (1 unchanged block hidden)
}
# aws_s3_bucket_acl.bucket-acl will be created
+ resource "aws_s3_bucket_acl" "bucket-acl" {
+ acl = "public-read"
+ bucket = "bucket"
+ id = (known after apply)
+ access_control_policy {
+ grant {
+ permission = (known after apply)
+ grantee {
+ display_name = (known after apply)
+ email_address = (known after apply)
+ id = (known after apply)
+ type = (known after apply)
+ uri = (known after apply)
}
}
+ owner {
+ display_name = (known after apply)
+ id = (known after apply)
}
}
}
# aws_s3_bucket_website_configuration.bucket-website-config will be created
+ resource "aws_s3_bucket_website_configuration" "bucket-website-config" {
+ bucket = "bucket"
+ id = (known after apply)
+ website_domain = (known after apply)
+ website_endpoint = (known after apply)
+ error_document {
+ key = "index.html"
}
+ index_document {
+ suffix = "index.html"
}
}
Despite the confusion (because I couldn't understand the changes on aws_s3_bucket. Because I'm using the same configuration values basically), I ran terraform apply to see what will be happening.
After every change is applied, I ran terraform plan to make sure everything is up-to-date. After this point, my environment entered kind of a vicious circle here.
Second terraform plan output is;
aws_s3_bucket.bucket will be updated in-place
~ resource "aws_s3_bucket" "bucket" {
id = "bucket"
tags = {}
# (14 unchanged attributes hidden)
- website {
- error_document = "index.html" -> null
- index_document = "index.html" -> null
}
# (1 unchanged block hidden)
}
As we can see, it tries to remove website configuration from bucket. I ran terraform apply for this as well and after apply, I ran terraform plan for the 3rd time;
# aws_s3_bucket_website_configuration.bucket-website-config will be created
+ resource "aws_s3_bucket_website_configuration" "bucket-website-config" {
+ bucket = "bucket"
+ id = (known after apply)
+ website_domain = (known after apply)
+ website_endpoint = (known after apply)
+ error_document {
+ key = "index.html"
}
+ index_document {
+ suffix = "index.html"
}
}
When I apply this, Terraform is trying to remove website config again, And these circle of changes goes on and on.
Is this a bug, are there anyone stumbled upon this issue? Is there any solution other than adding ignore_changes block or downgrading provider version?
Any help will be appreciated,
Thank you very much.
I had exactly the same case and I ran into it because of a too old provider version.
I was also using a ~3.62 AWS provider.
According to the provider changelog some of this resources just got added with 4.0.0:
New Resource: aws_s3_bucket_website_configuration (#22648)
New Resource: aws_s3_bucket_acl (#22853)
I switched to version >= 4.4 for the AWS provider and afterwards everything was working as expected (just to mention it, I have chosen 4.4 for additional reasons not related to this problem. 4.0 should have also already been enough).
As @lopin said, it's an old provider version problem. In addition to @Oguzhan Aygun's lifecycle workaround, you can use the old provider version's method, which is the website block inside the aws_s3_bucket resource, like the following;
resource "aws_s3_bucket" "b" {
bucket = "s3-website-test.hashicorp.com"
website {
index_document = "index.html"
error_document = "error.html"
routing_rules = ...
}
}

Terraform Firebase Web Application

I have some trouble with this terraform file I wrote to define a Firebase application in my org account:
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "3.86.0"
}
}
}
provider "google-beta" {
credentials = file("service-account-credentials.json")
project = var.gcp_project_id
region = var.region
zone = var.zone
}
resource "google_project" "default" {
provider = google-beta
project_id = var.gcp_project_id
name = "Optic OTP API"
org_id = var.gcp_organization_id
}
resource "google_firebase_project" "default" {
provider = google-beta
project = google_project.default.project_id
}
resource "google_firebase_web_app" "basic" {
provider = google-beta
project = google_project.default.project_id
display_name = "Optic OTP API"
depends_on = [google_firebase_project.default]
}
data "google_firebase_web_app_config" "basic" {
provider = google-beta
web_app_id = google_firebase_web_app.basic.app_id
}
resource "google_storage_bucket" "default" {
provider = google-beta
name = "firebase-optic-storage"
}
resource "google_storage_bucket_object" "default" {
provider = google-beta
bucket = google_storage_bucket.default.name
name = "firebase-config.json"
content = jsonencode({
appId = google_firebase_web_app.basic.app_id
apiKey = data.google_firebase_web_app_config.basic.api_key
authDomain = data.google_firebase_web_app_config.basic.auth_domain
databaseURL = lookup(data.google_firebase_web_app_config.basic, "database_url", "")
storageBucket = lookup(data.google_firebase_web_app_config.basic, "storage_bucket", "")
messagingSenderId = lookup(data.google_firebase_web_app_config.basic, "messaging_sender_id", "")
measurementId = lookup(data.google_firebase_web_app_config.basic, "measurement_id", "")
})
}
I followed the official terraform plugin documentation here
I’m using a Service Account created in the company GCP org within the Firebase Service Management Service Agent role:
But when I run terraform plan I get
Error when reading or editing Storage Bucket "firebase-optic-storage": googleapi: Error 403: XXX does not have storage.buckets.get access to the Google Cloud Storage bucket.
Even if the service account’s role has it!
$ gcloud projects get-iam-policy optic-web-otp
# returns
bindings:
- members:
- serviceAccount:XXX
role: roles/firebase.managementServiceAgent
- members:
- serviceAccount:XXX
role: roles/firebase.sdkAdminServiceAgent
- members:
- serviceAccount:XXX
role: roles/firebase.sdkProvisioningServiceAgent
- members:
- user:MY-EMAIL
role: roles/owner
etag:
version: 1
(The XXX is the right service account identifier)
Do you have some hints to check what is missing from my Service Account?
If the roles that you listed are the only ones that your account has, you lack roles that allow you to access Cloud Storage. The command you used to check the roles doesn't give you the correct information.
Correct solution (described in this answer) would be to run this :
gcloud projects get-iam-policy <your project name> \
--flatten="bindings[].members" \
--format='table(bindings.role)' \
--filter="bindings.members:<your account name>"
If you don't see any of these roles:
roles/storage.objectAdmin
roles/storage.admin
roles/storage.objectCreator
described here you won't be able to create any buckets/objects.
In this case add these roles to your service account and try again.
For example:
gcloud projects add-iam-policy-binding optic-web-otp \
--member=user:my-user@example.com --role=roles/storage.admin

flow log for specific ENI

So I am supposed to be able to create a VPC flow log for a specific instance network interface.
I have been able to create a VPC flow log for the entire VPC, but not for a specific instance network interface. If I create an instance, it comes with an ENI. I would think that I should be able to inspect the instance to find the ENI and get the ENI ID.
for this source code
resource "aws_instance" "master_inst" { ...}
resource "aws_flow_log" "example-instance-flow-log" {
provider = aws.region_master
iam_role_arn = aws_iam_role.master-vpc-flow-log-role.arn
log_destination = aws_cloudwatch_log_group.master-instance-flow-log.arn
traffic_type = "ALL"
eni_id = aws_instance.master_inst.network_interface.id
}
resource "aws_cloudwatch_log_group" "master-instance-flow-log" {
provider = aws.region_master
name = "master-instance-flow-log"
}
I am getting
Error: Cannot index a set value
│
│ on ../../modules/instances.tf line 78, in resource "aws_flow_log" "example-instance-flow-log":
│ 78: eni_id = aws_instance.master_inst.network_interface.id
│
│ Block type "network_interface" is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if"
│ clause.
this does the trick
In order for terraform destroy to clean up the log group, the role needs to have permission to destroy the log group. Unfortunately, even with the delete action added to the policy, roughly 1 out of 3 destroys does not actually delete the log group, so you have to keep the console open to delete the log group manually.
resource "aws_iam_role" "flowlog-role" {
provider = aws.region_master
name = "flowlog-role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "vpc-flow-logs.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_iam_role_policy" "flowlog-role-policy" {
provider = aws.region_master
name = "flowlog-role-policy"
role = aws_iam_role.flowlog-role.id
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:DeleteLogGroup",
"logs:CreateLogStream",
"logs:DeleteLogStream",
"logs:PutLogEvents",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
EOF
}
resource "aws_cloudwatch_log_group" "master-instance-flowlog-grp" {
count = var.enable_instance_flowlog? 1 : 0
provider = aws.region_master
name = "master-instance-flowlog-grp"
retention_in_days = 3 ## need to specify number of days otherwise terraform destroy will not remove log group
}
resource "aws_flow_log" "master-instance-flowlog" {
count = var.enable_instance_flowlog? 1 : 0
provider = aws.region_master
iam_role_arn = aws_iam_role.flowlog-role.arn
log_destination = aws_cloudwatch_log_group.master-instance-flowlog-grp[count.index].arn
traffic_type = "ALL"
eni_id = aws_instance.master_instance.primary_network_interface_id
}

Pass variables from terraform to arm template

I am deploying an ARM template with Terraform.
We deploy all our Azure infra with Terraform but for AKS there are some preview features which are not in terraform yet so we want to deploy an AKS cluster with an ARM template.
If I create a Log Analytics workspace with TF, how can I pass the workspace id to ARM.
resource "azurerm_resource_group" "test" {
name = "k8s-test-bram"
location = "westeurope"
}
resource "azurerm_log_analytics_workspace" "test" {
name = "lawtest"
location = "${azurerm_resource_group.test.location}"
resource_group_name = "${azurerm_resource_group.test.name}"
sku = "PerGB2018"
retention_in_days = 30
}
So here is a snippet of the AKS ARM where I want to enable monitoring and I refer to the workspaceresourceId. But how do I define/declare the parameter to get the id from the workspace that I created with TF
"properties": {
"kubernetesVersion": "[parameters('kubernetesVersion')]",
"enableRBAC": "[parameters('EnableRBAC')]",
"dnsPrefix": "[parameters('DnsPrefix')]",
"addonProfiles": {
"httpApplicationRouting": {
"enabled": false
},
"omsagent": {
"enabled": true,
"config": {
"logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]"
}
}
},
you could use the parameters property of the azurerm_template_deployment deployment to pass in parameters:
parameters = {
"workspaceResourceId" = "${azurerm_log_analytics_workspace.test.id}"
}
I think it should look more or less like that, here's the official doc on this.

Identityserver unauthorized_client error in implicit flow

My Identity Server works well in some weeks after that I have gotten an unauthorized_client error, I don't know why.
Identity Server host in http://localhost:5001
Angular Started with .Net Core project in http://localhost:4200
The exact error is:
Sorry, there was an error: unauthorized_client
Unknown client or client not enabled
In the Identity Server, my client defined as follow:
var clients = new List<Client>
{
new Client
{
ClientId = "app.spa.client",
ClientName = "Client Application",
AllowedGrantTypes = GrantTypes.Implicit,
AllowAccessTokensViaBrowser = true,
RequireConsent = false,
RedirectUris =
{
"http://localhost:4200/assets/oidc-login-redirect.html",
"http://localhost:4200/assets/silent-redirect.html"
},
PostLogoutRedirectUris = { "http://localhost:4200/?postLogout=true" },
AllowedCorsOrigins = new[] { "http://localhost:4200/" },
AllowedScopes =
{
IdentityServerConstants.StandardScopes.OpenId,
IdentityServerConstants.StandardScopes.Profile,
"webapi"
},
IdentityTokenLifetime = 120,
AccessTokenLifetime = 120
}
};
And in Angular project, I'm using from oidc-client and my config is like follow:
var config = {
authority: "http://localhost:5001/",
client_id: "app.spa.client",
redirect_uri: `http://localhost:4200/assets/oidc-login-redirect.html`,
scope: "openid profile webapi",
response_type: "id_token token",
post_logout_redirect_uri: `http://localhost:4200/?postLogout=true`,
userStore: new WebStorageStateStore({ store: window.localStorage }),
automaticSilentRenew: true,
silent_redirect_uri: `http://localhost:4200/assets/silent-redirect.html`
};
Have you ever seen this error?
How I can find more details of this error?
Actually, I found the problem, The IdentityServer4 package in Identity service updated from version 2.4.0 to 2.5.0 but I can't resolve this problem.
Eventually, I'm forced to be down-grade to 2.4.0 version and my problem solved.
Any Idea to solve this problem in IdentityServer4 version 2.5.0?

Resources