I have the following Terraform configuration, which creates an Aurora cluster with two instances:
resource "aws_rds_cluster" "aurora" {
storage_encrypted = true
cluster_identifier = var.cluster_identifier
engine = "aurora-postgresql"
engine_mode = "provisioned"
engine_version = "13.6"
database_name = var.database_name
master_username = "test"
master_password = var.database_password
availability_zones = ["ap-southeast-2a", "ap-southeast-2b"]
db_subnet_group_name = var.db_subnet_group_name
serverlessv2_scaling_configuration {
max_capacity = 1.0
min_capacity = 0.5
}
tags = {
Name = "${var.prefix}-${var.environment}-rds-cluster"
Environment = "${var.prefix}-${var.environment}"
}
vpc_security_group_ids = var.aurora_security_group_id
skip_final_snapshot = true
}
resource "aws_rds_cluster_instance" "aurora" {
count = 2
availability_zone = var.availability_zones[count.index]
cluster_identifier = aws_rds_cluster.aurora.id
instance_class = "db.serverless"
engine = aws_rds_cluster.aurora.engine
engine_version = aws_rds_cluster.aurora.engine_version
db_subnet_group_name = var.db_subnet_group_name
publicly_accessible = true
}
If I make literally no changes to my configuration and run terraform plan, I receive the output below:
module.potentium_databases.module.potentium_rds_cluster.aws_rds_cluster_instance.aurora[1] must be replaced
-/+ resource "aws_rds_cluster_instance" "aurora" {
      + apply_immediately = (known after apply)
      ~ arn = "arn:aws:rds:ap-southeast-2:749732509682:db:tf-20220706042316120800000001" -> (known after apply)
      ~ ca_cert_identifier = "rds-ca-2019" -> (known after apply)
      ~ cluster_identifier = "potentium-cluster" -> (known after apply) # forces replacement
      ~ db_parameter_group_name = "default.aurora-postgresql13" -> (known after apply)
      ~ dbi_resource_id = "db-5AH6GR5KJNW4IXQ2BSGNPLL4FM" -> (known after apply)
      ~ endpoint = "tf-20220706042316120800000001.cv6x1exxvfdc.ap-southeast-2.rds.amazonaws.com" -> (known after apply)
      ~ engine_version_actual = "13.6" -> (known after apply)
      ~ id = "tf-20220706042316120800000001" -> (known after apply)
      ~ identifier = "tf-20220706042316120800000001" -> (known after apply)
      + identifier_prefix = (known after apply)
      ~ kms_key_id = "arn:aws:kms:ap-southeast-2:749732509682:key/a3f87bb9-f0b4-44a4-8677-bac5f0bb1546" -> (known after apply)
      + monitoring_role_arn = (known after apply)
      ~ performance_insights_enabled = false -> (known after apply)
      + performance_insights_kms_key_id = (known after apply)
      ~ performance_insights_retention_period = 0 -> (known after apply)
      ~ port = 5432 -> (known after apply)
      ~ preferred_backup_window = "13:51-14:21" -> (known after apply)
      ~ preferred_maintenance_window = "thu:15:39-thu:16:09" -> (known after apply)
      ~ storage_encrypted = true -> (known after apply)
      - tags = {} -> null
      ~ tags_all = {} -> (known after apply)
      ~ writer = false -> (known after apply)
        # (10 unchanged attributes hidden)
    }
Can anyone explain why Terraform thinks this resource needs to be recreated even though nothing has changed? It is causing me grief because of how long it takes to actually re-create the instances.
2 Answers
It appears my issue is that I was only specifying 2 Availability Zones instead of 3. I'm assuming that because Terraform/AWS is left to decide the third AZ, it must perform a re-create, as it does not know what to use.
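For what it's worth, RDS spreads an Aurora cluster across three Availability Zones even when fewer are listed, so AWS fills in the third AZ on its own, the stored state stops matching the configuration, and the provider treats that as a change that forces the cluster to be replaced. Below is a rough sketch of the two usual ways to settle it, assuming ap-southeast-2c is the remaining AZ available in your account (check what your region actually offers): either list all three AZs explicitly, or keep two and ignore the drift.

resource "aws_rds_cluster" "aurora" {
  # ... other arguments unchanged from the question ...

  # Option 1: list all three AZs so the configuration matches what AWS actually
  # assigns (ap-southeast-2c is assumed here)
  availability_zones = ["ap-southeast-2a", "ap-southeast-2b", "ap-southeast-2c"]

  # Option 2 (instead of the above): keep the original two AZs and tell Terraform
  # to ignore the extra one AWS adds:
  # lifecycle {
  #   ignore_changes = [availability_zones]
  # }
}

Either way the aim is the same: remove the availability_zones diff on the cluster so it no longer cascades down to the instances.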
As far as I know, a re-create like this is usually caused by a changed attribute, which the plan marks with # forces replacement. In your case:
~ cluster_identifier = "potentium-cluster" -> (known after apply) # forces replacement
Please double-check that attribute.
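To connect this with the first answer above: the instance takes its cluster_identifier from the cluster resource, so once the cluster itself is planned for replacement (here most likely because of the availability-zone drift), its id is only known after apply, and that is exactly what the # forces replacement line on the instance shows. A short illustration using the configuration from the question:

resource "aws_rds_cluster_instance" "aurora" {
  # This reference is why the instance reports
  #   cluster_identifier = "potentium-cluster" -> (known after apply) # forces replacement
  # whenever the cluster itself is due to be replaced.
  cluster_identifier = aws_rds_cluster.aurora.id
  # ...
}

So it may also be worth reading the plan for aws_rds_cluster.aurora itself to see which of its own attributes carries the # forces replacement marker.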