I am trying to deploy an EKS cluster using Terraform. I am using the "hashicorp/aws" provider, version 5.16.0, and I am trying to deploy in the "il-central-1" region.

I am getting the error "Invalid AWS Region: il-central-1".

This is my provider block:

provider "aws" {
  region = var.aws_region
}

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "5.16.0"
    }
    archive = {
      source = "hashicorp/archive"
    }
  }

    backend "s3" {
        bucket                 = "state-files"
        key                    = "terraform.tfstate"
        region                 = var.aws_region
        dynamodb_table         = "terraform-lock"
        encrypt                = true
        skip_region_validation = true
    }

  required_version = ">= 0.14.9"
}

This is my main.tf:

# Define the EKS Cluster
resource "aws_eks_cluster" "eks_cluster" {
  version  = var.cluster_version
  name     = var.cluster_name
  role_arn = aws_iam_role.role.arn
  vpc_config {
    subnet_ids = [xxxxxx,xxxxxx,xxxxxxx]
    endpoint_private_access = true
    security_group_ids = [aws_security_group.eks_cluster_sg.id]
  }
  # kubernetes_network_config {
  #   service_ipv4_cidr = "xx.xx.xx.xx/22"
  # }
  
  tags = {
    Environment = "test"
    Terraform   = "true"
  }
}

resource "aws_eks_node_group" "eks_node_group" {
  cluster_name    = aws_eks_cluster.eks_cluster.name
  version         = aws_eks_cluster.eks_cluster.version
  # release_version = nonsensitive(data.aws_ssm_parameter.eks_ami_release_version.value)  
  release_version = "xxxxxx"
  ami_type = "xxxxxx"
  # release_version = 
  disk_size = 20
  capacity_type = "ON_DEMAND"
  instance_types = ["xx.medium"]
  node_group_name = "xxxxxxxx"
  node_role_arn   = aws_iam_role.eks_nodegroup_role.arn
  subnet_ids = [xxxxxx,xxxxxx,xxxxx]

  scaling_config {
    desired_size = var.desired_size
    max_size     = var.max_size
    min_size     = var.min_size
  }

  update_config {
    max_unavailable = 1
  }
  tags = {
    Environment = "test"
    Terraform   = "true"
  }
}

resource "aws_eks_addon" "coredns" {
  cluster_name                = aws_eks_cluster.eks_cluster.name
  depends_on = [aws_eks_node_group.eks_node_group]
  addon_name                  = "coredns"
  addon_version               = "v1.10.1-eksbuild.4"
  service_account_role_arn    = aws_iam_role.eks_addon_role.arn
  resolve_conflicts_on_create = "OVERWRITE"

  tags = {
    Environment = "test"
    Terraform   = "true"
  }
}

resource "aws_eks_addon" "kube_proxy" {
  cluster_name        = aws_eks_cluster.eks_cluster.name
  depends_on = [aws_eks_node_group.eks_node_group]    
  addon_name        = "kube-proxy"
  addon_version     = "v1.27.4-eksbuild.2"
  resolve_conflicts = "OVERWRITE"
  service_account_role_arn = aws_iam_role.eks_addon_role.arn  
  tags = {
    Environment = "test"
    Terraform   = "true"
  }
}

resource "aws_eks_addon" "vpc_cni" {
  cluster_name        = aws_eks_cluster.eks_cluster.name
  depends_on = [aws_eks_node_group.eks_node_group]  
  addon_name        = "vpc-cni"
  addon_version     = "xxxxxx"
  # resolve_conflicts = "OVERWRITE"
  service_account_role_arn = aws_iam_role.eks_addon_role.arn

  tags = {
    Environment = "test"
    Terraform   = "true"
  }
}

resource "aws_eks_addon" "ebs_csi_driver" {
  cluster_name        = aws_eks_cluster.eks_cluster.name
  depends_on = [aws_eks_node_group.eks_node_group]
  addon_name          = "aws-ebs-csi-driver"
  addon_version       = "v1.22.0-eksbuild.2"
  resolve_conflicts   = "OVERWRITE"
  service_account_role_arn = aws_iam_role.eks_addon_role.arn
  tags = {
    Environment = "staging"
    Terraform   = "true"
  }
}

resource "aws_eks_addon" "efs_csi_driver" {
  cluster_name        = aws_eks_cluster.eks_cluster.name
  depends_on = [aws_eks_node_group.eks_node_group]
  addon_name          = "aws-efs-csi-driver"
  addon_version       = "v1.5.8-eksbuild.1"
  resolve_conflicts   = "OVERWRITE"
  service_account_role_arn = aws_iam_role.eks_addon_role.arn
  tags = {
    Environment = "staging"
    Terraform   = "true"
  }
}

data "tls_certificate" "eks_cluster_certificate" {
  url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer
}

I tried changing the provider version and deploying in another region as well, which returned the same error. Now I am confused about where it is picking up the "il-central-1" region from, since I changed the variable value to another region.

Is this because of the state bucket, which was created in the "il-central-1" region?

2 Answers


  1. Chosen as BEST ANSWER

    Everyone, I found the issue:

    The problem was not the provider version or the backend configuration; those were set up correctly. Our backend bucket is in the il-central-1 region.

    The issue was in the remote.tf file: in il-central-1, the terraform_remote_state data source could not read the tfstate file of another configuration.

    data "terraform_remote_state" "network_state" {
      backend = "s3"
      config = {
        bucket = "XXXXXX"
        region = "il-central-1"
        key    = "XXXXXXX"
        skip_region_validation = true # Adding skip_region_validation made it work like a charm.
      }
    }
    

  2. Because AWS regions each have their own independent set of endpoints and other metadata, the hashicorp/aws provider has a table of supported regions built into it, and each time AWS adds a new region that table must be updated and shipped in a subsequent release of the provider.

    The il-central-1 region was added to the relevant codebase on August 3, 2023 and shipped in the hashicorp/aws v5.12.0 release, so you will need at least v5.12 of the provider for il-central-1 to be accepted as valid.

    The same would be true for any new AWS region added in the future: there will always be some minimum hashicorp/aws provider release that supports any given AWS region.
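
    As a minimal sketch (assuming you simply want to raise the floor on the provider version rather than pin an exact release), a constraint like the following makes Terraform refuse to select any hashicorp/aws build older than v5.12.0:

    terraform {
      required_providers {
        aws = {
          source  = "hashicorp/aws"
          # v5.12.0 is the first release that recognizes il-central-1
          version = ">= 5.12.0"
        }
      }
    }

    Since your configuration already pins 5.16.0, the provider itself was not the blocker here; as the accepted answer shows, the remaining region check came from the S3 remote state configuration, which skip_region_validation bypasses.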
