I don’t quite understand how a Terraform directory is meant to be set up, but mine seems pretty basic. It keeps complaining about empty values even though they are set. Can someone please take a look and tell me what the issue could be?

Snippet of bootcamp2.tf:

provider "aws" {
  region     = var.region

  default_tags {
    tags = {
      source = "/home/ubuntu/bootcamp-terraform-master"
      owner_name = var.owner-name
      owner_email = var.owner-email
      purpose = var.purpose
    }
  }
}


// Resources

resource "aws_instance" "zookeepers" {
  count         = var.zk-count
  ami           = var.aws-ami-id
  instance_type = var.zk-instance-type
  key_name = var.key-name

  root_block_device {
    volume_size = 100
  }

  tags = {
    Name = "${var.owner-name}-zookeeper-${count.index}"
"bootcamp2.tf" 269L, 7806C                                                                                                                              14,0-1        Top
provider "aws" {
  region     = var.region

  default_tags {
    tags = {
      source = "/home/ubuntu/bootcamp-terraform-master"
      owner_name = var.owner-name
      owner_email = var.owner-email
      purpose = var.purpose
    }
  }
}


// Resources

resource "aws_instance" "zookeepers" {
  count         = var.zk-count
  ami           = var.aws-ami-id
  instance_type = var.zk-instance-type
  key_name = var.key-name

  root_block_device {
    volume_size = 100
  }

  tags = {
    Name = "${var.owner-name}-zookeeper-${count.index}"
    description = "zookeeper nodes - Managed by Terraform"
    role = "zookeeper"
    zookeeperid = count.index
    Schedule = "zookeeper-mon-8am-fri-6pm"
    sshUser = var.linux-user
    region = var.region
    role_region = "zookeepers-${var.region}"
  }

  subnet_id = var.subnet-id[count.index % length(var.subnet-id)]
  availability_zone = var.availability-zone[count.index % length(var.availability-zone)]
  vpc_security_group_ids = var.vpc-security-group-ids
  associate_public_ip_address = true
}

resource "aws_route53_record" "zookeepers" {
  count = var.zk-count
  zone_id = var.hosted-zone-id
  name = "zookeeper-${count.index}.${var.dns-suffix}"
  type = "A"
  ttl = "300"
  records = ["${element(aws_instance.zookeepers.*.private_ip, count.index)}"]
}

resource "aws_instance" "brokers" {
  count         = var.broker-count
  ami           = var.aws-ami-id
  instance_type = var.broker-instance-type
  availability_zone = var.availability-zone[count.index % length(var.availability-zone)]

  # security_groups = ["${var.security_group}"]
  key_name = var.key-name

  root_block_device {
    volume_size = 64 # 64 GB
  }

  tags = {
    Name = "${var.owner-name}-broker-${count.index}"
    description = "broker nodes - Managed by Terraform"
    nice-name = "kafka-${count.index}"
    big-nice-name = "follower-kafka-${count.index}"
    brokerid = count.index
    role = "broker"
    sshUser = var.linux-user
    # sshPrivateIp = true // this is only checked for existence, not if it's true or false by terraform.py (ati)
    createdBy = "terraform"
    Schedule = "kafka-mon-8am-fri-6pm"
    # ansible_python_interpreter = "/usr/bin/python3"
    #EntScheduler = "mon,tue,wed,thu,fri;1600;mon,tue,wed,thu;fri;sat;0400;"
    region = var.region
    role_region = "brokers-${var.region}"
  }

  subnet_id = var.subnet-id[count.index % length(var.subnet-id)]
  vpc_security_group_ids = var.vpc-security-group-ids
  associate_public_ip_address = true
}

resource "aws_route53_record" "brokers" {
  count = var.broker-count
  zone_id = var.hosted-zone-id
  name = "kafka-${count.index}.${var.dns-suffix}"
  type = "A"
  ttl = "300"
  records = ["${element(aws_instance.brokers.*.private_ip, count.index)}"]
}

resource "aws_instance" "connect-cluster" {
  count         = var.connect-count
  ami           = var.aws-ami-id
  instance_type = var.connect-instance-type
  availability_zone = var.availability-zone[count.index % length(var.availability-zone)]
  key_name = var.key-name
  tags = {
    Name = "${var.owner-name}-connect-${count.index}"
    description = "Connect nodes - Managed by Terraform"
    role = "connect"
    Schedule = "mon-8am-fri-6pm"
    sshUser = var.linux-user
    region = var.region
    role_region = "connect-${var.region}"
  }

  root_block_device {
    volume_size = 20 # 20 GB
  }

  subnet_id = var.subnet-id[count.index % length(var.subnet-id)]
  vpc_security_group_ids = var.vpc-security-group-ids
  associate_public_ip_address = true
}

resource "aws_route53_record" "connect-cluster" {
  count = var.connect-count
  zone_id = var.hosted-zone-id
  name = "connect-${count.index}.${var.dns-suffix}"
  type = "A"
  ttl = "300"
  records = ["${element(aws_instance.connect-cluster.*.private_ip, count.index)}"]
}

resource "aws_instance" "schema" {
  count         = var.schema-count
  ami           = var.aws-ami-id
  instance_type = var.schema-instance-type
  availability_zone = var.availability-zone[count.index % length(var.availability-zone)]
  key_name = var.key-name
  tags = {
    Name = "${var.owner-name}-schema-${count.index}"
    description = "Schema nodes - Managed by Terraform"
    role = "schema"
    Schedule = "mon-8am-fri-6pm"
    sshUser = var.linux-user
    region = var.region
    role_region = "schema-${var.region}"
  }

  root_block_device {
    volume_size = 20 # 20 GB
  }

  subnet_id = var.subnet-id[count.index % length(var.subnet-id)]
  vpc_security_group_ids = var.vpc-security-group-ids
  associate_public_ip_address = true
}

resource "aws_route53_record" "schema" {
  count = var.schema-count
  zone_id = var.hosted-zone-id
  name = "schema-${count.index}.${var.dns-suffix}"
  type = "A"
  ttl = "300"
  records = ["${element(aws_instance.schema.*.private_ip, count.index)}"]
}

resource "aws_instance" "control-center" {
  count         = var.c3-count
  ami           = var.aws-ami-id
  instance_type = var.c3-instance-type
  availability_zone = var.availability-zone[count.index % length(var.availability-zone)]
  key_name = var.key-name

  root_block_device {
    volume_size = 64 # 64GB
  }

  tags = {
    Name = "${var.owner-name}-control-center-${count.index}"
    description = "Control Center nodes - Managed by Terraform"
    role = "schema"
    Schedule = "mon-8am-fri-6pm"
    sshUser = var.linux-user
    region = var.region
    role_region = "schema-${var.region}"
  }

  subnet_id = var.subnet-id[count.index % length(var.subnet-id)]
  vpc_security_group_ids = var.vpc-security-group-ids
  associate_public_ip_address = true
}

resource "aws_route53_record" "control-center" {
  count = var.c3-count
  zone_id = var.hosted-zone-id
  name = "controlcenter-${count.index}.${var.dns-suffix}"
  type = "A"
  ttl = "300"
  records = ["${element(aws_instance.control-center.*.private_ip, count.index)}"]
}

resource "aws_instance" "rest" {
  count         = var.rest-count
  ami           = var.aws-ami-id
  instance_type = var.rest-instance-type
  availability_zone = var.availability-zone[count.index % length(var.availability-zone)]

  key_name = var.key-name

  root_block_device {
    volume_size = 20 # 20 GB
  }

  tags = {
    Name = "${var.owner-name}-rest-${count.index}"
    description = "Rest nodes - Managed by Terraform"
    role = "schema"
    Schedule = "mon-8am-fri-6pm"
    sshUser = var.linux-user
    region = var.region
    role_region = "schema-${var.region}"
  }

  subnet_id = var.subnet-id[count.index % length(var.subnet-id)]
  vpc_security_group_ids = var.vpc-security-group-ids
  associate_public_ip_address = true
}

resource "aws_route53_record" "rest" {
  count = var.rest-count
  zone_id = var.hosted-zone-id
  name = "rest-${count.index}.${var.dns-suffix}"
  type = "A"
  ttl = "300"
  records = ["${element(aws_instance.rest.*.private_ip, count.index)}"]
}

resource "aws_instance" "ksql" {
  count         = var.ksql-count
  ami           = var.aws-ami-id
  instance_type = var.ksql-instance-type
  availability_zone = var.availability-zone[count.index % length(var.availability-zone)]
  key_name = var.key-name

  root_block_device {
    volume_size = 64 # 64 GB
  }

  tags = {
    Name = "${var.owner-name}-ksql-${count.index}"
    description = "Rest nodes - Managed by Terraform"
    role = "schema"
    Schedule = "mon-8am-fri-6pm"
    sshUser = var.linux-user
    region = var.region
    role_region = "schema-${var.region}"
  }

  subnet_id = var.subnet-id[count.index % length(var.subnet-id)]
  vpc_security_group_ids = var.vpc-security-group-ids
  associate_public_ip_address = true
}

resource "aws_route53_record" "ksql" {
  count = var.ksql-count
  zone_id = var.hosted-zone-id
  name = "ksql-${count.index}.${var.dns-suffix}"
  type = "A"
  ttl = "300"
  records = ["${element(aws_instance.ksql.*.private_ip, count.index)}"]
}

terraform plan runs fine, but I keep running into these errors when running terraform apply:

╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.zookeepers[1],
│   on bootcamp2.tf line 17, in resource "aws_instance" "zookeepers":
│   17: resource "aws_instance" "zookeepers" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.zookeepers[0],
│   on bootcamp2.tf line 17, in resource "aws_instance" "zookeepers":
│   17: resource "aws_instance" "zookeepers" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.zookeepers[2],
│   on bootcamp2.tf line 17, in resource "aws_instance" "zookeepers":
│   17: resource "aws_instance" "zookeepers" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.brokers[0],
│   on bootcamp2.tf line 53, in resource "aws_instance" "brokers":
│   53: resource "aws_instance" "brokers" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.brokers[1],
│   on bootcamp2.tf line 53, in resource "aws_instance" "brokers":
│   53: resource "aws_instance" "brokers" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.brokers[2],
│   on bootcamp2.tf line 53, in resource "aws_instance" "brokers":
│   53: resource "aws_instance" "brokers" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.connect-cluster[0],
│   on bootcamp2.tf line 97, in resource "aws_instance" "connect-cluster":
│   97: resource "aws_instance" "connect-cluster" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.connect-cluster[1],
│   on bootcamp2.tf line 97, in resource "aws_instance" "connect-cluster":
│   97: resource "aws_instance" "connect-cluster" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.schema[0],
│   on bootcamp2.tf line 131, in resource "aws_instance" "schema":
│  131: resource "aws_instance" "schema" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.schema[1],
│   on bootcamp2.tf line 131, in resource "aws_instance" "schema":
│  131: resource "aws_instance" "schema" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.control-center[0],
│   on bootcamp2.tf line 165, in resource "aws_instance" "control-center":
│  165: resource "aws_instance" "control-center" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.rest[0],
│   on bootcamp2.tf line 200, in resource "aws_instance" "rest":
│  200: resource "aws_instance" "rest" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.ksql[0],
│   on bootcamp2.tf line 236, in resource "aws_instance" "ksql":
│  236: resource "aws_instance" "ksql" {
│
╵
╷
│ Error: error collecting instance settings: empty result
│
│   with aws_instance.ksql[1],
│   on bootcamp2.tf line 236, in resource "aws_instance" "ksql":
│  236: resource "aws_instance" "ksql" {

All of the variables are set in the variables.tf file, with values supplied via the .tfvars file (a placeholder sketch of the .tfvars follows the variables below):

variable "owner-name" {
  default = "wetfwefwef"
}

variable "owner-email" {
  default = "stwfefxef.io"
}

variable "dns-suffix" {
  default = "srgrwgsofxfwegwegia"
  description = "Suffix for DNS entry in Route 53. No spaces!"
}

variable "purpose" {
  default = "rhwgrwx"
}

variable "key-name" {
  default = "tertqwf"
}

variable "zk-count" {
  default = 3
}

variable "broker-count" {
  default = 3
}

variable "connect-count" {
  default = 2
}

variable "schema-count" {
  default = 2
}

variable "rest-count" {
  default = 1
}

variable "c3-count" {
  default = 1
}

variable "ksql-count" {
  default = 2
}

variable "zk-instance-type" {
  default = "t3a.large"
}

variable "broker-instance-type" {
  default = "t3a.large"
}

variable "schema-instance-type" {
  default = "t3a.large"
}

variable "connect-instance-type" {
  default = "t3a.large"
}

variable "rest-instance-type" {
  default = "t3a.large"
}

variable "c3-instance-type" {
  default = "t3a.large"
}

variable "ksql-instance-type" {
  default = "t3a.large"
}

variable "client-instance-type" {
  default = "t3a.large"
}

variable "hosted-zone-id" {
}


variable "aws-ami-id"  {
  default = "ami-00000000"
}

variable "linux-user" {
  default = "ubuntu" // ec2-user
}

variable "vpc-id" {
}

variable "subnet-id" {
  type = list(string)
}

variable "vpc-security-group-ids" {
  type = list(string)
}
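
For reference, the variables without defaults (hosted-zone-id, vpc-id, subnet-id, vpc-security-group-ids) are set in terraform.tfvars along these lines (the values here are placeholders, not the real ones):

hosted-zone-id         = "Z0000000000000000000"
vpc-id                 = "vpc-00000000"
subnet-id              = ["subnet-00000000", "subnet-11111111"]
vpc-security-group-ids = ["sg-00000000"]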

2 Answers

  1. I stumbled on this while trying to quickly find the answer to why I was getting the same error.

    I’m pretty sure it’s because the default AMI you’re supplying doesn’t exist. Otherwise, I think you’re possibly supplying a bad value as a variable, or the AMI is not shared with the account you’re running it in.
    In my case, it was the last problem: in the console, I had added the account to share the AMI with, but hadn’t followed up with a save :-/

    Error: error collecting instance settings: empty result

    isn’t very descriptive for diagnosing the problem. It could potentially be some other field not returning results; I haven’t looked further. If it were a problem with the key pair, as suggested in one of the comments, you would have clearly seen InvalidKeyPair.NotFound in the error message.
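
    One way to catch this earlier is to resolve the AMI through a data source, so a missing or unshared image fails at plan time with a clearer message. A minimal sketch, not your exact code; adjust owners to whatever account owns or shared the AMI:

    # Fails during plan/refresh if the image can't be found from this account,
    # which is more actionable than "empty result" at apply time.
    data "aws_ami" "selected" {
      owners = ["self"] # assumption: or the account ID that shared the AMI with you

      filter {
        name   = "image-id"
        values = [var.aws-ami-id]
      }
    }

    resource "aws_instance" "zookeepers" {
      count = var.zk-count
      ami   = data.aws_ami.selected.id # same image, validated up front
      # ...rest of the resource unchanged
    }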

    To debug further, you can increase the logging, e.g. export TF_LOG=debug

  2. It was an AWS AMI ID issue. You first need to subscribe to or add the correct AMI manually; after that, put this AMI ID in the Terraform code. That worked for me.
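
    For example, once the image is accessible from your account, point the variable at the real ID (the value below is a placeholder):

    variable "aws-ami-id" {
      default = "ami-0123456789abcdef0" # an AMI that exists in your region and is accessible to your account
    }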
