diff --git a/README.md b/README.md
index 45b06aaebac..d9596dd0da5 100644
--- a/README.md
+++ b/README.md
@@ -57,12 +57,12 @@ module "my-cluster" {
subnets = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
vpc_id = "vpc-1234556abcdef"
- worker_groups = [
- {
+ worker_groups = {
+ group = {
instance_type = "m4.large"
asg_max_size = 5
}
- ]
+ }
}
```
## Conditional creation
@@ -161,8 +161,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Source | Version |
|------|--------|---------|
-| [fargate](#module\_fargate) | ./modules/fargate | |
-| [node\_groups](#module\_node\_groups) | ./modules/node_groups | |
+| [fargate](#module\_fargate) | ./modules/fargate | n/a |
+| [node\_groups](#module\_node\_groups) | ./modules/node_groups | n/a |
+| [worker\_groups](#module\_worker\_groups) | ./modules/worker_groups | n/a |
## Resources
@@ -266,7 +267,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [subnets](#input\_subnets) | A list of subnets to place the EKS cluster and workers within. | `list(string)` | n/a | yes |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only. | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | n/a | yes |
-| [wait\_for\_cluster\_timeout](#wait\_for\_cluster\_timeout) | Allows for a configurable timeout (in seconds) when waiting for a cluster to come up | `number` | `300` | no |
+| [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available. | `number` | `300` | no |
| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
@@ -275,8 +276,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group. | `bool` | `false` | no |
| [worker\_create\_initial\_lifecycle\_hooks](#input\_worker\_create\_initial\_lifecycle\_hooks) | Whether to create initial lifecycle hooks provided in worker groups. | `bool` | `false` | no |
| [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | `bool` | `true` | no |
-| [worker\_groups](#input\_worker\_groups) | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
-| [worker\_groups\_launch\_template](#input\_worker\_groups\_launch\_template) | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
+| [worker\_groups](#input\_worker\_groups) | A map of maps defining worker group configurations to be defined using AWS Launch Templates. See workers\_group\_defaults for valid keys. | `any` | `{}` | no |
+| [worker\_groups\_launch\_template\_legacy](#input\_worker\_groups\_launch\_template\_legacy) | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
+| [worker\_groups\_legacy](#input\_worker\_groups\_legacy) | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | `number` | `1025` | no |
| [workers\_additional\_policies](#input\_workers\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
@@ -311,6 +313,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by var.node\_groups keys |
| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
| [security\_group\_rule\_cluster\_https\_worker\_ingress](#output\_security\_group\_rule\_cluster\_https\_worker\_ingress) | Security group rule responsible for allowing pods to communicate with the EKS cluster API. |
+| [worker\_groups](#output\_worker\_groups) | Outputs from EKS worker groups. Map of maps, keyed by var.worker\_groups keys |
| [worker\_iam\_instance\_profile\_arns](#output\_worker\_iam\_instance\_profile\_arns) | default IAM instance profile ARN for EKS worker groups |
| [worker\_iam\_instance\_profile\_names](#output\_worker\_iam\_instance\_profile\_names) | default IAM instance profile name for EKS worker groups |
| [worker\_iam\_role\_arn](#output\_worker\_iam\_role\_arn) | default IAM role ARN for EKS worker groups |
diff --git a/aws_auth.tf b/aws_auth.tf
index 6eb563203d3..bc3681360b7 100644
--- a/aws_auth.tf
+++ b/aws_auth.tf
@@ -1,6 +1,9 @@
locals {
+  ## DEPRECATED section to be removed once users have finished migrating to worker
+  ## nodes managed via maps. When updating, remember to update modules/worker_groups accordingly.
+
auth_launch_template_worker_roles = [
- for index in range(0, var.create_eks ? local.worker_group_launch_template_count : 0) : {
+ for index in range(0, var.create_eks ? local.worker_group_launch_template_legacy_count : 0) : {
worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
coalescelist(
aws_iam_instance_profile.workers_launch_template.*.role,
@@ -10,7 +13,7 @@ locals {
index
)}"
platform = lookup(
- var.worker_groups_launch_template[index],
+ var.worker_groups_launch_template_legacy[index],
"platform",
local.workers_group_defaults["platform"]
)
@@ -18,7 +21,7 @@ locals {
]
auth_worker_roles = [
- for index in range(0, var.create_eks ? local.worker_group_count : 0) : {
+ for index in range(0, var.create_eks ? local.worker_group_legacy_count : 0) : {
worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
coalescelist(
aws_iam_instance_profile.workers.*.role,
@@ -28,18 +31,20 @@ locals {
index,
)}"
platform = lookup(
- var.worker_groups[index],
+ var.worker_groups_legacy[index],
"platform",
local.workers_group_defaults["platform"]
)
}
]
+ ## ~DEPRECATED
# Convert to format needed by aws-auth ConfigMap
configmap_roles = [
for role in concat(
local.auth_launch_template_worker_roles,
local.auth_worker_roles,
+ module.worker_groups.aws_auth_roles,
module.node_groups.aws_auth_roles,
module.fargate.aws_auth_roles,
) :
diff --git a/cluster.tf b/cluster.tf
index 13d38a09a7a..91d74a3f662 100644
--- a/cluster.tf
+++ b/cluster.tf
@@ -57,6 +57,7 @@ resource "aws_security_group" "cluster" {
name_prefix = var.cluster_name
description = "EKS cluster security group."
vpc_id = var.vpc_id
+
tags = merge(
var.tags,
{
diff --git a/data.tf b/data.tf
index bc80e74a69e..a9ac01a7934 100644
--- a/data.tf
+++ b/data.tf
@@ -64,23 +64,23 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
}
data "aws_iam_role" "custom_cluster_iam_role" {
- count = var.manage_cluster_iam_resources ? 0 : 1
+ count = var.create_eks && !var.manage_cluster_iam_resources ? 1 : 0
name = var.cluster_iam_role_name
}
data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
- count = var.manage_worker_iam_resources ? 0 : local.worker_group_count
+ count = var.create_eks && !var.manage_worker_iam_resources ? local.worker_group_legacy_count : 0
name = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"iam_instance_profile_name",
local.workers_group_defaults["iam_instance_profile_name"],
)
}
data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" {
- count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count
+ count = var.create_eks && !var.manage_worker_iam_resources ? local.worker_group_launch_template_legacy_count : 0
name = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"iam_instance_profile_name",
local.workers_group_defaults["iam_instance_profile_name"],
)
diff --git a/docs/faq.md b/docs/faq.md
index 3b9e118f6b6..4fbf4314724 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -2,7 +2,7 @@
## How do I customize X on the worker group's settings?
-All the options that can be customized for worker groups are listed in [local.tf](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/local.tf) under `workers_group_defaults_defaults`.
+All the options that can be customized for worker groups are listed in [local.tf](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/modules/worker_groups/local.tf) under `workers_group_defaults_defaults`.
Please open Issues or PRs if you think something is missing.
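+
+For example, a sketch that overrides one of those defaults for a single worker group (the group name `big-disk` is illustrative; `instance_type` and `root_volume_size` are keys from `workers_group_defaults_defaults`):
+
+```
+worker_groups = {
+  big-disk = {
+    instance_type    = "m5.large"
+    root_volume_size = 200 # overrides the default root volume size
+  }
+}
+```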
@@ -61,12 +61,6 @@ You need to add the tags to the VPC and subnets yourself. See the [basic example
An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore\_tags-configuration-block). However this can also cause terraform to display a perpetual difference.
-## How do I safely remove old worker groups?
-
-You've added new worker groups. Deleting worker groups from earlier in the list causes Terraform to want to recreate all worker groups. This is a limitation with how Terraform works and the module using `count` to create the ASGs and other resources.
-
-The safest and easiest option is to set `asg_min_size` and `asg_max_size` to 0 on the worker groups to "remove".
-
## Why does changing the worker group's desired count not do anything?
The module is configured to ignore this value. Unfortunately Terraform does not support variables within the `lifecycle` block.
@@ -77,9 +71,9 @@ You can change the desired count via the CLI or console if you're not using the
If you are not using autoscaling and really want to control the number of nodes via terraform then set the `asg_min_size` and `asg_max_size` instead. AWS will remove a random instance when you scale down. You will have to weigh the risks here.
-## Why are nodes not recreated when the `launch_configuration`/`launch_template` is recreated?
+## Why are nodes not recreated when the `launch_configuration` is recreated?
-By default the ASG is not configured to be recreated when the launch configuration or template changes. Terraform spins up new instances and then deletes all the old instances in one go as the AWS provider team have refused to implement rolling updates of autoscaling groups. This is not good for kubernetes stability.
+By default the ASG is not configured to be recreated when the launch configuration changes. Terraform spins up new instances and then deletes all the old instances in one go as the AWS provider team have refused to implement rolling updates of autoscaling groups. This is not good for kubernetes stability.
You need to use a process to drain and cycle the workers.
@@ -137,14 +131,13 @@ Amazon EKS clusters must contain one or more Linux worker nodes to run core syst
1. Build AWS EKS cluster with the next workers configuration (default Linux):
```
-worker_groups = [
- {
- name = "worker-group-linux"
+worker_groups = {
+ worker-group-linux = {
instance_type = "m5.large"
platform = "linux"
asg_desired_capacity = 2
},
- ]
+ }
```
2. Apply commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use tab with name `Windows`)
@@ -152,20 +145,18 @@ worker_groups = [
3. Add one more worker group for Windows with required field `platform = "windows"` and update your cluster. Worker group example:
```
-worker_groups = [
- {
- name = "worker-group-linux"
+worker_groups = {
+ worker-group-linux = {
instance_type = "m5.large"
platform = "linux"
asg_desired_capacity = 2
},
- {
- name = "worker-group-windows"
+ worker-group-windows = {
instance_type = "m5.large"
platform = "windows"
asg_desired_capacity = 1
},
- ]
+ }
```
4. With `kubectl get nodes` you can see cluster with mixed (Linux/Windows) nodes support.
diff --git a/docs/spot-instances.md b/docs/spot-instances.md
index f140fe55ea2..da0d9645dda 100644
--- a/docs/spot-instances.md
+++ b/docs/spot-instances.md
@@ -22,57 +22,19 @@ Notes:
- There is an AWS blog article about this [here](https://aws.amazon.com/blogs/compute/run-your-kubernetes-workloads-on-amazon-ec2-spot-instances-with-amazon-eks/).
- Consider using [k8s-spot-rescheduler](https://github.com/pusher/k8s-spot-rescheduler) to move pods from on-demand to spot instances.
-## Using Launch Configuration
-
-Example worker group configuration that uses an ASG with launch configuration for each worker group:
-
-```hcl
- worker_groups = [
- {
- name = "on-demand-1"
- instance_type = "m4.xlarge"
- asg_max_size = 1
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=normal"
- suspended_processes = ["AZRebalance"]
- },
- {
- name = "spot-1"
- spot_price = "0.199"
- instance_type = "c4.xlarge"
- asg_max_size = 20
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
- suspended_processes = ["AZRebalance"]
- },
- {
- name = "spot-2"
- spot_price = "0.20"
- instance_type = "m4.xlarge"
- asg_max_size = 20
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
- suspended_processes = ["AZRebalance"]
- }
- ]
-```
-
## Using Launch Templates
Launch Template support is a recent addition to both AWS and this module. It might not be as tried and tested, but it's more suitable for spot instances as it allows multiple instance types in the same worker group:
```hcl
- worker_groups = [
- {
- name = "on-demand-1"
+ worker_groups = {
+ on-demand-1 = {
instance_type = "m4.xlarge"
asg_max_size = 10
kubelet_extra_args = "--node-labels=spot=false"
suspended_processes = ["AZRebalance"]
- }
- ]
-
-
- worker_groups_launch_template = [
- {
- name = "spot-1"
+ },
+ spot-1 = {
override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
spot_instance_pools = 4
asg_max_size = 5
@@ -80,7 +42,7 @@ Launch Template support is a recent addition to both AWS and this module. It mig
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
public_ip = true
},
- ]
+ }
```
## Using Launch Templates With Both Spot and On Demand
diff --git a/docs/upgrades.md b/docs/upgrades.md
index 88d29ae1bf6..17f86e59bad 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -58,3 +58,70 @@ Plan: 0 to add, 0 to change, 1 to destroy.
5. If everything sounds good to you, run `terraform apply`
After the first apply, we recommend you create a new node group and let the module use the `node_group_name_prefix` (by removing the `name` argument) to generate names, avoiding collisions during node group re-creation if needed, because the lifecycle is `create_before_destroy = true`.
+
+## Upgrade module to vXX.X.X for Worker Groups Managed as maps
+
+In this release, we added the ability to manage worker groups as maps (instead of lists), which makes adding and removing worker groups much safer.
+
+> NOTE: The new functionality only supports creating groups using Launch Templates!
+
+1. Run `terraform apply` with the previous module version. Make sure all changes are applied before proceeding.
+
+2. Upgrade your module and configure your worker groups by renaming the existing variables as follows:
+
+```
+worker_groups = [...] => worker_groups_legacy = [...]
+
+worker_groups_launch_template = [...] => worker_groups_launch_template_legacy = [...]
+```
+
+Example:
+
+FROM:
+
+```hcl
+ worker_groups_launch_template = [
+ {
+ name = "worker-group-1"
+ instance_type = "t3.small"
+ asg_desired_capacity = 2
+ public_ip = true
+ },
+ ]
+```
+
+TO:
+
+```hcl
+ worker_groups_launch_template_legacy = [
+ {
+ name = "worker-group-1"
+ instance_type = "t3.small"
+ asg_desired_capacity = 2
+ public_ip = true
+ },
+ ]
+```
+
+3. Run `terraform plan`. No infrastructure changes are expected.
+
+4. From now on, you can define worker groups the new way and migrate your workloads to them. Eventually, the legacy groups can be deleted.
+
+Example:
+
+```hcl
+ worker_groups_launch_template_legacy = [
+ {
+ name = "worker-group-1"
+ instance_type = "t3.small"
+ asg_desired_capacity = 2
+ },
+ ]
+
+ worker_groups = {
+ worker-group-1 = {
+ instance_type = "t3.small"
+ asg_desired_capacity = 2
+ },
+ }
+```
diff --git a/examples/basic/main.tf b/examples/basic/main.tf
index c6257a25eb4..bc52c5a5315 100644
--- a/examples/basic/main.tf
+++ b/examples/basic/main.tf
@@ -114,22 +114,39 @@ module "eks" {
vpc_id = module.vpc.vpc_id
- worker_groups = [
- {
- name = "worker-group-1"
+ // worker_groups_legacy = [
+ // {
+ // name = "worker-group-1"
+ // instance_type = "t3.small"
+ // additional_userdata = "echo foo bar"
+ // asg_desired_capacity = 2
+ // additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
+ // root_volume_type = "gp2"
+ // },
+ // {
+ // name = "worker-group-2"
+ // instance_type = "t3.medium"
+ // additional_userdata = "echo foo bar"
+ // additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
+ // asg_desired_capacity = 1
+ // root_volume_type = "gp2"
+ // },
+ // ]
+
+ worker_groups = {
+ worker-group-1 = {
instance_type = "t3.small"
additional_userdata = "echo foo bar"
asg_desired_capacity = 2
additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
},
- {
- name = "worker-group-2"
+ worker-group-2 = {
instance_type = "t3.medium"
additional_userdata = "echo foo bar"
additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
asg_desired_capacity = 1
},
- ]
+ }
worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
map_roles = var.map_roles
diff --git a/examples/basic/outputs.tf b/examples/basic/outputs.tf
index a0788aff1d2..40f0088ea5e 100644
--- a/examples/basic/outputs.tf
+++ b/examples/basic/outputs.tf
@@ -23,3 +23,7 @@ output "region" {
value = var.region
}
+output "worker_group_instance_profile_arns" {
+  description = "IAM instance profile ARNs for the EKS worker groups."
+ value = module.eks.worker_iam_instance_profile_arns
+}
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
index 86166df0e5c..ce0de6f7b17 100644
--- a/examples/bottlerocket/main.tf
+++ b/examples/bottlerocket/main.tf
@@ -22,7 +22,7 @@ module "eks" {
write_kubeconfig = false
manage_aws_auth = false
- worker_groups_launch_template = [
+ worker_groups_launch_template_legacy = [
{
name = "bottlerocket-nodes"
# passing bottlerocket ami id
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
index 1883ecc70c0..1167d827c38 100644
--- a/examples/instance_refresh/main.tf
+++ b/examples/instance_refresh/main.tf
@@ -232,7 +232,8 @@ module "eks" {
subnets = module.vpc.public_subnets
vpc_id = module.vpc.vpc_id
enable_irsa = true
- worker_groups_launch_template = [
+
+ worker_groups_launch_template_legacy = [
{
name = "refresh"
asg_max_size = 2
@@ -257,4 +258,29 @@ module "eks" {
]
}
]
+
+ // worker_groups = {
+ // refresh = {
+ // asg_max_size = 2
+ // asg_desired_capacity = 2
+ // instance_refresh_enabled = true
+ // instance_refresh_instance_warmup = 60
+ // public_ip = true
+ // metadata_http_put_response_hop_limit = 3
+ // update_default_version = true
+ // instance_refresh_triggers = ["tag"]
+ // tags = [
+ // {
+ // key = "aws-node-termination-handler/managed"
+ // value = ""
+ // propagate_at_launch = true
+ // },
+ // {
+ // key = "foo"
+ // value = "buzz"
+ // propagate_at_launch = true
+ // }
+ // ]
+ // }
+ // }
}
diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf
index e6c9fa40188..e4d05bb4482 100644
--- a/examples/irsa/main.tf
+++ b/examples/irsa/main.tf
@@ -30,6 +30,10 @@ module "vpc" {
public_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
enable_dns_hostnames = true
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.cluster_name}" = "shared"
+ }
+
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
@@ -44,11 +48,11 @@ module "eks" {
vpc_id = module.vpc.vpc_id
enable_irsa = true
- worker_groups = [
- {
- name = "worker-group-1"
+ worker_groups = {
+ worker-group-1 = {
instance_type = "t3.medium"
asg_desired_capacity = 1
+
tags = [
{
"key" = "k8s.io/cluster-autoscaler/enabled"
@@ -62,5 +66,5 @@ module "eks" {
}
]
}
- ]
+ }
}
diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf
index fe5df294ce9..5187fac28b2 100644
--- a/examples/launch_templates/main.tf
+++ b/examples/launch_templates/main.tf
@@ -17,8 +17,7 @@ provider "kubernetes" {
load_config_file = false
}
-data "aws_availability_zones" "available" {
-}
+data "aws_availability_zones" "available" {}
locals {
cluster_name = "test-eks-lt-${random_string.suffix.result}"
@@ -38,6 +37,14 @@ module "vpc" {
azs = data.aws_availability_zones.available.names
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_dns_hostnames = true
+
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.cluster_name}" = "shared"
+ }
+
+ public_subnet_tags = {
+ "kubernetes.io/cluster/${local.cluster_name}" = "shared"
+ }
}
module "eks" {
@@ -47,7 +54,7 @@ module "eks" {
subnets = module.vpc.public_subnets
vpc_id = module.vpc.vpc_id
- worker_groups_launch_template = [
+ worker_groups_launch_template_legacy = [
{
name = "worker-group-1"
instance_type = "t3.small"
@@ -68,4 +75,23 @@ module "eks" {
elastic_inference_accelerator = "eia2.medium"
},
]
+
+ worker_groups = {
+ worker-group-1 = {
+ instance_type = "t3.small"
+ asg_desired_capacity = 2
+ public_ip = true
+ },
+ worker-group-2 = {
+ instance_type = "t3.medium"
+ asg_desired_capacity = 1
+ public_ip = true
+ },
+ worker-group-3 = {
+ instance_type = "t2.large"
+ asg_desired_capacity = 1
+ public_ip = true
+ elastic_inference_accelerator = "eia2.medium"
+ },
+ }
}
diff --git a/examples/launch_templates/outputs.tf b/examples/launch_templates/outputs.tf
index a0788aff1d2..4c8847b03d2 100644
--- a/examples/launch_templates/outputs.tf
+++ b/examples/launch_templates/outputs.tf
@@ -18,6 +18,11 @@ output "config_map_aws_auth" {
value = module.eks.config_map_aws_auth
}
+output "worker_groups" {
+ description = "Outputs from EKS worker groups. Map of maps, keyed by var.worker_groups keys"
+ value = module.eks.worker_groups
+}
+
output "region" {
description = "AWS region."
value = var.region
diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf
index 9aebd4cbe50..7785bf944fc 100644
--- a/examples/secrets_encryption/main.tf
+++ b/examples/secrets_encryption/main.tf
@@ -78,14 +78,13 @@ module "eks" {
vpc_id = module.vpc.vpc_id
- worker_groups = [
- {
- name = "worker-group-1"
+ worker_groups = {
+ worker-group-1 = {
instance_type = "t3.small"
additional_userdata = "echo foo bar"
asg_desired_capacity = 2
},
- ]
+ }
map_roles = var.map_roles
map_users = var.map_users
diff --git a/examples/spot_instances/main.tf b/examples/spot_instances/main.tf
index fb2ad23ee94..f74b62b94a0 100644
--- a/examples/spot_instances/main.tf
+++ b/examples/spot_instances/main.tf
@@ -47,9 +47,8 @@ module "eks" {
subnets = module.vpc.public_subnets
vpc_id = module.vpc.vpc_id
- worker_groups_launch_template = [
- {
- name = "spot-1"
+ worker_groups = {
+ spot-1 = {
override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
spot_instance_pools = 4
asg_max_size = 5
@@ -57,5 +56,5 @@ module "eks" {
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
public_ip = true
},
- ]
+ }
}
diff --git a/local.tf b/local.tf
index dc836a47297..63323a9330e 100644
--- a/local.tf
+++ b/local.tf
@@ -1,22 +1,22 @@
locals {
-
cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
cluster_primary_security_group_id = var.cluster_version >= 1.14 ? element(concat(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, [""]), 0) : null
cluster_iam_role_name = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
cluster_iam_role_arn = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
- default_platform = "linux"
- default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
+ default_platform = "linux"
+ default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
+
default_ami_id_linux = local.workers_group_defaults.ami_id != "" ? local.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0]
default_ami_id_windows = local.workers_group_defaults.ami_id_windows != "" ? local.workers_group_defaults.ami_id_windows : concat(data.aws_ami.eks_worker_windows.*.id, [""])[0]
kubeconfig_name = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name
- worker_group_count = length(var.worker_groups)
- worker_group_launch_template_count = length(var.worker_groups_launch_template)
+ worker_group_legacy_count = length(var.worker_groups_legacy)
+ worker_group_launch_template_legacy_count = length(var.worker_groups_launch_template_legacy)
- worker_has_linux_ami = length([for x in concat(var.worker_groups, var.worker_groups_launch_template) : x if lookup(
+ worker_has_linux_ami = length([for x in concat(var.worker_groups_legacy, var.worker_groups_launch_template_legacy) : x if lookup(
x,
"platform",
# Fallback on default `platform` if it's not defined in current worker group
@@ -26,7 +26,7 @@ locals {
null
)
) == "linux"]) > 0
- worker_has_windows_ami = length([for x in concat(var.worker_groups, var.worker_groups_launch_template) : x if lookup(
+ worker_has_windows_ami = length([for x in concat(var.worker_groups_legacy, var.worker_groups_launch_template_legacy) : x if lookup(
x,
"platform",
# Fallback on default `platform` if it's not defined in current worker group
@@ -47,6 +47,7 @@ locals {
sts_principal = "sts.${data.aws_partition.current.dns_suffix}"
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
+
workers_group_defaults_defaults = {
name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
tags = [] # A list of map defining extra tags to be applied to the worker group autoscaling group.
@@ -184,42 +185,42 @@ locals {
}) : ""
userdata_rendered = [
- for index in range(var.create_eks ? local.worker_group_count : 0) : templatefile(
+ for index in range(var.create_eks ? local.worker_group_legacy_count : 0) : templatefile(
lookup(
- var.worker_groups[index],
+ var.worker_groups_legacy[index],
"userdata_template_file",
- lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"]) == "windows"
+ lookup(var.worker_groups_legacy[index], "platform", local.workers_group_defaults["platform"]) == "windows"
? "${path.module}/templates/userdata_windows.tpl"
: "${path.module}/templates/userdata.sh.tpl"
),
merge({
- platform = lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"])
+ platform = lookup(var.worker_groups_legacy[index], "platform", local.workers_group_defaults["platform"])
cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
pre_userdata = lookup(
- var.worker_groups[index],
+ var.worker_groups_legacy[index],
"pre_userdata",
local.workers_group_defaults["pre_userdata"],
)
additional_userdata = lookup(
- var.worker_groups[index],
+ var.worker_groups_legacy[index],
"additional_userdata",
local.workers_group_defaults["additional_userdata"],
)
bootstrap_extra_args = lookup(
- var.worker_groups[index],
+ var.worker_groups_legacy[index],
"bootstrap_extra_args",
local.workers_group_defaults["bootstrap_extra_args"],
)
kubelet_extra_args = lookup(
- var.worker_groups[index],
+ var.worker_groups_legacy[index],
"kubelet_extra_args",
local.workers_group_defaults["kubelet_extra_args"],
)
},
lookup(
- var.worker_groups[index],
+ var.worker_groups_legacy[index],
"userdata_template_extra_args",
local.workers_group_defaults["userdata_template_extra_args"]
)
@@ -228,42 +229,42 @@ locals {
]
launch_template_userdata_rendered = [
- for index in range(var.create_eks ? local.worker_group_launch_template_count : 0) : templatefile(
+ for index in range(var.create_eks ? local.worker_group_launch_template_legacy_count : 0) : templatefile(
lookup(
- var.worker_groups_launch_template[index],
+ var.worker_groups_launch_template_legacy[index],
"userdata_template_file",
- lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"]) == "windows"
+ lookup(var.worker_groups_launch_template_legacy[index], "platform", local.workers_group_defaults["platform"]) == "windows"
? "${path.module}/templates/userdata_windows.tpl"
: "${path.module}/templates/userdata.sh.tpl"
),
merge({
- platform = lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"])
+ platform = lookup(var.worker_groups_launch_template_legacy[index], "platform", local.workers_group_defaults["platform"])
cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
pre_userdata = lookup(
- var.worker_groups_launch_template[index],
+ var.worker_groups_launch_template_legacy[index],
"pre_userdata",
local.workers_group_defaults["pre_userdata"],
)
additional_userdata = lookup(
- var.worker_groups_launch_template[index],
+ var.worker_groups_launch_template_legacy[index],
"additional_userdata",
local.workers_group_defaults["additional_userdata"],
)
bootstrap_extra_args = lookup(
- var.worker_groups_launch_template[index],
+ var.worker_groups_launch_template_legacy[index],
"bootstrap_extra_args",
local.workers_group_defaults["bootstrap_extra_args"],
)
kubelet_extra_args = lookup(
- var.worker_groups_launch_template[index],
+ var.worker_groups_launch_template_legacy[index],
"kubelet_extra_args",
local.workers_group_defaults["kubelet_extra_args"],
)
},
lookup(
- var.worker_groups_launch_template[index],
+ var.worker_groups_launch_template_legacy[index],
"userdata_template_extra_args",
local.workers_group_defaults["userdata_template_extra_args"]
)
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
index ee026c86d7a..189d8eb6508 100644
--- a/modules/node_groups/locals.tf
+++ b/modules/node_groups/locals.tf
@@ -35,5 +35,6 @@ locals {
"name_prefix",
join("-", [var.cluster_name, k])
)
- ) }
+ )
+ }
}
diff --git a/modules/worker_groups/README.md b/modules/worker_groups/README.md
new file mode 100644
index 00000000000..9518235d25e
--- /dev/null
+++ b/modules/worker_groups/README.md
@@ -0,0 +1,81 @@
+# eks `worker_groups` submodule
+
+Helper submodule to create and manage resources related to `eks_worker_groups`.
+
+## Assumptions
+
+* Designed for use by the parent module and not directly by end users
+
+## Worker Groups' IAM Role
+
+The role ID specified in `var.default_iam_role_id` will be used by default. In a simple configuration this will be the worker role created by the parent module.
+
+`iam_role_id` must be specified in either `var.workers_group_defaults` or `var.worker_groups` if the default parent IAM role is not being created for whatever reason, for example if `manage_worker_iam_resources` is set to false in the parent.
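+
+For example, a minimal sketch (the role name `custom-workers` is hypothetical), pointing every group at a pre-existing role:
+
+```hcl
+  workers_group_defaults = {
+    iam_role_id = "custom-workers" # hypothetical IAM role managed outside the module
+  }
+```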
+
+## `worker_groups` and `workers_group_defaults` keys
+
+`workers_group_defaults` is a map that can take the below keys. Its values are used whenever a key is not set on an individual worker group.
+
+`worker_groups` is a map of maps. The first-level key is used as the unique value for `for_each` resources and in the `aws_autoscaling_group` and `aws_launch_template` names. The inner map can take all the keys from the `workers_group_defaults_defaults` map.
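+
+For instance, a minimal sketch of the expected shape (group names and values are illustrative only):
+
+```hcl
+  worker_groups = {
+    workers-a = {
+      instance_type        = "t3.small"
+      asg_desired_capacity = 2
+    }
+    workers-b = {
+      instance_type = "t3.medium"
+      public_ip     = true
+    }
+  }
+```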
+
+
+## Requirements
+
+No requirements.
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | n/a |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_autoscaling_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
+| [aws_iam_instance_profile.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_ami.eks_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_ami.eks_worker_windows](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_iam_instance_profile.custom_worker_group_iam_instance_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_instance_profile) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Cluster auth data | `string` | n/a | yes |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Cluster endpoint | `string` | n/a | yes |
+| [cluster\_name](#input\_cluster\_name) | Cluster name | `string` | n/a | yes |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes version to use for the EKS cluster. | `string` | n/a | yes |
+| [create\_workers](#input\_create\_workers) | Controls whether worker group resources should be created (it affects almost all resources in this submodule) | `bool` | `true` | no |
+| [default\_iam\_role\_id](#input\_default\_iam\_role\_id) | ID of the default IAM worker role to use if one is not specified in `var.worker_groups` or `var.workers_group_defaults` | `string` | n/a | yes |
+| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path. | `string` | `"/"` | no |
+| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers. | `bool` | `true` | no |
+| [ng\_depends\_on](#input\_ng\_depends\_on) | List of references to other resources this submodule depends on | `any` | `null` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | n/a | yes |
+| [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | n/a | yes |
+| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
+| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
+| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"602401143452"` | no |
+| [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"801119661308"` | no |
+| [worker\_create\_initial\_lifecycle\_hooks](#input\_worker\_create\_initial\_lifecycle\_hooks) | Whether to create initial lifecycle hooks provided in worker groups. | `bool` | `false` | no |
+| [worker\_groups](#input\_worker\_groups) | A map of maps defining worker group configurations to be defined using AWS Launch Templates. See workers\_group\_defaults for valid keys. | `any` | `{}` | no |
+| [worker\_security\_group\_ids](#input\_worker\_security\_group\_ids) | A list of security group ids to attach to worker instances | `list(string)` | `[]` | no |
+| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
+| [worker\_groups](#output\_worker\_groups) | Outputs from EKS worker groups. Map of maps, keyed by `var.worker_groups` keys. |
+| [worker\_iam\_instance\_profile\_arns](#output\_worker\_iam\_instance\_profile\_arns) | default IAM instance profile ARN for EKS worker groups |
+| [worker\_iam\_instance\_profile\_names](#output\_worker\_iam\_instance\_profile\_names) | default IAM instance profile name for EKS worker groups |
+
diff --git a/modules/worker_groups/data.tf b/modules/worker_groups/data.tf
new file mode 100644
index 00000000000..744beb54031
--- /dev/null
+++ b/modules/worker_groups/data.tf
@@ -0,0 +1,39 @@
+data "aws_caller_identity" "current" {}
+data "aws_partition" "current" {}
+
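+# When the module does not manage worker IAM resources, look up the externally
+# managed instance profile named by each group's "iam_instance_profile_name".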
+data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
+ for_each = var.manage_worker_iam_resources ? {} : local.worker_group_configurations
+
+ name = each.value["iam_instance_profile_name"]
+}
+
+data "aws_ami" "eks_worker" {
+ count = local.worker_has_linux_ami ? 1 : 0
+
+ filter {
+ name = "name"
+ values = [local.worker_ami_name_filter]
+ }
+
+ most_recent = true
+
+ owners = [var.worker_ami_owner_id]
+}
+
+data "aws_ami" "eks_worker_windows" {
+ count = local.worker_has_windows_ami ? 1 : 0
+
+ filter {
+ name = "name"
+ values = [local.worker_ami_name_filter_windows]
+ }
+
+ filter {
+ name = "platform"
+ values = ["windows"]
+ }
+
+ most_recent = true
+
+ owners = [var.worker_ami_owner_id_windows]
+}
diff --git a/modules/worker_groups/local.tf b/modules/worker_groups/local.tf
new file mode 100644
index 00000000000..30b870e16e5
--- /dev/null
+++ b/modules/worker_groups/local.tf
@@ -0,0 +1,88 @@
+locals {
+ ebs_optimized_not_supported = [
+ "c1.medium",
+ "c3.8xlarge",
+ "c3.large",
+ "c5d.12xlarge",
+ "c5d.24xlarge",
+ "c5d.metal",
+ "cc2.8xlarge",
+ "cr1.8xlarge",
+ "g2.8xlarge",
+ "g4dn.metal",
+ "hs1.8xlarge",
+ "i2.8xlarge",
+ "m1.medium",
+ "m1.small",
+ "m2.xlarge",
+ "m3.large",
+ "m3.medium",
+ "m5ad.16xlarge",
+ "m5ad.8xlarge",
+ "m5dn.metal",
+ "m5n.metal",
+ "r3.8xlarge",
+ "r3.large",
+ "r5ad.16xlarge",
+ "r5ad.8xlarge",
+ "r5dn.metal",
+ "r5n.metal",
+ "t1.micro",
+ "t2.2xlarge",
+ "t2.large",
+ "t2.medium",
+ "t2.micro",
+ "t2.nano",
+ "t2.small",
+ "t2.xlarge"
+ ]
+
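+  # Per-group configuration: each group's own keys override workers_group_defaults.
+  # The map is empty when create_workers is false, which disables all resources below.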
+ worker_group_configurations = {
+ for k, v in var.worker_groups : k => merge(
+ var.workers_group_defaults,
+ v,
+ ) if var.create_workers
+ }
+
+ default_platform = "linux"
+ default_ami_id_linux = var.workers_group_defaults.ami_id != "" ? var.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0]
+ default_ami_id_windows = var.workers_group_defaults.ami_id_windows != "" ? var.workers_group_defaults.ami_id_windows : concat(data.aws_ami.eks_worker_windows.*.id, [""])[0]
+
+ default_root_block_device_name = concat(data.aws_ami.eks_worker.*.root_device_name, [""])[0]
+ default_root_block_device_name_windows = concat(data.aws_ami.eks_worker_windows.*.root_device_name, [""])[0]
+
+ worker_has_linux_ami = length([for k, v in local.worker_group_configurations : k if v["platform"] == "linux"]) > 0
+ worker_has_windows_ami = length([for k, v in local.worker_group_configurations : k if v["platform"] == "windows"]) > 0
+
+ worker_ami_name_filter = var.worker_ami_name_filter != "" ? var.worker_ami_name_filter : "amazon-eks-node-${var.cluster_version}-v*"
+  # Windows nodes are available from k8s 1.14. If the cluster version is below 1.14, pin the AMI filter to a constant so 'terraform plan' does not fail.
+ worker_ami_name_filter_windows = (var.worker_ami_name_filter_windows != "" ?
+ var.worker_ami_name_filter_windows : "Windows_Server-2019-English-Core-EKS_Optimized-${tonumber(var.cluster_version) >= 1.14 ? var.cluster_version : 1.14}-*"
+ )
+
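+  # Render userdata for each group, choosing the Windows or Linux template based on
+  # the group's "platform" unless a custom "userdata_template_file" is supplied.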
+ userdata_rendered = {
+ for k, v in local.worker_group_configurations : k => templatefile(
+ lookup(
+ var.worker_groups[k],
+ "userdata_template_file",
+ v["platform"] == "windows" ?
+ "${path.module}/templates/userdata_windows.tpl" :
+ "${path.module}/templates/userdata.sh.tpl"
+ ),
+ merge(
+ {
+ cluster_name = var.cluster_name
+ endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+
+ platform = v["platform"]
+ pre_userdata = v["pre_userdata"]
+ additional_userdata = v["additional_userdata"]
+ bootstrap_extra_args = v["bootstrap_extra_args"]
+ kubelet_extra_args = v["kubelet_extra_args"]
+ },
+ v["userdata_template_extra_args"]
+ )
+ )
+ }
+}
diff --git a/modules/worker_groups/main.tf b/modules/worker_groups/main.tf
new file mode 100644
index 00000000000..d83f666a09b
--- /dev/null
+++ b/modules/worker_groups/main.tf
@@ -0,0 +1,349 @@
+resource "aws_autoscaling_group" "workers" {
+ for_each = local.worker_group_configurations
+
+ name_prefix = join(
+ "-",
+ compact(
+ [
+ var.cluster_name,
+ each.key,
+ ]
+ )
+ )
+
+ desired_capacity = each.value["asg_desired_capacity"]
+ max_size = each.value["asg_max_size"]
+ min_size = each.value["asg_min_size"]
+ force_delete = each.value["asg_force_delete"]
+ target_group_arns = each.value["target_group_arns"]
+ load_balancers = each.value["load_balancers"]
+ service_linked_role_arn = each.value["service_linked_role_arn"]
+ vpc_zone_identifier = each.value["subnets"]
+ protect_from_scale_in = each.value["protect_from_scale_in"]
+ suspended_processes = each.value["suspended_processes"]
+
+ enabled_metrics = each.value["enabled_metrics"]
+
+ placement_group = each.value["placement_group"]
+
+ termination_policies = each.value["termination_policies"]
+ max_instance_lifetime = each.value["max_instance_lifetime"]
+ default_cooldown = each.value["default_cooldown"]
+ health_check_type = each.value["health_check_type"]
+ health_check_grace_period = each.value["health_check_grace_period"]
+ capacity_rebalance = each.value["capacity_rebalance"]
+
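+  # Use a mixed instances policy when the group overrides instance types or sets an
+  # on-demand allocation strategy; otherwise the plain launch_template block below is used.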
+ dynamic "mixed_instances_policy" {
+ iterator = item
+ for_each = ((lookup(var.worker_groups[each.key], "override_instance_types", null) != null) || (each.value["on_demand_allocation_strategy"] != null)) ? [each.value] : []
+
+ content {
+ instances_distribution {
+ on_demand_allocation_strategy = lookup(item.value, "on_demand_allocation_strategy", "prioritized")
+ on_demand_base_capacity = item.value["on_demand_base_capacity"]
+ on_demand_percentage_above_base_capacity = item.value["on_demand_percentage_above_base_capacity"]
+
+ spot_allocation_strategy = item.value["spot_allocation_strategy"]
+ spot_instance_pools = item.value["spot_instance_pools"]
+ spot_max_price = item.value["spot_max_price"]
+ }
+
+ launch_template {
+ launch_template_specification {
+ launch_template_id = aws_launch_template.workers[each.key].id
+ version = lookup(var.worker_groups[each.key],
+ "launch_template_version",
+ var.workers_group_defaults["launch_template_version"] == "$Latest"
+ ? aws_launch_template.workers[each.key].latest_version
+ : aws_launch_template.workers[each.key].default_version
+ )
+ }
+
+ dynamic "override" {
+ for_each = item.value["override_instance_types"]
+
+ content {
+ instance_type = override.value
+ }
+ }
+ }
+ }
+ }
+
+ dynamic "launch_template" {
+ iterator = item
+ for_each = ((lookup(var.worker_groups[each.key], "override_instance_types", null) != null) || (each.value["on_demand_allocation_strategy"] != null)) ? [] : [each.value]
+
+ content {
+ id = aws_launch_template.workers[each.key].id
+ version = lookup(var.worker_groups[each.key],
+ "launch_template_version",
+ var.workers_group_defaults["launch_template_version"] == "$Latest"
+ ? aws_launch_template.workers[each.key].latest_version
+ : aws_launch_template.workers[each.key].default_version
+ )
+ }
+ }
+
+ dynamic "initial_lifecycle_hook" {
+ for_each = var.worker_create_initial_lifecycle_hooks ? each.value["asg_initial_lifecycle_hooks"] : []
+
+ content {
+ name = initial_lifecycle_hook.value["name"]
+ lifecycle_transition = initial_lifecycle_hook.value["lifecycle_transition"]
+ notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null)
+ heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null)
+ notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null)
+ role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null)
+ default_result = lookup(initial_lifecycle_hook.value, "default_result", null)
+ }
+ }
+
+ dynamic "warm_pool" {
+ for_each = lookup(var.worker_groups[each.key], "warm_pool", null) != null ? [each.value["warm_pool"]] : []
+
+ content {
+ pool_state = lookup(warm_pool.value, "pool_state", null)
+ min_size = lookup(warm_pool.value, "min_size", null)
+ max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
+ }
+ }
+
+ dynamic "tag" {
+ for_each = concat(
+ [
+ {
+ key = "Name"
+ value = "${var.cluster_name}-${each.key}-eks_asg"
+ propagate_at_launch = true
+ },
+ {
+ key = "kubernetes.io/cluster/${var.cluster_name}"
+ value = "owned"
+ propagate_at_launch = true
+ },
+ ],
+ [
+ for tag_key, tag_value in var.tags :
+ tomap({
+ key = tag_key
+ value = tag_value
+ propagate_at_launch = "true"
+ })
+ if tag_key != "Name" && !contains([for tag in each.value["tags"] : tag["key"]], tag_key)
+ ],
+ each.value["tags"]
+ )
+ content {
+ key = tag.value.key
+ value = tag.value.value
+ propagate_at_launch = tag.value.propagate_at_launch
+ }
+ }
+
+ dynamic "instance_refresh" {
+ for_each = each.value["instance_refresh_enabled"] ? [1] : []
+
+ content {
+ strategy = each.value["instance_refresh_strategy"]
+ preferences {
+ instance_warmup = each.value["instance_refresh_instance_warmup"]
+ min_healthy_percentage = each.value["instance_refresh_min_healthy_percentage"]
+ }
+ triggers = each.value["instance_refresh_triggers"]
+ }
+ }
+
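+  # Changes to desired_capacity are ignored so that an external autoscaler
+  # (e.g. cluster-autoscaler) can manage it without causing perpetual diffs.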
+ lifecycle {
+ create_before_destroy = true
+ ignore_changes = [desired_capacity]
+ }
+}
+
+resource "aws_launch_template" "workers" {
+ for_each = local.worker_group_configurations
+
+ name_prefix = "${var.cluster_name}-${each.key}"
+
+ update_default_version = each.value["update_default_version"]
+
+ network_interfaces {
+ associate_public_ip_address = each.value["public_ip"]
+ delete_on_termination = each.value["eni_delete"]
+
+ security_groups = flatten([
+ var.worker_security_group_ids,
+ each.value["additional_security_group_ids"]
+ ])
+ }
+
+ iam_instance_profile {
+ name = var.manage_worker_iam_resources ? aws_iam_instance_profile.workers[each.key].name : data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile[each.key].name
+ }
+
+ enclave_options {
+ enabled = each.value["enclave_support"]
+ }
+
+ image_id = lookup(
+ var.worker_groups[each.key],
+ "ami_id",
+ each.value["platform"] == "windows" ? local.default_ami_id_windows : local.default_ami_id_linux,
+ )
+
+ instance_type = each.value["instance_type"]
+ key_name = each.value["key_name"]
+ user_data = base64encode(local.userdata_rendered[each.key])
+
+ dynamic "elastic_inference_accelerator" {
+ for_each = each.value["elastic_inference_accelerator"] != null ? [each.value["elastic_inference_accelerator"]] : []
+
+ content {
+ type = elastic_inference_accelerator.value
+ }
+ }
+
+ ebs_optimized = lookup(
+ var.worker_groups[each.key],
+ "ebs_optimized",
+ !contains(local.ebs_optimized_not_supported, each.value["instance_type"])
+ )
+
+ metadata_options {
+ http_endpoint = each.value["metadata_http_endpoint"]
+ http_tokens = each.value["metadata_http_tokens"]
+ http_put_response_hop_limit = each.value["metadata_http_put_response_hop_limit"]
+ }
+
+ dynamic "credit_specification" {
+ for_each = lookup(var.worker_groups[each.key], "cpu_credits", each.value["cpu_credits"]) != null ? [each.value["cpu_credits"]] : []
+ content {
+ cpu_credits = credit_specification.value
+ }
+ }
+
+ monitoring {
+ enabled = each.value["enable_monitoring"]
+ }
+
+ dynamic "placement" {
+ for_each = each.value["launch_template_placement_group"] != null ? [each.value["launch_template_placement_group"]] : []
+
+ content {
+ tenancy = each.value["launch_template_placement_tenancy"]
+ group_name = placement.value
+ }
+ }
+
+ dynamic "instance_market_options" {
+ for_each = lookup(var.worker_groups[each.key], "market_type", null) == null ? [] : tolist([lookup(var.worker_groups[each.key], "market_type", null)])
+
+ content {
+ market_type = instance_market_options.value
+ }
+ }
+
+ block_device_mappings {
+ device_name = lookup(
+ var.worker_groups[each.key],
+ "root_block_device_name",
+ each.value["platform"] == "windows" ? local.default_root_block_device_name_windows : local.default_root_block_device_name,
+ )
+
+ ebs {
+ volume_size = each.value["root_volume_size"]
+ volume_type = each.value["root_volume_type"]
+ iops = each.value["root_iops"]
+ throughput = each.value["root_volume_throughput"]
+ encrypted = each.value["root_encrypted"]
+ kms_key_id = each.value["root_kms_key_id"]
+
+ delete_on_termination = true
+ }
+ }
+
+ dynamic "block_device_mappings" {
+ for_each = each.value["additional_ebs_volumes"]
+
+ content {
+ device_name = block_device_mappings.value.block_device_name
+
+ ebs {
+ volume_size = lookup(block_device_mappings.value, "volume_size", var.workers_group_defaults["root_volume_size"])
+ volume_type = lookup(block_device_mappings.value, "volume_type", var.workers_group_defaults["root_volume_type"])
+ iops = lookup(block_device_mappings.value, "iops", var.workers_group_defaults["root_iops"])
+ throughput = lookup(block_device_mappings.value, "throughput", var.workers_group_defaults["root_volume_throughput"])
+ encrypted = lookup(block_device_mappings.value, "encrypted", var.workers_group_defaults["root_encrypted"])
+ kms_key_id = lookup(block_device_mappings.value, "kms_key_id", var.workers_group_defaults["root_kms_key_id"])
+
+ delete_on_termination = lookup(block_device_mappings.value, "delete_on_termination", true)
+ }
+ }
+ }
+
+ dynamic "block_device_mappings" {
+ for_each = each.value["additional_instance_store_volumes"]
+
+ content {
+ device_name = block_device_mappings.value.block_device_name
+ virtual_name = lookup(block_device_mappings.value,
+ "virtual_name",
+ var.workers_group_defaults["instance_store_virtual_name"]
+ )
+ }
+ }
+
+ tag_specifications {
+ resource_type = "volume"
+
+ tags = merge(
+ {
+ Name = "${var.cluster_name}-${each.key}-eks_asg"
+ },
+ var.tags,
+ )
+ }
+
+ tag_specifications {
+ resource_type = "instance"
+
+ tags = merge(
+ {
+ Name = "${var.cluster_name}-${each.key}-eks_asg"
+ },
+ {
+ for tag_key, tag_value in var.tags :
+ tag_key => tag_value
+ if tag_key != "Name" && !contains([for tag in each.value["tags"] : tag["key"]], tag_key)
+ }
+ )
+ }
+
+ tags = var.tags
+
+ lifecycle {
+ create_before_destroy = true
+ }
+
+ depends_on = [
+ var.ng_depends_on,
+ ]
+}
+
+resource "aws_iam_instance_profile" "workers" {
+ for_each = var.manage_worker_iam_resources ? local.worker_group_configurations : {}
+
+ name_prefix = var.cluster_name
+
+ role = lookup(
+ var.worker_groups[each.key],
+ "iam_role_id",
+ var.default_iam_role_id,
+ )
+ path = var.iam_path
+ tags = var.tags
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
diff --git a/modules/worker_groups/outputs.tf b/modules/worker_groups/outputs.tf
new file mode 100644
index 00000000000..d4ca01685fe
--- /dev/null
+++ b/modules/worker_groups/outputs.tf
@@ -0,0 +1,30 @@
+output "aws_auth_roles" {
+ description = "Roles for use in aws-auth ConfigMap"
+ value = [
+ for k, v in local.worker_group_configurations : {
+ worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${var.manage_worker_iam_resources ? aws_iam_instance_profile.workers[k].role : data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile[k].role_name}"
+ platform = v["platform"]
+ }
+ ]
+}
+
+output "worker_groups" {
+ description = "Outputs from EKS worker groups. Map of maps, keyed by `var.worker_groups` keys."
+ value = aws_autoscaling_group.workers
+}
+
+output "worker_iam_instance_profile_arns" {
+ description = "default IAM instance profile ARN for EKS worker groups"
+ value = {
+ for k, v in local.worker_group_configurations :
+ k => var.manage_worker_iam_resources ? aws_iam_instance_profile.workers[k].arn : data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile[k].arn
+ }
+}
+
+output "worker_iam_instance_profile_names" {
+ description = "default IAM instance profile name for EKS worker groups"
+ value = {
+ for k, v in local.worker_group_configurations :
+ k => var.manage_worker_iam_resources ? aws_iam_instance_profile.workers[k].name : data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile[k].role_name
+ }
+}
diff --git a/modules/worker_groups/templates/userdata.sh.tpl b/modules/worker_groups/templates/userdata.sh.tpl
new file mode 100644
index 00000000000..cf314b88007
--- /dev/null
+++ b/modules/worker_groups/templates/userdata.sh.tpl
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+
+# Allow user supplied pre userdata code
+${pre_userdata}
+
+# Bootstrap and join the cluster
+/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
+
+# Allow user supplied userdata code
+${additional_userdata}
diff --git a/modules/worker_groups/templates/userdata_windows.tpl b/modules/worker_groups/templates/userdata_windows.tpl
new file mode 100644
index 00000000000..61be8e8b110
--- /dev/null
+++ b/modules/worker_groups/templates/userdata_windows.tpl
@@ -0,0 +1,11 @@
+
+${pre_userdata}
+
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -KubeletExtraArgs '${kubelet_extra_args}' 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+
+${additional_userdata}
+
diff --git a/modules/worker_groups/variables.tf b/modules/worker_groups/variables.tf
new file mode 100644
index 00000000000..5c6115b1b26
--- /dev/null
+++ b/modules/worker_groups/variables.tf
@@ -0,0 +1,107 @@
+variable "create_workers" {
+  description = "Controls whether worker group resources should be created (it affects almost all resources in this submodule)"
+ type = bool
+ default = true
+}
+
+variable "cluster_version" {
+ description = "Kubernetes version to use for the EKS cluster."
+ type = string
+}
+
+variable "cluster_name" {
+ description = "Cluster name"
+ type = string
+}
+
+variable "cluster_endpoint" {
+ description = "Cluster endpoint"
+ type = string
+}
+
+variable "cluster_auth_base64" {
+ description = "Base64-encoded cluster certificate authority data"
+ type = string
+}
+
+variable "default_iam_role_id" {
+ description = "ID of the default IAM worker role to use if one is not specified in `var.worker_groups` or `var.workers_group_defaults`"
+ type = string
+}
+
+variable "workers_group_defaults" {
+ description = "Worker group defaults inherited from the parent module"
+ type = any
+}
+
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+}
+
+variable "worker_groups" {
+ description = "A map of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys."
+ type = any
+ default = {}
+}
+
+variable "worker_create_initial_lifecycle_hooks" {
+ description = "Whether to create initial lifecycle hooks provided in worker groups."
+ type = bool
+ default = false
+}
+
+variable "iam_path" {
+ description = "If provided, all IAM roles will be created on this path."
+ type = string
+ default = "/"
+}
+
+variable "manage_worker_iam_resources" {
+ description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers."
+ type = bool
+ default = true
+}
+
+variable "vpc_id" {
+ description = "VPC where the cluster and workers will be deployed."
+ type = string
+}
+
+variable "worker_security_group_ids" {
+ description = "A list of security group ids to attach to worker instances"
+ type = list(string)
+ default = []
+}
+
+variable "worker_ami_name_filter" {
+ description = "Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used."
+ type = string
+ default = ""
+}
+
+variable "worker_ami_name_filter_windows" {
+ description = "Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used."
+ type = string
+ default = ""
+}
+
+variable "worker_ami_owner_id" {
+ description = "The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')."
+ type = string
+ default = "602401143452" // The ID of the owner of the official AWS EKS AMIs.
+}
+
+variable "worker_ami_owner_id_windows" {
+ description = "The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')."
+ type = string
+ default = "801119661308" // The ID of the owner of the official AWS EKS Windows AMIs.
+}
+
+# Hack for a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2
+# Will be removed in Terraform 0.13 with the support of module's `depends_on` https://github.com/hashicorp/terraform/issues/10462
+variable "ng_depends_on" {
+ description = "List of references to other resources this submodule depends on"
+ type = any
+ default = null
+}
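
A minimal sketch of one common way to consume such a `ng_depends_on` variable, following the emulation pattern from the linked thread (not necessarily this submodule's exact wiring): thread the value through a data source so anything reading that data source waits for the listed resources.

```hcl
# Emulated depends_on (sketch, using the hashicorp/null provider): resources
# that read this data source implicitly wait for var.ng_depends_on.
data "null_data_source" "worker_groups" {
  count = var.create_workers ? 1 : 0

  inputs = {
    cluster_name = var.cluster_name
    # The encoded value is never used; referencing the variable here is what
    # creates the ordering dependency.
    ng_depends_on = jsonencode(var.ng_depends_on)
  }
}
```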
diff --git a/outputs.tf b/outputs.tf
index f6c53513d96..a9996a2843b 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -209,3 +209,8 @@ output "security_group_rule_cluster_https_worker_ingress" {
description = "Security group rule responsible for allowing pods to communicate with the EKS cluster API."
value = aws_security_group_rule.cluster_https_worker_ingress
}
+
+output "worker_groups" {
+ description = "Outputs from EKS worker groups. Map of maps, keyed by `var.worker_groups` keys."
+ value = module.worker_groups.worker_groups
+}
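
A hypothetical consumer of this new root output; the module label `my-cluster` and the group key `spot` are examples:

```hcl
# Each map value is the full aws_autoscaling_group object, keyed by the
# var.worker_groups map keys.
output "spot_asg_name" {
  value = module.my-cluster.worker_groups["spot"].name
}
```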
diff --git a/variables.tf b/variables.tf
index b7d560e4025..0674bdd33a2 100644
--- a/variables.tf
+++ b/variables.tf
@@ -104,6 +104,12 @@ variable "vpc_id" {
}
variable "worker_groups" {
+ description = "A map of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys."
+ type = any
+ default = {}
+}
+
+variable "worker_groups_legacy" {
description = "A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys."
type = any
default = []
@@ -115,7 +121,7 @@ variable "workers_group_defaults" {
default = {}
}
-variable "worker_groups_launch_template" {
+variable "worker_groups_launch_template_legacy" {
description = "A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys."
type = any
default = []
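
A hedged migration sketch for callers: existing list-based definitions move to the renamed `_legacy` variables unchanged, while the map-based variable takes over the `worker_groups` name (the module label and group values below are examples):

```hcl
module "my-cluster" {
  source = "terraform-aws-modules/eks/aws"
  # ... other required arguments elided ...

  # Pre-existing list-based groups, renamed but otherwise untouched.
  worker_groups_legacy = [
    {
      name          = "workers-lc"
      instance_type = "m4.large"
    },
  ]

  worker_groups_launch_template_legacy = [
    {
      name                    = "workers-lt"
      override_instance_types = ["m5.large", "m5a.large"]
    },
  ]
}
```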
diff --git a/worker_groups.tf b/worker_groups.tf
new file mode 100644
index 00000000000..00846eac205
--- /dev/null
+++ b/worker_groups.tf
@@ -0,0 +1,47 @@
+module "worker_groups" {
+ source = "./modules/worker_groups"
+
+ create_workers = var.create_eks
+
+ cluster_version = var.cluster_version
+ cluster_name = var.cluster_name
+ cluster_endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
+ cluster_auth_base64 = flatten(concat(aws_eks_cluster.this[*].certificate_authority[*].data, [""]))[0]
+
+ default_iam_role_id = coalescelist(aws_iam_role.workers[*].id, [""])[0]
+
+ vpc_id = var.vpc_id
+
+ iam_path = var.iam_path
+ manage_worker_iam_resources = var.manage_worker_iam_resources
+ worker_create_initial_lifecycle_hooks = var.worker_create_initial_lifecycle_hooks
+
+ workers_group_defaults = local.workers_group_defaults
+ worker_groups = var.worker_groups
+
+ worker_ami_name_filter = var.worker_ami_name_filter
+ worker_ami_name_filter_windows = var.worker_ami_name_filter_windows
+ worker_ami_owner_id = var.worker_ami_owner_id
+ worker_ami_owner_id_windows = var.worker_ami_owner_id_windows
+
+ worker_security_group_ids = flatten([
+ local.worker_security_group_id,
+ var.worker_additional_security_group_ids
+ ])
+
+ tags = var.tags
+
+ # Hack to ensure ordering of resource creation.
+ # This is a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2
+ # Do not create worker groups before other resources are ready; this avoids race conditions.
+ # Ensure these resources are created before "unlocking" the data source.
+ # Will be removed in Terraform 0.13
+ ng_depends_on = [
+ aws_eks_cluster.this,
+ kubernetes_config_map.aws_auth,
+ aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
+ aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
+ aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly,
+ aws_iam_role_policy_attachment.workers_additional_policies,
+ ]
+}
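
The `coalescelist(x[*].attr, [""])[0]` expressions above guard against the cluster resource having `count = 0` when `create_eks = false`. A standalone illustration of the fallback:

```hcl
# With count 0 the splat is an empty list, so coalescelist falls back to [""].
locals {
  empty_splat   = []                                        # stands in for aws_eks_cluster.this[*].endpoint at count 0
  safe_endpoint = coalescelist(local.empty_splat, [""])[0]  # => ""
}
```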
diff --git a/worker_groups_support.tf b/worker_groups_support.tf
new file mode 100644
index 00000000000..786bfc65399
--- /dev/null
+++ b/worker_groups_support.tf
@@ -0,0 +1,127 @@
+resource "aws_iam_role" "workers" {
+ count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+ name_prefix = var.workers_role_name != "" ? null : coalescelist(aws_eks_cluster.this[*].name, [""])[0]
+ name = var.workers_role_name != "" ? var.workers_role_name : null
+ assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
+ permissions_boundary = var.permissions_boundary
+ path = var.iam_path
+ force_detach_policies = true
+ tags = var.tags
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
+ count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+ policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
+ role = aws_iam_role.workers[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
+ count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0
+ policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
+ role = aws_iam_role.workers[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
+ count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+ policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
+ role = aws_iam_role.workers[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
+ count = var.manage_worker_iam_resources && var.create_eks ? length(var.workers_additional_policies) : 0
+ role = aws_iam_role.workers[0].name
+ policy_arn = var.workers_additional_policies[count.index]
+}
+
+resource "aws_security_group" "workers" {
+ count = var.worker_create_security_group && var.create_eks ? 1 : 0
+
+ name_prefix = var.cluster_name
+ description = "Security group for all nodes in the cluster."
+ vpc_id = var.vpc_id
+
+ tags = merge(
+ var.tags,
+ {
+ "Name" = "${var.cluster_name}-eks_worker_sg"
+ "kubernetes.io/cluster/${var.cluster_name}" = "owned"
+ },
+ )
+}
+
+resource "aws_security_group_rule" "workers_egress_internet" {
+ count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ description = "Allow nodes all egress to the Internet."
+ protocol = "-1"
+ security_group_id = local.worker_security_group_id
+ cidr_blocks = var.workers_egress_cidrs
+ from_port = 0
+ to_port = 0
+ type = "egress"
+}
+
+resource "aws_security_group_rule" "workers_ingress_self" {
+ count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ description = "Allow nodes to communicate with each other."
+ protocol = "-1"
+ security_group_id = local.worker_security_group_id
+ source_security_group_id = local.worker_security_group_id
+ from_port = 0
+ to_port = 65535
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "workers_ingress_cluster" {
+ count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ description = "Allow worker pods to receive communication from the cluster control plane."
+ protocol = "tcp"
+ security_group_id = local.worker_security_group_id
+ source_security_group_id = local.cluster_security_group_id
+ from_port = var.worker_sg_ingress_from_port
+ to_port = 65535
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
+ count = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
+ description = "Allow worker kubelets to receive communication from the cluster control plane."
+ protocol = "tcp"
+ security_group_id = local.worker_security_group_id
+ source_security_group_id = local.cluster_security_group_id
+ from_port = 10250
+ to_port = 10250
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "workers_ingress_cluster_https" {
+ count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
+ protocol = "tcp"
+ security_group_id = local.worker_security_group_id
+ source_security_group_id = local.cluster_security_group_id
+ from_port = 443
+ to_port = 443
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
+ count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
+ description = "Allow pods running on workers to receive communication from cluster primary security group (e.g. Fargate pods)."
+ protocol = "all"
+ security_group_id = local.worker_security_group_id
+ source_security_group_id = local.cluster_primary_security_group_id
+ from_port = 0
+ to_port = 65535
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
+ count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
+ description = "Allow pods running on workers to send communication to cluster primary security group (e.g. Fargate pods)."
+ protocol = "all"
+ security_group_id = local.cluster_primary_security_group_id
+ source_security_group_id = local.worker_security_group_id
+ from_port = 0
+ to_port = 65535
+ type = "ingress"
+}
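
One behavioral detail worth noting in the rules above: a caller raising `worker_sg_ingress_from_port` above 10250 narrows the general cluster-to-worker rule, and the dedicated `workers_ingress_cluster_kubelet` rule then switches on to keep the kubelet port reachable. A hypothetical usage (module label and value are examples):

```hcl
module "my-cluster" {
  source = "terraform-aws-modules/eks/aws"
  # ... other required arguments elided ...

  # General ingress now covers 20000-65535; the module compensates by adding
  # the dedicated 10250 kubelet rule.
  worker_sg_ingress_from_port = 20000
}
```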
diff --git a/workers.tf b/workers.tf
index 31a2ffd5685..dab62971a3f 100644
--- a/workers.tf
+++ b/workers.tf
@@ -1,110 +1,110 @@
# Worker Groups using Launch Configurations
resource "aws_autoscaling_group" "workers" {
- count = var.create_eks ? local.worker_group_count : 0
+ count = var.create_eks ? local.worker_group_legacy_count : 0
name_prefix = join(
"-",
compact(
[
coalescelist(aws_eks_cluster.this[*].name, [""])[0],
- lookup(var.worker_groups[count.index], "name", count.index)
+ lookup(var.worker_groups_legacy[count.index], "name", count.index),
]
)
)
desired_capacity = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"asg_desired_capacity",
local.workers_group_defaults["asg_desired_capacity"],
)
max_size = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"asg_max_size",
local.workers_group_defaults["asg_max_size"],
)
min_size = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"asg_min_size",
local.workers_group_defaults["asg_min_size"],
)
force_delete = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"asg_force_delete",
local.workers_group_defaults["asg_force_delete"],
)
target_group_arns = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"target_group_arns",
local.workers_group_defaults["target_group_arns"]
)
load_balancers = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"load_balancers",
local.workers_group_defaults["load_balancers"]
)
service_linked_role_arn = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"service_linked_role_arn",
local.workers_group_defaults["service_linked_role_arn"],
)
launch_configuration = aws_launch_configuration.workers.*.id[count.index]
vpc_zone_identifier = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"subnets",
local.workers_group_defaults["subnets"]
)
protect_from_scale_in = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"protect_from_scale_in",
local.workers_group_defaults["protect_from_scale_in"],
)
suspended_processes = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"suspended_processes",
local.workers_group_defaults["suspended_processes"]
)
enabled_metrics = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"enabled_metrics",
local.workers_group_defaults["enabled_metrics"]
)
placement_group = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"placement_group",
local.workers_group_defaults["placement_group"],
)
termination_policies = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"termination_policies",
local.workers_group_defaults["termination_policies"]
)
max_instance_lifetime = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"max_instance_lifetime",
local.workers_group_defaults["max_instance_lifetime"],
)
default_cooldown = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"default_cooldown",
local.workers_group_defaults["default_cooldown"]
)
health_check_type = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"health_check_type",
local.workers_group_defaults["health_check_type"]
)
health_check_grace_period = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"health_check_grace_period",
local.workers_group_defaults["health_check_grace_period"]
)
capacity_rebalance = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"capacity_rebalance",
local.workers_group_defaults["capacity_rebalance"]
)
dynamic "initial_lifecycle_hook" {
- for_each = var.worker_create_initial_lifecycle_hooks ? lookup(var.worker_groups[count.index], "asg_initial_lifecycle_hooks", local.workers_group_defaults["asg_initial_lifecycle_hooks"]) : []
+ for_each = var.worker_create_initial_lifecycle_hooks ? lookup(var.worker_groups_legacy[count.index], "asg_initial_lifecycle_hooks", local.workers_group_defaults["asg_initial_lifecycle_hooks"]) : []
content {
name = initial_lifecycle_hook.value["name"]
lifecycle_transition = initial_lifecycle_hook.value["lifecycle_transition"]
@@ -117,7 +117,7 @@ resource "aws_autoscaling_group" "workers" {
}
dynamic "warm_pool" {
- for_each = lookup(var.worker_groups[count.index], "warm_pool", null) != null ? [lookup(var.worker_groups[count.index], "warm_pool")] : []
+ for_each = lookup(var.worker_groups_legacy[count.index], "warm_pool", null) != null ? [lookup(var.worker_groups_legacy[count.index], "warm_pool")] : []
content {
pool_state = lookup(warm_pool.value, "pool_state", null)
@@ -131,7 +131,7 @@ resource "aws_autoscaling_group" "workers" {
[
{
"key" = "Name"
- "value" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg"
+ "value" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups_legacy[count.index], "name", count.index)}-eks_asg"
"propagate_at_launch" = true
},
{
@@ -152,10 +152,10 @@ resource "aws_autoscaling_group" "workers" {
"value" = tag_value,
"propagate_at_launch" = "true"
}
- if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
+ if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups_legacy[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
],
lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"tags",
local.workers_group_defaults["tags"]
)
@@ -169,26 +169,26 @@ resource "aws_autoscaling_group" "workers" {
# logic duplicated in workers_launch_template.tf
dynamic "instance_refresh" {
- for_each = lookup(var.worker_groups[count.index],
+ for_each = lookup(var.worker_groups_legacy[count.index],
"instance_refresh_enabled",
local.workers_group_defaults["instance_refresh_enabled"]) ? [1] : []
content {
strategy = lookup(
- var.worker_groups[count.index], "instance_refresh_strategy",
+ var.worker_groups_legacy[count.index], "instance_refresh_strategy",
local.workers_group_defaults["instance_refresh_strategy"]
)
preferences {
instance_warmup = lookup(
- var.worker_groups[count.index], "instance_refresh_instance_warmup",
+ var.worker_groups_legacy[count.index], "instance_refresh_instance_warmup",
local.workers_group_defaults["instance_refresh_instance_warmup"]
)
min_healthy_percentage = lookup(
- var.worker_groups[count.index], "instance_refresh_min_healthy_percentage",
+ var.worker_groups_legacy[count.index], "instance_refresh_min_healthy_percentage",
local.workers_group_defaults["instance_refresh_min_healthy_percentage"]
)
}
triggers = lookup(
- var.worker_groups[count.index], "instance_refresh_triggers",
+ var.worker_groups_legacy[count.index], "instance_refresh_triggers",
local.workers_group_defaults["instance_refresh_triggers"]
)
}
@@ -201,10 +201,10 @@ resource "aws_autoscaling_group" "workers" {
}
resource "aws_launch_configuration" "workers" {
- count = var.create_eks ? local.worker_group_count : 0
- name_prefix = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups[count.index], "name", count.index)}"
+ count = var.create_eks ? local.worker_group_legacy_count : 0
+ name_prefix = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups_legacy[count.index], "name", count.index)}"
associate_public_ip_address = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"public_ip",
local.workers_group_defaults["public_ip"],
)
@@ -212,7 +212,7 @@ resource "aws_launch_configuration" "workers" {
local.worker_security_group_id,
var.worker_additional_security_group_ids,
lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"additional_security_group_ids",
local.workers_group_defaults["additional_security_group_ids"]
)
@@ -222,62 +222,62 @@ resource "aws_launch_configuration" "workers" {
data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.name,
)[count.index]
image_id = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"ami_id",
- lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.default_ami_id_windows : local.default_ami_id_linux,
+ lookup(var.worker_groups_legacy[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.default_ami_id_windows : local.default_ami_id_linux,
)
instance_type = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"instance_type",
local.workers_group_defaults["instance_type"],
)
key_name = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"key_name",
local.workers_group_defaults["key_name"],
)
user_data_base64 = base64encode(local.userdata_rendered[count.index])
ebs_optimized = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"ebs_optimized",
!contains(
local.ebs_optimized_not_supported,
lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"instance_type",
local.workers_group_defaults["instance_type"]
)
)
)
enable_monitoring = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"enable_monitoring",
local.workers_group_defaults["enable_monitoring"],
)
spot_price = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"spot_price",
local.workers_group_defaults["spot_price"],
)
placement_tenancy = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"placement_tenancy",
local.workers_group_defaults["placement_tenancy"],
)
metadata_options {
http_endpoint = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"metadata_http_endpoint",
local.workers_group_defaults["metadata_http_endpoint"],
)
http_tokens = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"metadata_http_tokens",
local.workers_group_defaults["metadata_http_tokens"],
)
http_put_response_hop_limit = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"metadata_http_put_response_hop_limit",
local.workers_group_defaults["metadata_http_put_response_hop_limit"],
)
@@ -285,22 +285,22 @@ resource "aws_launch_configuration" "workers" {
root_block_device {
encrypted = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"root_encrypted",
local.workers_group_defaults["root_encrypted"],
)
volume_size = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"root_volume_size",
local.workers_group_defaults["root_volume_size"],
)
volume_type = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"root_volume_type",
local.workers_group_defaults["root_volume_type"],
)
iops = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"root_iops",
local.workers_group_defaults["root_iops"],
)
@@ -308,7 +308,7 @@ resource "aws_launch_configuration" "workers" {
}
dynamic "ebs_block_device" {
- for_each = lookup(var.worker_groups[count.index], "additional_ebs_volumes", local.workers_group_defaults["additional_ebs_volumes"])
+ for_each = lookup(var.worker_groups_legacy[count.index], "additional_ebs_volumes", local.workers_group_defaults["additional_ebs_volumes"])
content {
device_name = ebs_block_device.value.block_device_name
@@ -357,113 +357,11 @@ resource "aws_launch_configuration" "workers" {
]
}
-resource "aws_security_group" "workers" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
- name_prefix = var.cluster_name
- description = "Security group for all nodes in the cluster."
- vpc_id = var.vpc_id
- tags = merge(
- var.tags,
- {
- "Name" = "${var.cluster_name}-eks_worker_sg"
- "kubernetes.io/cluster/${var.cluster_name}" = "owned"
- },
- )
-}
-
-resource "aws_security_group_rule" "workers_egress_internet" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
- description = "Allow nodes all egress to the Internet."
- protocol = "-1"
- security_group_id = local.worker_security_group_id
- cidr_blocks = var.workers_egress_cidrs
- from_port = 0
- to_port = 0
- type = "egress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_self" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
- description = "Allow node to communicate with each other."
- protocol = "-1"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.worker_security_group_id
- from_port = 0
- to_port = 65535
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
- description = "Allow workers pods to receive communication from the cluster control plane."
- protocol = "tcp"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_security_group_id
- from_port = var.worker_sg_ingress_from_port
- to_port = 65535
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
- count = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
- description = "Allow workers Kubelets to receive communication from the cluster control plane."
- protocol = "tcp"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_security_group_id
- from_port = 10250
- to_port = 10250
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster_https" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
- description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
- protocol = "tcp"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_security_group_id
- from_port = 443
- to_port = 443
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
- count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
- description = "Allow pods running on workers to receive communication from cluster primary security group (e.g. Fargate pods)."
- protocol = "all"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_primary_security_group_id
- from_port = 0
- to_port = 65535
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
- count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
- description = "Allow pods running on workers to send communication to cluster primary security group (e.g. Fargate pods)."
- protocol = "all"
- security_group_id = local.cluster_primary_security_group_id
- source_security_group_id = local.worker_security_group_id
- from_port = 0
- to_port = 65535
- type = "ingress"
-}
-
-resource "aws_iam_role" "workers" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
- name_prefix = var.workers_role_name != "" ? null : coalescelist(aws_eks_cluster.this[*].name, [""])[0]
- name = var.workers_role_name != "" ? var.workers_role_name : null
- assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
- permissions_boundary = var.permissions_boundary
- path = var.iam_path
- force_detach_policies = true
- tags = var.tags
-}
-
resource "aws_iam_instance_profile" "workers" {
- count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_count : 0
+ count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_legacy_count : 0
name_prefix = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
role = lookup(
- var.worker_groups[count.index],
+ var.worker_groups_legacy[count.index],
"iam_role_id",
local.default_iam_role_id,
)
@@ -475,27 +373,3 @@ resource "aws_iam_instance_profile" "workers" {
create_before_destroy = true
}
}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
- role = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
- count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0
- policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
- role = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
- policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
- role = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
- count = var.manage_worker_iam_resources && var.create_eks ? length(var.workers_additional_policies) : 0
- role = aws_iam_role.workers[0].name
- policy_arn = var.workers_additional_policies[count.index]
-}
diff --git a/workers_launch_template.tf b/workers_launch_template.tf
index 6e14b7dcb0e..877596f3458 100644
--- a/workers_launch_template.tf
+++ b/workers_launch_template.tf
@@ -1,110 +1,110 @@
# Worker Groups using Launch Templates
resource "aws_autoscaling_group" "workers_launch_template" {
- count = var.create_eks ? local.worker_group_launch_template_count : 0
+ count = var.create_eks ? local.worker_group_launch_template_legacy_count : 0
name_prefix = join(
"-",
compact(
[
coalescelist(aws_eks_cluster.this[*].name, [""])[0],
- lookup(var.worker_groups_launch_template[count.index], "name", count.index)
+ lookup(var.worker_groups_launch_template_legacy[count.index], "name", count.index),
]
)
)
desired_capacity = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"asg_desired_capacity",
local.workers_group_defaults["asg_desired_capacity"],
)
max_size = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"asg_max_size",
local.workers_group_defaults["asg_max_size"],
)
min_size = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"asg_min_size",
local.workers_group_defaults["asg_min_size"],
)
force_delete = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"asg_force_delete",
local.workers_group_defaults["asg_force_delete"],
)
target_group_arns = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"target_group_arns",
local.workers_group_defaults["target_group_arns"]
)
load_balancers = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"load_balancers",
local.workers_group_defaults["load_balancers"]
)
service_linked_role_arn = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"service_linked_role_arn",
local.workers_group_defaults["service_linked_role_arn"],
)
vpc_zone_identifier = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"subnets",
local.workers_group_defaults["subnets"]
)
protect_from_scale_in = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"protect_from_scale_in",
local.workers_group_defaults["protect_from_scale_in"],
)
suspended_processes = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"suspended_processes",
local.workers_group_defaults["suspended_processes"]
)
enabled_metrics = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"enabled_metrics",
local.workers_group_defaults["enabled_metrics"]
)
placement_group = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"placement_group",
local.workers_group_defaults["placement_group"],
)
termination_policies = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"termination_policies",
local.workers_group_defaults["termination_policies"]
)
max_instance_lifetime = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"max_instance_lifetime",
local.workers_group_defaults["max_instance_lifetime"],
)
default_cooldown = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"default_cooldown",
local.workers_group_defaults["default_cooldown"]
)
health_check_type = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"health_check_type",
local.workers_group_defaults["health_check_type"]
)
health_check_grace_period = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"health_check_grace_period",
local.workers_group_defaults["health_check_grace_period"]
)
capacity_rebalance = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"capacity_rebalance",
local.workers_group_defaults["capacity_rebalance"]
)
dynamic "mixed_instances_policy" {
iterator = item
- for_each = (lookup(var.worker_groups_launch_template[count.index], "override_instance_types", null) != null) || (lookup(var.worker_groups_launch_template[count.index], "on_demand_allocation_strategy", local.workers_group_defaults["on_demand_allocation_strategy"]) != null) ? [var.worker_groups_launch_template[count.index]] : []
+ for_each = (lookup(var.worker_groups_launch_template_legacy[count.index], "override_instance_types", null) != null) || (lookup(var.worker_groups_launch_template_legacy[count.index], "on_demand_allocation_strategy", local.workers_group_defaults["on_demand_allocation_strategy"]) != null) ? [var.worker_groups_launch_template_legacy[count.index]] : []
content {
instances_distribution {
@@ -144,10 +144,10 @@ resource "aws_autoscaling_group" "workers_launch_template" {
launch_template_specification {
launch_template_id = aws_launch_template.workers_launch_template.*.id[count.index]
version = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"launch_template_version",
lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"launch_template_version",
local.workers_group_defaults["launch_template_version"]
) == "$Latest"
@@ -158,7 +158,7 @@ resource "aws_autoscaling_group" "workers_launch_template" {
dynamic "override" {
for_each = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"override_instance_types",
local.workers_group_defaults["override_instance_types"]
)
@@ -173,15 +173,15 @@ resource "aws_autoscaling_group" "workers_launch_template" {
dynamic "launch_template" {
iterator = item
- for_each = (lookup(var.worker_groups_launch_template[count.index], "override_instance_types", null) != null) || (lookup(var.worker_groups_launch_template[count.index], "on_demand_allocation_strategy", local.workers_group_defaults["on_demand_allocation_strategy"]) != null) ? [] : [var.worker_groups_launch_template[count.index]]
+ for_each = (lookup(var.worker_groups_launch_template_legacy[count.index], "override_instance_types", null) != null) || (lookup(var.worker_groups_launch_template_legacy[count.index], "on_demand_allocation_strategy", local.workers_group_defaults["on_demand_allocation_strategy"]) != null) ? [] : [var.worker_groups_launch_template_legacy[count.index]]
content {
id = aws_launch_template.workers_launch_template.*.id[count.index]
version = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"launch_template_version",
lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"launch_template_version",
local.workers_group_defaults["launch_template_version"]
) == "$Latest"
@@ -192,7 +192,7 @@ resource "aws_autoscaling_group" "workers_launch_template" {
}
dynamic "initial_lifecycle_hook" {
- for_each = var.worker_create_initial_lifecycle_hooks ? lookup(var.worker_groups_launch_template[count.index], "asg_initial_lifecycle_hooks", local.workers_group_defaults["asg_initial_lifecycle_hooks"]) : []
+ for_each = var.worker_create_initial_lifecycle_hooks ? lookup(var.worker_groups_launch_template_legacy[count.index], "asg_initial_lifecycle_hooks", local.workers_group_defaults["asg_initial_lifecycle_hooks"]) : []
content {
name = initial_lifecycle_hook.value["name"]
lifecycle_transition = initial_lifecycle_hook.value["lifecycle_transition"]
@@ -205,7 +205,7 @@ resource "aws_autoscaling_group" "workers_launch_template" {
}
dynamic "warm_pool" {
- for_each = lookup(var.worker_groups_launch_template[count.index], "warm_pool", null) != null ? [lookup(var.worker_groups_launch_template[count.index], "warm_pool")] : []
+ for_each = lookup(var.worker_groups_launch_template_legacy[count.index], "warm_pool", null) != null ? [lookup(var.worker_groups_launch_template_legacy[count.index], "warm_pool")] : []
content {
pool_state = lookup(warm_pool.value, "pool_state", null)
@@ -220,7 +220,7 @@ resource "aws_autoscaling_group" "workers_launch_template" {
{
"key" = "Name"
"value" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"name",
count.index,
)}-eks_asg"
@@ -239,10 +239,10 @@ resource "aws_autoscaling_group" "workers_launch_template" {
value = tag_value
propagate_at_launch = "true"
})
- if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups_launch_template[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
+ if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups_launch_template_legacy[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
],
lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"tags",
local.workers_group_defaults["tags"]
)
@@ -256,26 +256,26 @@ resource "aws_autoscaling_group" "workers_launch_template" {
# logic duplicated in workers.tf
dynamic "instance_refresh" {
- for_each = lookup(var.worker_groups_launch_template[count.index],
+ for_each = lookup(var.worker_groups_launch_template_legacy[count.index],
"instance_refresh_enabled",
local.workers_group_defaults["instance_refresh_enabled"]) ? [1] : []
content {
strategy = lookup(
- var.worker_groups_launch_template[count.index], "instance_refresh_strategy",
+ var.worker_groups_launch_template_legacy[count.index], "instance_refresh_strategy",
local.workers_group_defaults["instance_refresh_strategy"]
)
preferences {
instance_warmup = lookup(
- var.worker_groups_launch_template[count.index], "instance_refresh_instance_warmup",
+ var.worker_groups_launch_template_legacy[count.index], "instance_refresh_instance_warmup",
local.workers_group_defaults["instance_refresh_instance_warmup"]
)
min_healthy_percentage = lookup(
- var.worker_groups_launch_template[count.index], "instance_refresh_min_healthy_percentage",
+ var.worker_groups_launch_template_legacy[count.index], "instance_refresh_min_healthy_percentage",
local.workers_group_defaults["instance_refresh_min_healthy_percentage"]
)
}
triggers = lookup(
- var.worker_groups_launch_template[count.index], "instance_refresh_triggers",
+ var.worker_groups_launch_template_legacy[count.index], "instance_refresh_triggers",
local.workers_group_defaults["instance_refresh_triggers"]
)
}
@@ -288,27 +288,27 @@ resource "aws_autoscaling_group" "workers_launch_template" {
}
resource "aws_launch_template" "workers_launch_template" {
- count = var.create_eks ? (local.worker_group_launch_template_count) : 0
+ count = var.create_eks ? (local.worker_group_launch_template_legacy_count) : 0
name_prefix = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"name",
count.index,
)}"
update_default_version = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"update_default_version",
local.workers_group_defaults["update_default_version"],
)
network_interfaces {
associate_public_ip_address = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"public_ip",
local.workers_group_defaults["public_ip"],
)
delete_on_termination = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"eni_delete",
local.workers_group_defaults["eni_delete"],
)
@@ -316,7 +316,7 @@ resource "aws_launch_template" "workers_launch_template" {
local.worker_security_group_id,
var.worker_additional_security_group_ids,
lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"additional_security_group_ids",
local.workers_group_defaults["additional_security_group_ids"],
),
@@ -332,36 +332,36 @@ resource "aws_launch_template" "workers_launch_template" {
enclave_options {
enabled = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"enclave_support",
local.workers_group_defaults["enclave_support"],
)
}
image_id = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"ami_id",
- lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.default_ami_id_windows : local.default_ami_id_linux,
+ lookup(var.worker_groups_launch_template_legacy[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.default_ami_id_windows : local.default_ami_id_linux,
)
instance_type = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"instance_type",
local.workers_group_defaults["instance_type"],
)
dynamic "elastic_inference_accelerator" {
for_each = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"elastic_inference_accelerator",
local.workers_group_defaults["elastic_inference_accelerator"]
- ) != null ? [lookup(var.worker_groups_launch_template[count.index], "elastic_inference_accelerator", local.workers_group_defaults["elastic_inference_accelerator"])] : []
+ ) != null ? [lookup(var.worker_groups_launch_template_legacy[count.index], "elastic_inference_accelerator", local.workers_group_defaults["elastic_inference_accelerator"])] : []
content {
type = elastic_inference_accelerator.value
}
}
key_name = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"key_name",
local.workers_group_defaults["key_name"],
)
@@ -370,12 +370,12 @@ resource "aws_launch_template" "workers_launch_template" {
)
ebs_optimized = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"ebs_optimized",
!contains(
local.ebs_optimized_not_supported,
lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"instance_type",
local.workers_group_defaults["instance_type"],
)
@@ -384,17 +384,17 @@ resource "aws_launch_template" "workers_launch_template" {
metadata_options {
http_endpoint = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"metadata_http_endpoint",
local.workers_group_defaults["metadata_http_endpoint"],
)
http_tokens = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"metadata_http_tokens",
local.workers_group_defaults["metadata_http_tokens"],
)
http_put_response_hop_limit = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"metadata_http_put_response_hop_limit",
local.workers_group_defaults["metadata_http_put_response_hop_limit"],
)
@@ -402,10 +402,10 @@ resource "aws_launch_template" "workers_launch_template" {
dynamic "credit_specification" {
for_each = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"cpu_credits",
local.workers_group_defaults["cpu_credits"]
- ) != null ? [lookup(var.worker_groups_launch_template[count.index], "cpu_credits", local.workers_group_defaults["cpu_credits"])] : []
+ ) != null ? [lookup(var.worker_groups_launch_template_legacy[count.index], "cpu_credits", local.workers_group_defaults["cpu_credits"])] : []
content {
cpu_credits = credit_specification.value
}
@@ -413,18 +413,18 @@ resource "aws_launch_template" "workers_launch_template" {
monitoring {
enabled = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"enable_monitoring",
local.workers_group_defaults["enable_monitoring"],
)
}
dynamic "placement" {
- for_each = lookup(var.worker_groups_launch_template[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"]) != null ? [lookup(var.worker_groups_launch_template[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"])] : []
+ for_each = lookup(var.worker_groups_launch_template_legacy[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"]) != null ? [lookup(var.worker_groups_launch_template_legacy[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"])] : []
content {
tenancy = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"launch_template_placement_tenancy",
local.workers_group_defaults["launch_template_placement_tenancy"],
)
@@ -433,7 +433,7 @@ resource "aws_launch_template" "workers_launch_template" {
}
dynamic "instance_market_options" {
- for_each = lookup(var.worker_groups_launch_template[count.index], "market_type", null) == null ? [] : tolist([lookup(var.worker_groups_launch_template[count.index], "market_type", null)])
+ for_each = lookup(var.worker_groups_launch_template_legacy[count.index], "market_type", null) == null ? [] : tolist([lookup(var.worker_groups_launch_template_legacy[count.index], "market_type", null)])
content {
market_type = instance_market_options.value
}
@@ -441,39 +441,39 @@ resource "aws_launch_template" "workers_launch_template" {
block_device_mappings {
device_name = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"root_block_device_name",
- lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.workers_group_defaults["root_block_device_name_windows"] : local.workers_group_defaults["root_block_device_name"],
+ lookup(var.worker_groups_launch_template_legacy[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.workers_group_defaults["root_block_device_name_windows"] : local.workers_group_defaults["root_block_device_name"],
)
ebs {
volume_size = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"root_volume_size",
local.workers_group_defaults["root_volume_size"],
)
volume_type = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"root_volume_type",
local.workers_group_defaults["root_volume_type"],
)
iops = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"root_iops",
local.workers_group_defaults["root_iops"],
)
throughput = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"root_volume_throughput",
local.workers_group_defaults["root_volume_throughput"],
)
encrypted = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"root_encrypted",
local.workers_group_defaults["root_encrypted"],
)
kms_key_id = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"root_kms_key_id",
local.workers_group_defaults["root_kms_key_id"],
)
@@ -482,7 +482,7 @@ resource "aws_launch_template" "workers_launch_template" {
}
dynamic "block_device_mappings" {
- for_each = lookup(var.worker_groups_launch_template[count.index], "additional_ebs_volumes", local.workers_group_defaults["additional_ebs_volumes"])
+ for_each = lookup(var.worker_groups_launch_template_legacy[count.index], "additional_ebs_volumes", local.workers_group_defaults["additional_ebs_volumes"])
content {
device_name = block_device_mappings.value.block_device_name
@@ -524,7 +524,7 @@ resource "aws_launch_template" "workers_launch_template" {
}
dynamic "block_device_mappings" {
- for_each = lookup(var.worker_groups_launch_template[count.index], "additional_instance_store_volumes", local.workers_group_defaults["additional_instance_store_volumes"])
+ for_each = lookup(var.worker_groups_launch_template_legacy[count.index], "additional_instance_store_volumes", local.workers_group_defaults["additional_instance_store_volumes"])
content {
device_name = block_device_mappings.value.block_device_name
virtual_name = lookup(
@@ -541,7 +541,7 @@ resource "aws_launch_template" "workers_launch_template" {
tags = merge(
{
"Name" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"name",
count.index,
)}-eks_asg"
@@ -556,14 +556,14 @@ resource "aws_launch_template" "workers_launch_template" {
tags = merge(
{
"Name" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"name",
count.index,
)}-eks_asg"
},
{ for tag_key, tag_value in var.tags :
tag_key => tag_value
- if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups_launch_template[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
+ if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups_launch_template_legacy[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
}
)
}
@@ -592,10 +592,10 @@ resource "aws_launch_template" "workers_launch_template" {
}
resource "aws_iam_instance_profile" "workers_launch_template" {
- count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_launch_template_count : 0
+ count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_launch_template_legacy_count : 0
name_prefix = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
role = lookup(
- var.worker_groups_launch_template[count.index],
+ var.worker_groups_launch_template_legacy[count.index],
"iam_role_id",
local.default_iam_role_id,
)