diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 65df33743e..53127506fa 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -31,4 +31,3 @@ To generate changelog, Pull Requests or Commits must have semantic and must foll
- `chore:` for chores stuff
The `chore` prefix skipped during changelog generation. It can be used for `chore: update changelog` commit message by example.
-
diff --git a/docs/upgrades.md b/.github/UPGRADE-17.0.md
similarity index 100%
rename from docs/upgrades.md
rename to .github/UPGRADE-17.0.md
diff --git a/.github/images/security_groups.svg b/.github/images/security_groups.svg
new file mode 100644
index 0000000000..6b120e98ba
--- /dev/null
+++ b/.github/images/security_groups.svg
@@ -0,0 +1 @@
+
diff --git a/.github/images/user_data.svg b/.github/images/user_data.svg
new file mode 100644
index 0000000000..a36b8051e4
--- /dev/null
+++ b/.github/images/user_data.svg
@@ -0,0 +1 @@
+
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 141937d863..63efd61394 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,6 +7,7 @@ on:
- main
- master
paths:
+ - '**/*.tpl'
- '**/*.py'
- '**/*.tf'
diff --git a/.github/workflows/stale-actions.yaml b/.github/workflows/stale-actions.yaml
index 93c90dfcd1..d2a5f6b96e 100644
--- a/.github/workflows/stale-actions.yaml
+++ b/.github/workflows/stale-actions.yaml
@@ -29,4 +29,4 @@ jobs:
days-before-close: 10
delete-branch: true
close-issue-message: This issue was automatically closed because of stale in 10 days
- close-pr-message: This PR was automatically closed because of stale in 10 days
\ No newline at end of file
+ close-pr-message: This PR was automatically closed because of stale in 10 days
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 20e651ab6a..006410918f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
- rev: v1.58.0
+ rev: v1.62.0
hooks:
- id: terraform_fmt
- id: terraform_validate
@@ -17,7 +17,7 @@ repos:
- '--args=--only=terraform_documented_variables'
- '--args=--only=terraform_typed_variables'
- '--args=--only=terraform_module_pinned_source'
- # - '--args=--only=terraform_naming_convention'
+ - '--args=--only=terraform_naming_convention'
- '--args=--only=terraform_required_version'
- '--args=--only=terraform_required_providers'
- '--args=--only=terraform_standard_module_structure'
@@ -26,3 +26,4 @@ repos:
rev: v4.0.1
hooks:
- id: check-merge-conflict
+ - id: end-of-file-fixer
diff --git a/README.md b/README.md
index 26c6f11cec..c93a37a8d8 100644
--- a/README.md
+++ b/README.md
@@ -1,114 +1,641 @@
# AWS EKS Terraform module
-[![Lint Status](https://github.com/terraform-aws-modules/terraform-aws-eks/workflows/Lint/badge.svg)](https://github.com/terraform-aws-modules/terraform-aws-eks/actions)
-[![LICENSE](https://img.shields.io/github/license/terraform-aws-modules/terraform-aws-eks)](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/LICENSE)
+Terraform module which creates AWS EKS (Kubernetes) resources
+
+## Available Features
+
+- AWS EKS Cluster
+- AWS EKS Cluster Addons
+- AWS EKS Identity Provider Configuration
+- All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported:
+ - [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)
+ - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html)
+ - [Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
+- Support for custom AMI, custom launch template, and custom user data
+- Support for Amazon Linux 2 EKS Optimized AMI and Bottlerocket nodes
+ - Windows based node support is limited to a default user data template, due to the lack of Windows support and the manual steps required to provision Windows based EKS nodes
+- Support for module created security group, bring your own security groups, as well as adding additional security group rules to the module created security group(s)
+- Support for providing maps of node groups/Fargate profiles to the cluster module definition, or using the separate node group/Fargate profile sub-modules
+- Ability to provide node group/Fargate profile "default" settings - useful when creating multiple node groups/Fargate profiles that share a common set of configurations, while still allowing individual control of select features
+
+## Usage
-Terraform module which creates Kubernetes cluster resources on AWS EKS.
+```hcl
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
-## Features
+ cluster_name = "my-cluster"
+ cluster_version = "1.21"
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
+ cluster_encryption_config = [{
+ provider_key_arn = "ac01234b-00d9-40f6-ac95-e42345f78b00"
+ resources = ["secrets"]
+ }]
+
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+ # Self Managed Node Group(s)
+ self_managed_node_group_defaults = {
+ instance_type = "m6i.large"
+ update_launch_template_default_version = true
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+ }
+
+ self_managed_node_groups = {
+ one = {
+ name = "spot-1"
+
+ public_ip = true
+ max_size = 5
+ desired_size = 2
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ on_demand_base_capacity = 0
+ on_demand_percentage_above_base_capacity = 10
+ spot_allocation_strategy = "capacity-optimized"
+ }
+
+ override = [
+ {
+ instance_type = "m5.large"
+ weighted_capacity = "1"
+ },
+ {
+ instance_type = "m6i.large"
+ weighted_capacity = "2"
+ },
+ ]
+ }
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ cd /tmp
+ sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+ sudo systemctl enable amazon-ssm-agent
+ sudo systemctl start amazon-ssm-agent
+ EOT
+ }
+ }
+
+ # EKS Managed Node Group(s)
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ }
+
+ eks_managed_node_groups = {
+ blue = {}
+ green = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+ taints = {
+ dedicated = {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ }
+ tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ # Fargate Profile(s)
+ fargate_profiles = {
+ default = {
+ name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
-- Create an EKS cluster
-- All node types are supported:
- - [Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)
- - [Self-managed Nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html)
- - [Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
-- Support AWS EKS Optimized or Custom AMI
-- Create or manage security groups that allow communication and coordination
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+```
-## Important note
+## Node Group Configuration
-Kubernetes is evolving a lot, and each minor version includes new features, fixes, or changes.
+⚠️ The configurations shown below are referenced from within the root EKS module; there will be slight differences in the default values provided when compared to the underlying sub-modules (`eks-managed-node-group`, `self-managed-node-group`, and `fargate-profile`).
-**Always check [Kubernetes Release Notes](https://kubernetes.io/docs/setup/release/notes/) before updating the major version, and [CHANGELOG.md](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.md) for all changes in this EKS module.**
+### EKS Managed Node Groups
-You also need to ensure that your applications and add ons are updated, or workloads could fail after the upgrade is complete. For action, you may need to take before upgrading, see the steps in the [EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html).
+ℹ️ Only the pertinent attributes are shown for brevity
-## Usage example
+1. AWS EKS Managed Node Group can provide its own launch template and utilize the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version:
```hcl
-data "aws_eks_cluster" "eks" {
- name = module.eks.cluster_id
-}
+ eks_managed_node_groups = {
+ default = {}
+ }
+```
-data "aws_eks_cluster_auth" "eks" {
- name = module.eks.cluster_id
-}
+2. AWS EKS Managed Node Group also offers native, default support for Bottlerocket OS by simply specifying the AMI type:
-provider "kubernetes" {
- host = data.aws_eks_cluster.eks.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.eks.token
-}
+```hcl
+ eks_managed_node_groups = {
+ bottlerocket_default = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+ }
+ }
+```
-module "eks" {
- source = "terraform-aws-modules/eks/aws"
+3. AWS EKS Managed Node Groups allow you to extend configurations by providing your own launch template and user data that is merged with what the service provides. For example, to provide additional user data before the nodes are bootstrapped as well as supply additional arguments to the bootstrap script:
+
+```hcl
+ eks_managed_node_groups = {
+ extend_config = {
+ # This is supplied to the AWS EKS Optimized AMI
+ # bootstrap script https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+
+ # This user data will be injected prior to the user data provided by the
+      # AWS EKS Managed Node Group service (contains the actual bootstrap configuration)
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
+ }
+ }
+```
+
+4. The same configuration extension is offered when utilizing Bottlerocket OS AMIs, but the user data is slightly different. Bottlerocket OS uses a TOML user data file and you can provide additional configuration settings via the `bootstrap_extra_args` variable, which gets merged into what is provided by the AWS EKS Managed Node Group service:
+
+```hcl
+ eks_managed_node_groups = {
+ bottlerocket_extend_config = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+
+ # this will get added to what AWS provides
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+ }
+ }
+```
+
+5. Users can also utilize a custom AMI, but doing so means that AWS EKS Managed Node Group will NOT inject the necessary bootstrap script and configurations into the user data supplied to the launch template. When using a custom AMI, users must also opt in to bootstrapping the nodes via user data and either use the module default user data template or provide their own user data template file:
+```hcl
+ eks_managed_node_groups = {
+ custom_ami = {
+ ami_id = "ami-0caf35bc73450c396"
+
+ # By default, EKS managed node groups will not append bootstrap script;
+ # this adds it back in using the default template provided by the module
+ # Note: this assumes the AMI provided is an EKS optimized AMI derivative
+ enable_bootstrap_user_data = true
+
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
+
+ # Because we have full control over the user data supplied, we can also run additional
+ # scripts/configuration changes after the bootstrap script has been run
+ post_bootstrap_user_data = <<-EOT
+ echo "you are free little kubelet!"
+ EOT
+ }
+ }
+```
+
+6. Bottlerocket custom AMIs are supported in the same manner:
+
+```hcl
+ eks_managed_node_groups = {
+ bottlerocket_custom_ami = {
+ ami_id = "ami-0ff61e0bcfc81dc94"
+ platform = "bottlerocket"
+
+ # use module user data template to bootstrap
+ enable_bootstrap_user_data = true
+ # this will get added to the template
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+
+ [settings.kubernetes.node-labels]
+ "label1" = "foo"
+ "label2" = "bar"
+
+ [settings.kubernetes.node-taints]
+ "dedicated" = "experimental:PreferNoSchedule"
+ "special" = "true:NoSchedule"
+ EOT
+ }
+ }
+```
+
+See the [`examples/eks_managed_node_group/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group) for a working example of these configurations.
+
+### Self Managed Node Groups
+
+ℹ️ Only the pertinent attributes are shown for brevity
+
+1. By default, the `self-managed-node-group` sub-module will use the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version:
+
+```hcl
cluster_version = "1.21"
- cluster_name = "my-cluster"
- vpc_id = "vpc-1234556abcdef"
- subnets = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
-
- worker_groups = [
- {
- instance_type = "m4.large"
- asg_max_size = 5
+
+ # This self managed node group will use the latest AWS EKS Optimized AMI for Kubernetes 1.21
+ self_managed_node_groups = {
+ default = {}
+ }
+```
+
+2. To use Bottlerocket, specify the `platform` as `bottlerocket` and supply the Bottlerocket AMI. The module provided user data for Bottlerocket will be used to bootstrap the nodes created:
+
+```hcl
+ cluster_version = "1.21"
+
+ self_managed_node_groups = {
+ bottlerocket = {
+ platform = "bottlerocket"
+ ami_id = data.aws_ami.bottlerocket_ami.id
}
- ]
-}
+ }
+```
+
+### Fargate Profiles
+
+Fargate profiles are rather straightforward. Simply supply the necessary information for the desired profile(s). See the [`examples/fargate_profile/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile) for a working example of the various configurations.
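+
+As a minimal sketch, using only attributes already shown in the usage example above, a single profile selecting the `default` namespace might look like:
+
+```hcl
+  fargate_profiles = {
+    default = {
+      name = "default"
+      selectors = [
+        {
+          namespace = "default"
+        }
+      ]
+    }
+  }
+```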
+
+### Mixed Node Groups
+
+ℹ️ Only the pertinent attributes are shown for brevity
+
+Users are free to mix and match the different node group types to meet their needs. The following are just a few of the possible combinations:
+- AWS EKS Cluster with one or more AWS EKS Managed Node Groups
+- AWS EKS Cluster with one or more Self Managed Node Groups
+- AWS EKS Cluster with one or more Fargate profiles
+- AWS EKS Cluster with one or more AWS EKS Managed Node Groups, one or more Self Managed Node Groups, one or more Fargate profiles
+
+It is also possible to configure the various node groups of each family differently. Node groups may also be defined outside of the root `eks` module definition by using the provided sub-modules. There are no restrictions on the various combinations supported by the module.
+
+```hcl
+ self_managed_node_group_defaults = {
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+ }
+
+ self_managed_node_groups = {
+ one = {
+ name = "spot-1"
+
+ public_ip = true
+ max_size = 5
+ desired_size = 2
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ on_demand_base_capacity = 0
+ on_demand_percentage_above_base_capacity = 10
+ spot_allocation_strategy = "capacity-optimized"
+ }
+
+ override = [
+ {
+ instance_type = "m5.large"
+ weighted_capacity = "1"
+ },
+ {
+ instance_type = "m6i.large"
+ weighted_capacity = "2"
+ },
+ ]
+ }
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ cd /tmp
+ sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+ sudo systemctl enable amazon-ssm-agent
+ sudo systemctl start amazon-ssm-agent
+ EOT
+ }
+ }
+
+ # EKS Managed Node Group(s)
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ }
+
+ eks_managed_node_groups = {
+ blue = {}
+ green = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = {
+ dedicated = {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ }
+
+ update_config = {
+ max_unavailable_percentage = 50 # or set `max_unavailable`
+ }
+
+ tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ # Fargate Profile(s)
+ fargate_profiles = {
+ default = {
+ name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
+```
+
+See the [`examples/complete/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) for a working example of these configurations.
+
+### Default configurations
+
+Each node group type (EKS managed node group, self managed node group, or Fargate profile) provides a default configuration setting that allows users to provide their own default configuration instead of the module's default configuration. This allows users to set a common set of defaults for their node groups and still maintain the ability to override these settings within the specific node group definition. The order of precedence for each node group type roughly follows (from highest to lowest precedence):
+- Node group individual configuration
+ - Node group family default configuration
+ - Module default configuration
+
+These are provided via the following variables for the respective node group family:
+- `eks_managed_node_group_defaults`
+- `self_managed_node_group_defaults`
+- `fargate_profile_defaults`
+
+For example, the following creates 4 AWS EKS Managed Node Groups:
+
+```hcl
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ }
+
+ eks_managed_node_groups = {
+ # Uses defaults provided by module with the default settings above overriding the module defaults
+ default = {}
+
+ # This further overrides the instance types used
+ compute = {
+ instance_types = ["c5.large", "c6i.large", "c6d.large"]
+ }
+
+ # This further overrides the instance types and disk size used
+ persistent = {
+ disk_size = 1024
+ instance_types = ["r5.xlarge", "r6i.xlarge", "r5b.xlarge"]
+ }
+
+ # This overrides the OS used
+ bottlerocket = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+ }
+ }
```
-There is also a [complete example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) which shows large set of features available in the module.
+## Module Design Considerations
+
+### General Notes
+
+While the module is designed to be flexible and support as many use cases and configurations as possible, there is a limit to what first class support can be provided without over-burdening the complexity of the module. Below is a list of general notes on the design intent captured by this module, which hopefully explains some of the decisions that are, or will be, made in terms of what is added/supported natively by the module:
+
+- Despite the addition of Windows Subsystem for Linux (WSL for short), containerization technology is very much a suite of Linux constructs and therefore Linux is the primary OS supported by this module. In addition, due to the first class support provided by AWS, Bottlerocket OS and Fargate Profiles are also fully supported by this module. This module does not attempt to prevent the usage of Windows based nodes; however, it is up to users to put in the additional effort required to operate Windows based nodes when using the module. Users can refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for further details. What this means is:
+ - AWS EKS Managed Node Groups default to `linux` as the `platform`, but `bottlerocket` is also supported by AWS (`windows` is not supported by AWS EKS Managed Node groups)
+ - AWS Self Managed Node Groups also default to `linux` and the default AMI used is the latest AMI for the selected Kubernetes version. If you wish to use a different OS or AMI then you will need to opt in to the necessary configurations to ensure the correct AMI is used in conjunction with the necessary user data to ensure the nodes are launched and joined to your cluster successfully.
+- AWS EKS Managed Node Groups are currently the preferred route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node Groups provide a better, more "managed service" user experience and therefore take precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to roll out additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When requesting added feature support for AWS EKS Managed Node Groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
+- Due to the plethora of tooling and different manners of configuring your cluster, cluster configuration is intentionally left out of the module in order to simplify the module for a broader user base. Previous module versions provided support for managing the aws-auth configmap via the Kubernetes Terraform provider using the now deprecated aws-iam-authenticator; these are no longer included in the module. This module strictly focuses on the infrastructure resources to provision an EKS cluster as well as any supporting AWS resources. How the internals of the cluster are configured and managed is up to users and is outside the scope of this module. There is an output attribute, `aws_auth_configmap_yaml`, that has been provided that can be useful to help bridge this transition. Please see the various examples provided where this attribute is used to ensure that self managed node groups or external node groups have their IAM roles appropriately mapped to the aws-auth configmap. How users elect to manage the aws-auth configmap is left up to their choosing.
+
+### User Data & Bootstrapping
+
+There are a multitude of different possible configurations for how module users require their user data to be configured. In order to better support the various combinations - from simple, out of the box support provided by the module to full customization of the user data using a template provided by users - the user data has been abstracted out to its own module. Users can see the various methods of using and providing user data through the [user data examples](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data), as well as more detailed information on the design and possible configurations via the [user data module itself](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data).
+
+In general (tl;dr):
+- AWS EKS Managed Node Groups
+ - `linux` platform (default) -> user data is pre-pended to the AWS provided bootstrap user data (bash/shell script) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to bootstrap nodes to join the cluster
+ - `bottlerocket` platform -> user data is merged with the AWS provided bootstrap user data (TOML file) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to bootstrap nodes to join the cluster
+- Self Managed Node Groups
+ - `linux` platform (default) -> the user data template (bash/shell script) provided by the module is used as the default; users are able to provide their own user data template
+ - `bottlerocket` platform -> the user data template (TOML file) provided by the module is used as the default; users are able to provide their own user data template
+ - `windows` platform -> the user data template (powershell/PS1 script) provided by the module is used as the default; users are able to provide their own user data template
+
+Module provided default templates can be found under the [templates directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/templates)
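+
+As a rough sketch of the fully custom route - note that the `user_data_template_path` attribute name is an assumption here and should be verified against the user data module's inputs - a node group could point at its own template file:
+
+```hcl
+  self_managed_node_groups = {
+    custom_template = {
+      # hypothetical path to your own template file, modeled on the module's
+      # default templates under templates/
+      user_data_template_path = "${path.module}/templates/custom-user-data.sh.tpl"
+    }
+  }
+```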
+
+### Security Groups
+
+- Cluster Security Group
+ - This module by default creates a cluster security group ("additional" security group when viewed from the console) in addition to the default security group created by the AWS EKS service. This "additional" security group allows users to customize inbound and outbound rules via the module as they see fit
+ - The default inbound/outbound rules provided by the module are derived from the [AWS minimum recommendations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in addition to NTP and HTTPS public internet egress rules (without these rules, the traffic shows up in VPC flow logs as rejects - they are used for clock sync and downloading necessary packages/updates)
+ - The minimum inbound/outbound rules are provided for cluster and node creation to succeed without errors, but users will most likely need to add the necessary port and protocol for node-to-node communication (this is user specific based on how nodes are configured to communicate across the cluster)
+ - Users have the ability to opt out of the security group creation and instead provide their own externally created security group if so desired
+ - The security group that is created is designed to handle the bare minimum communication necessary between the control plane and the nodes, as well as any external egress to allow the cluster to successfully launch without error
+ - Users also have the option to supply additional, externally created security groups to the cluster as well via the `cluster_additional_security_group_ids` variable
-## Submodules
+- Node Group Security Group(s)
+ - Each node group (EKS Managed Node Group and Self Managed Node Group) by default creates its own security group. By default, this security group does not contain any additional security group rules. It is merely an "empty container" that offers users the ability to opt in to any additional inbound or outbound rules as necessary
+ - Users also have the option to supply their own, and/or additional, externally created security group(s) to the node group via the `vpc_security_group_ids` variable
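+
+As an illustrative sketch only - `node_security_group_additional_rules` is assumed here as the input for extra node security group rules; verify the exact variable names against the module's documentation - a node-to-node ingress rule could look like:
+
+```hcl
+  node_security_group_additional_rules = {
+    ingress_self_all = {
+      description = "Node to node ingress on all ports/protocols"
+      protocol    = "-1"
+      from_port   = 0
+      to_port     = 0
+      type        = "ingress"
+      self        = true
+    }
+  }
+```
+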
-Root module calls these modules which can also be used separately to create independent resources:
+The security groups created by this module are depicted in the image shown below along with their default inbound/outbound rules:
-- [fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/fargate) - creates Fargate profiles, see [examples/fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) for detailed examples.
-
+<img src=".github/images/security_groups.svg" alt="Security Groups" width="100%">
+
+
## Notes
-- By default, this module manages the `aws-auth` configmap for you (`manage_aws_auth=true`). To avoid the following [issue](https://github.com/aws/containers-roadmap/issues/654) where the EKS creation is `ACTIVE` but not ready. We implemented a "retry" logic with a [fork of the http provider](https://github.com/terraform-aws-modules/terraform-provider-http). This fork adds the support of a self-signed CA certificate. The original PR can be found [here](https://github.com/hashicorp/terraform-provider-http/pull/29).
+- Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. See the [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh) example provided.
-- Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. Find the complete example here [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh).
+## Frequently Asked Questions
-## Documentation
+
+### Why are nodes not being registered?
-### Official docs
+This is often caused by one of two issues:
+1. Networking or endpoint mis-configuration
+2. Permissions (IAM/RBAC)
-- [Amazon Elastic Kubernetes Service (Amazon EKS)](https://docs.aws.amazon.com/eks/latest/userguide/).
+At least one of the cluster endpoints (public or private) must be enabled in order for nodes to access the cluster. If you require a public endpoint, setting up both (public and private) and restricting the public endpoint via `cluster_endpoint_public_access_cidrs` is recommended. More info regarding communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
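+
+For example, using the endpoint variables shown in the usage example above (the CIDR below is only a placeholder):
+
+```hcl
+  cluster_endpoint_private_access      = true
+  cluster_endpoint_public_access       = true
+  cluster_endpoint_public_access_cidrs = ["203.0.113.0/24"] # placeholder - restrict to your own ranges
+```
+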
-### Module docs
+Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:
-- [Autoscaling](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/autoscaling.md): How to enable worker node autoscaling.
-- [Enable Docker Bridge Network](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/enable-docker-bridge-network.md): How to enable the docker bridge network when using the EKS-optimized AMI, which disables it by default.
-- [Spot instances](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/spot-instances.md): How to use spot instances with this module.
-- [IAM Permissions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md): Minimum IAM permissions needed to setup EKS Cluster.
-- [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md): Frequently Asked Questions
+- Nodes in private subnets: via a NAT gateway or instance along with the appropriate routing rules
+- Nodes in public subnets: ensure that public IP assignment is enabled for the nodes (either through the module's `public_ip` setting or your subnet defaults)
-## Examples
+Important: If you apply only the public endpoint and configure the `cluster_endpoint_public_access_cidrs` to restrict access, know that EKS nodes will also use the public endpoint and you must allow access to the endpoint. If not, then your nodes will fail to work correctly.
-There are detailed examples available for you to see how certain features of this module can be used in a straightforward way. Make sure to check them and run them before opening an issue. [Here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md) you can find the list of the minimum IAM Permissions required to create EKS cluster.
+Cluster private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node communication to the endpoint stays within the VPC. Ensure that VPC DNS resolution and hostnames are also enabled for your VPC when the private endpoint is enabled.
-- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) - Create EKS Cluster with all available workers types in various combinations with many of supported features.
-- [Bottlerocket](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/bottlerocket) - Create EKS cluster using [Bottlerocket AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html).
-- [Fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) - Create EKS cluster with [Fargate profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) and attach Fargate profiles to an existing EKS cluster.
-
-## Contributing
+Nodes need to be able to connect to other AWS services and to pull container images from container registries (ECR). If for some reason you cannot enable public internet access for nodes, you can add VPC endpoints to the relevant services: EC2 API, ECR API, ECR DKR and S3.
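+
+As a rough sketch, these are plain `aws_vpc_endpoint` resources created outside of this module; the region, VPC, and subnet IDs below are illustrative (reused from the usage example above):
+
+```hcl
+locals {
+  region = "eu-west-1" # illustrative
+}
+
+resource "aws_vpc_endpoint" "ecr_api" {
+  vpc_id              = "vpc-1234556abcdef"
+  service_name        = "com.amazonaws.${local.region}.ecr.api"
+  vpc_endpoint_type   = "Interface"
+  subnet_ids          = ["subnet-abcde012", "subnet-bcde012a"]
+  private_dns_enabled = true
+}
+
+# ECR DKR and EC2 API endpoints follow the same Interface pattern
+
+resource "aws_vpc_endpoint" "s3" {
+  vpc_id            = "vpc-1234556abcdef"
+  service_name      = "com.amazonaws.${local.region}.s3"
+  vpc_endpoint_type = "Gateway"
+}
+```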
-Report issues/questions/feature requests on in the [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new) section.
+
+### How can I work with the cluster if I disable the public endpoint?
-Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md).
+You have to interact with the cluster from within the VPC that it is associated with; either through a VPN connection, a bastion EC2 instance, etc.
-## Authors
+
+### How can I stop Terraform from removing the EKS tags from my VPC and subnets?
-This module has been originally created by [Brandon O'Connor](https://github.com/brandoconnor), and was maintained by [Max Williams](https://github.com/max-rocket-internet), [Thierno IB. BARRY](https://github.com/barryib) and many more [contributors listed here](https://github.com/terraform-aws-modules/terraform-aws-eks/graphs/contributors)!
+You need to add the tags to the Terraform definition of the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
-## License
+An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore_tags-configuration-block). However, this can also cause Terraform to display a perpetual difference.
+
+
+### Why are there no changes when a node group's desired count is modified?
+
+The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block. The setting is ignored to allow the cluster autoscaler to work correctly so that `terraform apply` does not accidentally remove running workers. You can change the desired count via the CLI or console if you're not using the cluster autoscaler.
+
+If you are not using autoscaling and want to control the number of nodes via terraform, set the `min_size` and `max_size` for node groups. Before changing those values, you must satisfy AWS `desired_size` constraints (which must be between new min/max values).
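+
+For example, a sketch reusing attribute names from the node group examples above:
+
+```hcl
+  eks_managed_node_groups = {
+    example = {
+      # desired_size only takes effect at creation; afterwards, scale by moving
+      # the min/max bounds (or via the CLI/console/cluster autoscaler)
+      min_size = 3
+      max_size = 3
+    }
+  }
+```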
+
+
+### Why are nodes not recreated when the `launch_template` is recreated?
+
+By default the ASG for a self-managed node group is not configured to be recreated when the launch configuration or template changes; you will need to use a process to drain and cycle the nodes.
+
+If you are NOT using the cluster autoscaler:
+
+- Add a new instance
+- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
+- Wait for pods to be Running
+- Terminate the old node instance. ASG will start a new instance
+- Repeat the drain and delete process until all old nodes are replaced
+
+If you are using the cluster autoscaler:
+
+- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
+- Wait for pods to be Running
+- Cluster autoscaler will create new nodes when required
+- Repeat until all old nodes are drained
+- Cluster autoscaler will terminate the old nodes after 10-60 minutes automatically
+
+You can also use a third-party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
+
+Alternatively, use a managed node group instead.
+
+### How can I use Windows workers?
-Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details.
+To enable Windows support for your EKS cluster, you will need to apply some configuration manually. See [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
+
+Windows based nodes require an additional cluster role (`eks:kube-proxy-windows`).
+
+
+### Worker nodes with labels do not join a 1.16+ cluster
+
+As of Kubernetes 1.16, kubelet restricts which labels with names in the `kubernetes.io` namespace can be applied to nodes. Labels such as `kubernetes.io/lifecycle=spot` are no longer allowed; instead use `node.kubernetes.io/lifecycle=spot`
+
+See your Kubernetes version's documentation for the `--node-labels` kubelet flag for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
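+
+For example, with a self managed node group (the `bootstrap_extra_args` value below is taken from the usage example above):
+
+```hcl
+  self_managed_node_groups = {
+    spot = {
+      # use the node.kubernetes.io prefix; kubernetes.io/lifecycle=spot is rejected by kubelet on 1.16+
+      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+    }
+  }
+```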
+
+
+
+## Examples
+
+- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations
+- [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups
+- [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
+- [IRSA, Node Autoscaler, Instance Refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/irsa_autoscale_instance_refresh): EKS Cluster using self-managed node group demonstrating how to enable/utilize instance refresh configuration along with node termination handler
+- [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups
+- [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data): Various supported methods of providing necessary bootstrap scripts and configuration settings via user data
+
+## Contributing
+
+Report issues/questions/feature requests via [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new)
+Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md)
## Requirements
@@ -116,195 +643,129 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
-| [http](#requirement\_http) | >= 2.4.1 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [tls](#requirement\_tls) | >= 2.2 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [http](#provider\_http) | >= 2.4.1 |
-| [kubernetes](#provider\_kubernetes) | >= 1.11.1 |
-| [local](#provider\_local) | >= 1.4 |
+| [aws](#provider\_aws) | >= 3.64 |
+| [tls](#provider\_tls) | >= 2.2 |
## Modules
| Name | Source | Version |
|------|--------|---------|
-| [fargate](#module\_fargate) | ./modules/fargate | n/a |
-| [node\_groups](#module\_node\_groups) | ./modules/node_groups | n/a |
+| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a |
+| [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a |
+| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a |
## Resources
| Name | Type |
|------|------|
-| [aws_autoscaling_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
-| [aws_autoscaling_group.workers_launch_template](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
+| [aws_eks_addon.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) | resource |
| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
-| [aws_iam_instance_profile.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
-| [aws_iam_instance_profile.workers_launch_template](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_eks_identity_provider_config.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_identity_provider_config) | resource |
| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
-| [aws_iam_policy.cluster_deny_log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_deny_log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_additional_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_launch_configuration.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration) | resource |
-| [aws_launch_template.workers_launch_template](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_https_worker_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_primary_ingress_workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_kubelet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_primary](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
-| [local_file.kubeconfig](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
-| [aws_ami.eks_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
-| [aws_ami.eks_worker_windows](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
-| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_iam_instance_profile.custom_worker_group_iam_instance_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_instance_profile) | data source |
-| [aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_instance_profile) | data source |
-| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_deny_log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.workers_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_role.custom_cluster_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
+| [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [http_http.wait_for_cluster](https://registry.terraform.io/providers/terraform-aws-modules/http/latest/docs/data-sources/http) | data source |
+| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [attach\_worker\_cni\_policy](#input\_attach\_worker\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster. | `bool` | `true` | no |
-| [aws\_auth\_additional\_labels](#input\_aws\_auth\_additional\_labels) | Additional kubernetes labels applied on aws-auth ConfigMap | `map(string)` | `{}` | no |
-| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted. | `bool` | `false` | no |
-| [cluster\_create\_security\_group](#input\_cluster\_create\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`. | `bool` | `true` | no |
-| [cluster\_create\_timeout](#input\_cluster\_create\_timeout) | Timeout value when creating the EKS cluster. | `string` | `"30m"` | no |
-| [cluster\_delete\_timeout](#input\_cluster\_delete\_timeout) | Timeout value when deleting the EKS cluster. | `string` | `"15m"` | no |
-| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic. | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
-| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | | `[]` | no |
-| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled. | `bool` | `false` | no |
-| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`. | `list(string)` | `null` | no |
-| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`. | `list(string)` | `null` | no |
-| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`. | `bool` | `true` | no |
-| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint. | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | IAM role name for the cluster. If manage\_cluster\_iam\_resources is set to false, set this to reuse an existing IAM role. If manage\_cluster\_iam\_resources is set to true, set this to force the created role name. | `string` | `""` | no |
-| [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
-| [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days. | `number` | `90` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. Also used as a prefix in names of related resources. | `string` | `""` | no |
-| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
-| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
-| [cluster\_tags](#input\_cluster\_tags) | A map of tags to add to just the eks resource. | `map(string)` | `{}` | no |
-| [cluster\_update\_timeout](#input\_cluster\_update\_timeout) | Timeout value when updating the EKS cluster. | `string` | `"60m"` | no |
-| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21). | `string` | `null` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created. | `bool` | `true` | no |
-| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows`. | `string` | `"linux"` | no |
-| [eks\_oidc\_root\_ca\_thumbprint](#input\_eks\_oidc\_root\_ca\_thumbprint) | Thumbprint of Root CA for EKS OIDC, Valid until 2037 | `string` | `"9e99a48a9960b14926bb7f3b02e22da2b0ab7280"` | no |
-| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
-| [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
-| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details | `any` | `{}` | no |
-| [fargate\_subnets](#input\_fargate\_subnets) | A list of subnets to place fargate workers within (if different from subnets). | `list(string)` | `[]` | no |
-| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path. | `string` | `"/"` | no |
-| [kubeconfig\_api\_version](#input\_kubeconfig\_api\_version) | KubeConfig API version. Defaults to client.authentication.k8s.io/v1alpha1 | `string` | `"client.authentication.k8s.io/v1alpha1"` | no |
-| [kubeconfig\_aws\_authenticator\_additional\_args](#input\_kubeconfig\_aws\_authenticator\_additional\_args) | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"]. | `list(string)` | `[]` | no |
-| [kubeconfig\_aws\_authenticator\_command](#input\_kubeconfig\_aws\_authenticator\_command) | Command to use to fetch AWS EKS credentials. | `string` | `"aws-iam-authenticator"` | no |
-| [kubeconfig\_aws\_authenticator\_command\_args](#input\_kubeconfig\_aws\_authenticator\_command\_args) | Default arguments passed to the authenticator command. Defaults to [token -i $cluster\_name]. | `list(string)` | `[]` | no |
-| [kubeconfig\_aws\_authenticator\_env\_variables](#input\_kubeconfig\_aws\_authenticator\_env\_variables) | Environment variables that should be used when executing the authenticator. e.g. { AWS\_PROFILE = "eks"}. | `map(string)` | `{}` | no |
-| [kubeconfig\_file\_permission](#input\_kubeconfig\_file\_permission) | File permission of the Kubectl config file containing cluster configuration saved to `kubeconfig_output_path.` | `string` | `"0600"` | no |
-| [kubeconfig\_name](#input\_kubeconfig\_name) | Override the default name used for items kubeconfig. | `string` | `""` | no |
-| [kubeconfig\_output\_path](#input\_kubeconfig\_output\_path) | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`. | `string` | `"./"` | no |
-| [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file. | `bool` | `true` | no |
-| [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, cluster\_iam\_role\_name must be specified. | `bool` | `true` | no |
-| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers. | `bool` | `true` | no |
-| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no |
-| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. | `list(object({ rolearn = string, username = string, groups = list(string) }))` | `[]` | no |
-| [node\_groups](#input\_node\_groups) | Map of map of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no |
-| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no |
-| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider. | `list(string)` | `[]` | no |
-| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
-| [subnets](#input\_subnets) | A list of subnets to place the EKS cluster and workers within. | `list(string)` | `[]` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only. | `map(string)` | `{}` | no |
-| [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | `null` | no |
-| [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available. | `number` | `300` | no |
-| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
-| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
-| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"amazon"` | no |
-| [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"amazon"` | no |
-| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group. | `bool` | `false` | no |
-| [worker\_create\_initial\_lifecycle\_hooks](#input\_worker\_create\_initial\_lifecycle\_hooks) | Whether to create initial lifecycle hooks provided in worker groups. | `bool` | `false` | no |
-| [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | `bool` | `true` | no |
-| [worker\_groups](#input\_worker\_groups) | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
-| [worker\_groups\_launch\_template](#input\_worker\_groups\_launch\_template) | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
-| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | `number` | `1025` | no |
-| [workers\_additional\_policies](#input\_workers\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
-| [workers\_egress\_cidrs](#input\_workers\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic. | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Override default values for target groups. See workers\_group\_defaults\_defaults in local.tf for valid keys. | `any` | `{}` | no |
-| [workers\_role\_name](#input\_workers\_role\_name) | User defined workers role name. | `string` | `""` | no |
-| [write\_kubeconfig](#input\_write\_kubeconfig) | Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`. | `bool` | `true` | no |
+| [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
+| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
+| [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no |
+| [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no |
+| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[ "audit", "api", "authenticator" ]` | no |
+| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `list(object({ provider_key_arn = string, resources = list(string) }))` | `[]` | no |
+| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
+| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `true` | no |
+| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created | `map(any)` | `{}` | no |
+| [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false` | `string` | `""` | no |
+| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
+| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
+| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `string` | `true` | no |
+| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
+| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
+| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes `<major>.<minor>` version to use for the EKS cluster (i.e.: `1.21`) | `string` | `null` | no |
+| [create](#input\_create) | Controls if EKS resources should be created (affects nearly all resources) | `bool` | `true` | no |
+| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
+| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no |
+| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
+| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
+| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
+| [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
+| [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no |
+| [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
+| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created | `map(any)` | `{}` | no |
+| [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no |
+| [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no |
+| [node\_security\_group\_name](#input\_node\_security\_group\_name) | Name to use on node security group created | `string` | `null` | no |
+| [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no |
+| [node\_security\_group\_use\_name\_prefix](#input\_node\_security\_group\_use\_name\_prefix) | Determines whether node security group name (`node_security_group_name`) is used as a prefix | `string` | `true` | no |
+| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
+| [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no |
+| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs where the EKS cluster (ENIs) will be provisioned along with the nodes/node groups. Node groups can be deployed within a different set of subnet IDs from within the node group configuration | `list(string)` | `[]` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and its nodes will be provisioned | `string` | `null` | no |
## Outputs
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
-| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster. |
-| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster. |
-| [cluster\_endpoint](#output\_cluster\_endpoint) | The endpoint for your EKS Kubernetes API. |
-| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster. |
-| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster. |
-| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready. |
-| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster OIDC Issuer |
-| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ID attached to the EKS cluster. On 1.14 or later, this is the 'Additional security groups' in the EKS console. |
-| [cluster\_version](#output\_cluster\_version) | The Kubernetes server version for the EKS cluster. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [fargate\_iam\_role\_arn](#output\_fargate\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
-| [fargate\_iam\_role\_name](#output\_fargate\_iam\_role\_name) | IAM role name for EKS Fargate pods |
-| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
-| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| [kubeconfig](#output\_kubeconfig) | kubectl config file contents for this EKS cluster. Will block on cluster creation until the cluster is really ready. |
-| [kubeconfig\_filename](#output\_kubeconfig\_filename) | The filename of the generated kubectl config. Will block on cluster creation until the cluster is really ready. |
-| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by var.node\_groups keys |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
-| [security\_group\_rule\_cluster\_https\_worker\_ingress](#output\_security\_group\_rule\_cluster\_https\_worker\_ingress) | Security group rule responsible for allowing pods to communicate with the EKS cluster API. |
-| [worker\_iam\_instance\_profile\_arns](#output\_worker\_iam\_instance\_profile\_arns) | default IAM instance profile ARN for EKS worker groups |
-| [worker\_iam\_instance\_profile\_names](#output\_worker\_iam\_instance\_profile\_names) | default IAM instance profile name for EKS worker groups |
-| [worker\_iam\_role\_arn](#output\_worker\_iam\_role\_arn) | default IAM role ARN for EKS worker groups |
-| [worker\_iam\_role\_name](#output\_worker\_iam\_role\_name) | default IAM role name for EKS worker groups |
-| [worker\_security\_group\_id](#output\_worker\_security\_group\_id) | Security group ID attached to the EKS workers. |
-| [workers\_asg\_arns](#output\_workers\_asg\_arns) | IDs of the autoscaling groups containing workers. |
-| [workers\_asg\_names](#output\_workers\_asg\_names) | Names of the autoscaling groups containing workers. |
-| [workers\_default\_ami\_id](#output\_workers\_default\_ami\_id) | ID of the default worker group AMI |
-| [workers\_default\_ami\_id\_windows](#output\_workers\_default\_ami\_id\_windows) | ID of the default Windows worker group AMI |
-| [workers\_launch\_template\_arns](#output\_workers\_launch\_template\_arns) | ARNs of the worker launch templates. |
-| [workers\_launch\_template\_ids](#output\_workers\_launch\_template\_ids) | IDs of the worker launch templates. |
-| [workers\_launch\_template\_latest\_versions](#output\_workers\_launch\_template\_latest\_versions) | Latest versions of the worker launch templates. |
-| [workers\_user\_data](#output\_workers\_user\_data) | User data of worker groups |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+
+## License
+
+Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details.
diff --git a/UPGRADE-18.0.md b/UPGRADE-18.0.md
new file mode 100644
index 0000000000..0c6d56dde6
--- /dev/null
+++ b/UPGRADE-18.0.md
@@ -0,0 +1,550 @@
+# Upgrade from v17.x to v18.x
+
+Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce.
+
+## List of backwards incompatible changes
+
+- Launch configuration support has been removed and only launch template is supported going forward. AWS is no longer adding new features back into launch configuration and their docs state [`We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. We provide information about launch configurations for customers who have not yet migrated from launch configurations to launch templates.`](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
+- Support for managing the aws-auth configmap has been removed. This change also removes the dependency on the Kubernetes Terraform provider, the local dependency on aws-iam-authenticator for users, as well as the reliance on the forked http provider to wait and poll on cluster creation. To aid users in this change, an output variable `aws_auth_configmap_yaml` has been provided which renders the aws-auth configmap necessary to support at least the IAM roles used by the module (additional mapRoles/mapUsers definitions to be provided by users); one way to consume this output is sketched after this list
+- Support for managing kubeconfig and its associated `local_file` resources has been removed; users can run the awscli-provided `aws eks update-kubeconfig --name <cluster_name>` to update their local kubeconfig as necessary
+- The terminology used in the module has been modified to reflect that used by the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html).
+ - [AWS EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), `eks_managed_node_groups`, was previously referred to as simply node group, `node_groups`
+  - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html), `self_managed_node_groups`, was previously referred to as worker group, `worker_groups`
+ - [AWS Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html), `fargate_profiles`, remains unchanged in terms of naming and terminology
+- The three different node group types supported by AWS and the module have been refactored into standalone sub-modules that are both used by the root `eks` module and available for individual, standalone consumption if desired.
+ - The previous `node_groups` sub-module is now named `eks-managed-node-group` and provisions a single AWS EKS Managed Node Group per sub-module definition (previous version utilized `for_each` to create 0 or more node groups)
+ - Additional changes for the `eks-managed-node-group` sub-module over the previous `node_groups` module include:
+ - Variable name changes defined in section `Variable and output changes` below
+ - Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
+ - Support for nearly full control of the security group created, or provide the ID of an existing security group, has been added
+ - User data has been revamped and all user data logic moved to the `_user_data` internal sub-module; the local `userdata.sh.tpl` has been removed entirely
+ - The previous `fargate` sub-module is now named `fargate-profile` and provisions a single AWS EKS Fargate Profile per sub-module definition (previous version utilized `for_each` to create 0 or more profiles)
+ - Additional changes for the `fargate-profile` sub-module over the previous `fargate` module include:
+ - Variable name changes defined in section `Variable and output changes` below
+ - Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
+ - Similar to the `eks_managed_node_group_defaults` and `self_managed_node_group_defaults`, a `fargate_profile_defaults` has been provided to allow users to control the default configurations for the Fargate profiles created
+ - A sub-module for `self-managed-node-group` has been created and provisions a single self managed node group (autoscaling group) per sub-module definition
+ - Additional changes for the `self-managed-node-group` sub-module over the previous `node_groups` variable include:
+ - The underlying autoscaling group and launch template have been updated to more closely match that of the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module and the features it offers
+ - The previous iteration used a count over a list of node group definitions which was prone to disruptive updates; this is now replaced with a map/for_each to align with that of the EKS managed node group and Fargate profile behaviors/style
+- The user data configuration supported across the module has been completely revamped. A new `_user_data` internal sub-module has been created to consolidate all user data configuration in one location, which provides better support for testability (via the [`examples/user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) example). The new sub-module supports nearly all possible combinations, including the ability to let users provide their own user data template which will be rendered by the module. See the `examples/user_data` example project for the full range of example configurations; more details on the design logic can be found in the [`modules/_user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data) directory.
+
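+One way to consume the new `aws_auth_configmap_yaml` output is sketched below. This is a minimal illustration rather than a prescribed workflow; it assumes the `hashicorp/local` provider is available and that the module instance is named `eks` (the resource and file names are purely illustrative):
+
+```hcl
+# Write the module-rendered aws-auth manifest to disk so it can be applied
+# out-of-band once the cluster is reachable
+resource "local_file" "aws_auth" {
+  content         = module.eks.aws_auth_configmap_yaml
+  filename        = "${path.module}/aws-auth.yaml"
+  file_permission = "0600"
+}
+```
+
+After updating the local kubeconfig with `aws eks update-kubeconfig --name <cluster_name>`, the rendered manifest can be applied with `kubectl apply -f aws-auth.yaml`.
+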
+## Additional changes
+
+### Added
+
+- Support for AWS EKS Addons has been added (see the configuration sketch after this list)
+- Support for AWS EKS Cluster Identity Provider Configuration has been added
+- AWS Terraform provider minimum required version has been updated to 3.64 to support the changes made and additional resources supported
+- An example `user_data` project has been added to aid in demonstrating, testing, and validating the various methods of configuring user data with the `_user_data` sub-module as well as the root `eks` module
+- Template for rendering the aws-auth configmap output - `aws_auth_cm.tpl`
+- Template for Bottlerocket OS user data bootstrapping - `bottlerocket_user_data.tpl`
+
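+A minimal sketch of enabling both cluster addons and a cluster identity provider on the root module (addon names follow standard EKS addon naming; the remaining module arguments and all values shown are illustrative):
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 18.0"
+
+  # ... cluster_name, cluster_version, vpc_id, subnet_ids, etc.
+
+  cluster_addons = {
+    coredns = {
+      resolve_conflicts = "OVERWRITE"
+    }
+    kube-proxy = {}
+    vpc-cni    = {}
+  }
+
+  cluster_identity_providers = {
+    sts = {
+      client_id = "sts.amazonaws.com"
+    }
+  }
+}
+```
+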
+### Modified
+
+- The previous `fargate` example has been renamed to `fargate_profile`
+- The previous `irsa` and `instance_refresh` examples have been merged into one example `irsa_autoscale_refresh`
+- The previous `managed_node_groups` example has been renamed to `eks_managed_node_group`
+- The previously hardcoded EKS OIDC root CA thumbprint value and variable has been replaced with a `tls_certificate` data source that refers to the cluster OIDC issuer URL. Thumbprint values should remain unchanged however; a sketch of the new pattern appears after this list
+- Individual cluster security group resources have been replaced with a single security group resource that takes a map of rules as input. The default ingress/egress rules have had their scope reduced in order to provide the bare minimum of access to permit successful cluster creation and allow users to opt in to any additional network access as needed for a better security posture. This means the `0.0.0.0/0` egress rule has been removed; TCP/443 and TCP/10250 egress rules to the node group security group are used instead
+- The Linux/bash user data template has been updated to include the bare minimum necessary for bootstrapping AWS EKS Optimized AMI derivative nodes with provisions for providing additional user data and configurations; was named `userdata.sh.tpl` and is now named `linux_user_data.tpl`
+- The Windows user data template has been renamed from `userdata_windows.tpl` to `windows_user_data.tpl`
+
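+A rough sketch of the pattern now used to derive the thumbprint (the module wires this up internally via the `hashicorp/tls` provider; the standalone resource names below are for illustration only):
+
+```hcl
+# Pull the certificate chain for the cluster's OIDC issuer and use its
+# SHA-1 fingerprint as the thumbprint for the IAM OIDC provider
+data "tls_certificate" "this" {
+  url = aws_eks_cluster.this.identity[0].oidc[0].issuer
+}
+
+resource "aws_iam_openid_connect_provider" "oidc_provider" {
+  client_id_list  = ["sts.amazonaws.com"]
+  thumbprint_list = [data.tls_certificate.this.certificates[0].sha1_fingerprint]
+  url             = aws_eks_cluster.this.identity[0].oidc[0].issuer
+}
+```
+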
+### Removed
+
+- Miscellaneous documents on how to configure Kubernetes cluster internals have been removed. Documentation on configuring the AWS EKS cluster and the supporting infrastructure resources provided by the module remains in scope, while cluster-internal configuration is out of scope for this project
+- The previous `bottlerocket` example has been removed in favor of demonstrating the use and configuration of Bottlerocket nodes via the respective `eks_managed_node_group` and `self_managed_node_group` examples
+- The previous `launch_template` and `launch_templates_with_managed_node_groups` examples have been removed; only launch templates are now supported (default) and launch configuration support has been removed
+- The previous `secrets_encryption` example has been removed; the functionality has been demonstrated in several of the new examples rendering this standalone example redundant
+- The additional, custom IAM role policy for the cluster role has been removed. The permissions are now either provided by the attached AWS managed policies or are no longer required
+- The `kubeconfig.tpl` template; kubeconfig management is no longer supported under this module
+- The HTTP Terraform provider (forked copy) dependency has been removed
+
+### Variable and output changes
+
+1. Removed variables:
+
+   - `cluster_create_timeout`, `cluster_update_timeout`, and `cluster_delete_timeout` have been replaced with `cluster_timeouts` (see the example at the end of this section)
+ - `kubeconfig_name`
+ - `kubeconfig_output_path`
+ - `kubeconfig_file_permission`
+ - `kubeconfig_api_version`
+ - `kubeconfig_aws_authenticator_command`
+ - `kubeconfig_aws_authenticator_command_args`
+ - `kubeconfig_aws_authenticator_additional_args`
+ - `kubeconfig_aws_authenticator_env_variables`
+ - `write_kubeconfig`
+ - `default_platform`
+ - `manage_aws_auth`
+ - `aws_auth_additional_labels`
+ - `map_accounts`
+ - `map_roles`
+ - `map_users`
+ - `fargate_subnets`
+ - `worker_groups_launch_template`
+ - `worker_security_group_id`
+ - `worker_ami_name_filter`
+ - `worker_ami_name_filter_windows`
+ - `worker_ami_owner_id`
+ - `worker_ami_owner_id_windows`
+ - `worker_additional_security_group_ids`
+ - `worker_sg_ingress_from_port`
+ - `workers_additional_policies`
+ - `worker_create_security_group`
+ - `worker_create_initial_lifecycle_hooks`
+ - `worker_create_cluster_primary_security_group_rules`
+ - `cluster_create_endpoint_private_access_sg_rule`
+ - `cluster_endpoint_private_access_cidrs`
+ - `cluster_endpoint_private_access_sg`
+ - `manage_worker_iam_resources`
+ - `workers_role_name`
+ - `attach_worker_cni_policy`
+ - `eks_oidc_root_ca_thumbprint`
+ - `create_fargate_pod_execution_role`
+ - `fargate_pod_execution_role_name`
+ - `cluster_egress_cidrs`
+ - `workers_egress_cidrs`
+ - `wait_for_cluster_timeout`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `default_iam_role_arn`
+ - `workers_group_defaults`
+ - `worker_security_group_id`
+ - `node_groups_defaults`
+ - `node_groups`
+ - `ebs_optimized_not_supported`
+ - Fargate profile sub-module (was `fargate`)
+ - `create_eks` and `create_fargate_pod_execution_role` have been replaced with simply `create`
+
+2. Renamed variables:
+
+ - `create_eks` -> `create`
+ - `subnets` -> `subnet_ids`
+ - `cluster_create_security_group` -> `create_cluster_security_group`
+ - `cluster_log_retention_in_days` -> `cloudwatch_log_group_retention_in_days`
+ - `cluster_log_kms_key_id` -> `cloudwatch_log_group_kms_key_id`
+ - `manage_cluster_iam_resources` -> `create_iam_role`
+ - `cluster_iam_role_name` -> `iam_role_name`
+ - `permissions_boundary` -> `iam_role_permissions_boundary`
+ - `iam_path` -> `iam_role_path`
+ - `pre_userdata` -> `pre_bootstrap_user_data`
+ - `additional_userdata` -> `post_bootstrap_user_data`
+ - `worker_groups` -> `self_managed_node_groups`
+ - `workers_group_defaults` -> `self_managed_node_group_defaults`
+ - `node_groups` -> `eks_managed_node_groups`
+ - `node_groups_defaults` -> `eks_managed_node_group_defaults`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `create_eks` -> `create`
+ - `worker_additional_security_group_ids` -> `vpc_security_group_ids`
+ - Fargate profile sub-module
+ - `fargate_pod_execution_role_name` -> `name`
+ - `create_fargate_pod_execution_role` -> `create_iam_role`
+ - `subnets` -> `subnet_ids`
+ - `iam_path` -> `iam_role_path`
+ - `permissions_boundary` -> `iam_role_permissions_boundary`
+
+3. Added variables:
+
+ - `cluster_additional_security_group_ids` added to allow users to add additional security groups to the cluster as needed
+ - `cluster_security_group_name`
+ - `cluster_security_group_use_name_prefix` added to allow users to use either the name as specified or default to using the name specified as a prefix
+ - `cluster_security_group_description`
+ - `cluster_security_group_additional_rules`
+ - `cluster_security_group_tags`
+ - `create_cloudwatch_log_group` added in place of the logic that checked if any cluster log types were enabled to allow users to opt in as they see fit
+   - `create_node_security_group` added to create a single security group that connects the node groups and the cluster in one central location
+ - `node_security_group_id`
+ - `node_security_group_name`
+ - `node_security_group_use_name_prefix`
+ - `node_security_group_description`
+ - `node_security_group_additional_rules`
+ - `node_security_group_tags`
+ - `iam_role_arn`
+ - `iam_role_use_name_prefix`
+ - `iam_role_description`
+ - `iam_role_additional_policies`
+ - `iam_role_tags`
+ - `cluster_addons`
+ - `cluster_identity_providers`
+ - `fargate_profile_defaults`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `platform`
+ - `enable_bootstrap_user_data`
+ - `pre_bootstrap_user_data`
+ - `post_bootstrap_user_data`
+ - `bootstrap_extra_args`
+ - `user_data_template_path`
+ - `create_launch_template`
+ - `launch_template_name`
+ - `launch_template_use_name_prefix`
+ - `description`
+ - `ebs_optimized`
+ - `ami_id`
+ - `key_name`
+ - `launch_template_default_version`
+ - `update_launch_template_default_version`
+ - `disable_api_termination`
+ - `kernel_id`
+ - `ram_disk_id`
+ - `block_device_mappings`
+ - `capacity_reservation_specification`
+ - `cpu_options`
+ - `credit_specification`
+ - `elastic_gpu_specifications`
+ - `elastic_inference_accelerator`
+ - `enclave_options`
+ - `instance_market_options`
+ - `license_specifications`
+ - `metadata_options`
+ - `enable_monitoring`
+ - `network_interfaces`
+ - `placement`
+ - `min_size`
+ - `max_size`
+ - `desired_size`
+ - `use_name_prefix`
+ - `ami_type`
+ - `ami_release_version`
+ - `capacity_type`
+ - `disk_size`
+ - `force_update_version`
+ - `instance_types`
+ - `labels`
+ - `cluster_version`
+ - `launch_template_version`
+ - `remote_access`
+ - `taints`
+ - `update_config`
+ - `timeouts`
+ - `create_security_group`
+ - `security_group_name`
+ - `security_group_use_name_prefix`
+ - `security_group_description`
+ - `vpc_id`
+ - `security_group_rules`
+ - `cluster_security_group_id`
+ - `security_group_tags`
+ - `create_iam_role`
+ - `iam_role_arn`
+ - `iam_role_name`
+ - `iam_role_use_name_prefix`
+ - `iam_role_path`
+ - `iam_role_description`
+ - `iam_role_permissions_boundary`
+ - `iam_role_additional_policies`
+ - `iam_role_tags`
+ - Fargate profile sub-module (was `fargate`)
+     - `iam_role_arn` (used when `create_iam_role` is `false` to bring your own externally created role)
+ - `iam_role_name`
+ - `iam_role_use_name_prefix`
+ - `iam_role_description`
+ - `iam_role_additional_policies`
+ - `iam_role_tags`
+ - `selectors`
+ - `timeouts`
+
+4. Removed outputs:
+
+ - `cluster_version`
+ - `kubeconfig`
+ - `kubeconfig_filename`
+ - `workers_asg_arns`
+ - `workers_asg_names`
+ - `workers_user_data`
+ - `workers_default_ami_id`
+ - `workers_default_ami_id_windows`
+ - `workers_launch_template_ids`
+ - `workers_launch_template_arns`
+ - `workers_launch_template_latest_versions`
+ - `worker_security_group_id`
+ - `worker_iam_instance_profile_arns`
+ - `worker_iam_instance_profile_names`
+ - `worker_iam_role_name`
+ - `worker_iam_role_arn`
+ - `fargate_profile_ids`
+ - `fargate_profile_arns`
+ - `fargate_iam_role_name`
+ - `fargate_iam_role_arn`
+ - `node_groups`
+ - `security_group_rule_cluster_https_worker_ingress`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `node_groups`
+ - `aws_auth_roles`
+ - Fargate profile sub-module (was `fargate`)
+ - `aws_auth_roles`
+
+5. Renamed outputs:
+
+ - `config_map_aws_auth` -> `aws_auth_configmap_yaml`
+ - Fargate profile sub-module (was `fargate`)
+ - `fargate_profile_ids` -> `fargate_profile_id`
+ - `fargate_profile_arns` -> `fargate_profile_arn`
+
+6. Added outputs:
+
+ - `cluster_platform_version`
+ - `cluster_status`
+ - `cluster_security_group_arn`
+ - `cluster_security_group_id`
+ - `node_security_group_arn`
+ - `node_security_group_id`
+ - `cluster_iam_role_unique_id`
+ - `cluster_addons`
+ - `cluster_identity_providers`
+ - `fargate_profiles`
+ - `eks_managed_node_groups`
+ - `self_managed_node_groups`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `launch_template_id`
+ - `launch_template_arn`
+ - `launch_template_latest_version`
+ - `node_group_arn`
+ - `node_group_id`
+ - `node_group_resources`
+ - `node_group_status`
+ - `security_group_arn`
+ - `security_group_id`
+ - `iam_role_name`
+ - `iam_role_arn`
+ - `iam_role_unique_id`
+ - Fargate profile sub-module (was `fargate`)
+ - `iam_role_unique_id`
+ - `fargate_profile_status`
+
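+To illustrate the timeout consolidation noted under the removed variables above, the three former timeout variables collapse into a single `cluster_timeouts` map (the values shown are arbitrary):
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 18.0"
+
+  # ... other arguments omitted
+
+  # Replaces cluster_create_timeout, cluster_update_timeout, and cluster_delete_timeout
+  cluster_timeouts = {
+    create = "30m"
+    update = "60m"
+    delete = "15m"
+  }
+}
+```
+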
+## Upgrade Migrations
+
+### Before 17.x Example
+
+```hcl
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 17.0"
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnets = module.vpc.private_subnets
+
+ # Managed Node Groups
+ node_groups_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ }
+
+ node_groups = {
+ node_group = {
+ min_capacity = 1
+ max_capacity = 10
+ desired_capacity = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+
+ update_config = {
+ max_unavailable_percentage = 50
+ }
+
+ k8s_labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = [
+ {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ ]
+
+ additional_tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ # Worker groups
+ worker_additional_security_group_ids = [aws_security_group.additional.id]
+
+ worker_groups_launch_template = [
+ {
+ name = "worker-group"
+ override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
+ spot_instance_pools = 4
+ asg_max_size = 5
+ asg_desired_capacity = 2
+ kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
+ public_ip = true
+ },
+ ]
+
+ # Fargate
+ fargate_profiles = {
+ default = {
+ name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
+
+ tags = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+```
+
+### After 18.x Example
+
+```hcl
+module "cluster_after" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.0"
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ }
+
+ eks_managed_node_groups = {
+ node_group = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+
+ update_config = {
+ max_unavailable_percentage = 50
+ }
+
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = [
+ {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ ]
+
+ tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ self_managed_node_group_defaults = {
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ }
+
+ self_managed_node_groups = {
+ worker_group = {
+ name = "worker-group"
+
+ min_size = 1
+ max_size = 5
+ desired_size = 2
+ instance_type = "m4.large"
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ block_device_mappings = {
+ xvda = {
+ device_name = "/dev/xvda"
+ ebs = {
+ delete_on_termination = true
+ encrypted = false
+ volume_size = 100
+ volume_type = "gp2"
+ }
+
+ }
+ }
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ spot_instance_pools = 4
+ }
+
+ override = [
+ { instance_type = "m5.large" },
+ { instance_type = "m5a.large" },
+ { instance_type = "m5d.large" },
+ { instance_type = "m5ad.large" },
+ ]
+ }
+ }
+ }
+
+ # Fargate
+ fargate_profiles = {
+ default = {
+ name = "default"
+
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
+
+ tags = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+```
diff --git a/aws_auth.tf b/aws_auth.tf
deleted file mode 100644
index 6de697607f..0000000000
--- a/aws_auth.tf
+++ /dev/null
@@ -1,92 +0,0 @@
-locals {
- auth_launch_template_worker_roles = [
- for index in range(0, var.create_eks ? local.worker_group_launch_template_count : 0) : {
- worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
- coalescelist(
- aws_iam_instance_profile.workers_launch_template.*.role,
- data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name,
- [""]
- ),
- index
- )}"
- platform = lookup(
- var.worker_groups_launch_template[index],
- "platform",
- local.workers_group_defaults["platform"]
- )
- }
- ]
-
- auth_worker_roles = [
- for index in range(0, var.create_eks ? local.worker_group_launch_configuration_count : 0) : {
- worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
- coalescelist(
- aws_iam_instance_profile.workers.*.role,
- data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
- [""]
- ),
- index,
- )}"
- platform = lookup(
- var.worker_groups[index],
- "platform",
- local.workers_group_defaults["platform"]
- )
- }
- ]
-
- # Convert to format needed by aws-auth ConfigMap
- configmap_roles = [
- for role in concat(
- local.auth_launch_template_worker_roles,
- local.auth_worker_roles,
- module.node_groups.aws_auth_roles,
- module.fargate.aws_auth_roles,
- ) :
- {
- # Work around https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/153
- # Strip the leading slash off so that Terraform doesn't think it's a regex
- rolearn = replace(role["worker_role_arn"], replace(var.iam_path, "/^//", ""), "")
- username = role["platform"] == "fargate" ? "system:node:{{SessionName}}" : "system:node:{{EC2PrivateDNSName}}"
- groups = tolist(concat(
- [
- "system:bootstrappers",
- "system:nodes",
- ],
- role["platform"] == "windows" ? ["eks:kube-proxy-windows"] : [],
- role["platform"] == "fargate" ? ["system:node-proxier"] : [],
- ))
- }
- ]
-}
-
-resource "kubernetes_config_map" "aws_auth" {
- count = var.create_eks && var.manage_aws_auth ? 1 : 0
-
- metadata {
- name = "aws-auth"
- namespace = "kube-system"
- labels = merge(
- {
- "app.kubernetes.io/managed-by" = "Terraform"
- # / are replaced by . because label validator fails in this lib
- # https://github.com/kubernetes/apimachinery/blob/1bdd76d09076d4dc0362456e59c8f551f5f24a72/pkg/util/validation/validation.go#L166
- "terraform.io/module" = "terraform-aws-modules.eks.aws"
- },
- var.aws_auth_additional_labels
- )
- }
-
- data = {
- mapRoles = yamlencode(
- distinct(concat(
- local.configmap_roles,
- var.map_roles,
- ))
- )
- mapUsers = yamlencode(var.map_users)
- mapAccounts = yamlencode(var.map_accounts)
- }
-
- depends_on = [data.http.wait_for_cluster[0]]
-}
diff --git a/data.tf b/data.tf
deleted file mode 100644
index fe3b6e33c7..0000000000
--- a/data.tf
+++ /dev/null
@@ -1,104 +0,0 @@
-data "aws_partition" "current" {}
-
-data "aws_caller_identity" "current" {}
-
-data "aws_iam_policy_document" "workers_assume_role_policy" {
- statement {
- sid = "EKSWorkerAssumeRole"
-
- actions = [
- "sts:AssumeRole",
- ]
-
- principals {
- type = "Service"
- identifiers = [local.ec2_principal]
- }
- }
-}
-
-data "aws_ami" "eks_worker" {
- count = contains(local.worker_groups_platforms, "linux") ? 1 : 0
-
- filter {
- name = "name"
- values = [local.worker_ami_name_filter]
- }
-
- most_recent = true
-
- owners = [var.worker_ami_owner_id]
-}
-
-data "aws_ami" "eks_worker_windows" {
- count = contains(local.worker_groups_platforms, "windows") ? 1 : 0
-
- filter {
- name = "name"
- values = [local.worker_ami_name_filter_windows]
- }
-
- filter {
- name = "platform"
- values = ["windows"]
- }
-
- most_recent = true
-
- owners = [var.worker_ami_owner_id_windows]
-}
-
-data "aws_iam_policy_document" "cluster_assume_role_policy" {
- statement {
- sid = "EKSClusterAssumeRole"
-
- actions = [
- "sts:AssumeRole",
- ]
-
- principals {
- type = "Service"
- identifiers = ["eks.amazonaws.com"]
- }
- }
-}
-
-data "aws_iam_role" "custom_cluster_iam_role" {
- count = var.manage_cluster_iam_resources ? 0 : 1
-
- name = var.cluster_iam_role_name
-}
-
-data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
- count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_configuration_count
-
- name = lookup(
- var.worker_groups[count.index],
- "iam_instance_profile_name",
- local.workers_group_defaults["iam_instance_profile_name"],
- )
-}
-
-data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" {
- count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count
-
- name = lookup(
- var.worker_groups_launch_template[count.index],
- "iam_instance_profile_name",
- local.workers_group_defaults["iam_instance_profile_name"],
- )
-}
-
-data "http" "wait_for_cluster" {
- count = var.create_eks && var.manage_aws_auth ? 1 : 0
-
- url = format("%s/healthz", aws_eks_cluster.this[0].endpoint)
- ca_certificate = base64decode(local.cluster_auth_base64)
- timeout = var.wait_for_cluster_timeout
-
- depends_on = [
- aws_eks_cluster.this,
- aws_security_group_rule.cluster_private_access_sg_source,
- aws_security_group_rule.cluster_private_access_cidrs_source,
- ]
-}
diff --git a/docs/autoscaling.md b/docs/autoscaling.md
deleted file mode 100644
index 3c1aa5ee93..0000000000
--- a/docs/autoscaling.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# Autoscaling
-
-To enable worker node autoscaling you will need to do a few things:
-
-- Add the [required tags](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup) to the worker group
-- Install the cluster-autoscaler
-- Give the cluster-autoscaler access via an IAM policy
-
-It's probably easiest to follow the example in [examples/irsa](../examples/irsa), this will install the cluster-autoscaler using [Helm](https://helm.sh/) and use IRSA to attach a policy.
-
-If you don't want to use IRSA then you will need to attach the IAM policy to the worker node IAM role or add AWS credentials to the cluster-autoscaler environment variables. Here is some example terraform code for the policy:
-
-```hcl
-resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
- policy_arn = aws_iam_policy.worker_autoscaling.arn
- role = module.my_cluster.worker_iam_role_name
-}
-
-resource "aws_iam_policy" "worker_autoscaling" {
- name_prefix = "eks-worker-autoscaling-${module.my_cluster.cluster_id}"
- description = "EKS worker node autoscaling policy for cluster ${module.my_cluster.cluster_id}"
- policy = data.aws_iam_policy_document.worker_autoscaling.json
- path = var.iam_path
- tags = var.tags
-}
-
-data "aws_iam_policy_document" "worker_autoscaling" {
- statement {
- sid = "eksWorkerAutoscalingAll"
- effect = "Allow"
-
- actions = [
- "autoscaling:DescribeAutoScalingGroups",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeLaunchConfigurations",
- "autoscaling:DescribeTags",
- "ec2:DescribeLaunchTemplateVersions",
- ]
-
- resources = ["*"]
- }
-
- statement {
- sid = "eksWorkerAutoscalingOwn"
- effect = "Allow"
-
- actions = [
- "autoscaling:SetDesiredCapacity",
- "autoscaling:TerminateInstanceInAutoScalingGroup",
- "autoscaling:UpdateAutoScalingGroup",
- ]
-
- resources = ["*"]
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.my_cluster.cluster_id}"
- values = ["owned"]
- }
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
- values = ["true"]
- }
- }
-}
-```
-
-And example values for the [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler):
-
-```yaml
-rbac:
- create: true
-
-cloudProvider: aws
-awsRegion: YOUR_AWS_REGION
-
-autoDiscovery:
- clusterName: YOUR_CLUSTER_NAME
- enabled: true
-
-image:
- repository: us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler
- tag: v1.16.5
-```
-
-To install the chart, simply run helm with the `--values` option:
-
-```
-helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
-```
-
-## Notes
-
-There is a variable `asg_desired_capacity` given in the `local.tf` file, currently it can be used to change the desired worker(s) capacity in the autoscaling group but currently it is being ignored in terraform to reduce the [complexities](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/510#issuecomment-531700442) and the feature of scaling up and down the cluster nodes is being handled by the cluster autoscaler.
-
-The cluster autoscaler major and minor versions must match your cluster. For example if you are running a 1.16 EKS cluster set `image.tag=v1.16.5`. Search through their [releases page](https://github.com/kubernetes/autoscaler/releases) for valid version numbers.
diff --git a/docs/enable-docker-bridge-network.md b/docs/enable-docker-bridge-network.md
deleted file mode 100644
index f6eb8ee11e..0000000000
--- a/docs/enable-docker-bridge-network.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Enable Docker Bridge Network
-
-The latest versions of the AWS EKS-optimized AMI disable the docker bridge network by default. To enable it, add the `bootstrap_extra_args` parameter to your worker group template.
-
-```hcl
-locals {
- worker_groups = [
- {
- # Other parameters omitted for brevity
- bootstrap_extra_args = "--enable-docker-bridge true"
- }
- ]
-}
-```
-
-Examples of when this would be necessary are:
-
-- You are running Continuous Integration in K8s, and building docker images by either mounting the docker sock as a volume or using docker in docker. Without the bridge enabled, internal routing from the inner container can't reach the outside world.
-
-## See More
-
-- [Docker in Docker no longer works without docker0 bridge](https://github.com/awslabs/amazon-eks-ami/issues/183)
-- [Add enable-docker-bridge bootstrap argument](https://github.com/awslabs/amazon-eks-ami/pull/187)
diff --git a/docs/faq.md b/docs/faq.md
deleted file mode 100644
index d1a5d0f4bc..0000000000
--- a/docs/faq.md
+++ /dev/null
@@ -1,235 +0,0 @@
-# Frequently Asked Questions
-
-## How do I customize X on the worker group's settings?
-
-All the options that can be customized for worker groups are listed in [local.tf](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/local.tf) under `workers_group_defaults_defaults`.
-
-Please open Issues or PRs if you think something is missing.
-
-## Why are nodes not being registered?
-
-### Networking
-
-Often caused by a networking or endpoint configuration issue.
-
-At least one of the cluster's public or private endpoints must be enabled in order to access the cluster. If you require a public endpoint, setting up both (public and private) and restricting public access via `cluster_endpoint_public_access_cidrs` is recommended. More about communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
-
-Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:
-
-- Nodes in private subnets: via a NAT gateway or NAT instance, which must be added along with appropriate routing rules.
-- Nodes in public subnets: assign public IPs to nodes. Set `public_ip = true` in the `worker_groups` list on this module.
-
-> Important:
-> If you apply only the public endpoint and set up `cluster_endpoint_public_access_cidrs` to restrict access, remember that EKS nodes also use the public endpoint, so you must allow access to it from the nodes. Otherwise your nodes will not work correctly.
-
-The cluster's private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node calls to the endpoint then stay within the VPC.
-
-When the private endpoint is enabled, ensure that VPC DNS resolution and hostnames are also enabled:
-
-- If managing the VPC with Terraform: set `enable_dns_hostnames = true` and `enable_dns_support = true` on the `aws_vpc` resource. The [`terraform-aws-modules/vpc/aws`](https://github.com/terraform-aws-modules/terraform-aws-vpc/) community module also exposes these variables (see the sketch after this list).
-- Otherwise refer to the [AWS VPC docs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-updating) and [AWS EKS Cluster Endpoint Access docs](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for more information.
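-
-A minimal sketch of the Terraform-managed case, assuming the community VPC module is used (other arguments omitted):
-
-```hcl
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 3.0"
-
-  name = "my-vpc"
-  cidr = "10.0.0.0/16"
-
-  # Required so nodes can resolve the private cluster endpoint
-  enable_dns_support   = true
-  enable_dns_hostnames = true
-}
-```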
-
-Nodes need to be able to connect to other AWS services and to pull container images from repositories. If for some reason you cannot enable public internet access for nodes, you can add VPC endpoints for the relevant services: EC2 API, ECR API, ECR DKR and S3.
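-
-For example, an interface endpoint for the ECR API and a gateway endpoint for S3 could be added roughly as follows (the region, subnets and security group are placeholders):
-
-```hcl
-resource "aws_vpc_endpoint" "ecr_api" {
-  vpc_id              = module.vpc.vpc_id
-  service_name        = "com.amazonaws.eu-west-1.ecr.api"
-  vpc_endpoint_type   = "Interface"
-  private_dns_enabled = true
-  subnet_ids          = module.vpc.private_subnets
-  security_group_ids  = [aws_security_group.vpc_endpoints.id] # hypothetical security group
-}
-
-resource "aws_vpc_endpoint" "s3" {
-  vpc_id            = module.vpc.vpc_id
-  service_name      = "com.amazonaws.eu-west-1.s3"
-  vpc_endpoint_type = "Gateway"
-  route_table_ids   = module.vpc.private_route_table_ids
-}
-```
-
-Repeat the interface endpoint pattern for the EC2 and ECR DKR services as needed.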
-
-### `aws-auth` ConfigMap not present
-
-The module configures the `aws-auth` ConfigMap. This is used by the cluster to grant IAM users and roles RBAC permissions in the cluster, like the IAM role assigned to the worker nodes.
-
-Confirm that the ConfigMap matches the contents of the `config_map_aws_auth` module output. You can retrieve the live config by running the following in your terraform folder:
-`kubectl --kubeconfig=kubeconfig_* -n kube-system get cm aws-auth -o yaml`
-
-If the ConfigMap is missing or the contents are incorrect then ensure that you have properly configured the kubernetes provider block by referring to [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) and run `terraform apply` again.
-
-Users with `manage_aws_auth = false` will need to apply the ConfigMap themselves.
-
-## How can I work with the cluster if I disable the public endpoint?
-
-You have to interact with the cluster from within the VPC that it's associated with, from an instance that's allowed access via the cluster's security group.
-
-Creating a new cluster with the public endpoint disabled is harder to achieve. You will either want to pass in a pre-configured cluster security group or apply the `aws-auth` configmap in a separate action.
-
-## ConfigMap "aws-auth" already exists
-
-This can happen if the kubernetes provider has not been configured for use with the cluster. The kubernetes provider will then be accessing your default kubernetes cluster, which already has the map defined. Read [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details on how to configure the kubernetes provider correctly.
-
-Users upgrading from modules before 8.0.0 will need to import their existing aws-auth ConfigMap in to the terraform state. See 8.0.0's [CHANGELOG](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/v8.0.0/CHANGELOG.md#v800---2019-12-11) for more details.
-
-## `Error: Get http://localhost/api/v1/namespaces/kube-system/configmaps/aws-auth: dial tcp 127.0.0.1:80: connect: connection refused`
-
-Usually this means that the kubernetes provider has not been configured and there is no default `~/.kube/config`, so the kubernetes provider attempts to talk to localhost.
-
-You need to configure the kubernetes provider correctly. See [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details.
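-
-A minimal provider wiring, assuming your module instance is named `eks` (this mirrors the usage example):
-
-```hcl
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-}
-```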
-
-## How can I stop Terraform from removing the EKS tags from my VPC and subnets?
-
-You need to add the tags to the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
-
-An alternative is to use the aws provider's [`ignore_tags` configuration block](https://www.terraform.io/docs/providers/aws/#ignore\_tags-configuration-block). However, this can also cause terraform to display a perpetual difference.
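-
-A sketch of the provider-level approach (the key prefix shown is an assumption; adjust it to the tags you manage outside Terraform):
-
-```hcl
-provider "aws" {
-  region = "eu-west-1"
-
-  # Tell the provider to ignore externally managed Kubernetes/EKS tags
-  ignore_tags {
-    key_prefixes = ["kubernetes.io/"]
-  }
-}
-```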
-
-## How do I safely remove old worker groups?
-
-You've added new worker groups. Deleting worker groups from earlier in the list causes Terraform to want to recreate all worker groups. This is a limitation of how Terraform works combined with the module using `count` to create the ASGs and other resources.
-
-The safest and easiest option is to set `asg_min_size` and `asg_max_size` to 0 on the worker groups to "remove".
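-
-For example, to "remove" the second group while keeping its position in the list (names and types are illustrative):
-
-```hcl
-worker_groups = [
-  {
-    name          = "worker-group-1"
-    instance_type = "m5.large"
-  },
-  {
-    # Scaled to zero instead of being deleted, so earlier groups keep their index
-    name         = "worker-group-2"
-    asg_min_size = 0
-    asg_max_size = 0
-  },
-]
-```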
-
-## Why does changing the node or worker group's desired count not do anything?
-
-The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block.
-
-The setting is ignored to allow the cluster autoscaler to work correctly so that `terraform apply` does not accidentally remove running workers.
-
-You can change the desired count via the CLI or console if you're not using the cluster autoscaler.
-
-If you are not using autoscaling and want to control the number of nodes via terraform, set the `min_capacity` and `max_capacity` for node groups or `asg_min_size` and `asg_max_size` for worker groups. Before changing those values, you must satisfy AWS `desired` capacity constraints (which must be between new min/max values).
-
-When you scale down, AWS will remove a random instance, so you will have to weigh the risks here.
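-
-A sketch of pinning the size from Terraform when the autoscaler is not used (group names and sizes are illustrative):
-
-```hcl
-node_groups = {
-  example = {
-    min_capacity = 2
-    max_capacity = 2
-  }
-}
-
-worker_groups = [
-  {
-    name         = "worker-group-1"
-    asg_min_size = 2
-    asg_max_size = 2
-  },
-]
-```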
-
-## Why are nodes not recreated when the `launch_configuration`/`launch_template` is recreated?
-
-By default the ASG is not configured to be recreated when the launch configuration or template changes. If it were, Terraform would spin up the new instances and then delete all of the old instances in one go, as the AWS provider team has declined to implement rolling updates of autoscaling groups. This is not good for kubernetes stability.
-
-You need to use a process to drain and cycle the workers.
-
-If you are not using the cluster autoscaler:
-
-- Add a new instance
-- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
-- Wait for pods to be Running
-- Terminate the old node instance. ASG will start a new instance
-- Repeat the drain and delete process until all old nodes are replaced
-
-If you are using the cluster autoscaler:
-
-- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
-- Wait for pods to be Running
-- Cluster autoscaler will create new nodes when required
-- Repeat until all old nodes are drained
-- Cluster autoscaler will terminate the old nodes after 10-60 minutes automatically
-
-You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
-
-## How do I create kubernetes resources when creating the cluster?
-
-You do not need to do anything extra since v12.1.0 of the module as long as the following conditions are met:
-
-- `manage_aws_auth = true` on the module (default)
-- the kubernetes provider is correctly configured like in the [Usage Example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/README.md#usage-example). Primarily the module's `cluster_id` output is used as input to the `aws_eks_cluster*` data sources.
-
-The `cluster_id` depends on a `data.http.wait_for_cluster` that polls the EKS cluster's endpoint until it is alive. This blocks initialisation of the kubernetes provider.
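-
-Once those conditions are met, Kubernetes resources can live alongside the module in the same configuration; a minimal illustrative sketch (the namespace name is hypothetical):
-
-```hcl
-resource "kubernetes_namespace" "example" {
-  metadata {
-    name = "example"
-  }
-}
-```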
-
-## `aws_auth.tf: At 2:14: Unknown token: 2:14 IDENT`
-
-You are attempting to use a Terraform 0.12 module with Terraform 0.11.
-
-We highly recommend that you upgrade your EKS Terraform config to 0.12 to take advantage of new features in the module.
-
-Alternatively, you can lock your module to a compatible version if you must stay with Terraform 0.11:
-
-```hcl
-module "eks" {
- source = "terraform-aws-modules/eks/aws"
- version = "~> 4.0"
- # ...
-}
-```
-
-## How can I use Windows workers?
-
-To enable Windows support for your EKS cluster, you need to apply some configuration manually. See [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
-
-Windows worker nodes require an additional cluster role (`eks:kube-proxy-windows`). If you are adding Windows workers to an existing cluster, you should apply the config-map-aws-auth again.
-
-### Example configuration
-
-Amazon EKS clusters must contain one or more Linux worker nodes to run core system pods that only run on Linux, such as coredns and the VPC resource controller.
-
-1. Build the AWS EKS cluster with the following worker configuration (default Linux):
-
-```hcl
-worker_groups = [
- {
- name = "worker-group-linux"
- instance_type = "m5.large"
- platform = "linux"
- asg_desired_capacity = 2
- },
- ]
-```
-
-2. Apply the commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use the tab named `Windows`)
-3. Add one more worker group for Windows with the required field `platform = "windows"` and update your cluster. Worker group example:
-
-```hcl
-worker_groups = [
- {
- name = "worker-group-linux"
- instance_type = "m5.large"
- platform = "linux"
- asg_desired_capacity = 2
- },
- {
- name = "worker-group-windows"
- instance_type = "m5.large"
- platform = "windows"
- asg_desired_capacity = 1
- },
- ]
-```
-
-4. With `kubectl get nodes` you can see the cluster with mixed (Linux/Windows) node support.
-
-## Worker nodes with labels do not join a 1.16+ cluster
-
-Starting with Kubernetes 1.16, kubelet restricts which labels in the `kubernetes.io` namespace can be applied to nodes.
-
-Older configurations used labels like `kubernetes.io/lifecycle=spot` and this is no longer allowed. Use `node.kubernetes.io/lifecycle=spot` instead.
-
-Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
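-
-For example, a worker group using the allowed prefix (a sketch; name and instance type are illustrative):
-
-```hcl
-worker_groups = [
-  {
-    name               = "spot-1"
-    instance_type      = "m5.large"
-    kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
-  },
-]
-```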
-
-## What is the difference between `node_groups` and `worker_groups`?
-
-`node_groups` are [AWS-managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) (this configures the "Node Groups" you can find on the EKS dashboard). This system is supposed to ease some of the lifecycle around upgrading nodes, although upgrades are not applied automatically and you still need to trigger them manually.
-
-`worker_groups` are [self-managed nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) (provisions a typical "Autoscaling group" on EC2). They give you full control over the nodes in the cluster, such as using a custom AMI. As AWS says, "with worker groups the customer controls the data plane & AWS controls the control plane".
-
-Both can be used together in the same cluster.
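-
-A sketch combining both in one module block (all values are illustrative):
-
-```hcl
-module "eks" {
-  source = "terraform-aws-modules/eks/aws"
-
-  cluster_name    = "my-cluster"
-  cluster_version = "1.21"
-  vpc_id          = "vpc-1234556abcdef"
-  subnets         = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]
-
-  # AWS-managed node group
-  node_groups = {
-    managed = {
-      min_capacity   = 1
-      max_capacity   = 5
-      instance_types = ["t3.large"]
-    }
-  }
-
-  # Self-managed worker group
-  worker_groups = [
-    {
-      name          = "self-managed"
-      instance_type = "m5.large"
-      asg_max_size  = 5
-    },
-  ]
-}
-```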
-
-## I'm using both AWS-managed node groups and self-managed worker groups, and pods scheduled on the AWS-managed node groups are unable to resolve DNS (even communication between pods)
-
-This happens because CoreDNS can be scheduled on self-managed worker groups and, by default, the terraform module doesn't create security group rules to allow communication between pods scheduled on self-managed worker groups and AWS-managed node groups.
-
-You can set `var.worker_create_cluster_primary_security_group_rules` to `true` to create the required rules.
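-
-For example (other module arguments omitted):
-
-```hcl
-module "eks" {
-  source = "terraform-aws-modules/eks/aws"
-
-  # ...
-
-  worker_create_cluster_primary_security_group_rules = true
-}
-```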
-
-## Dedicated control plane subnets
-
-[AWS recommends](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) creating dedicated subnets for the network interfaces that EKS creates (the control plane). The module fully supports this approach. To set it up, configure the module by adding separate `subnets` to the workers' default specification via the `workers_group_defaults` map, or set `subnets` directly in an individual worker group definition.
-
-```hcl
-module "eks" {
- source = "terraform-aws-modules/eks/aws"
-
- cluster_version = "1.21"
- cluster_name = "my-cluster"
- vpc_id = "vpc-1234556abcdef"
- subnets = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]
-
- workers_group_defaults = {
- subnets = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
- }
-
- worker_groups = [
- {
- instance_type = "m4.large"
- asg_max_size = 5
- },
- {
- name = "worker-group-2"
- subnets = ["subnet-qwer123"]
- instance_type = "t3.medium"
- asg_desired_capacity = 1
- public_ip = true
- ebs_optimized = true
- }
- ]
-}
-```
\ No newline at end of file
diff --git a/docs/iam-permissions.md b/docs/iam-permissions.md
deleted file mode 100644
index e6a867a9ec..0000000000
--- a/docs/iam-permissions.md
+++ /dev/null
@@ -1,155 +0,0 @@
-# IAM Permissions
-
-The following IAM permissions are the minimum needed for your IAM user or IAM role to create an EKS cluster.
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "VisualEditor0",
- "Effect": "Allow",
- "Action": [
- "autoscaling:AttachInstances",
- "autoscaling:CreateAutoScalingGroup",
- "autoscaling:CreateLaunchConfiguration",
- "autoscaling:CreateOrUpdateTags",
- "autoscaling:DeleteAutoScalingGroup",
- "autoscaling:DeleteLaunchConfiguration",
- "autoscaling:DeleteTags",
- "autoscaling:Describe*",
- "autoscaling:DetachInstances",
- "autoscaling:SetDesiredCapacity",
- "autoscaling:UpdateAutoScalingGroup",
- "autoscaling:SuspendProcesses",
- "ec2:AllocateAddress",
- "ec2:AssignPrivateIpAddresses",
- "ec2:Associate*",
- "ec2:AttachInternetGateway",
- "ec2:AttachNetworkInterface",
- "ec2:AuthorizeSecurityGroupEgress",
- "ec2:AuthorizeSecurityGroupIngress",
- "ec2:CreateDefaultSubnet",
- "ec2:CreateDhcpOptions",
- "ec2:CreateEgressOnlyInternetGateway",
- "ec2:CreateInternetGateway",
- "ec2:CreateNatGateway",
- "ec2:CreateNetworkInterface",
- "ec2:CreateRoute",
- "ec2:CreateRouteTable",
- "ec2:CreateSecurityGroup",
- "ec2:CreateSubnet",
- "ec2:CreateTags",
- "ec2:CreateVolume",
- "ec2:CreateVpc",
- "ec2:CreateVpcEndpoint",
- "ec2:DeleteDhcpOptions",
- "ec2:DeleteEgressOnlyInternetGateway",
- "ec2:DeleteInternetGateway",
- "ec2:DeleteNatGateway",
- "ec2:DeleteNetworkInterface",
- "ec2:DeleteRoute",
- "ec2:DeleteRouteTable",
- "ec2:DeleteSecurityGroup",
- "ec2:DeleteSubnet",
- "ec2:DeleteTags",
- "ec2:DeleteVolume",
- "ec2:DeleteVpc",
- "ec2:DeleteVpnGateway",
- "ec2:Describe*",
- "ec2:DetachInternetGateway",
- "ec2:DetachNetworkInterface",
- "ec2:DetachVolume",
- "ec2:Disassociate*",
- "ec2:ModifySubnetAttribute",
- "ec2:ModifyVpcAttribute",
- "ec2:ModifyVpcEndpoint",
- "ec2:ReleaseAddress",
- "ec2:RevokeSecurityGroupEgress",
- "ec2:RevokeSecurityGroupIngress",
- "ec2:UpdateSecurityGroupRuleDescriptionsEgress",
- "ec2:UpdateSecurityGroupRuleDescriptionsIngress",
- "ec2:CreateLaunchTemplate",
- "ec2:CreateLaunchTemplateVersion",
- "ec2:DeleteLaunchTemplate",
- "ec2:DeleteLaunchTemplateVersions",
- "ec2:DescribeLaunchTemplates",
- "ec2:DescribeLaunchTemplateVersions",
- "ec2:GetLaunchTemplateData",
- "ec2:ModifyLaunchTemplate",
- "ec2:RunInstances",
- "eks:CreateCluster",
- "eks:DeleteCluster",
- "eks:DescribeCluster",
- "eks:ListClusters",
- "eks:UpdateClusterConfig",
- "eks:UpdateClusterVersion",
- "eks:DescribeUpdate",
- "eks:TagResource",
- "eks:UntagResource",
- "eks:ListTagsForResource",
- "eks:CreateFargateProfile",
- "eks:DeleteFargateProfile",
- "eks:DescribeFargateProfile",
- "eks:ListFargateProfiles",
- "eks:CreateNodegroup",
- "eks:DeleteNodegroup",
- "eks:DescribeNodegroup",
- "eks:ListNodegroups",
- "eks:UpdateNodegroupConfig",
- "eks:UpdateNodegroupVersion",
- "iam:AddRoleToInstanceProfile",
- "iam:AttachRolePolicy",
- "iam:CreateInstanceProfile",
- "iam:CreateOpenIDConnectProvider",
- "iam:CreateServiceLinkedRole",
- "iam:CreatePolicy",
- "iam:CreatePolicyVersion",
- "iam:CreateRole",
- "iam:DeleteInstanceProfile",
- "iam:DeleteOpenIDConnectProvider",
- "iam:DeletePolicy",
- "iam:DeletePolicyVersion",
- "iam:DeleteRole",
- "iam:DeleteRolePolicy",
- "iam:DeleteServiceLinkedRole",
- "iam:DetachRolePolicy",
- "iam:GetInstanceProfile",
- "iam:GetOpenIDConnectProvider",
- "iam:GetPolicy",
- "iam:GetPolicyVersion",
- "iam:GetRole",
- "iam:GetRolePolicy",
- "iam:List*",
- "iam:PassRole",
- "iam:PutRolePolicy",
- "iam:RemoveRoleFromInstanceProfile",
- "iam:TagOpenIDConnectProvider",
- "iam:TagRole",
- "iam:UntagRole",
- "iam:TagPolicy",
- "iam:TagInstanceProfile",
- "iam:UpdateAssumeRolePolicy",
- // Following permissions are needed if cluster_enabled_log_types is enabled
- "logs:CreateLogGroup",
- "logs:DescribeLogGroups",
- "logs:DeleteLogGroup",
- "logs:ListTagsLogGroup",
- "logs:PutRetentionPolicy",
- // Following permissions for working with secrets_encryption example
- "kms:CreateAlias",
- "kms:CreateGrant",
- "kms:CreateKey",
- "kms:DeleteAlias",
- "kms:DescribeKey",
- "kms:GetKeyPolicy",
- "kms:GetKeyRotationStatus",
- "kms:ListAliases",
- "kms:ListResourceTags",
- "kms:ScheduleKeyDeletion"
- ],
- "Resource": "*"
- }
- ]
-}
-```
diff --git a/docs/spot-instances.md b/docs/spot-instances.md
deleted file mode 100644
index be89f52f87..0000000000
--- a/docs/spot-instances.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# Using spot instances
-
-Spot instances usually cost around 30-70% less than an on-demand instance. Using them for your EKS workloads can therefore save a lot of money, but it requires some special consideration, as they can be terminated with only 2 minutes' warning.
-
-You need to install a daemonset to catch the 2 minute termination warning and ensure the node is gracefully drained before it is terminated. You can install the [k8s-spot-termination-handler](https://github.com/kube-aws/kube-spot-termination-notice-handler) for this. There's a [Helm chart](https://github.com/helm/charts/tree/master/stable/k8s-spot-termination-handler):
-
-```shell
-helm install stable/k8s-spot-termination-handler --namespace kube-system
-```
-
-The following examples include at least one worker group that uses on-demand instances. This worker group has an added node label that can be used in scheduling. It could be used to schedule any workload not suitable for spot instances, and it is especially important for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), which might end up unscheduled when spot instances are terminated. You can add this to the values of the [cluster-autoscaler helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler-chart):
-
-```yaml
-nodeSelector:
- kubernetes.io/lifecycle: normal
-```
-
-Notes:
-
-- The `spot_price` is set to the on-demand price so that the spot instances will run as long as they are cheaper.
-- It's best to have a broad range of instance types to ensure there are always some instances to run when prices fluctuate.
-- There is an AWS blog article about this [here](https://aws.amazon.com/blogs/compute/run-your-kubernetes-workloads-on-amazon-ec2-spot-instances-with-amazon-eks/).
-- Consider using [k8s-spot-rescheduler](https://github.com/pusher/k8s-spot-rescheduler) to move pods from on-demand to spot instances.
-
-## Using Launch Configuration
-
-Example worker group configuration that uses an ASG with launch configuration for each worker group:
-
-```hcl
- worker_groups = [
- {
- name = "on-demand-1"
- instance_type = "m4.xlarge"
- asg_max_size = 1
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=normal"
- suspended_processes = ["AZRebalance"]
- },
- {
- name = "spot-1"
- spot_price = "0.199"
- instance_type = "c4.xlarge"
- asg_max_size = 20
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
- suspended_processes = ["AZRebalance"]
- },
- {
- name = "spot-2"
- spot_price = "0.20"
- instance_type = "m4.xlarge"
- asg_max_size = 20
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
- suspended_processes = ["AZRebalance"]
- }
- ]
-```
-
-## Using Launch Templates
-
-Launch Template support is a recent addition to both AWS and this module. It might not be as tried and tested, but it's more suitable for spot instances as it allows multiple instance types in the same worker group:
-
-```hcl
- worker_groups = [
- {
- name = "on-demand-1"
- instance_type = "m4.xlarge"
- asg_max_size = 10
- kubelet_extra_args = "--node-labels=spot=false"
- suspended_processes = ["AZRebalance"]
- }
- ]
-
-
- worker_groups_launch_template = [
- {
- name = "spot-1"
- override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
- spot_instance_pools = 4
- asg_max_size = 5
- asg_desired_capacity = 5
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
- public_ip = true
- },
- ]
-```
-
-## Using Launch Templates With Both Spot and On Demand
-
-Example launch template that launches 2 on-demand instances of type m5.large, with the ability to scale up using both spot and on-demand instances. The `node.kubernetes.io/lifecycle` node label will be set to the value queried from the EC2 metadata service: either "on-demand" or "spot".
-
-`on_demand_percentage_above_base_capacity` is set to 25 so 1 in 4 new nodes, when auto-scaling, will be on-demand instances. If not set, all new nodes will be spot instances. The on-demand instances will be the primary instance type (first in the array if they are not weighted).
-
-```hcl
- worker_groups_launch_template = [{
- name = "mixed-demand-spot"
- override_instance_types = ["m5.large", "m5a.large", "m4.large"]
- root_encrypted = true
- root_volume_size = 50
-
- asg_min_size = 2
- asg_desired_capacity = 2
- on_demand_base_capacity = 3
- on_demand_percentage_above_base_capacity = 25
- asg_max_size = 20
- spot_instance_pools = 3
-
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`"
- }]
-```
-
-## Important Notes
-
-An issue with the cluster-autoscaler: https://github.com/kubernetes/autoscaler/issues/1133
-
-AWS has released its own termination handler: https://github.com/aws/aws-node-termination-handler
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
deleted file mode 100644
index 88d9d4ee14..0000000000
--- a/examples/bottlerocket/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# AWS EKS cluster running Bottlerocket AMI
-
-Configuration in this directory creates an EKS cluster with a worker group running [AWS Bottlerocket OS](https://github.com/bottlerocket-os/bottlerocket).
-
-This is a minimalistic example which shows what knobs to turn to make Bottlerocket work.
-
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-| [tls](#requirement\_tls) | >= 2.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
-| [tls](#provider\_tls) | >= 2.0 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_iam_role_policy_attachment.ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_key_pair.nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [tls_private_key.nodes](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
-| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-| [node\_groups](#output\_node\_groups) | Outputs from node groups |
-
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
deleted file mode 100644
index bffa83238b..0000000000
--- a/examples/bottlerocket/main.tf
+++ /dev/null
@@ -1,159 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "bottlerocket-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnets = [module.vpc.private_subnets[2]]
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- write_kubeconfig = false
- manage_aws_auth = true
-
- worker_groups_launch_template = [
- {
- name = "bottlerocket-nodes"
- ami_id = data.aws_ami.bottlerocket_ami.id
- instance_type = "t3a.small"
- asg_desired_capacity = 2
- key_name = aws_key_pair.nodes.key_name
-
- # Since we are using default VPC there is no NAT gateway so we need to
- # attach public ip to nodes so they can reach k8s API server
- # do not repeat this at home (i.e. production)
- public_ip = true
-
- # This section overrides default userdata template to pass bottlerocket
- # specific user data
- userdata_template_file = "${path.module}/userdata.toml"
- # we are using this section to pass additional arguments for
- # userdata template rendering
- userdata_template_extra_args = {
- enable_admin_container = false
- enable_control_container = true
- aws_region = data.aws_region.current.name
- }
- # example of k8s/kubelet configuration via additional_userdata
- additional_userdata = < [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [null](#requirement\_null) | >= 3.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
+| [aws](#provider\_aws) | >= 3.64 |
+| [null](#provider\_null) | >= 3.0 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a |
-| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | n/a |
-| [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | n/a |
+| [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
+| [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a |
+| [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
| [eks](#module\_eks) | ../.. | n/a |
+| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
+| [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a |
+| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
| Name | Type |
|------|------|
-| [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.worker_group_mgmt_one](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.worker_group_mgmt_two](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
@@ -66,9 +75,25 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-| [node\_groups](#output\_node\_groups) | Outputs from node groups |
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 5cfcab668b..ff32473737 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -3,9 +3,15 @@ provider "aws" {
}
locals {
- name = "complete-${random_string.suffix.result}"
- cluster_version = "1.20"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+ cluster_version = "1.21"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -15,87 +21,102 @@ locals {
module "eks" {
source = "../.."
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnets = [module.vpc.private_subnets[2]]
-
+ cluster_name = local.name
+ cluster_version = local.cluster_version
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
- worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
-
- # Worker groups (using Launch Configurations)
- worker_groups = [
- {
- name = "worker-group-1"
- instance_type = "t3.small"
- additional_userdata = "echo foo bar"
- asg_desired_capacity = 2
- additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
- },
- {
- name = "worker-group-2"
- instance_type = "t3.medium"
- additional_userdata = "echo foo bar"
- additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
- asg_desired_capacity = 1
- },
- ]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
- # Worker groups (using Launch Templates)
- worker_groups_launch_template = [
- {
- name = "spot-1"
- override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
- spot_instance_pools = 4
- asg_max_size = 5
- asg_desired_capacity = 5
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
- public_ip = true
- },
- ]
+ enable_irsa = true
- # Managed Node Groups
- node_groups_defaults = {
- ami_type = "AL2_x86_64"
- disk_size = 50
+ # Self Managed Node Group(s)
+ self_managed_node_group_defaults = {
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}
- node_groups = {
- example = {
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
+ self_managed_node_groups = {
+ spot = {
+ instance_type = "m5.large"
+ instance_market_options = {
+ market_type = "spot"
+ }
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ cd /tmp
+ sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+ sudo systemctl enable amazon-ssm-agent
+ sudo systemctl start amazon-ssm-agent
+ EOT
+ }
+ }
+
+ # EKS Managed Node Group(s)
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ }
+
+ eks_managed_node_groups = {
+ blue = {}
+ green = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
instance_types = ["t3.large"]
capacity_type = "SPOT"
- k8s_labels = {
+ labels = {
Environment = "test"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
- additional_tags = {
- ExtraTag = "example"
- }
- taints = [
- {
+
+ taints = {
+ dedicated = {
key = "dedicated"
value = "gpuGroup"
effect = "NO_SCHEDULE"
}
- ]
+ }
+
update_config = {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
+
+ tags = {
+ ExtraTag = "example"
+ }
}
}
- # Fargate
+ # Fargate Profile(s)
fargate_profiles = {
default = {
name = "default"
@@ -122,38 +143,59 @@ module "eks" {
}
}
- # AWS Auth (kubernetes_config_map)
- map_roles = [
- {
- rolearn = "arn:aws:iam::66666666666:role/role1"
- username = "role1"
- groups = ["system:masters"]
- },
- ]
+ tags = local.tags
+}
- map_users = [
- {
- userarn = "arn:aws:iam::66666666666:user/user1"
- username = "user1"
- groups = ["system:masters"]
- },
- {
- userarn = "arn:aws:iam::66666666666:user/user2"
- username = "user2"
- groups = ["system:masters"]
- },
- ]
+################################################################################
+# Sub-Module Usage on Existing/Separate Cluster
+################################################################################
+
+module "eks_managed_node_group" {
+ source = "../../modules/eks-managed-node-group"
+
+ name = "separate-eks-mng"
+ cluster_name = module.eks.cluster_id
+ cluster_version = local.cluster_version
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ tags = merge(local.tags, { Separate = "eks-managed-node-group" })
+}
- map_accounts = [
- "777777777777",
- "888888888888",
+module "self_managed_node_group" {
+ source = "../../modules/self-managed-node-group"
+
+ name = "separate-self-mng"
+ cluster_name = module.eks.cluster_id
+ cluster_version = local.cluster_version
+ cluster_endpoint = module.eks.cluster_endpoint
+ cluster_auth_base64 = module.eks.cluster_certificate_authority_data
+
+ instance_type = "m5.large"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+ vpc_security_group_ids = [
+ module.eks.cluster_primary_security_group_id,
+ module.eks.cluster_security_group_id,
]
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = merge(local.tags, { Separate = "self-managed-node-group" })
+}
+
+module "fargate_profile" {
+ source = "../../modules/fargate-profile"
+
+ name = "separate-fargate-profile"
+ cluster_name = module.eks.cluster_id
+
+ subnet_ids = module.vpc.private_subnets
+ selectors = [{
+ namespace = "kube-system"
+ }]
+
+ tags = merge(local.tags, { Separate = "fargate-profile" })
}
################################################################################
@@ -163,128 +205,159 @@ module "eks" {
module "disabled_eks" {
source = "../.."
- create_eks = false
+ create = false
+}
+
+module "disabled_fargate_profile" {
+ source = "../../modules/fargate-profile"
+
+ create = false
}
-module "disabled_fargate" {
- source = "../../modules/fargate"
+module "disabled_eks_managed_node_group" {
+ source = "../../modules/eks-managed-node-group"
- create_fargate_pod_execution_role = false
+ create = false
}
-module "disabled_node_groups" {
- source = "../../modules/node_groups"
+module "disabled_self_managed_node_group" {
+ source = "../../modules/self-managed-node-group"
- create_eks = false
+ create = false
}
################################################################################
-# Kubernetes provider configuration
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################
-data "aws_eks_cluster" "cluster" {
+data "aws_eks_cluster_auth" "this" {
name = module.eks.cluster_id
}
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = module.eks.cluster_id
+ cluster = {
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = module.eks.cluster_id
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = data.aws_eks_cluster_auth.this.token
+ }
+ }]
+ })
+
+ # we have to combine the configmap created by the eks module with the externally created node group/profile sub-modules
+ aws_auth_configmap_yaml = <<-EOT
+ ${chomp(module.eks.aws_auth_configmap_yaml)}
+ - rolearn: ${module.eks_managed_node_group.iam_role_arn}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+ - rolearn: ${module.self_managed_node_group.iam_role_arn}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+ - rolearn: ${module.fargate_profile.fargate_profile_arn}
+ username: system:node:{{SessionName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+ - system:node-proxier
+ EOT
}
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
+resource "null_resource" "patch" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = "kubectl patch configmap/aws-auth --patch \"${local.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
}
################################################################################
-# Additional security groups for workers
+# Supporting resources
################################################################################
-resource "aws_security_group" "worker_group_mgmt_one" {
- name_prefix = "worker_group_mgmt_one"
- vpc_id = module.vpc.vpc_id
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 3.0"
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
+ name = local.name
+ cidr = "10.0.0.0/16"
- cidr_blocks = [
- "10.0.0.0/8",
- ]
- }
-}
+ azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+ public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-resource "aws_security_group" "worker_group_mgmt_two" {
- name_prefix = "worker_group_mgmt_two"
- vpc_id = module.vpc.vpc_id
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ enable_dns_hostnames = true
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
+ enable_flow_log = true
+ create_flow_log_cloudwatch_iam_role = true
+ create_flow_log_cloudwatch_log_group = true
- cidr_blocks = [
- "192.168.0.0/16",
- ]
+ public_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/elb" = 1
}
+
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = local.tags
}
-resource "aws_security_group" "all_worker_mgmt" {
- name_prefix = "all_worker_management"
+resource "aws_security_group" "additional" {
+ name_prefix = "${local.name}-additional"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
-
cidr_blocks = [
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
}
-}
-
-################################################################################
-# Supporting resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-resource "random_string" "suffix" {
- length = 8
- special = false
+ tags = local.tags
}
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf
index 10a3a96604..8a9153c099 100644
--- a/examples/complete/outputs.tf
+++ b/examples/complete/outputs.tf
@@ -1,24 +1,148 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
}
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
}
-output "node_groups" {
- description = "Outputs from node groups"
- value = module.eks.node_groups
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
}
diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf
index 8e2b837984..adfd0180d4 100644
--- a/examples/complete/versions.tf
+++ b/examples/complete/versions.tf
@@ -4,19 +4,11 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56"
+ version = ">= 3.64"
}
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
}
}
}
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
new file mode 100644
index 0000000000..b23c13d3f1
--- /dev/null
+++ b/examples/eks_managed_node_group/README.md
@@ -0,0 +1,95 @@
+# EKS Managed Node Group Example
+
+Configuration in this directory creates an AWS EKS cluster with various EKS Managed Node Groups demonstrating the various methods of configuring/customizing:
+
+- A default, "out of the box" EKS managed node group as supplied by AWS EKS
+- A default, "out of the box" Bottlerocket EKS managed node group as supplied by AWS EKS
+- A Bottlerocket EKS managed node group that supplies additional bootstrap settings
+- A Bottlerocket EKS managed node group that demonstrates many of the configuration/customizations offered by the `eks-managed-node-group` sub-module for the Bottlerocket OS
+- An EKS managed node group created from a launch template created outside of the module
+- An EKS managed node group that utilizes a custom AMI that is an EKS optimized AMI derivative
+- An EKS managed node group that demonstrates nearly all of the configurations/customizations offered by the `eks-managed-node-group` sub-module
+
+See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [null](#requirement\_null) | >= 3.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.64 |
+| [null](#provider\_null) | >= 3.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks](#module\_eks) | ../.. | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
new file mode 100644
index 0000000000..fb801c1eb1
--- /dev/null
+++ b/examples/eks_managed_node_group/main.tf
@@ -0,0 +1,490 @@
+provider "aws" {
+ region = local.region
+}
+
+locals {
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+ cluster_version = "1.21"
+ region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+
+data "aws_caller_identity" "current" {}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+ source = "../.."
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_service_ipv4_cidr = "172.16.0.0/16"
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ enable_irsa = true
+
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ }
+
+ eks_managed_node_groups = {
+ # Default node group - as provided by AWS EKS
+ default_node_group = {}
+
+ # Default node group - as provided by AWS EKS using Bottlerocket
+ bottlerocket_default = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+ }
+
+ # Adds to the AWS provided user data
+ bottlerocket_add = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+
+ # this will get added to what AWS provides
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+ }
+
+ # Custom AMI, using module provided bootstrap data
+ bottlerocket_custom = {
+ # Current bottlerocket AMI
+ ami_id = "ami-0ff61e0bcfc81dc94"
+ platform = "bottlerocket"
+
+      # use the module user data template to bootstrap
+ enable_bootstrap_user_data = true
+ # this will get added to the template
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+
+ [settings.kubernetes.node-labels]
+ "label1" = "foo"
+ "label2" = "bar"
+
+ [settings.kubernetes.node-taints]
+ "dedicated" = "experimental:PreferNoSchedule"
+ "special" = "true:NoSchedule"
+ EOT
+ }
+
+ # Use existing/external launch template
+ external_lt = {
+ create_launch_template = false
+ launch_template_name = aws_launch_template.external.name
+ launch_template_version = aws_launch_template.external.default_version
+ }
+
+ # Use a custom AMI
+ custom_ami = {
+ # Current default AMI used by managed node groups - pseudo "custom"
+ ami_id = "ami-0caf35bc73450c396"
+
+      # This will ensure the bootstrap user data is used to join the node
+      # By default, EKS managed node groups will not append the bootstrap script;
+ # this adds it back in using the default template provided by the module
+ # Note: this assumes the AMI provided is an EKS optimized AMI derivative
+ enable_bootstrap_user_data = true
+ }
+
+ # Complete
+ complete = {
+ name = "complete-eks-mng"
+ use_name_prefix = false
+
+ subnet_ids = module.vpc.private_subnets
+
+ min_size = 1
+ max_size = 7
+ desired_size = 1
+
+ ami_id = "ami-0caf35bc73450c396"
+ enable_bootstrap_user_data = true
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
+
+ post_bootstrap_user_data = <<-EOT
+ echo "you are free little kubelet!"
+ EOT
+
+ capacity_type = "SPOT"
+ disk_size = 256
+ force_update_version = true
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large", "m3.large", "m4.large"]
+ labels = {
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = [
+ {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ ]
+
+ update_config = {
+ max_unavailable_percentage = 50 # or set `max_unavailable`
+ }
+
+ description = "EKS managed node group example launch template"
+
+ ebs_optimized = true
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ disable_api_termination = false
+ enable_monitoring = true
+
+ block_device_mappings = {
+ xvda = {
+ device_name = "/dev/xvda"
+ ebs = {
+ volume_size = 75
+ volume_type = "gp3"
+ iops = 3000
+ throughput = 150
+ encrypted = true
+ kms_key_id = aws_kms_key.ebs.arn
+ delete_on_termination = true
+ }
+ }
+ }
+
+ metadata_options = {
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ http_put_response_hop_limit = 2
+ }
+
+ create_iam_role = true
+ iam_role_name = "eks-managed-node-group-complete-example"
+ iam_role_use_name_prefix = false
+ iam_role_description = "EKS managed node group complete example role"
+ iam_role_tags = {
+ Purpose = "Protector of the kubelet"
+ }
+ iam_role_additional_policies = [
+ "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+ ]
+
+ create_security_group = true
+ security_group_name = "eks-managed-node-group-complete-example"
+ security_group_use_name_prefix = false
+ security_group_description = "EKS managed node group complete example security group"
+ security_group_rules = {
+ phoneOut = {
+ description = "Hello CloudFlare"
+ protocol = "udp"
+ from_port = 53
+ to_port = 53
+ type = "egress"
+ cidr_blocks = ["1.1.1.1/32"]
+ }
+ phoneHome = {
+ description = "Hello cluster"
+ protocol = "udp"
+ from_port = 53
+ to_port = 53
+ type = "egress"
+ source_cluster_security_group = true # bit of reflection lookup
+ }
+ }
+ security_group_tags = {
+ Purpose = "Protector of the kubelet"
+ }
+
+ tags = {
+ ExtraTag = "EKS managed node group complete example"
+ }
+ }
+ }
+
+ tags = local.tags
+}
+
+################################################################################
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = module.eks.cluster_id
+ cluster = {
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = module.eks.cluster_id
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = data.aws_eks_cluster_auth.this.token
+ }
+ }]
+ })
+}
+
+resource "null_resource" "patch" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 3.0"
+
+ name = local.name
+ cidr = "10.0.0.0/16"
+
+ azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+ public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ enable_dns_hostnames = true
+
+ enable_flow_log = true
+ create_flow_log_cloudwatch_iam_role = true
+ create_flow_log_cloudwatch_log_group = true
+
+ public_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/elb" = 1
+ }
+
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = local.tags
+}
+
+resource "aws_security_group" "additional" {
+ name_prefix = "${local.name}-additional"
+ vpc_id = module.vpc.vpc_id
+
+ ingress {
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = [
+ "10.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ ]
+ }
+
+ tags = local.tags
+}
+
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
+
+ tags = local.tags
+}
+
+resource "aws_kms_key" "ebs" {
+ description = "Customer managed key to encrypt EKS managed node group volumes"
+ deletion_window_in_days = 7
+ policy = data.aws_iam_policy_document.ebs.json
+}
+
+# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to encrypt, decrypt, and attach encrypted EBS volumes
+data "aws_iam_policy_document" "ebs" {
+ # Copy of default KMS policy that lets you manage it
+ statement {
+ sid = "Enable IAM User Permissions"
+ actions = ["kms:*"]
+ resources = ["*"]
+
+ principals {
+ type = "AWS"
+ identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
+ }
+ }
+
+ # Required for EKS
+ statement {
+ sid = "Allow service-linked role use of the CMK"
+ actions = [
+ "kms:Encrypt",
+ "kms:Decrypt",
+ "kms:ReEncrypt*",
+ "kms:GenerateDataKey*",
+ "kms:DescribeKey"
+ ]
+ resources = ["*"]
+
+ principals {
+ type = "AWS"
+ identifiers = [
+ "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
+ module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
+ ]
+ }
+ }
+
+ statement {
+ sid = "Allow attachment of persistent resources"
+ actions = ["kms:CreateGrant"]
+ resources = ["*"]
+
+ principals {
+ type = "AWS"
+ identifiers = [
+ "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
+ module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
+ ]
+ }
+
+ condition {
+ test = "Bool"
+ variable = "kms:GrantIsForAWSResource"
+ values = ["true"]
+ }
+ }
+}
+
+# This is based on the launch template that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx).
+# There are several more options you could set, but you probably don't need to modify them;
+# you can take the default and add your custom AMI and/or custom tags.
+#
+# Trivia: AWS transparently creates a copy of your launch template and uses that copy for the node group. If you DON'T use a custom AMI,
+# the default user data for bootstrapping the cluster is merged into that copy.
+
+resource "aws_launch_template" "external" {
+ name_prefix = "external-eks-ex-"
+ description = "EKS managed node group external launch template"
+ update_default_version = true
+
+ block_device_mappings {
+ device_name = "/dev/xvda"
+
+ ebs {
+ volume_size = 100
+ volume_type = "gp2"
+ delete_on_termination = true
+ }
+ }
+
+ monitoring {
+ enabled = true
+ }
+
+ network_interfaces {
+ associate_public_ip_address = false
+ delete_on_termination = true
+ }
+
+  # If you want to use a custom AMI
+  # image_id = var.ami_id
+
+  # If you use a custom AMI, you need to supply the bootstrap script via user data, because EKS DOESN'T merge its managed user data in that case.
+  # You can add more than the minimum code you see in the template, e.g. install the SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
+  # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script; example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151, and see the commented sketch after this resource)
+  # user_data = base64encode(data.template_file.launch_template_userdata.rendered)
+
+ tag_specifications {
+ resource_type = "instance"
+
+ tags = {
+ CustomTag = "Instance custom tag"
+ }
+ }
+
+ tag_specifications {
+ resource_type = "volume"
+
+ tags = {
+ CustomTag = "Volume custom tag"
+ }
+ }
+
+ tag_specifications {
+ resource_type = "network-interface"
+
+ tags = {
+ CustomTag = "EKS example"
+ }
+ }
+
+ tags = {
+ CustomTag = "Launch template custom tag"
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
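+
+# A commented sketch (not part of this example) of how the user data could be rendered with the
+# hashicorp/cloudinit provider mentioned above; the cluster name and script contents are illustrative.
+#
+# data "cloudinit_config" "custom_ami_user_data" {
+#   gzip          = false
+#   base64_encode = true
+#
+#   part {
+#     content_type = "text/x-shellscript"
+#     content      = <<-EOT
+#       #!/bin/bash
+#       /etc/eks/bootstrap.sh example --kubelet-extra-args '--max-pods=20'
+#     EOT
+#   }
+# }
+#
+# The rendered (base64 encoded) output could then be passed to the launch template:
+# user_data = data.cloudinit_config.custom_ami_user_data.rendered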
diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf
new file mode 100644
index 0000000000..3e9620157b
--- /dev/null
+++ b/examples/eks_managed_node_group/outputs.tf
@@ -0,0 +1,167 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+ description = "Endpoint for your Kubernetes API server"
+ value = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+ value = module.eks.cluster_primary_security_group_id
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+ description = "ID of the cluster security group"
+ value = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the node shared security group"
+ value = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+ description = "ID of the node shared security group"
+ value = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/bottlerocket/variables.tf b/examples/eks_managed_node_group/variables.tf
similarity index 100%
rename from examples/bottlerocket/variables.tf
rename to examples/eks_managed_node_group/variables.tf
diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf
new file mode 100644
index 0000000000..adfd0180d4
--- /dev/null
+++ b/examples/eks_managed_node_group/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.64"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
+ }
+ }
+}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
deleted file mode 100644
index 1228f5c4c1..0000000000
--- a/examples/fargate/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# AWS EKS Cluster with Fargate profiles
-
-Configuration in this directory creates EKS cluster with Fargate profiles in two different ways:
-
-- Using a root module, where EKS Cluster and Fargate profiles should be created at once. This is the default behaviour for most users.
-- Using `modules/fargate` submodule where Fargate profiles should be attached to the existing EKS Cluster.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-| [tls](#requirement\_tls) | >= 2.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Outputs from node groups |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
deleted file mode 100644
index 09e2e57e2d..0000000000
--- a/examples/fargate/main.tf
+++ /dev/null
@@ -1,235 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "fargate-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnets = [module.vpc.private_subnets[2]]
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- # You require a node group to schedule coredns which is critical for running correctly internal DNS.
- # If you want to use only fargate you must follow docs `(Optional) Update CoreDNS`
- # available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
- node_groups = {
- example = {
- desired_capacity = 1
-
- instance_types = ["t3.large"]
- k8s_labels = {
- Example = "managed_node_groups"
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- additional_tags = {
- ExtraTag = "example"
- }
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- }
- }
-
- fargate_profiles = {
- default = {
- name = "default"
- selectors = [
- {
- namespace = "kube-system"
- labels = {
- k8s-app = "kube-dns"
- }
- },
- {
- namespace = "default"
- labels = {
- WorkerType = "fargate"
- }
- }
- ]
-
- tags = {
- Owner = "default"
- }
-
- timeouts = {
- create = "20m"
- delete = "20m"
- }
- }
-
- secondary = {
- name = "secondary"
- selectors = [
- {
- namespace = "default"
- labels = {
- Environment = "test"
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- }
- ]
-
- # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
- subnets = [module.vpc.private_subnets[1]]
-
- tags = {
- Owner = "secondary"
- }
- }
- }
-
- manage_aws_auth = false
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-
-##############################################
-# Calling submodule with existing EKS cluster
-##############################################
-
-module "fargate_profile_existing_cluster" {
- source = "../../modules/fargate"
-
- cluster_name = module.eks.cluster_id
- subnets = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]]
-
- fargate_profiles = {
- profile1 = {
- name = "profile1"
- selectors = [
- {
- namespace = "kube-system"
- labels = {
- k8s-app = "kube-dns"
- }
- },
- {
- namespace = "profile"
- labels = {
- WorkerType = "fargate"
- }
- }
- ]
-
- tags = {
- Owner = "profile1"
- submodule = "true"
- }
- }
-
- profile2 = {
- name = "profile2"
- selectors = [
- {
- namespace = "default"
- labels = {
- Fargate = "profile2"
- }
- }
- ]
-
- # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
- subnets = [module.vpc.private_subnets[0]]
-
- tags = {
- Owner = "profile2"
- submodule = "true"
- }
-
- timeouts = {
- delete = "20m"
- }
- }
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
diff --git a/examples/fargate/outputs.tf b/examples/fargate/outputs.tf
deleted file mode 100644
index b7f23eeaf2..0000000000
--- a/examples/fargate/outputs.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
- value = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
-output "fargate_profile_arns" {
- description = "Outputs from node groups"
- value = module.eks.fargate_profile_arns
-}
diff --git a/examples/fargate/versions.tf b/examples/fargate/versions.tf
deleted file mode 100644
index cb5115c487..0000000000
--- a/examples/fargate/versions.tf
+++ /dev/null
@@ -1,26 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = {
- source = "hashicorp/aws"
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- tls = {
- source = "hashicorp/tls"
- version = ">= 2.0"
- }
- }
-}
diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md
new file mode 100644
index 0000000000..9ff7b03a4c
--- /dev/null
+++ b/examples/fargate_profile/README.md
@@ -0,0 +1,76 @@
+# AWS EKS Cluster with Fargate profiles
+
+Configuration in this directory creates an AWS EKS cluster utilizing Fargate profiles.
+
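+Fargate profiles are declared through the module's `fargate_profiles` input. The snippet below is a minimal sketch (the namespace and labels are illustrative); `main.tf` in this directory shows the full configuration, including multiple profiles and custom subnets:
+
+```hcl
+module "eks" {
+  source = "../.."
+
+  # ... cluster configuration as shown in main.tf ...
+
+  fargate_profiles = {
+    default = {
+      name = "default"
+      selectors = [
+        {
+          namespace = "default"
+          labels = {
+            WorkerType = "fargate"
+          }
+        }
+      ]
+    }
+  }
+}
+```
+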
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.64 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks](#module\_eks) | ../.. | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+
diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf
new file mode 100644
index 0000000000..61fbb43671
--- /dev/null
+++ b/examples/fargate_profile/main.tf
@@ -0,0 +1,164 @@
+provider "aws" {
+ region = local.region
+}
+
+locals {
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+ cluster_version = "1.21"
+ region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+ source = "../.."
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ cluster_addons = {
+ # Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ enable_irsa = true
+
+  # A node group is required to schedule CoreDNS, which is critical for internal DNS to work correctly.
+  # If you want to run only Fargate, follow the `(Optional) Update CoreDNS` steps
+  # in https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
+ eks_managed_node_groups = {
+ example = {
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ labels = {
+ Example = "managed_node_groups"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+ tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ fargate_profiles = {
+ default = {
+ name = "default"
+ selectors = [
+ {
+ namespace = "backend"
+ labels = {
+ Application = "backend"
+ }
+ },
+ {
+ namespace = "default"
+ labels = {
+ WorkerType = "fargate"
+ }
+ }
+ ]
+
+ tags = {
+ Owner = "default"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+
+ secondary = {
+ name = "secondary"
+ selectors = [
+ {
+ namespace = "default"
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+ }
+ ]
+
+ # Using specific subnets instead of the subnets supplied for the cluster itself
+ subnet_ids = [module.vpc.private_subnets[1]]
+
+ tags = {
+ Owner = "secondary"
+ }
+ }
+ }
+
+ tags = local.tags
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 3.0"
+
+ name = local.name
+ cidr = "10.0.0.0/16"
+
+ azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+ public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ enable_dns_hostnames = true
+
+ enable_flow_log = true
+ create_flow_log_cloudwatch_iam_role = true
+ create_flow_log_cloudwatch_log_group = true
+
+ public_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/elb" = 1
+ }
+
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = local.tags
+}
+
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
+
+ tags = local.tags
+}
diff --git a/examples/fargate_profile/outputs.tf b/examples/fargate_profile/outputs.tf
new file mode 100644
index 0000000000..3e9620157b
--- /dev/null
+++ b/examples/fargate_profile/outputs.tf
@@ -0,0 +1,167 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+ description = "Endpoint for your Kubernetes API server"
+ value = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+ value = module.eks.cluster_primary_security_group_id
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+ description = "ID of the cluster security group"
+ value = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the node shared security group"
+ value = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+ description = "ID of the node shared security group"
+ value = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/fargate/variables.tf b/examples/fargate_profile/variables.tf
similarity index 100%
rename from examples/fargate/variables.tf
rename to examples/fargate_profile/variables.tf
diff --git a/modules/fargate/versions.tf b/examples/fargate_profile/versions.tf
similarity index 82%
rename from modules/fargate/versions.tf
rename to examples/fargate_profile/versions.tf
index 9480a77da8..bfce6ae345 100644
--- a/modules/fargate/versions.tf
+++ b/examples/fargate_profile/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56"
+ version = ">= 3.64"
}
}
}
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
deleted file mode 100644
index cc558693ae..0000000000
--- a/examples/instance_refresh/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# Instance refresh example
-
-This is EKS example using [instance refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for worker groups.
-
-See [the official documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [helm](#requirement\_helm) | >= 2.0 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [helm](#provider\_helm) | >= 2.0 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | 4.1.0 |
-| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0.0 |
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_autoscaling_lifecycle_hook.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_lifecycle_hook) | resource |
-| [aws_cloudwatch_event_rule.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
-| [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
-| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
-| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
-| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.aws_node_termination_handler_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-| [sqs\_queue\_asg\_notification\_arn](#output\_sqs\_queue\_asg\_notification\_arn) | SQS queue ASG notification ARN |
-| [sqs\_queue\_asg\_notification\_url](#output\_sqs\_queue\_asg\_notification\_url) | SQS queue ASG notification URL |
-
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
deleted file mode 100644
index f32964b1d7..0000000000
--- a/examples/instance_refresh/main.tf
+++ /dev/null
@@ -1,306 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "instance_refresh-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-# Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup
-
-provider "helm" {
- kubernetes {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
- }
-}
-
-data "aws_caller_identity" "current" {}
-
-data "aws_iam_policy_document" "aws_node_termination_handler" {
- statement {
- effect = "Allow"
- actions = [
- "ec2:DescribeInstances",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeTags",
- ]
- resources = [
- "*",
- ]
- }
- statement {
- effect = "Allow"
- actions = [
- "autoscaling:CompleteLifecycleAction",
- ]
- resources = module.eks.workers_asg_arns
- }
- statement {
- effect = "Allow"
- actions = [
- "sqs:DeleteMessage",
- "sqs:ReceiveMessage"
- ]
- resources = [
- module.aws_node_termination_handler_sqs.sqs_queue_arn
- ]
- }
-}
-
-resource "aws_iam_policy" "aws_node_termination_handler" {
- name = "${local.name}-aws-node-termination-handler"
- policy = data.aws_iam_policy_document.aws_node_termination_handler.json
-}
-
-data "aws_region" "current" {}
-
-data "aws_iam_policy_document" "aws_node_termination_handler_events" {
- statement {
- effect = "Allow"
- principals {
- type = "Service"
- identifiers = [
- "events.amazonaws.com",
- "sqs.amazonaws.com",
- ]
- }
- actions = [
- "sqs:SendMessage",
- ]
- resources = [
- "arn:aws:sqs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:${local.name}",
- ]
- }
-}
-
-module "aws_node_termination_handler_sqs" {
- source = "terraform-aws-modules/sqs/aws"
- version = "~> 3.0.0"
- name = local.name
- message_retention_seconds = 300
- policy = data.aws_iam_policy_document.aws_node_termination_handler_events.json
-}
-
-resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
- name = "${local.name}-asg-termination"
- description = "Node termination event rule"
- event_pattern = jsonencode(
- {
- "source" : [
- "aws.autoscaling"
- ],
- "detail-type" : [
- "EC2 Instance-terminate Lifecycle Action"
- ]
- "resources" : module.eks.workers_asg_arns
- }
- )
-}
-
-resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" {
- target_id = "${local.name}-asg-termination"
- rule = aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name
- arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
- name = "${local.name}-spot-termination"
- description = "Node termination event rule"
- event_pattern = jsonencode(
- {
- "source" : [
- "aws.ec2"
- ],
- "detail-type" : [
- "EC2 Spot Instance Interruption Warning"
- ]
- "resources" : module.eks.workers_asg_arns
- }
- )
-}
-
-resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
- target_id = "${local.name}-spot-termination"
- rule = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name
- arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-module "aws_node_termination_handler_role" {
- source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "4.1.0"
- create_role = true
- role_description = "IRSA role for ANTH, cluster ${local.name}"
- role_name_prefix = local.name
- provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
- role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn]
- oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"]
-}
-
-resource "helm_release" "aws_node_termination_handler" {
- depends_on = [
- module.eks
- ]
-
- name = "aws-node-termination-handler"
- namespace = "kube-system"
- repository = "https://aws.github.io/eks-charts"
- chart = "aws-node-termination-handler"
- version = "0.15.0"
- create_namespace = true
-
- set {
- name = "awsRegion"
- value = data.aws_region.current.name
- }
- set {
- name = "serviceAccount.name"
- value = "aws-node-termination-handler"
- }
- set {
- name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
- value = module.aws_node_termination_handler_role.iam_role_arn
- type = "string"
- }
- set {
- name = "enableSqsTerminationDraining"
- value = "true"
- }
- set {
- name = "enableSpotInterruptionDraining"
- value = "true"
- }
- set {
- name = "queueURL"
- value = module.aws_node_termination_handler_sqs.sqs_queue_id
- }
- set {
- name = "logLevel"
- value = "debug"
- }
-}
-
-# Creating the lifecycle-hook outside of the ASG resource's `initial_lifecycle_hook`
-# ensures that node termination does not require the lifecycle action to be completed,
-# and thus allows the ASG to be destroyed cleanly.
-resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
- count = length(module.eks.workers_asg_names)
- name = "aws-node-termination-handler"
- autoscaling_group_name = module.eks.workers_asg_names[count.index]
- lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
- heartbeat_timeout = 300
- default_result = "CONTINUE"
-}
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- enable_irsa = true
- worker_groups_launch_template = [
- {
- name = "refresh"
- asg_max_size = 2
- asg_desired_capacity = 2
- instance_refresh_enabled = true
- instance_refresh_instance_warmup = 60
- public_ip = true
- metadata_http_put_response_hop_limit = 3
- update_default_version = true
- instance_refresh_triggers = ["tag"]
- tags = [
- {
- key = "aws-node-termination-handler/managed"
- value = ""
- propagate_at_launch = true
- },
- {
- key = "foo"
- value = "buzz"
- propagate_at_launch = true
- }
- ]
- }
- ]
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
diff --git a/examples/instance_refresh/outputs.tf b/examples/instance_refresh/outputs.tf
deleted file mode 100644
index a3dd033616..0000000000
--- a/examples/instance_refresh/outputs.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
- value = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
-output "sqs_queue_asg_notification_arn" {
- description = "SQS queue ASG notification ARN"
- value = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-output "sqs_queue_asg_notification_url" {
- description = "SQS queue ASG notification URL"
- value = module.aws_node_termination_handler_sqs.sqs_queue_id
-}
diff --git a/examples/instance_refresh/versions.tf b/examples/instance_refresh/versions.tf
deleted file mode 100644
index 67c2d66a82..0000000000
--- a/examples/instance_refresh/versions.tf
+++ /dev/null
@@ -1,26 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = {
- source = "hashicorp/aws"
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- helm = {
- source = "hashicorp/helm"
- version = ">= 2.0"
- }
- }
-}
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
deleted file mode 100644
index 137f3d63d8..0000000000
--- a/examples/irsa/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# IAM Roles for Service Accounts
-
-This example shows how to create an IAM role to be used for a Kubernetes `ServiceAccount`. It will create a policy and role to be used by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) using the [public Helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler).
-
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [helm](#requirement\_helm) | >= 2.0 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [helm](#provider\_helm) | >= 2.0 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [iam\_assumable\_role\_admin](#module\_iam\_assumable\_role\_admin) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [helm_release.cluster-autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [aws\_account\_id](#output\_aws\_account\_id) | IAM AWS account id |
-
diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf
deleted file mode 100644
index a36d0e3394..0000000000
--- a/examples/irsa/irsa.tf
+++ /dev/null
@@ -1,114 +0,0 @@
-data "aws_caller_identity" "current" {}
-
-data "aws_region" "current" {}
-
-locals {
- k8s_service_account_namespace = "kube-system"
- k8s_service_account_name = "cluster-autoscaler-aws"
-}
-
-provider "helm" {
- kubernetes {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
- }
-}
-
-resource "helm_release" "cluster-autoscaler" {
- depends_on = [
- module.eks
- ]
-
- name = "cluster-autoscaler"
- namespace = local.k8s_service_account_namespace
- repository = "https://kubernetes.github.io/autoscaler"
- chart = "cluster-autoscaler"
- version = "9.10.7"
- create_namespace = false
-
- set {
- name = "awsRegion"
- value = data.aws_region.current.name
- }
- set {
- name = "rbac.serviceAccount.name"
- value = local.k8s_service_account_name
- }
- set {
- name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
- value = module.iam_assumable_role_admin.iam_role_arn
- type = "string"
- }
- set {
- name = "autoDiscovery.clusterName"
- value = local.name
- }
- set {
- name = "autoDiscovery.enabled"
- value = "true"
- }
- set {
- name = "rbac.create"
- value = "true"
- }
-}
-
-module "iam_assumable_role_admin" {
- source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "~> 4.0"
-
- create_role = true
- role_name = "cluster-autoscaler"
- provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
- role_policy_arns = [aws_iam_policy.cluster_autoscaler.arn]
- oidc_fully_qualified_subjects = ["system:serviceaccount:${local.k8s_service_account_namespace}:${local.k8s_service_account_name}"]
-}
-
-resource "aws_iam_policy" "cluster_autoscaler" {
- name_prefix = "cluster-autoscaler"
- description = "EKS cluster-autoscaler policy for cluster ${module.eks.cluster_id}"
- policy = data.aws_iam_policy_document.cluster_autoscaler.json
-}
-
-data "aws_iam_policy_document" "cluster_autoscaler" {
- statement {
- sid = "clusterAutoscalerAll"
- effect = "Allow"
-
- actions = [
- "autoscaling:DescribeAutoScalingGroups",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeLaunchConfigurations",
- "autoscaling:DescribeTags",
- "ec2:DescribeLaunchTemplateVersions",
- ]
-
- resources = ["*"]
- }
-
- statement {
- sid = "clusterAutoscalerOwn"
- effect = "Allow"
-
- actions = [
- "autoscaling:SetDesiredCapacity",
- "autoscaling:TerminateInstanceInAutoScalingGroup",
- "autoscaling:UpdateAutoScalingGroup",
- ]
-
- resources = ["*"]
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}"
- values = ["owned"]
- }
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
- values = ["true"]
- }
- }
-}
diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf
deleted file mode 100644
index ebe2eeffea..0000000000
--- a/examples/irsa/main.tf
+++ /dev/null
@@ -1,115 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "irsa-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- enable_irsa = true
-
- worker_groups = [
- {
- name = "worker-group-1"
- instance_type = "t3.medium"
- asg_desired_capacity = 1
- asg_max_size = 4
- tags = [
- {
- "key" = "k8s.io/cluster-autoscaler/enabled"
- "propagate_at_launch" = "false"
- "value" = "true"
- },
- {
- "key" = "k8s.io/cluster-autoscaler/${local.name}"
- "propagate_at_launch" = "false"
- "value" = "owned"
- }
- ]
- }
- ]
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
diff --git a/examples/irsa/outputs.tf b/examples/irsa/outputs.tf
deleted file mode 100644
index 796e8ee3d4..0000000000
--- a/examples/irsa/outputs.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-output "aws_account_id" {
- description = "IAM AWS account id"
- value = data.aws_caller_identity.current.account_id
-}
diff --git a/examples/irsa/versions.tf b/examples/irsa/versions.tf
deleted file mode 100644
index 67c2d66a82..0000000000
--- a/examples/irsa/versions.tf
+++ /dev/null
@@ -1,26 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = {
- source = "hashicorp/aws"
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- helm = {
- source = "hashicorp/helm"
- version = ">= 2.0"
- }
- }
-}
diff --git a/examples/irsa_autoscale_refresh/README.md b/examples/irsa_autoscale_refresh/README.md
new file mode 100644
index 0000000000..a21f31f6d3
--- /dev/null
+++ b/examples/irsa_autoscale_refresh/README.md
@@ -0,0 +1,102 @@
+# IRSA, Cluster Autoscaler, and Instance Refresh example
+
+Configuration in this directory creates an AWS EKS cluster with:
+- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) enabled
+- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) provisioned via a Helm Chart manifest
+- [Instance Refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for self managed node groups
+- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler) provisioned via a Helm Chart manifest
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
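+
+Once `terraform apply` completes, you can, for example, update your local kubeconfig and verify that the Cluster Autoscaler and Node Termination Handler pods are running. The region and cluster name below assume the defaults generated in `main.tf`:
+
+```bash
+$ aws eks update-kubeconfig --region eu-west-1 --name ex-irsa-autoscale-refresh
+$ kubectl get pods -n kube-system
+```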
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [helm](#requirement\_helm) | >= 2.0 |
+| [null](#requirement\_null) | >= 3.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.64 |
+| [helm](#provider\_helm) | >= 2.0 |
+| [null](#provider\_null) | >= 3.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
+| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0 |
+| [eks](#module\_eks) | ../.. | n/a |
+| [iam\_assumable\_role\_cluster\_autoscaler](#module\_iam\_assumable\_role\_cluster\_autoscaler) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_autoscaling_lifecycle_hook.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_lifecycle_hook) | resource |
+| [aws_cloudwatch_event_rule.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
+| [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
+| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
+| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
+| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.aws_node_termination_handler_sqs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+
diff --git a/examples/irsa_autoscale_refresh/charts.tf b/examples/irsa_autoscale_refresh/charts.tf
new file mode 100644
index 0000000000..1ef1195b9e
--- /dev/null
+++ b/examples/irsa_autoscale_refresh/charts.tf
@@ -0,0 +1,294 @@
+provider "helm" {
+ kubernetes {
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+ token = data.aws_eks_cluster_auth.cluster.token
+ }
+}
+
+################################################################################
+# Cluster Autoscaler
+# Based on the official docs at
+# https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler
+################################################################################
+
+resource "helm_release" "cluster_autoscaler" {
+ name = "cluster-autoscaler"
+ namespace = "kube-system"
+ repository = "https://kubernetes.github.io/autoscaler"
+ chart = "cluster-autoscaler"
+ version = "9.10.8"
+ create_namespace = false
+
+ set {
+ name = "awsRegion"
+ value = local.region
+ }
+
+ set {
+ name = "rbac.serviceAccount.name"
+ value = "cluster-autoscaler-aws"
+ }
+
+ set {
+ name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
+ value = module.iam_assumable_role_cluster_autoscaler.iam_role_arn
+ type = "string"
+ }
+
+ set {
+ name = "autoDiscovery.clusterName"
+ value = local.name
+ }
+
+ set {
+ name = "autoDiscovery.enabled"
+ value = "true"
+ }
+
+ set {
+ name = "rbac.create"
+ value = "true"
+ }
+
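+  # Install only after the cluster is up and the aws-auth ConfigMap patch
+  # (null_resource.apply) has run, so self managed nodes can join and
+  # provide capacity for the chart's pods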
+ depends_on = [
+ module.eks.cluster_id,
+ null_resource.apply,
+ ]
+}
+
+module "iam_assumable_role_cluster_autoscaler" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
+ version = "~> 4.0"
+
+ create_role = true
+ role_name_prefix = "cluster-autoscaler"
+ role_description = "IRSA role for cluster autoscaler"
+
+ provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
+ role_policy_arns = [aws_iam_policy.cluster_autoscaler.arn]
+ oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:cluster-autoscaler-aws"]
+ oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
+
+ tags = local.tags
+}
+
+resource "aws_iam_policy" "cluster_autoscaler" {
+ name = "KarpenterControllerPolicy-refresh"
+ policy = data.aws_iam_policy_document.cluster_autoscaler.json
+
+ tags = local.tags
+}
+
+data "aws_iam_policy_document" "cluster_autoscaler" {
+ statement {
+ sid = "clusterAutoscalerAll"
+ actions = [
+ "autoscaling:DescribeAutoScalingGroups",
+ "autoscaling:DescribeAutoScalingInstances",
+ "autoscaling:DescribeLaunchConfigurations",
+ "autoscaling:DescribeTags",
+ "ec2:DescribeLaunchTemplateVersions",
+ ]
+ resources = ["*"]
+ }
+
+ statement {
+ sid = "clusterAutoscalerOwn"
+ actions = [
+ "autoscaling:SetDesiredCapacity",
+ "autoscaling:TerminateInstanceInAutoScalingGroup",
+ "autoscaling:UpdateAutoScalingGroup",
+ ]
+ resources = ["*"]
+
+ condition {
+ test = "StringEquals"
+ variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}"
+ values = ["owned"]
+ }
+
+ condition {
+ test = "StringEquals"
+ variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
+ values = ["true"]
+ }
+ }
+}
+
+################################################################################
+# Node Termination Handler
+# Based on the official docs at
+# https://github.com/aws/aws-node-termination-handler
+################################################################################
+
+resource "helm_release" "aws_node_termination_handler" {
+ name = "aws-node-termination-handler"
+ namespace = "kube-system"
+ repository = "https://aws.github.io/eks-charts"
+ chart = "aws-node-termination-handler"
+ version = "0.16.0"
+ create_namespace = false
+
+ set {
+ name = "awsRegion"
+ value = local.region
+ }
+
+ set {
+ name = "serviceAccount.name"
+ value = "aws-node-termination-handler"
+ }
+
+ set {
+ name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
+ value = module.aws_node_termination_handler_role.iam_role_arn
+ type = "string"
+ }
+
+ set {
+ name = "enableSqsTerminationDraining"
+ value = "true"
+ }
+
+ set {
+ name = "enableSpotInterruptionDraining"
+ value = "true"
+ }
+
+ set {
+ name = "queueURL"
+ value = module.aws_node_termination_handler_sqs.sqs_queue_id
+ }
+
+ set {
+ name = "logLevel"
+ value = "debug"
+ }
+
+ depends_on = [
+ module.eks.cluster_id,
+ null_resource.apply,
+ ]
+}
+
+module "aws_node_termination_handler_role" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
+ version = "~> 4.0"
+
+ create_role = true
+ role_name_prefix = "node-termination-handler"
+ role_description = "IRSA role for node termination handler"
+
+ provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
+ role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn]
+ oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"]
+ oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
+
+ tags = local.tags
+}
+
+resource "aws_iam_policy" "aws_node_termination_handler" {
+ name = "${local.name}-aws-node-termination-handler"
+ policy = data.aws_iam_policy_document.aws_node_termination_handler.json
+
+ tags = local.tags
+}
+
+data "aws_iam_policy_document" "aws_node_termination_handler" {
+ statement {
+ actions = [
+ "ec2:DescribeInstances",
+ "autoscaling:DescribeAutoScalingInstances",
+ "autoscaling:DescribeTags",
+ ]
+ resources = ["*"]
+ }
+
+ statement {
+ actions = ["autoscaling:CompleteLifecycleAction"]
+ resources = [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
+ }
+
+ statement {
+ actions = [
+ "sqs:DeleteMessage",
+ "sqs:ReceiveMessage"
+ ]
+ resources = [module.aws_node_termination_handler_sqs.sqs_queue_arn]
+ }
+}
+
+module "aws_node_termination_handler_sqs" {
+ source = "terraform-aws-modules/sqs/aws"
+ version = "~> 3.0"
+
+ name = local.name
+ message_retention_seconds = 300
+ policy = data.aws_iam_policy_document.aws_node_termination_handler_sqs.json
+
+ tags = local.tags
+}
+
+data "aws_iam_policy_document" "aws_node_termination_handler_sqs" {
+ statement {
+ actions = ["sqs:SendMessage"]
+ resources = ["arn:aws:sqs:${local.region}:${data.aws_caller_identity.current.account_id}:${local.name}"]
+
+ principals {
+ type = "Service"
+ identifiers = [
+ "events.amazonaws.com",
+ "sqs.amazonaws.com",
+ ]
+ }
+ }
+}
+
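+# Forward ASG lifecycle termination events (and Spot interruption warnings below)
+# to the SQS queue consumed by the termination handler in queue-processor mode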
+resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
+ name = "${local.name}-asg-termination"
+ description = "Node termination event rule"
+
+ event_pattern = jsonencode({
+ "source" : ["aws.autoscaling"],
+ "detail-type" : ["EC2 Instance-terminate Lifecycle Action"]
+ "resources" : [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
+ })
+
+ tags = local.tags
+}
+
+resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" {
+ target_id = "${local.name}-asg-termination"
+ rule = aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name
+ arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
+}
+
+resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
+ name = "${local.name}-spot-termination"
+ description = "Node termination event rule"
+ event_pattern = jsonencode({
+ "source" : ["aws.ec2"],
+ "detail-type" : ["EC2 Spot Instance Interruption Warning"]
+ "resources" : [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
+ })
+}
+
+resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
+ target_id = "${local.name}-spot-termination"
+ rule = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name
+ arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
+}
+
+# Creating the lifecycle-hook outside of the ASG resource's `initial_lifecycle_hook`
+# ensures that node termination does not require the lifecycle action to be completed,
+# and thus allows the ASG to be destroyed cleanly.
+resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
+ for_each = module.eks.self_managed_node_groups
+
+ name = "aws-node-termination-handler-${each.value.autoscaling_group_name}"
+ autoscaling_group_name = each.value.autoscaling_group_name
+ lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
+ heartbeat_timeout = 300
+ default_result = "CONTINUE"
+}
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
new file mode 100644
index 0000000000..9e74e3d9fe
--- /dev/null
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -0,0 +1,203 @@
+provider "aws" {
+ region = local.region
+}
+
+locals {
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+ cluster_version = "1.21"
+ region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+
+data "aws_caller_identity" "current" {}
+
+data "aws_eks_cluster_auth" "cluster" {
+ name = module.eks.cluster_id
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+ source = "../.."
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ enable_irsa = true
+
+ # Self Managed Node Group(s)
+ self_managed_node_groups = {
+ refresh = {
+ max_size = 5
+ desired_size = 1
+
+ instance_type = "m5.large"
+
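+      # Roll nodes automatically when the launch template (or the tags listed in
+      # `triggers`) changes, pausing at the 35/70/100% checkpoints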
+ instance_refresh = {
+ strategy = "Rolling"
+ preferences = {
+ checkpoint_delay = 600
+ checkpoint_percentages = [35, 70, 100]
+ instance_warmup = 300
+ min_healthy_percentage = 50
+ }
+ triggers = ["tag"]
+ }
+
+ propogate_tags = [{
+ key = "aws-node-termination-handler/managed"
+ value = true
+ propagate_at_launch = true
+ }]
+ }
+
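+    # Mixed instances policy: 10% On-Demand above a base of 0, with the remaining
+    # capacity as capacity-optimized Spot spread across the listed instance types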
+ mixed_instance = {
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ on_demand_base_capacity = 0
+ on_demand_percentage_above_base_capacity = 10
+ spot_allocation_strategy = "capacity-optimized"
+ }
+
+ override = [
+ {
+ instance_type = "m5.large"
+ weighted_capacity = "1"
+ },
+ {
+ instance_type = "m6i.large"
+ weighted_capacity = "2"
+ },
+ ]
+ }
+
+ propogate_tags = [{
+ key = "aws-node-termination-handler/managed"
+ value = true
+ propagate_at_launch = true
+ }]
+ }
+
+ spot = {
+ instance_type = "m5.large"
+ instance_market_options = {
+ market_type = "spot"
+ }
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ propogate_tags = [{
+ key = "aws-node-termination-handler/managed"
+ value = true
+ propagate_at_launch = true
+ }]
+ }
+ }
+
+ tags = merge(local.tags, { Foo = "bar" })
+}
+
+################################################################################
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
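+# Minimal kubeconfig rendered in-memory and passed (base64 encoded) to the
+# local-exec kubectl commands below via the KUBECONFIG environment variable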
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = module.eks.cluster_id
+ cluster = {
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = module.eks.cluster_id
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = data.aws_eks_cluster_auth.this.token
+ }
+ }]
+ })
+}
+
+resource "null_resource" "apply" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = <<-EOT
+ kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ EOT
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 3.0"
+
+ name = local.name
+ cidr = "10.0.0.0/16"
+
+ azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+ public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ enable_dns_hostnames = true
+
+ enable_flow_log = true
+ create_flow_log_cloudwatch_iam_role = true
+ create_flow_log_cloudwatch_log_group = true
+
+ public_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/elb" = 1
+ }
+
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = local.tags
+}
diff --git a/examples/irsa_autoscale_refresh/outputs.tf b/examples/irsa_autoscale_refresh/outputs.tf
new file mode 100644
index 0000000000..3e9620157b
--- /dev/null
+++ b/examples/irsa_autoscale_refresh/outputs.tf
@@ -0,0 +1,167 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+ description = "Endpoint for your Kubernetes API server"
+ value = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+ value = module.eks.cluster_primary_security_group_id
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+ description = "ID of the cluster security group"
+ value = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the node shared security group"
+ value = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+ description = "ID of the node shared security group"
+ value = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/instance_refresh/variables.tf b/examples/irsa_autoscale_refresh/variables.tf
similarity index 100%
rename from examples/instance_refresh/variables.tf
rename to examples/irsa_autoscale_refresh/variables.tf
diff --git a/examples/irsa_autoscale_refresh/versions.tf b/examples/irsa_autoscale_refresh/versions.tf
new file mode 100644
index 0000000000..4706dec92a
--- /dev/null
+++ b/examples/irsa_autoscale_refresh/versions.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.64"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/examples/launch_templates/README.md b/examples/launch_templates/README.md
deleted file mode 100644
index dbd5073f89..0000000000
--- a/examples/launch_templates/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Launch templates example
-
-This is EKS example using workers launch template with worker groups feature.
-
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-
diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf
deleted file mode 100644
index 476ca13d68..0000000000
--- a/examples/launch_templates/main.tf
+++ /dev/null
@@ -1,134 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "launch_template-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
- cluster_name = local.name
- cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- worker_groups_launch_template = [
- {
- name = "worker-group-1"
- instance_type = "t3.small"
- asg_desired_capacity = 2
- public_ip = true
- tags = [{
- key = "ExtraTag"
- value = "TagValue"
- propagate_at_launch = true
- }]
- },
- {
- name = "worker-group-2"
- instance_type = "t3.medium"
- asg_desired_capacity = 1
- public_ip = true
- ebs_optimized = true
- },
- {
- name = "worker-group-3"
- instance_type = "t2.large"
- asg_desired_capacity = 1
- public_ip = true
- elastic_inference_accelerator = "eia2.medium"
- },
- {
- name = "worker-group-4"
- instance_type = "t3.small"
- asg_desired_capacity = 1
- public_ip = true
- root_volume_size = 150
- root_volume_type = "gp3"
- root_volume_throughput = 300
- additional_ebs_volumes = [
- {
- block_device_name = "/dev/xvdb"
- volume_size = 100
- volume_type = "gp3"
- throughput = 150
- },
- ]
- },
- ]
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
diff --git a/examples/launch_templates/outputs.tf b/examples/launch_templates/outputs.tf
deleted file mode 100644
index b778ec7926..0000000000
--- a/examples/launch_templates/outputs.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
- value = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
diff --git a/examples/launch_templates/pre_userdata.sh b/examples/launch_templates/pre_userdata.sh
deleted file mode 100644
index 4cbf0d114b..0000000000
--- a/examples/launch_templates/pre_userdata.sh
+++ /dev/null
@@ -1 +0,0 @@
-yum update -y
diff --git a/examples/launch_templates/versions.tf b/examples/launch_templates/versions.tf
deleted file mode 100644
index 8e2b837984..0000000000
--- a/examples/launch_templates/versions.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = {
- source = "hashicorp/aws"
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- }
-}
diff --git a/examples/launch_templates_with_managed_node_groups/README.md b/examples/launch_templates_with_managed_node_groups/README.md
deleted file mode 100644
index 3cae549325..0000000000
--- a/examples/launch_templates_with_managed_node_groups/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Launch template with managed groups example
-
-This is EKS example using workers custom launch template with managed groups feature in two different ways:
-
-- Using a defined existing launch template created outside module
- Using a launch template which will be created by the module with user customization
-
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_iam_service_linked_role.autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource |
-| [aws_launch_template.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-
diff --git a/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf b/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf
deleted file mode 100644
index 3f834ad100..0000000000
--- a/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf
+++ /dev/null
@@ -1,78 +0,0 @@
-# if you have used ASGs before, that role got auto-created already and you need to import to TF state
-resource "aws_iam_service_linked_role" "autoscaling" {
- aws_service_name = "autoscaling.amazonaws.com"
- description = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling"
- custom_suffix = "lt_with_managed_node_groups" # the full name is "AWSServiceRoleForAutoScaling_lt_with_managed_node_groups" < 64 characters
-}
-
-#data "aws_caller_identity" "current" {}
-#
-## This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
-#data "aws_iam_policy_document" "ebs_decryption" {
-# # Copy of default KMS policy that lets you manage it
-# statement {
-# sid = "Enable IAM User Permissions"
-# effect = "Allow"
-#
-# principals {
-# type = "AWS"
-# identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
-# }
-#
-# actions = [
-# "kms:*"
-# ]
-#
-# resources = ["*"]
-# }
-#
-# # Required for EKS
-# statement {
-# sid = "Allow service-linked role use of the CMK"
-# effect = "Allow"
-#
-# principals {
-# type = "AWS"
-# identifiers = [
-# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
-# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
-# ]
-# }
-#
-# actions = [
-# "kms:Encrypt",
-# "kms:Decrypt",
-# "kms:ReEncrypt*",
-# "kms:GenerateDataKey*",
-# "kms:DescribeKey"
-# ]
-#
-# resources = ["*"]
-# }
-#
-# statement {
-# sid = "Allow attachment of persistent resources"
-# effect = "Allow"
-#
-# principals {
-# type = "AWS"
-# identifiers = [
-# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
-# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
-# ]
-# }
-#
-# actions = [
-# "kms:CreateGrant"
-# ]
-#
-# resources = ["*"]
-#
-# condition {
-# test = "Bool"
-# variable = "kms:GrantIsForAWSResource"
-# values = ["true"]
-# }
-#
-# }
-#}
diff --git a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf b/examples/launch_templates_with_managed_node_groups/launchtemplate.tf
deleted file mode 100644
index 0f0e4ebf31..0000000000
--- a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf
+++ /dev/null
@@ -1,98 +0,0 @@
-#data "template_file" "launch_template_userdata" {
-# template = file("${path.module}/templates/userdata.sh.tpl")
-#
-# vars = {
-# cluster_name = local.name
-# endpoint = module.eks.cluster_endpoint
-# cluster_auth_base64 = module.eks.cluster_certificate_authority_data
-#
-# bootstrap_extra_args = ""
-# kubelet_extra_args = ""
-# }
-#}
-
-# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
-# There are several more options one could set, but you probably don't need to modify them;
-# you can take the default and add your custom AMI and/or custom tags.
-#
-# Trivia: AWS transparently creates a copy of your LaunchTemplate and then actually uses that copy for the node group. If you do NOT use a custom AMI,
-# the default user-data for bootstrapping a cluster is merged into the copy.
-
-resource "aws_launch_template" "default" {
- name_prefix = "eks-example-"
- description = "Default Launch-Template"
- update_default_version = true
-
- block_device_mappings {
- device_name = "/dev/xvda"
-
- ebs {
- volume_size = 100
- volume_type = "gp2"
- delete_on_termination = true
- # encrypted = true
-
-      # Enable this if you want to encrypt your node root volumes with a KMS/CMK. Encryption of PVCs is handled via the k8s StorageClass though.
-      # You also need to attach data.aws_iam_policy_document.ebs_decryption.json from disk_encryption_policy.tf to the KMS/CMK key in that case!
- # kms_key_id = var.kms_key_arn
- }
- }
-
- monitoring {
- enabled = true
- }
-
- network_interfaces {
- associate_public_ip_address = false
- delete_on_termination = true
- security_groups = [module.eks.worker_security_group_id]
- }
-
- # if you want to use a custom AMI
- # image_id = var.ami_id
-
-  # If you use a custom AMI, you need to supply the bootstrap script via user-data, as EKS does NOT merge its managed user-data in that case.
-  # You can add more than the minimum code you see in the template, e.g. install the SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
- #
- # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
-
- # user_data = base64encode(
- # data.template_file.launch_template_userdata.rendered,
- # )
-
- # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "instance"
-
- tags = {
- CustomTag = "Instance custom tag"
- }
- }
-
-  # Supplying custom tags to EKS instances' root volumes is another use-case for LaunchTemplates. (does not add tags to volumes dynamically provisioned via PVCs)
- tag_specifications {
- resource_type = "volume"
-
- tags = {
- CustomTag = "Volume custom tag"
- }
- }
-
- # Supplying custom tags to EKS instances ENI's is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "network-interface"
-
- tags = {
- CustomTag = "EKS example"
- }
- }
-
- # Tag the LT itself
- tags = {
- CustomTag = "Launch template custom tag"
- }
-
- lifecycle {
- create_before_destroy = true
- }
-}
diff --git a/examples/launch_templates_with_managed_node_groups/main.tf b/examples/launch_templates_with_managed_node_groups/main.tf
deleted file mode 100644
index 4392c606fa..0000000000
--- a/examples/launch_templates_with_managed_node_groups/main.tf
+++ /dev/null
@@ -1,149 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "lt_with_mng-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- node_groups = {
-    # use an already defined launch template
- example1 = {
- name_prefix = "example1"
- desired_capacity = 1
- max_capacity = 15
- min_capacity = 1
-
- launch_template_id = aws_launch_template.default.id
- launch_template_version = aws_launch_template.default.default_version
-
- instance_types = ["t3.small"]
-
- additional_tags = {
- ExtraTag = "example1"
- }
- }
- # create launch template
- example2 = {
- create_launch_template = true
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
-
- disk_size = 50
- disk_type = "gp3"
- disk_throughput = 150
- disk_iops = 3000
-
- instance_types = ["t3.large"]
- capacity_type = "SPOT"
-
- bootstrap_env = {
- CONTAINER_RUNTIME = "containerd"
- USE_MAX_PODS = false
- }
- kubelet_extra_args = "--max-pods=110"
- k8s_labels = {
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- additional_tags = {
- ExtraTag = "example2"
- }
- taints = [
- {
- key = "dedicated"
- value = "gpuGroup"
- effect = "NO_SCHEDULE"
- }
- ]
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- }
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
diff --git a/examples/launch_templates_with_managed_node_groups/outputs.tf b/examples/launch_templates_with_managed_node_groups/outputs.tf
deleted file mode 100644
index 359db3a481..0000000000
--- a/examples/launch_templates_with_managed_node_groups/outputs.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
- value = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
diff --git a/examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl b/examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl
deleted file mode 100644
index 41eeb0ba03..0000000000
--- a/examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,12 +0,0 @@
-MIME-Version: 1.0
-Content-Type: multipart/mixed; boundary="//"
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-#!/bin/bash
-set -e
-
-# Bootstrap and join the cluster
-/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
-
---//--
diff --git a/examples/launch_templates_with_managed_node_groups/variables.tf b/examples/launch_templates_with_managed_node_groups/variables.tf
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/examples/launch_templates_with_managed_node_groups/versions.tf b/examples/launch_templates_with_managed_node_groups/versions.tf
deleted file mode 100644
index 8e2b837984..0000000000
--- a/examples/launch_templates_with_managed_node_groups/versions.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = {
- source = "hashicorp/aws"
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- }
-}
diff --git a/examples/managed_node_groups/README.md b/examples/managed_node_groups/README.md
deleted file mode 100644
index 7121431392..0000000000
--- a/examples/managed_node_groups/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Managed groups example
-
-This is an EKS example using the managed node groups feature in two different ways:
-
-- Using SPOT instances in a node group
-- Using ON_DEMAND instances in a node group
-
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `["777777777777", "888888888888"]` | no |
-| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. | `list(object({ rolearn = string, username = string, groups = list(string) }))` | `[{ rolearn = "arn:aws:iam::66666666666:role/role1", username = "role1", groups = ["system:masters"] }]` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-| [node\_groups](#output\_node\_groups) | Outputs from node groups |
-
diff --git a/examples/managed_node_groups/main.tf b/examples/managed_node_groups/main.tf
deleted file mode 100644
index 56a2b05346..0000000000
--- a/examples/managed_node_groups/main.tf
+++ /dev/null
@@ -1,148 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "managed_node_groups-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- node_groups_defaults = {
- ami_type = "AL2_x86_64"
- disk_size = 50
- }
-
- node_groups = {
- example = {
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
-
- instance_types = ["t3.large"]
- capacity_type = "SPOT"
- k8s_labels = {
- Example = "managed_node_groups"
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- additional_tags = {
- ExtraTag = "example"
- }
- taints = [
- {
- key = "dedicated"
- value = "gpuGroup"
- effect = "NO_SCHEDULE"
- }
- ]
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- }
- example2 = {
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
-
- instance_types = ["t3.medium"]
- k8s_labels = {
- Example = "managed_node_groups"
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- additional_tags = {
- ExtraTag = "example2"
- }
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- }
- }
-
- map_roles = var.map_roles
- map_users = var.map_users
- map_accounts = var.map_accounts
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
diff --git a/examples/managed_node_groups/outputs.tf b/examples/managed_node_groups/outputs.tf
deleted file mode 100644
index 10a3a96604..0000000000
--- a/examples/managed_node_groups/outputs.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
- value = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
-output "node_groups" {
- description = "Outputs from node groups"
- value = module.eks.node_groups
-}
diff --git a/examples/managed_node_groups/variables.tf b/examples/managed_node_groups/variables.tf
deleted file mode 100644
index 57853d8b4d..0000000000
--- a/examples/managed_node_groups/variables.tf
+++ /dev/null
@@ -1,48 +0,0 @@
-variable "map_accounts" {
- description = "Additional AWS account numbers to add to the aws-auth configmap."
- type = list(string)
-
- default = [
- "777777777777",
- "888888888888",
- ]
-}
-
-variable "map_roles" {
- description = "Additional IAM roles to add to the aws-auth configmap."
- type = list(object({
- rolearn = string
- username = string
- groups = list(string)
- }))
-
- default = [
- {
- rolearn = "arn:aws:iam::66666666666:role/role1"
- username = "role1"
- groups = ["system:masters"]
- },
- ]
-}
-
-variable "map_users" {
- description = "Additional IAM users to add to the aws-auth configmap."
- type = list(object({
- userarn = string
- username = string
- groups = list(string)
- }))
-
- default = [
- {
- userarn = "arn:aws:iam::66666666666:user/user1"
- username = "user1"
- groups = ["system:masters"]
- },
- {
- userarn = "arn:aws:iam::66666666666:user/user2"
- username = "user2"
- groups = ["system:masters"]
- },
- ]
-}
diff --git a/examples/managed_node_groups/versions.tf b/examples/managed_node_groups/versions.tf
deleted file mode 100644
index 8e2b837984..0000000000
--- a/examples/managed_node_groups/versions.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = {
- source = "hashicorp/aws"
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- }
-}
diff --git a/examples/secrets_encryption/README.md b/examples/secrets_encryption/README.md
deleted file mode 100644
index f5f38b0498..0000000000
--- a/examples/secrets_encryption/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Secrets encryption example
-
-This is an EKS example using the [secrets encryption](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) feature.
-
-See [the official blog](https://aws.amazon.com/blogs/containers/using-eks-encryption-provider-support-for-defense-in-depth/) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-
diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf
deleted file mode 100644
index 49d9a7b029..0000000000
--- a/examples/secrets_encryption/main.tf
+++ /dev/null
@@ -1,126 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "secrets_encryption-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
-
- cluster_encryption_config = [
- {
- provider_key_arn = aws_kms_key.eks.arn
- resources = ["secrets"]
- }
- ]
-
- worker_groups = [
- {
- name = "worker-group-1"
- instance_type = "t3.small"
- additional_userdata = "echo foo bar"
- asg_desired_capacity = 2
- },
- ]
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# KMS for encrypting secrets
-################################################################################
-
-resource "aws_kms_key" "eks" {
- description = "EKS Secret Encryption Key"
- deletion_window_in_days = 7
- enable_key_rotation = true
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
diff --git a/examples/secrets_encryption/outputs.tf b/examples/secrets_encryption/outputs.tf
deleted file mode 100644
index 359db3a481..0000000000
--- a/examples/secrets_encryption/outputs.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
- value = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
diff --git a/examples/secrets_encryption/variables.tf b/examples/secrets_encryption/variables.tf
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/examples/secrets_encryption/versions.tf b/examples/secrets_encryption/versions.tf
deleted file mode 100644
index 8e2b837984..0000000000
--- a/examples/secrets_encryption/versions.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = {
- source = "hashicorp/aws"
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- }
-}
diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md
new file mode 100644
index 0000000000..ff13d8148a
--- /dev/null
+++ b/examples/self_managed_node_group/README.md
@@ -0,0 +1,95 @@
+# Self Managed Node Groups Example
+
+Configuration in this directory creates an AWS EKS cluster with various Self Managed Node Groups (AutoScaling Groups) demonstrating the various methods of configuring/customizing:
+
+- A default, "out of the box" self managed node group as supplied by the `self-managed-node-group` sub-module
+- A Bottlerocket self managed node group that demonstrates many of the configurations/customizations offered by the `self-managed-node-group` sub-module for the Bottlerocket OS
+- A self managed node group that demonstrates nearly all of the configurations/customizations offered by the `self-managed-node-group` sub-module
+
+See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) on self managed nodes for further details.
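+
+Self managed node groups are declared through the `self_managed_node_groups` map input on the root module, with shared settings supplied via `self_managed_node_group_defaults`. A minimal sketch of the shape of this input (all values below are illustrative placeholders only; see `main.tf` in this directory for the full configuration actually used by this example):
+
+```hcl
+module "eks" {
+  # registry source shown for illustration; this example itself uses the relative path `../..`
+  source = "terraform-aws-modules/eks/aws"
+
+  cluster_name    = "example"
+  cluster_version = "1.21"
+  vpc_id          = "vpc-1234556abcdef"                    # placeholder
+  subnet_ids      = ["subnet-abcde012", "subnet-bcde012a"] # placeholders
+
+  # defaults shared by all self managed node groups below
+  self_managed_node_group_defaults = {
+    disk_size = 50
+  }
+
+  self_managed_node_groups = {
+    # provisioned entirely from module defaults
+    default_node_group = {}
+
+    # customized group
+    custom = {
+      name          = "custom-self-mng"
+      instance_type = "m5.large"
+      min_size      = 1
+      max_size      = 3
+      desired_size  = 2
+    }
+  }
+}
+```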
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [null](#requirement\_null) | >= 3.0 |
+| [tls](#requirement\_tls) | >= 2.2 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.64 |
+| [null](#provider\_null) | >= 3.0 |
+| [tls](#provider\_tls) | >= 2.2 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks](#module\_eks) | ../.. | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
+| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
+| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
new file mode 100644
index 0000000000..73b502520c
--- /dev/null
+++ b/examples/self_managed_node_group/main.tf
@@ -0,0 +1,392 @@
+provider "aws" {
+ region = local.region
+}
+
+locals {
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+ cluster_version = "1.21"
+ region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+
+data "aws_caller_identity" "current" {}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+ source = "../.."
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
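+  # Encrypt Kubernetes secrets with the customer managed KMS key defined below (aws_kms_key.eks)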
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ enable_irsa = true
+
+ self_managed_node_group_defaults = {
+ disk_size = 50
+ }
+
+ self_managed_node_groups = {
+ # Default node group - as provisioned by the module defaults
+ default_node_group = {}
+
+ # Bottlerocket node group
+ bottlerocket = {
+ name = "bottlerocket-self-mng"
+
+ platform = "bottlerocket"
+ ami_id = data.aws_ami.bottlerocket_ami.id
+ instance_type = "m5.large"
+ desired_size = 2
+ key_name = aws_key_pair.this.key_name
+
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+
+ bootstrap_extra_args = <<-EOT
+ # The admin host container provides SSH access and runs with "superpowers".
+        # It is disabled by default, but can be enabled explicitly if SSH access is needed.
+ [settings.host-containers.admin]
+ enabled = false
+
+ # The control host container provides out-of-band access via SSM.
+ # It is enabled by default, and can be disabled if you do not expect to use SSM.
+ # This could leave you with no way to access the API and change settings on an existing node!
+ [settings.host-containers.control]
+ enabled = true
+
+ [settings.kubernetes.node-labels]
+ ingress = "allowed"
+ EOT
+ }
+
+ # Complete
+ complete = {
+ name = "complete-self-mng"
+ use_name_prefix = false
+
+ subnet_ids = module.vpc.public_subnets
+
+ min_size = 1
+ max_size = 7
+ desired_size = 1
+
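+      # Example custom AMI; the bootstrap/user data arguments below customize how these nodes join the cluster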
+ ami_id = "ami-0caf35bc73450c396"
+ bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
+
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
+
+ post_bootstrap_user_data = <<-EOT
+ echo "you are free little kubelet!"
+ EOT
+
+ disk_size = 256
+ instance_type = "m6i.large"
+
+ launch_template_name = "self-managed-ex"
+ launch_template_use_name_prefix = true
+ launch_template_description = "Self managed node group example launch template"
+
+ ebs_optimized = true
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ enable_monitoring = true
+
+ block_device_mappings = {
+ xvda = {
+ device_name = "/dev/xvda"
+ ebs = {
+ volume_size = 75
+ volume_type = "gp3"
+ iops = 3000
+ throughput = 150
+ encrypted = true
+ kms_key_id = aws_kms_key.ebs.arn
+ delete_on_termination = true
+ }
+ }
+ }
+
+ metadata_options = {
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ http_put_response_hop_limit = 2
+ }
+
+ create_iam_role = true
+ iam_role_name = "self-managed-node-group-complete-example"
+ iam_role_use_name_prefix = false
+ iam_role_description = "Self managed node group complete example role"
+ iam_role_tags = {
+ Purpose = "Protector of the kubelet"
+ }
+ iam_role_additional_policies = [
+ "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+ ]
+
+ create_security_group = true
+ security_group_name = "self-managed-node-group-complete-example"
+ security_group_use_name_prefix = false
+ security_group_description = "Self managed node group complete example security group"
+ security_group_rules = {
+ phoneOut = {
+ description = "Hello CloudFlare"
+ protocol = "udp"
+ from_port = 53
+ to_port = 53
+ type = "egress"
+ cidr_blocks = ["1.1.1.1/32"]
+ }
+ phoneHome = {
+ description = "Hello cluster"
+ protocol = "udp"
+ from_port = 53
+ to_port = 53
+ type = "egress"
+ source_cluster_security_group = true # bit of reflection lookup
+ }
+ }
+ security_group_tags = {
+ Purpose = "Protector of the kubelet"
+ }
+
+ timeouts = {
+ create = "80m"
+ update = "80m"
+ delete = "80m"
+ }
+
+ tags = {
+ ExtraTag = "Self managed node group complete example"
+ }
+ }
+ }
+
+ tags = local.tags
+}
+
+################################################################################
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+locals {
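+  # Minimal kubeconfig rendered from module outputs; used only by the local-exec provisioner below to create/patch the aws-auth configmap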
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = module.eks.cluster_id
+ cluster = {
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = module.eks.cluster_id
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = data.aws_eks_cluster_auth.this.token
+ }
+ }]
+ })
+}
+
+resource "null_resource" "apply" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = <<-EOT
+ kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ EOT
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 3.0"
+
+ name = local.name
+ cidr = "10.0.0.0/16"
+
+ azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+ public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ enable_dns_hostnames = true
+
+ enable_flow_log = true
+ create_flow_log_cloudwatch_iam_role = true
+ create_flow_log_cloudwatch_log_group = true
+
+ public_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/elb" = 1
+ }
+
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = local.tags
+}
+
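+# Additional security group attached to the "complete" self managed node group above via `vpc_security_group_ids`; allows inbound SSH from private CIDR ranges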
+resource "aws_security_group" "additional" {
+ name_prefix = "${local.name}-additional"
+ vpc_id = module.vpc.vpc_id
+
+ ingress {
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = [
+ "10.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ ]
+ }
+
+ tags = local.tags
+}
+
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
+
+ tags = local.tags
+}
+
+data "aws_ami" "bottlerocket_ami" {
+ most_recent = true
+ owners = ["amazon"]
+
+ filter {
+ name = "name"
+ values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
+ }
+}
+
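+# SSH key pair generated for this example and referenced by the Bottlerocket node group above via `key_name`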
+resource "tls_private_key" "this" {
+ algorithm = "RSA"
+}
+
+resource "aws_key_pair" "this" {
+ key_name = local.name
+ public_key = tls_private_key.this.public_key_openssh
+}
+
+resource "aws_kms_key" "ebs" {
+ description = "Customer managed key to encrypt self managed node group volumes"
+ deletion_window_in_days = 7
+ policy = data.aws_iam_policy_document.ebs.json
+}
+
+# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
+data "aws_iam_policy_document" "ebs" {
+ # Copy of default KMS policy that lets you manage it
+ statement {
+ sid = "Enable IAM User Permissions"
+ actions = ["kms:*"]
+ resources = ["*"]
+
+ principals {
+ type = "AWS"
+ identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
+ }
+ }
+
+ # Required for EKS
+ statement {
+ sid = "Allow service-linked role use of the CMK"
+ actions = [
+ "kms:Encrypt",
+ "kms:Decrypt",
+ "kms:ReEncrypt*",
+ "kms:GenerateDataKey*",
+ "kms:DescribeKey"
+ ]
+ resources = ["*"]
+
+ principals {
+ type = "AWS"
+ identifiers = [
+ "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
+ module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
+ ]
+ }
+ }
+
+ statement {
+ sid = "Allow attachment of persistent resources"
+ actions = ["kms:CreateGrant"]
+ resources = ["*"]
+
+ principals {
+ type = "AWS"
+ identifiers = [
+ "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
+ module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
+ ]
+ }
+
+ condition {
+ test = "Bool"
+ variable = "kms:GrantIsForAWSResource"
+ values = ["true"]
+ }
+ }
+}
diff --git a/examples/self_managed_node_group/outputs.tf b/examples/self_managed_node_group/outputs.tf
new file mode 100644
index 0000000000..3e9620157b
--- /dev/null
+++ b/examples/self_managed_node_group/outputs.tf
@@ -0,0 +1,167 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+ description = "Endpoint for your Kubernetes API server"
+ value = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+ value = module.eks.cluster_primary_security_group_id
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+ description = "ID of the cluster security group"
+ value = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the node shared security group"
+ value = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+ description = "ID of the node shared security group"
+ value = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/irsa/variables.tf b/examples/self_managed_node_group/variables.tf
similarity index 100%
rename from examples/irsa/variables.tf
rename to examples/self_managed_node_group/variables.tf
diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf
new file mode 100644
index 0000000000..883963f7b0
--- /dev/null
+++ b/examples/self_managed_node_group/versions.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.64"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 2.2"
+ }
+ }
+}
diff --git a/examples/user_data/README.md b/examples/user_data/README.md
new file mode 100644
index 0000000000..57ba591944
--- /dev/null
+++ b/examples/user_data/README.md
@@ -0,0 +1,78 @@
+# Internal User Data Module
+
+Configuration in this directory renders various user data outputs used for testing and validating the internal `_user_data` sub-module.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
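+
+Each module instance in `main.tf` renders user data for a particular platform and combination of inputs; after `terraform apply`, the rendered (base64 decoded) result can be inspected with `terraform output`, e.g. `terraform output eks_mng_linux_additional`.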
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+
+## Providers
+
+No providers.
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks\_mng\_bottlerocket\_additional](#module\_eks\_mng\_bottlerocket\_additional) | ../../modules/_user_data | n/a |
+| [eks\_mng\_bottlerocket\_custom\_ami](#module\_eks\_mng\_bottlerocket\_custom\_ami) | ../../modules/_user_data | n/a |
+| [eks\_mng\_bottlerocket\_custom\_template](#module\_eks\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
+| [eks\_mng\_bottlerocket\_no\_op](#module\_eks\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_additional](#module\_eks\_mng\_linux\_additional) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_custom\_ami](#module\_eks\_mng\_linux\_custom\_ami) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_custom\_template](#module\_eks\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_no\_op](#module\_eks\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a |
+| [self\_mng\_bottlerocket\_bootstrap](#module\_self\_mng\_bottlerocket\_bootstrap) | ../../modules/_user_data | n/a |
+| [self\_mng\_bottlerocket\_custom\_template](#module\_self\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
+| [self\_mng\_bottlerocket\_no\_op](#module\_self\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
+| [self\_mng\_linux\_bootstrap](#module\_self\_mng\_linux\_bootstrap) | ../../modules/_user_data | n/a |
+| [self\_mng\_linux\_custom\_template](#module\_self\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a |
+| [self\_mng\_linux\_no\_op](#module\_self\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a |
+| [self\_mng\_windows\_bootstrap](#module\_self\_mng\_windows\_bootstrap) | ../../modules/_user_data | n/a |
+| [self\_mng\_windows\_custom\_template](#module\_self\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a |
+| [self\_mng\_windows\_no\_op](#module\_self\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a |
+
+## Resources
+
+No resources.
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [eks\_mng\_bottlerocket\_additional](#output\_eks\_mng\_bottlerocket\_additional) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_bottlerocket\_custom\_ami](#output\_eks\_mng\_bottlerocket\_custom\_ami) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_bottlerocket\_custom\_template](#output\_eks\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_bottlerocket\_no\_op](#output\_eks\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_additional](#output\_eks\_mng\_linux\_additional) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_custom\_ami](#output\_eks\_mng\_linux\_custom\_ami) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_custom\_template](#output\_eks\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_no\_op](#output\_eks\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_bottlerocket\_bootstrap](#output\_self\_mng\_bottlerocket\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_bottlerocket\_custom\_template](#output\_self\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_bottlerocket\_no\_op](#output\_self\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_linux\_bootstrap](#output\_self\_mng\_linux\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_linux\_custom\_template](#output\_self\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_linux\_no\_op](#output\_self\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_windows\_bootstrap](#output\_self\_mng\_windows\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_windows\_custom\_template](#output\_self\_mng\_windows\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_windows\_no\_op](#output\_self\_mng\_windows\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+
diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf
new file mode 100644
index 0000000000..4e961a3947
--- /dev/null
+++ b/examples/user_data/main.tf
@@ -0,0 +1,289 @@
+locals {
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+
+ cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+ cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+ cluster_service_ipv4_cidr = "172.16.0.0/16"
+}
+
+################################################################################
+# User Data Module
+################################################################################
+
+# EKS managed node group - linux
+module "eks_mng_linux_no_op" {
+ source = "../../modules/_user_data"
+}
+
+module "eks_mng_linux_additional" {
+ source = "../../modules/_user_data"
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
+}
+
+module "eks_mng_linux_custom_ami" {
+ source = "../../modules/_user_data"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+ cluster_service_ipv4_cidr = local.cluster_service_ipv4_cidr
+
+ enable_bootstrap_user_data = true
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
+}
+
+
+module "eks_mng_linux_custom_template" {
+ source = "../../modules/_user_data"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/linux_custom.tpl"
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
+}
+
+# EKS managed node group - bottlerocket
+module "eks_mng_bottlerocket_no_op" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+}
+
+module "eks_mng_bottlerocket_additional" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+module "eks_mng_bottlerocket_custom_ami" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ enable_bootstrap_user_data = true
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+module "eks_mng_bottlerocket_custom_template" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+# Self managed node group - linux
+module "self_mng_linux_no_op" {
+ source = "../../modules/_user_data"
+
+ is_eks_managed_node_group = false
+}
+
+module "self_mng_linux_bootstrap" {
+ source = "../../modules/_user_data"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
+}
+
+module "self_mng_linux_custom_template" {
+ source = "../../modules/_user_data"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/linux_custom.tpl"
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
+}
+
+# Self managed node group - bottlerocket
+module "self_mng_bottlerocket_no_op" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ is_eks_managed_node_group = false
+}
+
+module "self_mng_bottlerocket_bootstrap" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+module "self_mng_bottlerocket_custom_template" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+# Self managed node group - windows
+module "self_mng_windows_no_op" {
+ source = "../../modules/_user_data"
+
+ platform = "windows"
+
+ is_eks_managed_node_group = false
+}
+
+module "self_mng_windows_bootstrap" {
+ source = "../../modules/_user_data"
+
+ platform = "windows"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ pre_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+ bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+ post_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+}
+
+module "self_mng_windows_custom_template" {
+ source = "../../modules/_user_data"
+
+ platform = "windows"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/windows_custom.tpl"
+
+ pre_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+ bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+ post_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+}
diff --git a/examples/user_data/outputs.tf b/examples/user_data/outputs.tf
new file mode 100644
index 0000000000..dd2c3407e1
--- /dev/null
+++ b/examples/user_data/outputs.tf
@@ -0,0 +1,89 @@
+# EKS managed node group - linux
+output "eks_mng_linux_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_no_op.user_data)
+}
+
+output "eks_mng_linux_additional" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_additional.user_data)
+}
+
+output "eks_mng_linux_custom_ami" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_custom_ami.user_data)
+}
+
+output "eks_mng_linux_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_custom_template.user_data)
+}
+
+# EKS managed node group - bottlerocket
+output "eks_mng_bottlerocket_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_no_op.user_data)
+}
+
+output "eks_mng_bottlerocket_additional" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_additional.user_data)
+}
+
+output "eks_mng_bottlerocket_custom_ami" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_custom_ami.user_data)
+}
+
+output "eks_mng_bottlerocket_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_custom_template.user_data)
+}
+
+# Self managed node group - linux
+output "self_mng_linux_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_linux_no_op.user_data)
+}
+
+output "self_mng_linux_bootstrap" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_linux_bootstrap.user_data)
+}
+
+output "self_mng_linux_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_linux_custom_template.user_data)
+}
+
+# Self managed node group - bottlerocket
+output "self_mng_bottlerocket_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_bottlerocket_no_op.user_data)
+}
+
+output "self_mng_bottlerocket_bootstrap" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_bottlerocket_bootstrap.user_data)
+}
+
+output "self_mng_bottlerocket_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_bottlerocket_custom_template.user_data)
+}
+
+# Self managed node group - windows
+output "self_mng_windows_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_windows_no_op.user_data)
+}
+
+output "self_mng_windows_bootstrap" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_windows_bootstrap.user_data)
+}
+
+output "self_mng_windows_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_windows_custom_template.user_data)
+}
diff --git a/examples/user_data/templates/bottlerocket_custom.tpl b/examples/user_data/templates/bottlerocket_custom.tpl
new file mode 100644
index 0000000000..6c4d9434a7
--- /dev/null
+++ b/examples/user_data/templates/bottlerocket_custom.tpl
@@ -0,0 +1,7 @@
+# Custom user data template provided for rendering
+[settings.kubernetes]
+"cluster-name" = "${cluster_name}"
+"api-server" = "${cluster_endpoint}"
+"cluster-certificate" = "${cluster_auth_base64}"
+
+${bootstrap_extra_args~}
diff --git a/examples/user_data/templates/linux_custom.tpl b/examples/user_data/templates/linux_custom.tpl
new file mode 100644
index 0000000000..bfe21f117a
--- /dev/null
+++ b/examples/user_data/templates/linux_custom.tpl
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -ex
+
+${pre_bootstrap_user_data ~}
+
+# Custom user data template provided for rendering
+B64_CLUSTER_CA=${cluster_auth_base64}
+API_SERVER_URL=${cluster_endpoint}
+/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
+${post_bootstrap_user_data ~}
diff --git a/examples/user_data/templates/windows_custom.tpl b/examples/user_data/templates/windows_custom.tpl
new file mode 100644
index 0000000000..3c1ca7014a
--- /dev/null
+++ b/examples/user_data/templates/windows_custom.tpl
@@ -0,0 +1,10 @@
+# Custom user data template provided for rendering
+
+${pre_bootstrap_user_data ~}
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -APIServerEndpoint ${cluster_endpoint} -Base64ClusterCA ${cluster_auth_base64} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+${post_bootstrap_user_data ~}
+
diff --git a/examples/launch_templates/variables.tf b/examples/user_data/variables.tf
similarity index 100%
rename from examples/launch_templates/variables.tf
rename to examples/user_data/variables.tf
diff --git a/examples/user_data/versions.tf b/examples/user_data/versions.tf
new file mode 100644
index 0000000000..bfce6ae345
--- /dev/null
+++ b/examples/user_data/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.64"
+ }
+ }
+}
diff --git a/fargate.tf b/fargate.tf
deleted file mode 100644
index 5526e2eed0..0000000000
--- a/fargate.tf
+++ /dev/null
@@ -1,16 +0,0 @@
-module "fargate" {
- source = "./modules/fargate"
-
- create_eks = var.create_eks
- create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
-
- cluster_name = local.cluster_name
- fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
- permissions_boundary = var.permissions_boundary
- iam_path = var.iam_path
- subnets = coalescelist(var.fargate_subnets, var.subnets, [""])
-
- fargate_profiles = var.fargate_profiles
-
- tags = var.tags
-}
diff --git a/irsa.tf b/irsa.tf
deleted file mode 100644
index 5fc3dc8df7..0000000000
--- a/irsa.tf
+++ /dev/null
@@ -1,23 +0,0 @@
-# Enable IAM Roles for EKS Service-Accounts (IRSA).
-
-# The Root CA Thumbprint for an OpenID Connect Identity Provider is currently
-# Being passed as a default value which is the same for all regions and
-# Is valid until (Jun 28 17:39:16 2034 GMT).
-# https://crt.sh/?q=9E99A48A9960B14926BB7F3B02E22DA2B0AB7280
-# https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html
-# https://github.com/terraform-providers/terraform-provider-aws/issues/10104
-
-resource "aws_iam_openid_connect_provider" "oidc_provider" {
- count = var.enable_irsa && var.create_eks ? 1 : 0
-
- client_id_list = local.client_id_list
- thumbprint_list = [var.eks_oidc_root_ca_thumbprint]
- url = local.cluster_oidc_issuer_url
-
- tags = merge(
- {
- Name = "${var.cluster_name}-eks-irsa"
- },
- var.tags
- )
-}
diff --git a/kubectl.tf b/kubectl.tf
deleted file mode 100644
index b5d6947855..0000000000
--- a/kubectl.tf
+++ /dev/null
@@ -1,8 +0,0 @@
-resource "local_file" "kubeconfig" {
- count = var.write_kubeconfig && var.create_eks ? 1 : 0
-
- content = local.kubeconfig
- filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
- file_permission = var.kubeconfig_file_permission
- directory_permission = "0755"
-}
diff --git a/locals.tf b/locals.tf
deleted file mode 100644
index 8e1b54e49c..0000000000
--- a/locals.tf
+++ /dev/null
@@ -1,263 +0,0 @@
-locals {
-
- # EKS Cluster
- cluster_id = coalescelist(aws_eks_cluster.this[*].id, [""])[0]
- cluster_arn = coalescelist(aws_eks_cluster.this[*].arn, [""])[0]
- cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
- cluster_endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
- cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
- cluster_oidc_issuer_url = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc[0].issuer, [""]))[0]
- cluster_primary_security_group_id = coalescelist(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, [""])[0]
-
- cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
- cluster_iam_role_name = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
- cluster_iam_role_arn = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
-
- # Worker groups
- worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
-
- default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
- default_ami_id_linux = local.workers_group_defaults.ami_id != "" ? local.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0]
- default_ami_id_windows = local.workers_group_defaults.ami_id_windows != "" ? local.workers_group_defaults.ami_id_windows : concat(data.aws_ami.eks_worker_windows.*.id, [""])[0]
-
- worker_group_launch_configuration_count = length(var.worker_groups)
- worker_group_launch_template_count = length(var.worker_groups_launch_template)
-
- worker_groups_platforms = [for x in concat(var.worker_groups, var.worker_groups_launch_template) : try(x.platform, var.workers_group_defaults["platform"], var.default_platform)]
-
- worker_ami_name_filter = coalesce(var.worker_ami_name_filter, "amazon-eks-node-${coalesce(var.cluster_version, "cluster_version")}-v*")
- worker_ami_name_filter_windows = coalesce(var.worker_ami_name_filter_windows, "Windows_Server-2019-English-Core-EKS_Optimized-${coalesce(var.cluster_version, "cluster_version")}-*")
-
- ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
- sts_principal = "sts.${data.aws_partition.current.dns_suffix}"
- client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
- policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
-
- workers_group_defaults_defaults = {
- name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
- tags = [] # A list of maps defining extra tags to be applied to the worker group autoscaling group and volumes.
- ami_id = "" # AMI ID for the eks linux based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
- ami_id_windows = "" # AMI ID for the eks windows based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
- asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group and changing its value will not affect the autoscaling group's desired capacity because the cluster-autoscaler manages up and down scaling of the nodes. Cluster-autoscaler add nodes when pods are in pending state and remove the nodes when they are not required by modifying the desired_capacity of the autoscaling group. Although an issue exists in which if the value of the asg_min_size is changed it modifies the value of asg_desired_capacity.
- asg_max_size = "3" # Maximum worker capacity in the autoscaling group.
- asg_min_size = "1" # Minimum worker capacity in the autoscaling group. NOTE: Change in this paramater will affect the asg_desired_capacity, like changing its value to 2 will change asg_desired_capacity value to 2 but bringing back it to 1 will not affect the asg_desired_capacity.
- asg_force_delete = false # Enable forced deletion for the autoscaling group.
- asg_initial_lifecycle_hooks = [] # Initital lifecycle hook for the autoscaling group.
- default_cooldown = null # The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
- health_check_type = null # Controls how health checking is done. Valid values are "EC2" or "ELB".
- health_check_grace_period = null # Time in seconds after instance comes into service before checking health.
- instance_type = "m4.large" # Size of the workers instances.
- instance_store_virtual_name = "ephemeral0" # "virtual_name" of the instance store volume.
- spot_price = "" # Cost of spot instance.
- placement_tenancy = "" # The tenancy of the instance. Valid values are "default" or "dedicated".
- root_volume_size = "100" # root volume size of workers instances.
- root_volume_type = "gp2" # root volume type of workers instances, can be "standard", "gp3", "gp2", or "io1"
- root_iops = "0" # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
- root_volume_throughput = null # The amount of throughput to provision for a gp3 volume.
- key_name = "" # The key pair name that should be used for the instances in the autoscaling group
- pre_userdata = "" # userdata to pre-append to the default userdata.
- userdata_template_file = "" # alternate template to use for userdata
- userdata_template_extra_args = {} # Additional arguments to use when expanding the userdata template file
- bootstrap_extra_args = "" # Extra arguments passed to the bootstrap.sh script from the EKS AMI (Amazon Machine Image).
- additional_userdata = "" # userdata to append to the default userdata.
- ebs_optimized = true # sets whether to use ebs optimization on supported types.
- enable_monitoring = true # Enables/disables detailed monitoring.
- enclave_support = false # Enables/disables enclave support
- public_ip = false # Associate a public ip address with a worker
- kubelet_extra_args = "" # This string is passed directly to kubelet if set. Useful for adding labels or taints.
- subnets = var.subnets # A list of subnets to place the worker nodes in. i.e. ["subnet-123", "subnet-456", "subnet-789"]
- additional_security_group_ids = [] # A list of additional security group ids to include in worker launch config
- protect_from_scale_in = false # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
- iam_instance_profile_name = "" # A custom IAM instance profile name. Used when manage_worker_iam_resources is set to false. Incompatible with iam_role_id.
- iam_role_id = "local.default_iam_role_id" # A custom IAM role id. Incompatible with iam_instance_profile_name. Literal local.default_iam_role_id will never be used but if iam_role_id is not set, the local.default_iam_role_id interpolation will be used.
- suspended_processes = ["AZRebalance"] # A list of processes to suspend. i.e. ["AZRebalance", "HealthCheck", "ReplaceUnhealthy"]
- target_group_arns = null # A list of Application LoadBalancer (ALB) target group ARNs to be associated to the autoscaling group
- load_balancers = null # A list of Classic LoadBalancer (CLB)'s name to be associated to the autoscaling group
- enabled_metrics = [] # A list of metrics to be collected i.e. ["GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity"]
- placement_group = null # The name of the placement group into which to launch the instances, if any.
- service_linked_role_arn = "" # Arn of custom service linked role that Auto Scaling group will use. Useful when you have encrypted EBS
- termination_policies = [] # A list of policies to decide how the instances in the auto scale group should be terminated.
- platform = var.default_platform # Platform of workers. Either "linux" or "windows".
- additional_ebs_volumes = [] # A list of additional volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), volume_size, volume_type, iops, throughput, encrypted, kms_key_id (only on launch-template), delete_on_termination, snapshot_id. Optional values are grabbed from root volume or from defaults
- additional_instance_store_volumes = [] # A list of additional instance store (local disk) volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), virtual_name.
- warm_pool = null # If this block is configured, add a Warm Pool to the specified Auto Scaling group.
- timeouts = {} # A map of timeouts for create/update/delete operations
- snapshot_id = null # A custom snapshot ID.
-
- # Settings for launch templates
- root_block_device_name = concat(data.aws_ami.eks_worker.*.root_device_name, [""])[0] # Root device name for Linux workers. If not provided, will assume default Linux AMI was used.
- root_block_device_name_windows = concat(data.aws_ami.eks_worker_windows.*.root_device_name, [""])[0] # Root device name for Windows workers. If not provided, will assume default Windows AMI was used.
- root_kms_key_id = "" # The KMS key to use when encrypting the root storage device
- launch_template_id = null # The id of the launch template used for managed node_groups
- launch_template_version = "$Latest" # The latest version of the launch template to use in the autoscaling group
- update_default_version = false # Update the autoscaling group launch template's default version upon each update
- launch_template_placement_tenancy = "default" # The placement tenancy for instances
- launch_template_placement_group = null # The name of the placement group into which to launch the instances, if any.
- root_encrypted = false # Whether the volume should be encrypted or not
- eni_delete = true # Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying)
- interface_type = null # The type of network interface. To create an Elastic Fabric Adapter (EFA), specify 'efa'.
- cpu_credits = "standard" # T2/T3 unlimited mode, can be 'standard' or 'unlimited'. Used 'standard' mode as default to avoid paying higher costs
- market_type = null
- metadata_http_endpoint = "enabled" # The state of the metadata service: enabled, disabled.
- metadata_http_tokens = "optional" # If session tokens are required: optional, required.
- metadata_http_put_response_hop_limit = null # The desired HTTP PUT response hop limit for instance metadata requests.
- # Settings for launch templates with mixed instances policy
- override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] # A list of override instance types for mixed instances policy
- on_demand_allocation_strategy = null # Strategy to use when launching on-demand instances. Valid values: prioritized.
- on_demand_base_capacity = "0" # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances
- on_demand_percentage_above_base_capacity = "0" # Percentage split between on-demand and Spot instances above the base on-demand capacity
- spot_allocation_strategy = "lowest-price" # Valid options are 'lowest-price' and 'capacity-optimized'. If 'lowest-price', the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools. If 'capacity-optimized', the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
- spot_instance_pools = 10 # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
- spot_max_price = "" # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
- max_instance_lifetime = 0 # Maximum number of seconds instances can run in the ASG. 0 is unlimited.
- elastic_inference_accelerator = null # Type of elastic inference accelerator to be attached. Example values are eia1.medium, eia2.large, etc.
- instance_refresh_enabled = false # Enable instance refresh for the worker autoscaling group.
- instance_refresh_strategy = "Rolling" # Strategy to use for instance refresh. Default is 'Rolling' which the only valid value.
- instance_refresh_min_healthy_percentage = 90 # The amount of capacity in the ASG that must remain healthy during an instance refresh, as a percentage of the ASG's desired capacity.
- instance_refresh_instance_warmup = null # The number of seconds until a newly launched instance is configured and ready to use. Defaults to the ASG's health check grace period.
- instance_refresh_triggers = [] # Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of launch_configuration, launch_template, or mixed_instances_policy.
- capacity_rebalance = false # Enable capacity rebalance
- }
-
- workers_group_defaults = merge(
- local.workers_group_defaults_defaults,
- var.workers_group_defaults,
- )
-
- ebs_optimized_not_supported = [
- "c1.medium",
- "c3.8xlarge",
- "c3.large",
- "c5d.12xlarge",
- "c5d.24xlarge",
- "c5d.metal",
- "cc2.8xlarge",
- "cr1.8xlarge",
- "g2.8xlarge",
- "g4dn.metal",
- "hs1.8xlarge",
- "i2.8xlarge",
- "m1.medium",
- "m1.small",
- "m2.xlarge",
- "m3.large",
- "m3.medium",
- "m5ad.16xlarge",
- "m5ad.8xlarge",
- "m5dn.metal",
- "m5n.metal",
- "r3.8xlarge",
- "r3.large",
- "r5ad.16xlarge",
- "r5ad.8xlarge",
- "r5dn.metal",
- "r5n.metal",
- "t1.micro",
- "t2.2xlarge",
- "t2.large",
- "t2.medium",
- "t2.micro",
- "t2.nano",
- "t2.small",
- "t2.xlarge"
- ]
-
- kubeconfig = var.create_eks ? templatefile("${path.module}/templates/kubeconfig.tpl", {
- kubeconfig_name = coalesce(var.kubeconfig_name, "eks_${var.cluster_name}")
- endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
- aws_authenticator_kubeconfig_apiversion = var.kubeconfig_api_version
- aws_authenticator_command = var.kubeconfig_aws_authenticator_command
- aws_authenticator_command_args = coalescelist(var.kubeconfig_aws_authenticator_command_args, ["token", "-i", local.cluster_name])
- aws_authenticator_additional_args = var.kubeconfig_aws_authenticator_additional_args
- aws_authenticator_env_variables = var.kubeconfig_aws_authenticator_env_variables
- }) : ""
-
- launch_configuration_userdata_rendered = [
- for index in range(var.create_eks ? local.worker_group_launch_configuration_count : 0) : templatefile(
- lookup(
- var.worker_groups[index],
- "userdata_template_file",
- lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"]) == "windows"
- ? "${path.module}/templates/userdata_windows.tpl"
- : "${path.module}/templates/userdata.sh.tpl"
- ),
- merge({
- platform = lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"])
- cluster_name = local.cluster_name
- endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
- pre_userdata = lookup(
- var.worker_groups[index],
- "pre_userdata",
- local.workers_group_defaults["pre_userdata"],
- )
- additional_userdata = lookup(
- var.worker_groups[index],
- "additional_userdata",
- local.workers_group_defaults["additional_userdata"],
- )
- bootstrap_extra_args = lookup(
- var.worker_groups[index],
- "bootstrap_extra_args",
- local.workers_group_defaults["bootstrap_extra_args"],
- )
- kubelet_extra_args = lookup(
- var.worker_groups[index],
- "kubelet_extra_args",
- local.workers_group_defaults["kubelet_extra_args"],
- )
- },
- lookup(
- var.worker_groups[index],
- "userdata_template_extra_args",
- local.workers_group_defaults["userdata_template_extra_args"]
- )
- )
- )
- ]
-
- launch_template_userdata_rendered = [
- for index in range(var.create_eks ? local.worker_group_launch_template_count : 0) : templatefile(
- lookup(
- var.worker_groups_launch_template[index],
- "userdata_template_file",
- lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"]) == "windows"
- ? "${path.module}/templates/userdata_windows.tpl"
- : "${path.module}/templates/userdata.sh.tpl"
- ),
- merge({
- platform = lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"])
- cluster_name = local.cluster_name
- endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
- pre_userdata = lookup(
- var.worker_groups_launch_template[index],
- "pre_userdata",
- local.workers_group_defaults["pre_userdata"],
- )
- additional_userdata = lookup(
- var.worker_groups_launch_template[index],
- "additional_userdata",
- local.workers_group_defaults["additional_userdata"],
- )
- bootstrap_extra_args = lookup(
- var.worker_groups_launch_template[index],
- "bootstrap_extra_args",
- local.workers_group_defaults["bootstrap_extra_args"],
- )
- kubelet_extra_args = lookup(
- var.worker_groups_launch_template[index],
- "kubelet_extra_args",
- local.workers_group_defaults["kubelet_extra_args"],
- )
- },
- lookup(
- var.worker_groups_launch_template[index],
- "userdata_template_extra_args",
- local.workers_group_defaults["userdata_template_extra_args"]
- )
- )
- )
- ]
-}
diff --git a/main.tf b/main.tf
index d1860f0696..286e23b5a4 100644
--- a/main.tf
+++ b/main.tf
@@ -1,24 +1,20 @@
-resource "aws_cloudwatch_log_group" "this" {
- count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
+data "aws_partition" "current" {}
- name = "/aws/eks/${var.cluster_name}/cluster"
- retention_in_days = var.cluster_log_retention_in_days
- kms_key_id = var.cluster_log_kms_key_id
-
- tags = var.tags
-}
+################################################################################
+# Cluster
+################################################################################
resource "aws_eks_cluster" "this" {
- count = var.create_eks ? 1 : 0
+ count = var.create ? 1 : 0
name = var.cluster_name
- enabled_cluster_log_types = var.cluster_enabled_log_types
- role_arn = local.cluster_iam_role_arn
+ role_arn = try(aws_iam_role.this[0].arn, var.iam_role_arn)
version = var.cluster_version
+ enabled_cluster_log_types = var.cluster_enabled_log_types
vpc_config {
- security_group_ids = compact([local.cluster_security_group_id])
- subnet_ids = var.subnets
+ security_group_ids = distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id]))
+ subnet_ids = var.subnet_ids
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
public_access_cidrs = var.cluster_endpoint_public_access_cidrs
@@ -45,188 +41,221 @@ resource "aws_eks_cluster" "this" {
)
timeouts {
- create = var.cluster_create_timeout
- delete = var.cluster_delete_timeout
- update = var.cluster_update_timeout
+ create = lookup(var.cluster_timeouts, "create", null)
+    delete = lookup(var.cluster_timeouts, "delete", null)
+    update = lookup(var.cluster_timeouts, "update", null)
}
depends_on = [
- aws_security_group_rule.cluster_egress_internet,
- aws_security_group_rule.cluster_https_worker_ingress,
- aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
- aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy,
- aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy,
+ aws_iam_role_policy_attachment.this,
+ aws_security_group_rule.cluster,
+ aws_security_group_rule.node,
aws_cloudwatch_log_group.this
]
}
+resource "aws_cloudwatch_log_group" "this" {
+ count = var.create && var.create_cloudwatch_log_group ? 1 : 0
+
+ name = "/aws/eks/${var.cluster_name}/cluster"
+ retention_in_days = var.cloudwatch_log_group_retention_in_days
+ kms_key_id = var.cloudwatch_log_group_kms_key_id
+
+ tags = var.tags
+}
+
+################################################################################
+# Cluster Security Group
+# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
+################################################################################
+
+locals {
+ cluster_sg_name = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
+ create_cluster_sg = var.create && var.create_cluster_security_group
+
+ cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id
+
+ cluster_security_group_rules = {
+ ingress_nodes_443 = {
+ description = "Node groups to cluster API"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ type = "ingress"
+ source_node_security_group = true
+ }
+ egress_nodes_443 = {
+ description = "Cluster API to node groups"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ type = "egress"
+ source_node_security_group = true
+ }
+ egress_nodes_kubelet = {
+ description = "Cluster API to node kubelets"
+ protocol = "tcp"
+ from_port = 10250
+ to_port = 10250
+ type = "egress"
+ source_node_security_group = true
+ }
+ }
+}
+
resource "aws_security_group" "cluster" {
- count = var.cluster_create_security_group && var.create_eks ? 1 : 0
+ count = local.create_cluster_sg ? 1 : 0
- name_prefix = var.cluster_name
- description = "EKS cluster security group."
+ name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
+ name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_sg_name}-" : null
+ description = var.cluster_security_group_description
vpc_id = var.vpc_id
tags = merge(
var.tags,
- {
- "Name" = "${var.cluster_name}-eks_cluster_sg"
- },
+ { "Name" = local.cluster_sg_name },
+ var.cluster_security_group_tags
)
}
-resource "aws_security_group_rule" "cluster_egress_internet" {
- count = var.cluster_create_security_group && var.create_eks ? 1 : 0
-
- description = "Allow cluster egress access to the Internet."
- protocol = "-1"
- security_group_id = local.cluster_security_group_id
- cidr_blocks = var.cluster_egress_cidrs
- from_port = 0
- to_port = 0
- type = "egress"
-}
-
-resource "aws_security_group_rule" "cluster_https_worker_ingress" {
- count = var.cluster_create_security_group && var.create_eks && var.worker_create_security_group ? 1 : 0
-
- description = "Allow pods to communicate with the EKS cluster API."
- protocol = "tcp"
- security_group_id = local.cluster_security_group_id
- source_security_group_id = local.worker_security_group_id
- from_port = 443
- to_port = 443
- type = "ingress"
+resource "aws_security_group_rule" "cluster" {
+ for_each = local.create_cluster_sg ? merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : {}
+
+ # Required
+ security_group_id = aws_security_group.cluster[0].id
+ protocol = each.value.protocol
+ from_port = each.value.from_port
+ to_port = each.value.to_port
+ type = each.value.type
+
+ # Optional
+ description = try(each.value.description, null)
+ cidr_blocks = try(each.value.cidr_blocks, null)
+ ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
+ prefix_list_ids = try(each.value.prefix_list_ids, [])
+ self = try(each.value.self, null)
+ source_security_group_id = try(
+ each.value.source_security_group_id,
+ try(each.value.source_node_security_group, false) ? local.node_security_group_id : null
+ )
}
-resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
- for_each = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
+################################################################################
+# IRSA
+# Note - this is different from EKS identity provider
+################################################################################
- description = "Allow private K8S API ingress from custom CIDR source."
- type = "ingress"
- from_port = 443
- to_port = 443
- protocol = "tcp"
- cidr_blocks = [each.value]
+data "tls_certificate" "this" {
+ count = var.create && var.enable_irsa ? 1 : 0
- security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
+ url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
}
-resource "aws_security_group_rule" "cluster_private_access_sg_source" {
- count = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
+resource "aws_iam_openid_connect_provider" "oidc_provider" {
+ count = var.create && var.enable_irsa ? 1 : 0
- description = "Allow private K8S API ingress from custom Security Groups source."
- type = "ingress"
- from_port = 443
- to_port = 443
- protocol = "tcp"
- source_security_group_id = var.cluster_endpoint_private_access_sg[count.index]
+ client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences)))
+ thumbprint_list = [data.tls_certificate.this[0].certificates[0].sha1_fingerprint]
+ url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
- security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
+ tags = merge(
+ { Name = "${var.cluster_name}-eks-irsa" },
+ var.tags
+ )
}
-resource "aws_iam_role" "cluster" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-
- name_prefix = var.cluster_iam_role_name != "" ? null : var.cluster_name
- name = var.cluster_iam_role_name != "" ? var.cluster_iam_role_name : null
- assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json
- permissions_boundary = var.permissions_boundary
- path = var.iam_path
- force_detach_policies = true
+################################################################################
+# IAM Role
+################################################################################
- tags = var.tags
+locals {
+ iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy"
- role = local.cluster_iam_role_name
-}
+data "aws_iam_policy_document" "assume_role_policy" {
+ count = var.create && var.create_iam_role ? 1 : 0
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+ statement {
+ sid = "EKSClusterAssumeRole"
+ actions = ["sts:AssumeRole"]
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSServicePolicy"
- role = local.cluster_iam_role_name
+ principals {
+ type = "Service"
+ identifiers = ["eks.amazonaws.com"]
+ }
+ }
}
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSVPCResourceControllerPolicy" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+resource "aws_iam_role" "this" {
+ count = var.create && var.create_iam_role ? 1 : 0
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController"
- role = local.cluster_iam_role_name
-}
-
-/*
- Adding a policy to cluster IAM role that allow permissions
- required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
-*/
+ name = var.iam_role_use_name_prefix ? null : local.iam_role_name
+ name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+ path = var.iam_role_path
+ description = var.iam_role_description
-data "aws_iam_policy_document" "cluster_elb_sl_role_creation" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+ assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
+ permissions_boundary = var.iam_role_permissions_boundary
+ force_detach_policies = true
- statement {
- effect = "Allow"
- actions = [
- "ec2:DescribeAccountAttributes",
- "ec2:DescribeInternetGateways",
- "ec2:DescribeAddresses"
- ]
- resources = ["*"]
- }
+ tags = merge(var.tags, var.iam_role_tags)
}
-resource "aws_iam_policy" "cluster_elb_sl_role_creation" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
+resource "aws_iam_role_policy_attachment" "this" {
+ for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
+ "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
+ "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
+ ], var.iam_role_additional_policies)))) : toset([])
- name_prefix = "${var.cluster_name}-elb-sl-role-creation"
- description = "Permissions for EKS to create AWSServiceRoleForElasticLoadBalancing service-linked role"
- policy = data.aws_iam_policy_document.cluster_elb_sl_role_creation[0].json
- path = var.iam_path
-
- tags = var.tags
+ policy_arn = each.value
+ role = aws_iam_role.this[0].name
}
-resource "aws_iam_role_policy_attachment" "cluster_elb_sl_role_creation" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+################################################################################
+# EKS Addons
+################################################################################
- policy_arn = aws_iam_policy.cluster_elb_sl_role_creation[0].arn
- role = local.cluster_iam_role_name
-}
+resource "aws_eks_addon" "this" {
+ for_each = { for k, v in var.cluster_addons : k => v if var.create }
-/*
- Adding a policy to cluster IAM role that deny permissions to logs:CreateLogGroup
- it is not needed since we create the log group ourselve in this module, and it is causing trouble during cleanup/deletion
-*/
+ cluster_name = aws_eks_cluster.this[0].name
+ addon_name = try(each.value.name, each.key)
-data "aws_iam_policy_document" "cluster_deny_log_group" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+ addon_version = lookup(each.value, "addon_version", null)
+ resolve_conflicts = lookup(each.value, "resolve_conflicts", null)
+ service_account_role_arn = lookup(each.value, "service_account_role_arn", null)
- statement {
- effect = "Deny"
- actions = [
- "logs:CreateLogGroup"
+ lifecycle {
+ ignore_changes = [
+ modified_at
]
- resources = ["*"]
}
-}
-
-resource "aws_iam_policy" "cluster_deny_log_group" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-
- name_prefix = "${var.cluster_name}-deny-log-group"
- description = "Deny CreateLogGroup"
- policy = data.aws_iam_policy_document.cluster_deny_log_group[0].json
- path = var.iam_path
tags = var.tags
}
-resource "aws_iam_role_policy_attachment" "cluster_deny_log_group" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+################################################################################
+# EKS Identity Provider
+# Note - this is different from IRSA
+################################################################################
+
+resource "aws_eks_identity_provider_config" "this" {
+ for_each = { for k, v in var.cluster_identity_providers : k => v if var.create }
+
+ cluster_name = aws_eks_cluster.this[0].name
+
+ oidc {
+ client_id = each.value.client_id
+ groups_claim = lookup(each.value, "groups_claim", null)
+ groups_prefix = lookup(each.value, "groups_prefix", null)
+ identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
+ issuer_url = each.value.issuer_url
+ required_claims = lookup(each.value, "required_claims", null)
+ username_claim = lookup(each.value, "username_claim", null)
+ username_prefix = lookup(each.value, "username_prefix", null)
+ }
- policy_arn = aws_iam_policy.cluster_deny_log_group[0].arn
- role = local.cluster_iam_role_name
+ tags = var.tags
}
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
new file mode 100644
index 0000000000..f61500517e
--- /dev/null
+++ b/modules/_user_data/README.md
@@ -0,0 +1,123 @@
+# Internal User Data Module
+
+Configuration in this directory renders the appropriate user data for the given inputs. User data can be utilized in a number of different ways, and this internal module is designed to support that flexibility as well as provide a means for out-of-band testing and validation.
+
+See the [`examples/user_data/` directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) for various examples of using the module.
+
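+As a quick orientation, the rendered user data can be generated and inspected without creating any other infrastructure. The following is a minimal sketch with placeholder values (the module label, cluster name, endpoint, and certificate below are illustrative only); see the linked examples for complete, working configurations:
+
+```hcl
+module "rendered_user_data" {
+  source = "../../modules/_user_data" # relative path as used in examples/user_data
+
+  platform = "linux"
+
+  cluster_name        = "example"
+  cluster_endpoint    = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
+  cluster_auth_base64 = "LS0t..."                                         # placeholder
+
+  enable_bootstrap_user_data = true
+}
+
+# The module returns base64 encoded user data; decode it to review what will be supplied to the node
+output "rendered_user_data" {
+  value = base64decode(module.rendered_user_data.user_data)
+}
+```
+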
+## Combinations
+
+At a high level, AWS EKS users have two methods for launching nodes within this EKS module (ignoring Fargate profiles):
+
+1. EKS managed node group
+2. Self managed node group
+
+### EKS Managed Node Group
+
+When using an EKS managed node group, users have two primary routes for interacting with the bootstrap user data:
+
+1. If the EKS managed node group does **NOT** utilize a custom AMI, then users can elect to supply additional user data that is prepended to the EKS managed node group bootstrap user data. You can read more about this process in the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
+
+ - Users can use the following variables to facilitate this process:
+
+ ```hcl
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ ```
+
+2. If the EKS managed node group does utilize a custom AMI, then per the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami), users will need to supply the necessary bootstrap configuration via user data to ensure that the node is configured to register with the cluster when launched. There are two routes that users can utilize to facilitate this bootstrapping process:
+   - If the AMI used is a derivative of the [AWS EKS Optimized AMI](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post bootstrap user data as well as additional bootstrap arguments that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
+ - Users can use the following variables to facilitate this process:
+ ```hcl
+ enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+   - If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users can supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template; for anything else, users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
+ - Users can use the following variables to facilitate this process:
+ ```hcl
+ user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+
+| ℹ️ When using bottlerocket as the platform, the user data is TOML, so all configuration is merged into the single file supplied as user data. Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid, since the bottlerocket OS controls when the various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the linux platform, `bootstrap_extra_args` are arguments supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14), such as kubelet extra args. See the [bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `bootstrap_extra_args` variable. |
+| :--- |
+
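+For example (a minimal sketch mirroring the `examples/user_data/` configurations in this repository), additional Bottlerocket settings are supplied as TOML through `bootstrap_extra_args`:
+
+```hcl
+module "eks_mng_bottlerocket_additional" {
+  source = "../../modules/_user_data"
+
+  platform = "bottlerocket"
+
+  # Merged into the module supplied Bottlerocket user data (TOML)
+  bootstrap_extra_args = <<-EOT
+    [settings.kernel]
+    lockdown = "integrity"
+  EOT
+}
+```
+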
+### Self Managed Node Group
+
+When using a self managed node group, the options presented to users are very similar to the second option listed above for EKS managed node groups. Since self managed node groups require users to provide the bootstrap user data, there is no concept of appending to user data that AWS provides; users can either elect to use the user data template provided for their platform/OS by the module or provide their own user data template for rendering by the module (a combined example follows the list below).
+
+- If the AMI used is a derivative of the [AWS EKS Optimized AMI](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post bootstrap user data as well as additional bootstrap arguments that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
+ - Users can use the following variables to facilitate this process:
+ ```hcl
+ enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+- If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node upon launch, users can supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template; for anything else, users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
+ - Users can use the following variables to facilitate this process:
+ ```hcl
+ user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+
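+Combining the above for a self managed node group that opts in to the module supplied bootstrap template, a minimal sketch (values are placeholders; complete configurations live in `examples/user_data/`) looks like:
+
+```hcl
+module "self_mng_linux_bootstrap" {
+  source = "../../modules/_user_data"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false # self managed node groups render the full bootstrap user data
+
+  cluster_name        = "example"
+  cluster_endpoint    = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
+  cluster_auth_base64 = "LS0t..."                                         # placeholder
+
+  pre_bootstrap_user_data  = "echo 'foo'"
+  post_bootstrap_user_data = "echo 'all done'"
+}
+```
+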
+### Logic Diagram
+
+The rough flow of logic that is encapsulated within the `_user_data` internal module can be represented by the following diagram to better highlight the various manners in which user data can be populated.
+
+
+
+
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [cloudinit](#provider\_cloudinit) | >= 2.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [cloudinit_config.linux_eks_managed_node_group](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`, these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
+| [create](#input\_create) | Determines whether to create user-data or not | `bool` | `true` | no |
+| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
+| [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not | `bool` | `true` | no |
+| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
+| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
+| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
+| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [user\_data](#output\_user\_data) | Base64 encoded user data rendered for the provided inputs |
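+
+As a rough sketch of how these inputs and the `user_data` output fit together (the registry sub-module source path and all values shown are illustrative assumptions, since this module is normally consumed internally by the node group modules):
+
+```hcl
+module "linux_user_data" {
+  source = "terraform-aws-modules/eks/aws//modules/_user_data"
+
+  platform                   = "linux"
+  enable_bootstrap_user_data = true
+
+  cluster_name        = "my-cluster"
+  cluster_endpoint    = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # illustrative
+  cluster_auth_base64 = "LS0tLS1CRUdJTi..."                               # illustrative
+
+  pre_bootstrap_user_data  = "echo 'pre-bootstrap'"
+  bootstrap_extra_args     = "--kubelet-extra-args '--max-pods=110'"
+  post_bootstrap_user_data = "echo 'post-bootstrap'"
+}
+
+resource "aws_launch_template" "example" {
+  name_prefix = "linux-node-"
+
+  # The module output is already base64 encoded, as launch templates expect
+  user_data = module.linux_user_data.user_data
+}
+```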
+
diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf
new file mode 100644
index 0000000000..1d265bdb54
--- /dev/null
+++ b/modules/_user_data/main.tf
@@ -0,0 +1,78 @@
+
+locals {
+ int_linux_default_user_data = var.create && var.platform == "linux" && (var.enable_bootstrap_user_data || var.user_data_template_path != "") ? base64encode(templatefile(
+ coalesce(var.user_data_template_path, "${path.module}/../../templates/linux_user_data.tpl"),
+ {
+ # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
+ enable_bootstrap_user_data = var.enable_bootstrap_user_data
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional
+ cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr != null ? var.cluster_service_ipv4_cidr : ""
+ bootstrap_extra_args = var.bootstrap_extra_args
+ pre_bootstrap_user_data = var.pre_bootstrap_user_data
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+ }
+ )) : ""
+ platform = {
+ bottlerocket = {
+ user_data = var.create && var.platform == "bottlerocket" && (var.enable_bootstrap_user_data || var.user_data_template_path != "" || var.bootstrap_extra_args != "") ? base64encode(templatefile(
+ coalesce(var.user_data_template_path, "${path.module}/../../templates/bottlerocket_user_data.tpl"),
+ {
+ # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
+ enable_bootstrap_user_data = var.enable_bootstrap_user_data
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional - is appended if using EKS managed node group without custom AMI
+ # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Not supported yet: https://github.com/bottlerocket-os/bottlerocket/issues/1866
+ bootstrap_extra_args = var.bootstrap_extra_args
+ }
+ )) : ""
+ }
+ linux = {
+ user_data = try(data.cloudinit_config.linux_eks_managed_node_group[0].rendered, local.int_linux_default_user_data)
+
+ }
+ windows = {
+ user_data = var.create && var.platform == "windows" && var.enable_bootstrap_user_data ? base64encode(templatefile(
+ coalesce(var.user_data_template_path, "${path.module}/../../templates/windows_user_data.tpl"),
+ {
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional - is appended if using EKS managed node group without custom AMI
+ # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Not supported yet: https://github.com/awslabs/amazon-eks-ami/issues/805
+ bootstrap_extra_args = var.bootstrap_extra_args
+ pre_bootstrap_user_data = var.pre_bootstrap_user_data
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+ }
+ )) : ""
+ }
+ }
+}
+
+# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
+# An important note is that user data must be in MIME multi-part archive format,
+# as by default, EKS will merge the bootstrapping command required for nodes to join the
+# cluster with your user data. If you use a custom AMI in your launch template,
+# this merging will NOT happen and you are responsible for nodes joining the cluster.
+# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
+
+data "cloudinit_config" "linux_eks_managed_node_group" {
+ count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0
+
+ base64_encode = true
+ gzip = false
+ boundary = "//"
+
+  # Prepend to existing user data supplied by AWS EKS
+ part {
+ content_type = "text/x-shellscript"
+ content = var.pre_bootstrap_user_data
+ }
+}
diff --git a/modules/_user_data/outputs.tf b/modules/_user_data/outputs.tf
new file mode 100644
index 0000000000..c2a569b05b
--- /dev/null
+++ b/modules/_user_data/outputs.tf
@@ -0,0 +1,4 @@
+output "user_data" {
+ description = "Base64 encoded user data rendered for the provided inputs"
+ value = try(local.platform[var.platform].user_data, "")
+}
diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf
new file mode 100644
index 0000000000..232e1e883e
--- /dev/null
+++ b/modules/_user_data/variables.tf
@@ -0,0 +1,71 @@
+variable "create" {
+ description = "Determines whether to create user-data or not"
+ type = bool
+ default = true
+}
+
+variable "platform" {
+ description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based"
+ type = string
+ default = "linux"
+}
+
+variable "enable_bootstrap_user_data" {
+ description = "Determines whether the bootstrap configurations are populated within the user data template"
+ type = bool
+ default = false
+}
+
+variable "is_eks_managed_node_group" {
+ description = "Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not"
+ type = bool
+ default = true
+}
+
+variable "cluster_name" {
+ description = "Name of the EKS cluster"
+ type = string
+ default = ""
+}
+
+variable "cluster_endpoint" {
+ description = "Endpoint of associated EKS cluster"
+ type = string
+ default = ""
+}
+
+variable "cluster_auth_base64" {
+ description = "Base64 encoded CA of associated EKS cluster"
+ type = string
+ default = ""
+}
+
+variable "cluster_service_ipv4_cidr" {
+ description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks"
+ type = string
+ default = null
+}
+
+variable "pre_bootstrap_user_data" {
+ description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
+ type = string
+ default = ""
+}
+
+variable "post_bootstrap_user_data" {
+ description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
+ type = string
+ default = ""
+}
+
+variable "bootstrap_extra_args" {
+ description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
+ type = string
+ default = ""
+}
+
+variable "user_data_template_path" {
+ description = "Path to a local, custom user data template file to use when rendering user data"
+ type = string
+ default = ""
+}
diff --git a/modules/_user_data/versions.tf b/modules/_user_data/versions.tf
new file mode 100644
index 0000000000..e293dc67ce
--- /dev/null
+++ b/modules/_user_data/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ cloudinit = {
+ source = "hashicorp/cloudinit"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
new file mode 100644
index 0000000000..43e4a72bc3
--- /dev/null
+++ b/modules/eks-managed-node-group/README.md
@@ -0,0 +1,175 @@
+# EKS Managed Node Group Module
+
+Configuration in this directory creates an EKS Managed Node Group along with an IAM role, security group, and launch template
+
+## Usage
+
+```hcl
+module "eks_managed_node_group" {
+ source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
+
+ name = "separate-eks-mng"
+ cluster_name = "my-cluster"
+ cluster_version = "1.21"
+
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = {
+ dedicated = {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ }
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+```
+
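+When supplying a custom AMI, EKS no longer merges its bootstrap user data into the launch template, so the module-rendered bootstrap configuration must be enabled. A hedged sketch follows (AMI ID, cluster endpoint/CA, and all bootstrap values are placeholders; the bootstrap inputs mirror those documented for the internal `_user_data` module):
+
+```hcl
+module "eks_managed_node_group_custom_ami" {
+  source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
+
+  name            = "custom-ami-mng"
+  cluster_name    = "my-cluster"
+  cluster_version = "1.21"
+
+  vpc_id     = "vpc-1234556abcdef"
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+  min_size     = 1
+  max_size     = 3
+  desired_size = 1
+
+  # Custom AMI requires the module to render the bootstrap user data
+  ami_id                     = "ami-0123456789abcdef0" # placeholder EKS optimized AMI ID
+  enable_bootstrap_user_data = true
+  cluster_endpoint           = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
+  cluster_auth_base64        = "LS0tLS1CRUdJTi..."                               # placeholder
+
+  pre_bootstrap_user_data  = "echo 'pre-bootstrap'"
+  bootstrap_extra_args     = "--container-runtime containerd"
+  post_bootstrap_user_data = "echo 'post-bootstrap'"
+}
+```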
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.64 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [user\_data](#module\_user\_data) | ../_user_data | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_eks_node_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance. If not supplied, EKS will use its own default image | `string` | `""` | no |
+| [ami\_release\_version](#input\_ami\_release\_version) | AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version | `string` | `null` | no |
+| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64` | `string` | `null` | no |
+| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no |
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`, these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
+| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
+| [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `"ON_DEMAND"` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
+| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
+| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
+| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
+| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no |
+| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no |
+| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
+| [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no |
+| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
+| [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20` | `number` | `null` | no |
+| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance(s) will be EBS-optimized | `bool` | `null` | no |
+| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
+| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
+| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
+| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no |
+| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
+| [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
+| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the node group. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `null` | no |
+| [instance\_types](#input\_instance\_types) | Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]` | `list(string)` | `null` | no |
+| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
+| [key\_name](#input\_key\_name) | The key name that should be used for the instance(s) | `string` | `null` | no |
+| [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
+| [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default version of the launch template | `string` | `null` | no |
+| [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no |
+| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `""` | no |
+| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
+| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no |
+| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `null` | no |
+| [max\_size](#input\_max\_size) | Maximum number of instances/nodes | `number` | `3` | no |
+| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` |
| `[]` | no |
-| subnets | List of subnet IDs. Will replace the root module subnets. | `list(string)` | `var.subnets` | no |
-| timeouts | A map of timeouts for create/delete operations. | `map(string)` | Provider default behavior | no |
-| tags | Key-value map of resource tags. Will be merged with root module tags. | `map(string)` | `var.tags` | no |
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-
-## Modules
-
-No modules.
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource |
-| [aws_iam_role.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_policy_document.eks_fargate_pod_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_role.custom_fargate_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
-| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. | `string` | `""` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created. | `bool` | `true` | no |
-| [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
-| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in README.md for more details | `any` | `{}` | no |
-| [iam\_path](#input\_iam\_path) | IAM roles will be created on this path. | `string` | `"/"` | no |
-| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
-| [subnets](#input\_subnets) | A list of subnets for the EKS Fargate profiles. | `list(string)` | `[]` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources. | `map(string)` | `{}` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
-| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
-| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| [iam\_role\_arn](#output\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
-| [iam\_role\_name](#output\_iam\_role\_name) | IAM role name for EKS Fargate pods |
-
diff --git a/modules/fargate/main.tf b/modules/fargate/main.tf
deleted file mode 100644
index a4e4b0d80f..0000000000
--- a/modules/fargate/main.tf
+++ /dev/null
@@ -1,72 +0,0 @@
-locals {
- create_eks = var.create_eks && length(var.fargate_profiles) > 0
-
- pod_execution_role_arn = coalescelist(aws_iam_role.eks_fargate_pod.*.arn, data.aws_iam_role.custom_fargate_iam_role.*.arn, [""])[0]
- pod_execution_role_name = coalescelist(aws_iam_role.eks_fargate_pod.*.name, data.aws_iam_role.custom_fargate_iam_role.*.name, [""])[0]
-
- fargate_profiles = { for k, v in var.fargate_profiles : k => v if var.create_eks }
-}
-
-data "aws_partition" "current" {}
-
-data "aws_iam_policy_document" "eks_fargate_pod_assume_role" {
- count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
-
- statement {
- effect = "Allow"
- actions = ["sts:AssumeRole"]
-
- principals {
- type = "Service"
- identifiers = ["eks-fargate-pods.amazonaws.com"]
- }
- }
-}
-
-data "aws_iam_role" "custom_fargate_iam_role" {
- count = local.create_eks && !var.create_fargate_pod_execution_role ? 1 : 0
-
- name = var.fargate_pod_execution_role_name
-}
-
-resource "aws_iam_role" "eks_fargate_pod" {
- count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
-
- name_prefix = format("%s-fargate", substr(var.cluster_name, 0, 24))
- assume_role_policy = data.aws_iam_policy_document.eks_fargate_pod_assume_role[0].json
- permissions_boundary = var.permissions_boundary
- tags = var.tags
- path = var.iam_path
-}
-
-resource "aws_iam_role_policy_attachment" "eks_fargate_pod" {
- count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
-
- policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"
- role = aws_iam_role.eks_fargate_pod[0].name
-}
-
-resource "aws_eks_fargate_profile" "this" {
- for_each = local.fargate_profiles
-
- cluster_name = var.cluster_name
- fargate_profile_name = lookup(each.value, "name", format("%s-fargate-%s", var.cluster_name, replace(each.key, "_", "-")))
- pod_execution_role_arn = local.pod_execution_role_arn
- subnet_ids = lookup(each.value, "subnets", var.subnets)
-
- dynamic "selector" {
- for_each = each.value.selectors
-
- content {
- namespace = selector.value["namespace"]
- labels = lookup(selector.value, "labels", {})
- }
- }
-
- timeouts {
- create = try(each.value["timeouts"].create, null)
- delete = try(each.value["timeouts"].delete, null)
- }
-
- tags = merge(var.tags, lookup(each.value, "tags", {}))
-}
diff --git a/modules/fargate/outputs.tf b/modules/fargate/outputs.tf
deleted file mode 100644
index 126ba6e385..0000000000
--- a/modules/fargate/outputs.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-output "fargate_profile_ids" {
- description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)."
- value = [for f in aws_eks_fargate_profile.this : f.id]
-}
-
-output "fargate_profile_arns" {
- description = "Amazon Resource Name (ARN) of the EKS Fargate Profiles."
- value = [for f in aws_eks_fargate_profile.this : f.arn]
-}
-
-output "iam_role_name" {
- description = "IAM role name for EKS Fargate pods"
- value = local.pod_execution_role_name
-}
-
-output "iam_role_arn" {
- description = "IAM role ARN for EKS Fargate pods"
- value = local.pod_execution_role_arn
-}
-
-output "aws_auth_roles" {
- description = "Roles for use in aws-auth ConfigMap"
- value = [
- for i in range(1) : {
- worker_role_arn = local.pod_execution_role_arn
- platform = "fargate"
- } if local.create_eks
- ]
-}
diff --git a/modules/fargate/variables.tf b/modules/fargate/variables.tf
deleted file mode 100644
index 39e2cc68b3..0000000000
--- a/modules/fargate/variables.tf
+++ /dev/null
@@ -1,53 +0,0 @@
-variable "create_eks" {
- description = "Controls if EKS resources should be created (it affects almost all resources)"
- type = bool
- default = true
-}
-
-variable "create_fargate_pod_execution_role" {
- description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created."
- type = bool
- default = true
-}
-
-variable "cluster_name" {
- description = "Name of the EKS cluster."
- type = string
- default = ""
-}
-
-variable "iam_path" {
- description = "IAM roles will be created on this path."
- type = string
- default = "/"
-}
-
-variable "fargate_pod_execution_role_name" {
- description = "The IAM Role that provides permissions for the EKS Fargate Profile."
- type = string
- default = null
-}
-
-variable "fargate_profiles" {
- description = "Fargate profiles to create. See `fargate_profile` keys section in README.md for more details"
- type = any
- default = {}
-}
-
-variable "permissions_boundary" {
- description = "If provided, all IAM roles will be created with this permissions boundary attached."
- type = string
- default = null
-}
-
-variable "subnets" {
- description = "A list of subnets for the EKS Fargate profiles."
- type = list(string)
- default = []
-}
-
-variable "tags" {
- description = "A map of tags to add to all resources."
- type = map(string)
- default = {}
-}
diff --git a/modules/node_groups/README.md b/modules/node_groups/README.md
deleted file mode 100644
index f91ce04ae0..0000000000
--- a/modules/node_groups/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# EKS `node_groups` submodule
-
-Helper submodule to create and manage resources related to `eks_node_groups`.
-
-## Node Groups' IAM Role
-
-The role ARN specified in `var.default_iam_role_arn` will be used by default. In a simple configuration this will be the worker role created by the parent module.
-
-`iam_role_arn` must be specified in either `var.node_groups_defaults` or `var.node_groups` if the default parent IAM role is not being created for whatever reason, for example if `manage_worker_iam_resources` is set to false in the parent.
-
-## `node_groups` and `node_groups_defaults` keys
-`node_groups_defaults` is a map that can take the below keys. Values will be used if not specified in individual node groups.
-
-`node_groups` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_node_group` name. Inner map can take the below values.
-
-| Name | Description | Type | If unset |
-|------|-------------|:----:|:-----:|
-| additional\_tags | Additional tags to apply to node group | map(string) | Only `var.tags` applied |
-| ami\_release\_version | AMI version of workers | string | Provider default behavior |
-| ami\_type | AMI Type. See Terraform or AWS docs | string | Provider default behavior |
-| ami\_id | ID of custom AMI. If you use a custom AMI, you need to set `ami_is_eks_optimized` | string | Provider default behavior |
-| ami\_is\_eks\_optimized | If the custom AMI is an EKS optimised image, ignored if `ami_id` is not set. If this is `true` then `bootstrap.sh` is called automatically (max pod logic needs to be manually set), if this is `false` you need to provide all the node configuration in `pre_userdata` | bool | `true` |
-| capacity\_type | Type of instance capacity to provision. Options are `ON_DEMAND` and `SPOT` | string | Provider default behavior |
-| create_launch_template | Create and use a default launch template | bool | `false` |
-| desired\_capacity | Desired number of workers | number | `var.workers_group_defaults[asg_desired_capacity]` |
-| disk\_encrypted | Whether the root disk will be encrypyted. Requires `create_launch_template` to be `true` and `disk_kms_key_id` to be set | bool | false |
-| disk\_kms\_key\_id | KMS Key used to encrypt the root disk. Requires both `create_launch_template` and `disk_encrypted` to be `true` | string | "" |
-| disk\_size | Workers' disk size | number | Provider default behavior |
-| disk\_type | Workers' disk type. Require `create_launch_template` to be `true`| string | Provider default behavior |
-| disk\_throughput | Workers' disk throughput. Require `create_launch_template` to be `true` and `disk_type` to be `gp3`| number | Provider default behavior |
-| disk\_iops | Workers' disk IOPS. Require `create_launch_template` to be `true` and `disk_type` to be `gp3`| number | Provider default behavior |
-| ebs\_optimized | Enables/disables EBS optimization. Require `create_launch_template` to be `true` | bool | `true` if defined `instance\_types` are not present in `var.ebs\_optimized\_not\_supported` |
-| enable_monitoring | Enables/disables detailed monitoring. Require `create_launch_template` to be `true`| bool | `true` |
-| eni_delete | Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying) | bool | `true` |
-| force\_update\_version | Force version update if existing pods are unable to be drained due to a pod disruption budget issue. | bool | Provider default behavior |
-| iam\_role\_arn | IAM role ARN for workers | string | `var.default_iam_role_arn` |
-| instance\_types | Node group's instance type(s). Multiple types can be specified when `capacity_type="SPOT"`. | list | `[var.workers_group_defaults[instance_type]]` |
-| k8s\_labels | Kubernetes labels | map(string) | No labels applied |
-| key\_name | Key name for workers. Set to empty string to disable remote access | string | `var.workers_group_defaults[key_name]` |
-| bootstrap_env | Provide environment variables to customise [bootstrap.sh](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh). Require `create_launch_template` to be `true` | map(string) | `{}` |
-| kubelet_extra_args | Extra arguments for kubelet, this is automatically merged with `labels`. Require `create_launch_template` to be `true` | string | "" |
-| launch_template_id | The id of a aws_launch_template to use | string | No LT used |
-| launch\_template_version | The version of the LT to use | string | none |
-| max\_capacity | Max number of workers | number | `var.workers_group_defaults[asg_max_size]` |
-| min\_capacity | Min number of workers | number | `var.workers_group_defaults[asg_min_size]` |
-| update_config.max\_unavailable\_percentage | Max percentage of unavailable nodes during update. (e.g. 25, 50, etc) | number | `null` if `update_config.max_unavailable` is set |
-| update_config.max\_unavailable | Max number of unavailable nodes during update | number | `null` if `update_config.max_unavailable_percentage` is set |
-| name | Name of the node group. If you don't really need this, we recommend you to use `name_prefix` instead. | string | Will use the autogenerate name prefix |
-| name_prefix | Name prefix of the node group | string | Auto generated |
-| pre_userdata | userdata to pre-append to the default userdata. Require `create_launch_template` to be `true`| string | "" |
-| public_ip | Associate a public ip address with a worker. Require `create_launch_template` to be `true`| string | `false`
-| source\_security\_group\_ids | Source security groups for remote access to workers | list(string) | If key\_name is specified: THE REMOTE ACCESS WILL BE OPENED TO THE WORLD |
-| subnets | Subnets to contain workers | list(string) | `var.workers_group_defaults[subnets]` |
-| version | Kubernetes version | string | Provider default behavior |
-| taints | Kubernetes node taints | list(map) | empty |
-| timeouts | A map of timeouts for create/update/delete operations. | `map(string)` | Provider default behavior |
-| update_default_version | Whether or not to set the new launch template version the Default | bool | `true` |
-| metadata_http_endpoint | The state of the instance metadata service. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_endpoint]` |
-| metadata_http_tokens | If session tokens are required. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_tokens]` |
-| metadata_http_put_response_hop_limit | The desired HTTP PUT response hop limit for instance metadata requests. Requires `create_launch_template` to be `true` | number | `var.workers_group_defaults[metadata_http_put_response_hop_limit]` |
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0 |
-
-## Modules
-
-No modules.
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_eks_node_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
-| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of parent cluster | `string` | `""` | no |
-| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of parent cluster | `string` | `""` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
-| [ebs\_optimized\_not\_supported](#input\_ebs\_optimized\_not\_supported) | List of instance types that do not support EBS optimization | `list(string)` | `[]` | no |
-| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
-| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | `{}` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
-| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values |
-
diff --git a/modules/node_groups/launch_template.tf b/modules/node_groups/launch_template.tf
deleted file mode 100644
index 6abe358d5a..0000000000
--- a/modules/node_groups/launch_template.tf
+++ /dev/null
@@ -1,146 +0,0 @@
-data "cloudinit_config" "workers_userdata" {
- for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
-
- gzip = false
- base64_encode = true
- boundary = "//"
-
- part {
- content_type = "text/x-shellscript"
- content = templatefile("${path.module}/templates/userdata.sh.tpl",
- {
- cluster_name = var.cluster_name
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- ami_id = lookup(each.value, "ami_id", "")
- ami_is_eks_optimized = each.value["ami_is_eks_optimized"]
- bootstrap_env = each.value["bootstrap_env"]
- kubelet_extra_args = each.value["kubelet_extra_args"]
- pre_userdata = each.value["pre_userdata"]
- capacity_type = lookup(each.value, "capacity_type", "ON_DEMAND")
- append_labels = length(lookup(each.value, "k8s_labels", {})) > 0 ? ",${join(",", [for k, v in lookup(each.value, "k8s_labels", {}) : "${k}=${v}"])}" : ""
- }
- )
- }
-}
-
-# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
-# there are several more options one could set but you probably dont need to modify them
-# you can take the default and add your custom AMI and/or custom tags
-#
-# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI,
-# then the default user-data for bootstrapping a cluster is merged in the copy.
-resource "aws_launch_template" "workers" {
- for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
-
- name_prefix = local.node_groups_names[each.key]
- description = format("EKS Managed Node Group custom LT for %s", local.node_groups_names[each.key])
- update_default_version = lookup(each.value, "update_default_version", true)
-
- block_device_mappings {
- device_name = "/dev/xvda"
-
- ebs {
- volume_size = lookup(each.value, "disk_size", null)
- volume_type = lookup(each.value, "disk_type", null)
- iops = lookup(each.value, "disk_iops", null)
- throughput = lookup(each.value, "disk_throughput", null)
- encrypted = lookup(each.value, "disk_encrypted", null)
- kms_key_id = lookup(each.value, "disk_kms_key_id", null)
- delete_on_termination = true
- }
- }
-
- ebs_optimized = lookup(each.value, "ebs_optimized", !contains(var.ebs_optimized_not_supported, element(each.value.instance_types, 0)))
-
- instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
-
- monitoring {
- enabled = lookup(each.value, "enable_monitoring", null)
- }
-
- network_interfaces {
- associate_public_ip_address = lookup(each.value, "public_ip", null)
- delete_on_termination = lookup(each.value, "eni_delete", null)
- security_groups = compact(flatten([
- var.worker_security_group_id,
- var.worker_additional_security_group_ids,
- lookup(
- each.value,
- "additional_security_group_ids",
- null,
- ),
- ]))
- }
-
- # if you want to use a custom AMI
- image_id = lookup(each.value, "ami_id", null)
-
- # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then
- # you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
- #
- # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
-
- user_data = data.cloudinit_config.workers_userdata[each.key].rendered
-
- key_name = lookup(each.value, "key_name", null)
-
- metadata_options {
- http_endpoint = lookup(each.value, "metadata_http_endpoint", null)
- http_tokens = lookup(each.value, "metadata_http_tokens", null)
- http_put_response_hop_limit = lookup(each.value, "metadata_http_put_response_hop_limit", null)
- }
-
- # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "instance"
-
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
- }
-
- # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho)
- tag_specifications {
- resource_type = "volume"
-
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
- }
-
- # Supplying custom tags to EKS instances ENI's is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "network-interface"
-
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
- }
-
- # Tag the LT itself
- tags = merge(
- var.tags,
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {}),
- )
-
- lifecycle {
- create_before_destroy = true
- }
-}
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
deleted file mode 100644
index 0a6c7cbffb..0000000000
--- a/modules/node_groups/locals.tf
+++ /dev/null
@@ -1,51 +0,0 @@
-locals {
- # Merge defaults and per-group values to make code cleaner
- node_groups_expanded = { for k, v in var.node_groups : k => merge(
- {
- desired_capacity = var.workers_group_defaults["asg_desired_capacity"]
- iam_role_arn = var.default_iam_role_arn
- instance_types = [var.workers_group_defaults["instance_type"]]
- key_name = var.workers_group_defaults["key_name"]
- launch_template_id = var.workers_group_defaults["launch_template_id"]
- launch_template_version = var.workers_group_defaults["launch_template_version"]
- set_instance_types_on_lt = false
- max_capacity = var.workers_group_defaults["asg_max_size"]
- min_capacity = var.workers_group_defaults["asg_min_size"]
- subnets = var.workers_group_defaults["subnets"]
- create_launch_template = false
- bootstrap_env = {}
- kubelet_extra_args = var.workers_group_defaults["kubelet_extra_args"]
- disk_size = var.workers_group_defaults["root_volume_size"]
- disk_type = var.workers_group_defaults["root_volume_type"]
- disk_iops = var.workers_group_defaults["root_iops"]
- disk_throughput = var.workers_group_defaults["root_volume_throughput"]
- disk_encrypted = var.workers_group_defaults["root_encrypted"]
- disk_kms_key_id = var.workers_group_defaults["root_kms_key_id"]
- enable_monitoring = var.workers_group_defaults["enable_monitoring"]
- eni_delete = var.workers_group_defaults["eni_delete"]
- public_ip = var.workers_group_defaults["public_ip"]
- pre_userdata = var.workers_group_defaults["pre_userdata"]
- additional_security_group_ids = var.workers_group_defaults["additional_security_group_ids"]
- taints = []
- timeouts = var.workers_group_defaults["timeouts"]
- update_default_version = true
- ebs_optimized = null
- metadata_http_endpoint = var.workers_group_defaults["metadata_http_endpoint"]
- metadata_http_tokens = var.workers_group_defaults["metadata_http_tokens"]
- metadata_http_put_response_hop_limit = var.workers_group_defaults["metadata_http_put_response_hop_limit"]
- ami_is_eks_optimized = true
- },
- var.node_groups_defaults,
- v,
- ) if var.create_eks }
-
- node_groups_names = { for k, v in local.node_groups_expanded : k => lookup(
- v,
- "name",
- lookup(
- v,
- "name_prefix",
- join("-", [var.cluster_name, k])
- )
- ) }
-}
diff --git a/modules/node_groups/main.tf b/modules/node_groups/main.tf
deleted file mode 100644
index 75e6209730..0000000000
--- a/modules/node_groups/main.tf
+++ /dev/null
@@ -1,105 +0,0 @@
-resource "aws_eks_node_group" "workers" {
- for_each = local.node_groups_expanded
-
- node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
- node_group_name = lookup(each.value, "name", null)
-
- cluster_name = var.cluster_name
- node_role_arn = each.value["iam_role_arn"]
- subnet_ids = each.value["subnets"]
-
- scaling_config {
- desired_size = each.value["desired_capacity"]
- max_size = each.value["max_capacity"]
- min_size = each.value["min_capacity"]
- }
-
- ami_type = lookup(each.value, "ami_type", null)
- disk_size = each.value["launch_template_id"] != null || each.value["create_launch_template"] ? null : lookup(each.value, "disk_size", null)
- instance_types = !each.value["set_instance_types_on_lt"] ? each.value["instance_types"] : null
- release_version = lookup(each.value, "ami_release_version", null)
- capacity_type = lookup(each.value, "capacity_type", null)
- force_update_version = lookup(each.value, "force_update_version", null)
-
- dynamic "remote_access" {
- for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
- ec2_ssh_key = each.value["key_name"]
- source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
- }] : []
-
- content {
- ec2_ssh_key = remote_access.value["ec2_ssh_key"]
- source_security_group_ids = remote_access.value["source_security_group_ids"]
- }
- }
-
- dynamic "launch_template" {
- for_each = each.value["launch_template_id"] != null ? [{
- id = each.value["launch_template_id"]
- version = each.value["launch_template_version"]
- }] : []
-
- content {
- id = launch_template.value["id"]
- version = launch_template.value["version"]
- }
- }
-
- dynamic "launch_template" {
- for_each = each.value["launch_template_id"] == null && each.value["create_launch_template"] ? [{
- id = aws_launch_template.workers[each.key].id
- version = each.value["launch_template_version"] == "$Latest" ? aws_launch_template.workers[each.key].latest_version : (
- each.value["launch_template_version"] == "$Default" ? aws_launch_template.workers[each.key].default_version : each.value["launch_template_version"]
- )
- }] : []
-
- content {
- id = launch_template.value["id"]
- version = launch_template.value["version"]
- }
- }
-
- dynamic "taint" {
- for_each = each.value["taints"]
-
- content {
- key = taint.value["key"]
- value = taint.value["value"]
- effect = taint.value["effect"]
- }
- }
-
- dynamic "update_config" {
- for_each = try(each.value.update_config.max_unavailable_percentage > 0, each.value.update_config.max_unavailable > 0, false) ? [true] : []
-
- content {
- max_unavailable_percentage = try(each.value.update_config.max_unavailable_percentage, null)
- max_unavailable = try(each.value.update_config.max_unavailable, null)
- }
- }
-
- timeouts {
- create = lookup(each.value["timeouts"], "create", null)
- update = lookup(each.value["timeouts"], "update", null)
- delete = lookup(each.value["timeouts"], "delete", null)
- }
-
- version = lookup(each.value, "version", null)
-
- labels = merge(
- lookup(var.node_groups_defaults, "k8s_labels", {}),
- lookup(var.node_groups[each.key], "k8s_labels", {})
- )
-
- tags = merge(
- var.tags,
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {}),
- )
-
- lifecycle {
- create_before_destroy = true
- ignore_changes = [scaling_config[0].desired_size]
- }
-
-}
diff --git a/modules/node_groups/outputs.tf b/modules/node_groups/outputs.tf
deleted file mode 100644
index ad148ea514..0000000000
--- a/modules/node_groups/outputs.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-output "node_groups" {
- description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
- value = aws_eks_node_group.workers
-}
-
-output "aws_auth_roles" {
- description = "Roles for use in aws-auth ConfigMap"
- value = [
- for k, v in local.node_groups_expanded : {
- worker_role_arn = lookup(v, "iam_role_arn", var.default_iam_role_arn)
- platform = "linux"
- }
- ]
-}
diff --git a/modules/node_groups/templates/userdata.sh.tpl b/modules/node_groups/templates/userdata.sh.tpl
deleted file mode 100644
index 321c17b427..0000000000
--- a/modules/node_groups/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash -e
-%{ if length(ami_id) == 0 ~}
-
-# Set bootstrap env
-printf '#!/bin/bash
-%{ for k, v in bootstrap_env ~}
-export ${k}="${v}"
-%{ endfor ~}
-export ADDITIONAL_KUBELET_EXTRA_ARGS="${kubelet_extra_args}"
-' > /etc/profile.d/eks-bootstrap-env.sh
-
-# Source extra environment variables in bootstrap script
-sed -i '/^set -o errexit/a\\nsource /etc/profile.d/eks-bootstrap-env.sh' /etc/eks/bootstrap.sh
-
-# Merge ADDITIONAL_KUBELET_EXTRA_ARGS into KUBELET_EXTRA_ARGS
-sed -i 's/^KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-}/KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-} $${ADDITIONAL_KUBELET_EXTRA_ARGS}/' /etc/eks/bootstrap.sh
-%{else ~}
-
-# Set variables for custom AMI
-API_SERVER_URL=${cluster_endpoint}
-B64_CLUSTER_CA=${cluster_auth_base64}
-%{ for k, v in bootstrap_env ~}
-${k}="${v}"
-%{ endfor ~}
-KUBELET_EXTRA_ARGS='--node-labels=eks.amazonaws.com/nodegroup-image=${ami_id},eks.amazonaws.com/capacityType=${capacity_type}${append_labels} ${kubelet_extra_args}'
-%{endif ~}
-
-# User supplied pre userdata
-${pre_userdata}
-%{ if length(ami_id) > 0 && ami_is_eks_optimized ~}
-
-# Call bootstrap for EKS optimised custom AMI
-/etc/eks/bootstrap.sh ${cluster_name} --apiserver-endpoint "$${API_SERVER_URL}" --b64-cluster-ca "$${B64_CLUSTER_CA}" --kubelet-extra-args "$${KUBELET_EXTRA_ARGS}"
-%{ endif ~}
diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf
deleted file mode 100644
index 1aa8cfe26d..0000000000
--- a/modules/node_groups/variables.tf
+++ /dev/null
@@ -1,71 +0,0 @@
-variable "create_eks" {
- description = "Controls if EKS resources should be created (it affects almost all resources)"
- type = bool
- default = true
-}
-
-variable "cluster_name" {
- description = "Name of parent cluster"
- type = string
- default = ""
-}
-
-variable "cluster_endpoint" {
- description = "Endpoint of parent cluster"
- type = string
- default = ""
-}
-
-variable "cluster_auth_base64" {
- description = "Base64 encoded CA of parent cluster"
- type = string
- default = ""
-}
-
-variable "default_iam_role_arn" {
- description = "ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults`"
- type = string
- default = ""
-}
-
-variable "workers_group_defaults" {
- description = "Workers group defaults from parent"
- type = any
- default = {}
-}
-
-variable "worker_security_group_id" {
- description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
- type = string
- default = ""
-}
-
-variable "worker_additional_security_group_ids" {
- description = "A list of additional security group ids to attach to worker instances"
- type = list(string)
- default = []
-}
-
-variable "tags" {
- description = "A map of tags to add to all resources"
- type = map(string)
- default = {}
-}
-
-variable "node_groups_defaults" {
- description = "map of maps of node groups to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
- type = any
- default = {}
-}
-
-variable "node_groups" {
- description = "Map of maps of `eks_node_groups` to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
- type = any
- default = {}
-}
-
-variable "ebs_optimized_not_supported" {
- description = "List of instance types that do not support EBS optimization"
- type = list(string)
- default = []
-}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
new file mode 100644
index 0000000000..10a3068825
--- /dev/null
+++ b/modules/self-managed-node-group/README.md
@@ -0,0 +1,199 @@
+# Self Managed Node Group Module
+
+Configuration in this directory creates a Self Managed Node Group (AutoScaling Group) along with an IAM role, security group, and launch template
+
+## Usage
+
+```hcl
+module "self_managed_node_group" {
+ source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"
+
+ name = "separate-self-mng"
+ cluster_name = "my-cluster"
+ cluster_version = "1.21"
+ cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+ cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+ vpc_security_group_ids = [
+ # cluster_security_group_id,
+ ]
+
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ launch_template_name = "separate-self-mng"
+ instance_type = "m5.large"
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+```
+
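+For a Bottlerocket-based self managed node group, a rough sketch might look like the following, assuming the node group exposes the same `platform` input as the internal `_user_data` module; the AMI ID, cluster endpoint/CA, and TOML settings are illustrative placeholders:
+
+```hcl
+module "self_managed_node_group_bottlerocket" {
+  source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"
+
+  name                = "self-mng-bottlerocket"
+  platform            = "bottlerocket"
+  cluster_name        = "my-cluster"
+  cluster_version     = "1.21"
+  cluster_endpoint    = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
+  cluster_auth_base64 = "LS0tLS1CRUdJTi..."                               # placeholder
+
+  vpc_id     = "vpc-1234556abcdef"
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+  min_size      = 1
+  max_size      = 3
+  desired_size  = 1
+  instance_type = "m5.large"
+
+  ami_id = "ami-0123456789abcdef0" # placeholder Bottlerocket AMI for the cluster version
+
+  # Additional Bottlerocket settings (TOML) merged into the rendered user data
+  bootstrap_extra_args = <<-EOT
+    [settings.kubernetes.node-labels]
+    "lifecycle" = "self-managed"
+  EOT
+}
+```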
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.64 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [user\_data](#module\_user\_data) | ../_user_data | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
+| [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
+| [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance | `string` | `""` | no |
+| [availability\_zones](#input\_availability\_zones) | A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids` | `list(string)` | `null` | no |
+| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no |
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`, these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
+| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
+| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no |
+| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
+| [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no |
+| [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no |
+| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
+| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no |
+| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
+| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
+| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
+| [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `1` | no |
+| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
+| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
+| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
+| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
+| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no |
+| [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `null` | no |
+| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
+| [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling | `bool` | `null` | no |
+| [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
+| [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
+| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
+| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
+| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
+| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no |
+| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
+| [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `null` | no |
+| [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` | `null` | no |
+| [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no |
+| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
+| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default Version of the launch template | `string` | `null` | no |
+| [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no |
+| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
+| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
+| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
+| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with the instance | `map(string)` | `null` | no |
+| [max\_instance\_lifetime](#input\_max\_instance\_lifetime) | The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds | `number` | `null` | no |
+| [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `3` | no |
+| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` |