diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 145baf94e4..93900a8e02 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.83.5 + rev: v1.86.0 hooks: - id: terraform_fmt - id: terraform_validate @@ -27,3 +27,4 @@ repos: hooks: - id: check-merge-conflict - id: end-of-file-fixer + - id: trailing-whitespace diff --git a/README.md b/README.md index bd15fcbae5..1039fa6c82 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,6 @@ Terraform module which creates AWS EKS (Kubernetes) resources - [Frequently Asked Questions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md) - [Compute Resources](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md) -- [IRSA Integration](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/irsa_integration.md) - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/user_data.md) - [Network Connectivity](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/network_connectivity.md) - Upgrade Guides @@ -27,58 +26,17 @@ Please note that we strive to provide a comprehensive suite of documentation for The examples provided under `examples/` provide a comprehensive suite of configurations that demonstrate nearly all of the possible different configurations and settings that can be used with this module. However, these examples are not representative of clusters that you would normally find in use for production workloads. 
For reference architectures that utilize this module, please see the following: - [EKS Reference Architecture](https://github.com/clowdhaus/eks-reference-architecture) - -## Available Features - -- AWS EKS Cluster Addons -- AWS EKS Identity Provider Configuration -- [AWS EKS on Outposts support](https://aws.amazon.com/blogs/aws/deploy-your-amazon-eks-clusters-locally-on-aws-outposts/) -- All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported: - - [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) - - [Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) -- Support for creating Karpenter related AWS infrastructure resources (e.g. IAM roles, SQS queue, EventBridge rules, etc.) -- Support for custom AMI, custom launch template, and custom user data including custom user data template -- Support for Amazon Linux 2 EKS Optimized AMI and Bottlerocket nodes - - Windows based node support is limited to a default user data template that is provided due to the lack of Windows support and manual steps required to provision Windows based EKS nodes -- Support for module created security group, bring your own security groups, as well as adding additional security group rules to the module created security group(s) -- Support for creating node groups/profiles separate from the cluster through the use of sub-modules (same as what is used by root module) -- Support for node group/profile "default" settings - useful for when creating multiple node groups/Fargate profiles where you want to set a common set of configurations once, and then individually control only select features on certain node groups/profiles - -### [IRSA Terraform Module](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) - -An 
IAM role for service accounts (IRSA) sub-module has been created to make deploying common addons/controllers easier. Instead of users having to create a custom IAM role with the necessary federated role assumption required for IRSA plus find and craft the associated policy required for the addon/controller, users can create the IRSA role and policy with a few lines of code. See the [`terraform-aws-iam/examples/iam-role-for-service-accounts`](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/examples/iam-role-for-service-accounts-eks/main.tf) directory for examples on how to use the IRSA sub-module in conjunction with this (`terraform-aws-eks`) module. - -Some of the addon/controller policies that are currently supported include: - -- [Cert-Manager](https://cert-manager.io/docs/configuration/acme/dns01/route53/#set-up-an-iam-role) -- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) -- [EBS CSI Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json) -- [EFS CSI Driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/iam-policy-example.json) -- [External DNS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) -- [External Secrets](https://github.com/external-secrets/kubernetes-external-secrets#add-a-secret) -- [FSx for Lustre CSI Driver](https://github.com/kubernetes-sigs/aws-fsx-csi-driver/blob/master/docs/README.md) -- [Karpenter](https://github.com/aws/karpenter/blob/main/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml) -- [Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/install/iam_policy.json) - - [Load Balancer Controller Target Group Binding 
Only](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/#iam-permission-subset-for-those-who-use-targetgroupbinding-only-and-dont-plan-to-use-the-aws-load-balancer-controller-to-manage-security-group-rules) -- [App Mesh Controller](https://github.com/aws/aws-app-mesh-controller-for-k8s/blob/master/config/iam/controller-iam-policy.json) - - [App Mesh Envoy Proxy](https://raw.githubusercontent.com/aws/aws-app-mesh-controller-for-k8s/master/config/iam/envoy-iam-policy.json) -- [Managed Service for Prometheus](https://docs.aws.amazon.com/prometheus/latest/userguide/set-up-irsa.html) -- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler#5-create-an-iam-role-for-the-pods) -- [Velero](https://github.com/vmware-tanzu/velero-plugin-for-aws#option-1-set-permissions-with-an-iam-user) -- [VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html) - -See [terraform-aws-iam/modules/iam-role-for-service-accounts](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) for current list of supported addon/controller policies as more are added to the project. 
+- [EKS Blueprints](https://github.com/aws-ia/terraform-aws-eks-blueprints) ## Usage ```hcl module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 19.0" + version = "~> 20.0" cluster_name = "my-cluster" - cluster_version = "1.27" + cluster_version = "1.29" cluster_endpoint_public_access = true @@ -98,51 +56,13 @@ module "eks" { subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] control_plane_subnet_ids = ["subnet-xyzde987", "subnet-slkjf456", "subnet-qeiru789"] - # Self Managed Node Group(s) - self_managed_node_group_defaults = { - instance_type = "m6i.large" - update_launch_template_default_version = true - iam_role_additional_policies = { - AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" - } - } - - self_managed_node_groups = { - one = { - name = "mixed-1" - max_size = 5 - desired_size = 2 - - use_mixed_instances_policy = true - mixed_instances_policy = { - instances_distribution = { - on_demand_base_capacity = 0 - on_demand_percentage_above_base_capacity = 10 - spot_allocation_strategy = "capacity-optimized" - } - - override = [ - { - instance_type = "m5.large" - weighted_capacity = "1" - }, - { - instance_type = "m6i.large" - weighted_capacity = "2" - }, - ] - } - } - } - # EKS Managed Node Group(s) eks_managed_node_group_defaults = { instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] } eks_managed_node_groups = { - blue = {} - green = { + example = { min_size = 1 max_size = 10 desired_size = 1 @@ -152,47 +72,6 @@ module "eks" { } } - # Fargate Profile(s) - fargate_profiles = { - default = { - name = "default" - selectors = [ - { - namespace = "default" - } - ] - } - } - - # aws-auth configmap - manage_aws_auth_configmap = true - - aws_auth_roles = [ - { - rolearn = "arn:aws:iam::66666666666:role/role1" - username = "role1" - groups = ["system:masters"] - }, - ] - - aws_auth_users = [ - { - userarn = "arn:aws:iam::66666666666:user/user1" - username = "user1" - 
groups = ["system:masters"] - }, - { - userarn = "arn:aws:iam::66666666666:user/user2" - username = "user2" - groups = ["system:masters"] - }, - ] - - aws_auth_accounts = [ - "777777777777", - "888888888888", - ] - tags = { Environment = "dev" Terraform = "true" @@ -202,7 +81,6 @@ module "eks" { ## Examples -- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations - [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups - [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) - [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for intelligent data plane management @@ -222,9 +100,8 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | | [time](#requirement\_time) | >= 0.9 | | [tls](#requirement\_tls) | >= 3.0 | @@ -232,8 +109,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | -| [kubernetes](#provider\_kubernetes) | >= 2.10 | +| [aws](#provider\_aws) | >= 5.34 | | [time](#provider\_time) | >= 0.9 | | [tls](#provider\_tls) | >= 3.0 | @@ -252,6 +128,8 @@ We are grateful to the community for contributing bugfixes and improvements! Ple |------|------| | [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | | [aws_ec2_tag.cluster_primary_security_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_tag) | resource | +| [aws_eks_access_entry.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource | +| [aws_eks_access_policy_association.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_policy_association) | resource | | [aws_eks_addon.before_compute](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) | resource | | [aws_eks_addon.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) | resource | | [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource | @@ -267,8 +145,6 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | -| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | -| [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | | [time_sleep.this](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | @@ -282,13 +158,10 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [access\_entries](#input\_access\_entries) | Map of access entries to add to the cluster | `any` | `{}` | no | | [attach\_cluster\_encryption\_policy](#input\_attach\_cluster\_encryption\_policy) | Indicates whether or not to attach an additional policy for the cluster IAM role to utilize the encryption key provided | `bool` | `true` | no | -| [aws\_auth\_accounts](#input\_aws\_auth\_accounts) | List of account maps to add to the aws-auth configmap | `list(any)` | `[]` | no | -| [aws\_auth\_fargate\_profile\_pod\_execution\_role\_arns](#input\_aws\_auth\_fargate\_profile\_pod\_execution\_role\_arns) | List of Fargate profile pod execution role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no | -| [aws\_auth\_node\_iam\_role\_arns\_non\_windows](#input\_aws\_auth\_node\_iam\_role\_arns\_non\_windows) | List of non-Windows based node IAM role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no | -| [aws\_auth\_node\_iam\_role\_arns\_windows](#input\_aws\_auth\_node\_iam\_role\_arns\_windows) | List of Windows based node IAM role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no | -| [aws\_auth\_roles](#input\_aws\_auth\_roles) | List of role maps to add to the aws-auth configmap | `list(any)` | `[]` | no | -| [aws\_auth\_users](#input\_aws\_auth\_users) | List of user maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [authentication\_mode](#input\_authentication\_mode) | The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` | `string` | `"API_AND_CONFIG_MAP"` | no | +| [cloudwatch\_log\_group\_class](#input\_cloudwatch\_log\_group\_class) | Specified the log class of the log group. 
Possible values are: `STANDARD` or `INFREQUENT_ACCESS` | `string` | `null` | no | | [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no | | [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no | | [cloudwatch\_log\_group\_tags](#input\_cloudwatch\_log\_group\_tags) | A map of additional tags to add to the cloudwatch log group created | `map(string)` | `{}` | no | @@ -305,7 +178,6 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `true` | no | | [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `false` | no | | [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` |
[
"0.0.0.0/0"
]
| no | -| [cluster\_iam\_role\_dns\_suffix](#input\_cluster\_iam\_role\_dns\_suffix) | Base DNS domain name for the current partition (e.g., amazonaws.com in AWS Commercial, amazonaws.com.cn in AWS China) | `string` | `null` | no | | [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no | | [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `null` | no | | [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no | @@ -321,8 +193,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.27`) | `string` | `null` | no | | [control\_plane\_subnet\_ids](#input\_control\_plane\_subnet\_ids) | A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane | `list(string)` | `[]` | no | -| [create](#input\_create) | Controls if EKS resources should be created (affects nearly all resources) | `bool` | `true` | no | -| [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). 
Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | +| [create](#input\_create) | Controls if resources should be created (affects nearly all resources) | `bool` | `true` | no | | [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no | | [create\_cluster\_primary\_security\_group\_tags](#input\_create\_cluster\_primary\_security\_group\_tags) | Indicates whether or not to tag the cluster's primary security group. This security group is created by the EKS service, not the module, and therefore tagging is handled after cluster creation | `bool` | `true` | no | | [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default | `bool` | `true` | no | @@ -334,8 +205,9 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [dataplane\_wait\_duration](#input\_dataplane\_wait\_duration) | Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed nodegroup(s), self-managed nodegroup(s), Fargate profile(s)) | `string` | `"30s"` | no | | [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no | | [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no | +| [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry | `bool` | `false` | no | | [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `true` | no | -| [enable\_kms\_key\_rotation](#input\_enable\_kms\_key\_rotation) | Specifies whether key rotation is enabled. Defaults to `true` | `bool` | `true` | no | +| [enable\_kms\_key\_rotation](#input\_enable\_kms\_key\_rotation) | Specifies whether key rotation is enabled | `bool` | `true` | no | | [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no | | [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no | | [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | @@ -351,13 +223,12 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [kms\_key\_aliases](#input\_kms\_key\_aliases) | A list of aliases to create. 
Note - due to the use of `toset()`, values must be static strings and not computed values | `list(string)` | `[]` | no | | [kms\_key\_deletion\_window\_in\_days](#input\_kms\_key\_deletion\_window\_in\_days) | The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the KMS key. If you specify a value, it must be between `7` and `30`, inclusive. If you do not specify a value, it defaults to `30` | `number` | `null` | no | | [kms\_key\_description](#input\_kms\_key\_description) | The description of the key as viewed in AWS console | `string` | `null` | no | -| [kms\_key\_enable\_default\_policy](#input\_kms\_key\_enable\_default\_policy) | Specifies whether to enable the default key policy. Defaults to `false` | `bool` | `false` | no | +| [kms\_key\_enable\_default\_policy](#input\_kms\_key\_enable\_default\_policy) | Specifies whether to enable the default key policy | `bool` | `true` | no | | [kms\_key\_override\_policy\_documents](#input\_kms\_key\_override\_policy\_documents) | List of IAM policy documents that are merged together into the exported document. In merging, statements with non-blank `sid`s will override statements with the same `sid` | `list(string)` | `[]` | no | | [kms\_key\_owners](#input\_kms\_key\_owners) | A list of IAM ARNs for those who will have full key permissions (`kms:*`) | `list(string)` | `[]` | no | | [kms\_key\_service\_users](#input\_kms\_key\_service\_users) | A list of IAM ARNs for [key service users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-service-integration) | `list(string)` | `[]` | no | | [kms\_key\_source\_policy\_documents](#input\_kms\_key\_source\_policy\_documents) | List of IAM policy documents that are merged together into the exported document. 
Statements must have unique `sid`s | `list(string)` | `[]` | no | | [kms\_key\_users](#input\_kms\_key\_users) | A list of IAM ARNs for [key users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-users) | `list(string)` | `[]` | no | -| [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `false` | no | | [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no | | [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no | | [node\_security\_group\_enable\_recommended\_rules](#input\_node\_security\_group\_enable\_recommended\_rules) | Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic | `bool` | `true` | no | @@ -379,7 +250,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | [DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -416,7 +287,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple ## License -Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-rds-aurora/tree/master/LICENSE) for full details. +Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details. 
## Additional information for users from Russia and Belarus diff --git a/docs/README.md b/docs/README.md index 889b8481e4..144826b4cf 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,10 +4,10 @@ - [Frequently Asked Questions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md) - [Compute Resources](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md) -- [IRSA Integration](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/irsa_integration.md) - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/user_data.md) - [Network Connectivity](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/network_connectivity.md) - Upgrade Guides - [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md) - [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md) - [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md) + - [Upgrade to v20.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md) diff --git a/docs/UPGRADE-18.0.md b/docs/UPGRADE-18.0.md index 481e9fe021..3f25ca8e43 100644 --- a/docs/UPGRADE-18.0.md +++ b/docs/UPGRADE-18.0.md @@ -13,7 +13,7 @@ cluster_security_group_name = $CLUSTER_NAME cluster_security_group_description = "EKS cluster security group." ``` -This configuration assumes that [`create_iam_role`](https://github.com/terraform-aws-modules/terraform-aws-eks#input_create_iam_role) is set to `true`, which is the default value. +This configuration assumes that [`create_iam_role`](https://github.com/terraform-aws-modules/terraform-aws-eks#input_create_iam_role) is set to `true`, which is the default value. 
As the location of the Terraform state of the IAM role has been changed from 17.x to 18.x, you'll also have to move the state before running `terraform apply` by calling: diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 19bd5f3946..5bb74723f4 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -58,7 +58,7 @@ Please consult the `examples` directory for reference example configurations. If ### Variable and output changes 1. Removed variables: - + - `node_security_group_ntp_ipv4_cidr_block` - default security group settings have an egress rule for ALL to `0.0.0.0/0`/`::/0` - `node_security_group_ntp_ipv6_cidr_block` - default security group settings have an egress rule for ALL to `0.0.0.0/0`/`::/0` - Self-managed node groups: diff --git a/docs/UPGRADE-20.0.md b/docs/UPGRADE-20.0.md new file mode 100644 index 0000000000..72bdd57699 --- /dev/null +++ b/docs/UPGRADE-20.0.md @@ -0,0 +1,262 @@ +# Upgrade from v19.x to v20.x + +Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce. + +## List of backwards incompatible changes + +- Minimum supported AWS provider version increased to `v5.34` +- Minimum supported Terraform version increased to `v1.3` to support Terraform state `moved` blocks as well as other advanced features +- The `resolve_conflicts` argument within the `cluster_addons` configuration has been replaced with `resolve_conflicts_on_create` and `resolve_conflicts_on_delete` now that `resolve_conflicts` is deprecated +- The default/fallback value for the `preserve` argument of `cluster_addons` is now set to `true`. This has shown to be useful for users deprovisioning clusters while avoiding the situation where the CNI is deleted too early and causes resources to be left orphaned resulting in conflicts. 
+- The Karpenter sub-module's use of the `irsa` naming convention has been removed, along with an update to the Karpenter controller IAM policy to align with Karpenter's `v1beta1`/`v0.32` changes. Instead of referring to the role as `irsa` or `pod_identity`, it's simply an IAM role used by the Karpenter controller and there is support for use with either IRSA and/or Pod Identity (default) at this time +- The `aws-auth` ConfigMap resources have been moved to a standalone sub-module. This removes the Kubernetes provider requirement from the main module and allows for the `aws-auth` ConfigMap to be managed independently of the main module. This sub-module will be removed entirely in the next major release. +- Support for cluster access management has been added with the default authentication mode set as `API_AND_CONFIG_MAP`. This is a one way change if applied; if you wish to use `CONFIG_MAP`, you will need to set `authentication_mode = "CONFIG_MAP"` explicitly when upgrading. +- Karpenter EventBridge rule key `spot_interrupt` updated to correct mis-spelling (was `spot_interupt`). This will cause the rule to be replaced + +### ⚠️ Upcoming Changes Planned in v21.0 ⚠️ + +To give users advance notice and provide some future direction for this module, these are the following changes we will be looking to make in the next major release of this module: + +1. The `aws-auth` sub-module will be removed entirely from the project. Since this sub-module is captured in the v20.x releases, users can continue using it even after the module moves forward with the next major version. The long term strategy and direction is cluster access entry and to rely only on the AWS Terraform provider. +2. The default value for `authentication_mode` will change to `API`. Aligning with point 1 above, this is a one way change, but users are free to specify the value of their choosing in place of this default (when the change is made). This module will proceed with an EKS API first strategy. 
+3. The launch template and autoscaling group usage contained within the EKS managed nodegroup and self-managed nodegroup sub-modules might be replaced with the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module. At minimum, it makes sense to replace most of the functionality in the self-managed nodegroup module with this external module, but it's not yet clear if there is any benefit of using it in the EKS managed nodegroup sub-module. The interface that users interact with will stay the same, the changes will be internal to the implementation and we will do everything we can to keep the disruption to a minimum. + +## Additional changes + +### Added + + - A module tag has been added to the cluster control plane + - Support for cluster access entries. The `bootstrap_cluster_creator_admin_permissions` setting on the control plane has been hardcoded to `false` since this operation is a one time operation only at cluster creation per the EKS API. Instead, users can enable/disable `enable_cluster_creator_admin_permissions` at any time to achieve the same functionality. This takes the identity that Terraform is using to make API calls and maps it into a cluster admin via an access entry. For users on existing clusters, you will need to remove the default cluster administrator that was created by EKS prior to the cluster access entry APIs - see the section [`Removing the default cluster administrator`](https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/) for more details. 
+ - Support for specifying the CloudWatch log group class (standard or infrequent access) + - Native support for Windows based managed nodegroups similar to AL2 and Bottlerocket + - Self-managed nodegroups now support `instance_maintenance_policy` and have added `max_healthy_percentage`, `scale_in_protected_instances`, and `standby_instances` arguments to the `instance_refresh.preferences` block + +### Modified + + - For `sts:AssumeRole` permissions by services, the use of dynamically looking up the DNS suffix has been replaced with the static value of `amazonaws.com`. This does not appear to change by partition and instead requires users to set this manually for non-commercial regions. + - The default value for `kms_key_enable_default_policy` has changed from `false` to `true` to align with the default behavior of the `aws_kms_key` resource + - The Karpenter default value for `create_instance_profile` has changed from `true` to `false` to align with the changes in Karpenter v0.32 + - The Karpenter variable `create_instance_profile` default value has changed from `true` to `false`. Starting with Karpenter `v0.32.0`, Karpenter accepts an IAM role and creates the EC2 instance profile used by the nodes + +### Removed + + - The `complete` example has been removed due to its redundancy with the other examples + - References to the IRSA sub-module in the IAM repository have been removed. Once https://github.com/clowdhaus/terraform-aws-eks-pod-identity has been updated and moved into the organization, the documentation here will be updated to mention the new module. + +### Variable and output changes + +1. 
Removed variables: + + - `cluster_iam_role_dns_suffix` - replaced with a static string of `amazonaws.com` + - `manage_aws_auth_configmap` + - `create_aws_auth_configmap` + - `aws_auth_node_iam_role_arns_non_windows` + - `aws_auth_node_iam_role_arns_windows` + - `aws_auth_fargate_profile_pod_execution_role_arn` + - `aws_auth_roles` + - `aws_auth_users` + - `aws_auth_accounts` + + - Karpenter + - `irsa_tag_key` + - `irsa_tag_values` + - `irsa_subnet_account_id` + - `enable_karpenter_instance_profile_creation` + +2. Renamed variables: + + - Karpenter + - `create_irsa` -> `create_iam_role` + - `irsa_name` -> `iam_role_name` + - `irsa_use_name_prefix` -> `iam_role_name_prefix` + - `irsa_path` -> `iam_role_path` + - `irsa_description` -> `iam_role_description` + - `irsa_max_session_duration` -> `iam_role_max_session_duration` + - `irsa_permissions_boundary_arn` -> `iam_role_permissions_boundary_arn` + - `irsa_tags` -> `iam_role_tags` + - `policies` -> `iam_role_policies` + - `irsa_policy_name` -> `iam_policy_name` + - `irsa_ssm_parameter_arns` -> `ami_id_ssm_parameter_arns` + - `create_iam_role` -> `create_node_iam_role` + - `iam_role_additional_policies` -> `node_iam_role_additional_policies` + - `policies` -> `iam_role_policies` + - `iam_role_arn` -> `node_iam_role_arn` + - `iam_role_name` -> `node_iam_role_name` + - `iam_role_name_prefix` -> `node_iam_role_name_prefix` + - `iam_role_path` -> `node_iam_role_path` + - `iam_role_description` -> `node_iam_role_description` + - `iam_role_max_session_duration` -> `node_iam_role_max_session_duration` + - `iam_role_permissions_boundary_arn` -> `node_iam_role_permissions_boundary_arn` + - `iam_role_attach_cni_policy` -> `node_iam_role_attach_cni_policy` + - `iam_role_additional_policies` -> `node_iam_role_additional_policies` + - `iam_role_tags` -> `node_iam_role_tags` + +3. 
Added variables: + + - `create_access_entry` + - `enable_cluster_creator_admin_permissions` + - `authentication_mode` + - `access_entries` + - `cloudwatch_log_group_class` + + - Karpenter + - `iam_policy_name` + - `iam_policy_use_name_prefix` + - `iam_policy_description` + - `iam_policy_path` + - `enable_irsa` + - `create_access_entry` + - `access_entry_type` + + - Self-managed nodegroup + - `instance_maintenance_policy` + - `create_access_entry` + - `iam_role_arn` + +4. Removed outputs: + + - `aws_auth_configmap_yaml` + +5. Renamed outputs: + + - Karpenter + - `irsa_name` -> `iam_role_name` + - `irsa_arn` -> `iam_role_arn` + - `irsa_unique_id` -> `iam_role_unique_id` + - `role_name` -> `node_iam_role_name` + - `role_arn` -> `node_iam_role_arn` + - `role_unique_id` -> `node_iam_role_unique_id` + +6. Added outputs: + + - `access_entries` + + - Karpenter + - `node_access_entry_arn` + + - Self-managed nodegroup + - `access_entry_arn` + +## Upgrade Migrations + +### Diff of Before (v19.21) vs After (v20.0) + +```diff + module "eks" { + source = "terraform-aws-modules/eks/aws" +- version = "~> 19.21" ++ version = "~> 20.0" + +# If you want to maintain the current default behavior of v19.x ++ kms_key_enable_default_policy = false + +- manage_aws_auth_configmap = true + +- aws_auth_roles = [ +- { +- rolearn = "arn:aws:iam::66666666666:role/role1" +- username = "role1" +- groups = ["custom-role-group"] +- }, +- ] + +- aws_auth_users = [ +- { +- userarn = "arn:aws:iam::66666666666:user/user1" +- username = "user1" +- groups = ["custom-users-group"] +- }, +- ] +} + ++ module "eks" { ++ source = "terraform-aws-modules/eks/aws//modules/aws-auth" ++ version = "~> 20.0" + ++ manage_aws_auth_configmap = true + ++ aws_auth_roles = [ ++ { ++ rolearn = "arn:aws:iam::66666666666:role/role1" ++ username = "role1" ++ groups = ["custom-role-group"] ++ }, ++ ] + ++ aws_auth_users = [ ++ { ++ userarn = "arn:aws:iam::66666666666:user/user1" ++ username = "user1" ++ groups = 
["custom-users-group"] ++ }, ++ ] ++ } +``` + +### Karpenter Diff of Before (v19.21) vs After (v20.0) + +```diff + module "eks" { + source = "terraform-aws-modules/eks/aws//modules/karpenter" +- version = "~> 19.21" ++ version = "~> 20.0" + +# If you wish to maintain the current default behavior of v19.x ++ enable_irsa = true ++ create_instance_profile = true + +# To avoid any resource re-creation ++ iam_role_name = "KarpenterIRSA-${module.eks.cluster_name}" ++ iam_role_description = "Karpenter IAM role for service account" ++ iam_policy_name = "KarpenterIRSA-${module.eks.cluster_name}" ++ iam_policy_description = "Karpenter IAM role for service account" +} +``` + +## Terraform State Moves + +#### ⚠️ Authentication Mode Changes ⚠️ + +Changing the `authentication_mode` is a one-way decision. See [announcement blog](https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/) for further details: + +> Switching authentication modes on an existing cluster is a one-way operation. You can switch from CONFIG_MAP to API_AND_CONFIG_MAP. You can then switch from API_AND_CONFIG_MAP to API. You cannot revert these operations in the opposite direction. Meaning you cannot switch back to CONFIG_MAP or API_AND_CONFIG_MAP from API. And you cannot switch back to CONFIG_MAP from API_AND_CONFIG_MAP. 
+ +### authentication_mode = "CONFIG_MAP" + +If using `authentication_mode = "CONFIG_MAP"`, before making any changes, you will first need to remove the configmap from the statefile to avoid any disruptions: + +```sh +terraform state rm 'module.eks.kubernetes_config_map_v1_data.aws_auth[0]' +terraform state rm 'module.eks.kubernetes_config_map.aws_auth[0]' # include if Terraform created the original configmap +``` + +Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see before after diff above). + +#### ⚠️ Node IAM Roles + +You will need to add entries for any IAM roles used by nodegroups and/or Fargate profiles - the module no longer handles this in the background on behalf of users. + +When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes. + +### authentication_mode = "API_AND_CONFIG_MAP" + +When using `authentication_mode = "API_AND_CONFIG_MAP"` and there are entries that will remain in the configmap (entries that cannot be replaced by cluster access entry), you will first need to update the `authentication_mode` on the cluster to `"API_AND_CONFIG_MAP"`. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. 
You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed nodegroups and Fargate profiles; users do not need to do anything additional for these roles.
+
+Once the `authentication_mode` has been updated, next you will need to remove the configmap from the statefile to avoid any disruptions:
+
+```sh
+terraform state rm 'module.eks.kubernetes_config_map_v1_data.aws_auth[0]'
+terraform state rm 'module.eks.kubernetes_config_map.aws_auth[0]' # include if Terraform created the original configmap
+```
+
+#### ℹ️ Terraform 1.7+ users
+
+If you are using Terraform `v1.7+`, you can utilize the [`remove`](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) block to facilitate the removal of the configmap through code. You can create a fork/clone of the provided [migration module](https://github.com/clowdhaus/terraform-aws-eks-migrate-v19-to-v20) and add the `remove` blocks and apply those changes before proceeding. We do not want to force users onto the bleeding edge with this module, so we have not included `remove` support at this time.
+
+Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see the before/after diff above). When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes. In the before/after example above - the configmap would remove any entries for roles used by nodegroups and/or Fargate Profiles, but maintain the custom entries for users and roles passed into the module definition. 
+ +### authentication_mode = "API" + +In order to switch to `API` only using cluster access entry, you first need to update the `authentication_mode` on the cluster to `API_AND_CONFIG_MAP` without modifying the `aws-auth` configmap. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed nodegroups and Fargate profiles; users do not need to do anything additional for these roles. + +Once the `authentication_mode` has been updated, you can update the `authentication_mode` on the cluster to `API` and remove the `aws-auth` configmap components. diff --git a/docs/faq.md b/docs/faq.md index 3ca85e85e8..395c8c3130 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -57,14 +57,6 @@ If you require a public endpoint, setting up both (public and private) and restr The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block. The setting is ignored to allow autoscaling via controllers such as cluster autoscaler or Karpenter to work properly and without interference by Terraform. Changing the desired count must be handled outside of Terraform once the node group is created. -### How can I deploy Windows based nodes? - -To enable Windows support for your EKS cluster, you will need to apply some configuration manually. 
See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support). - -In addition, Windows based nodes require an additional cluster RBAC role (`eks:kube-proxy-windows`). - -Note: Windows based node support is limited to a default user data template that is provided due to the lack of Windows support and manual steps required to provision Windows based EKS nodes. - ### How do I access compute resource attributes? Examples of accessing the attributes of the compute resource(s) created by the root module are shown below. Note - the assumption is that your cluster module definition is named `eks` as in `module "eks" { ... }`: diff --git a/docs/irsa_integration.md b/docs/irsa_integration.md deleted file mode 100644 index cc6a549500..0000000000 --- a/docs/irsa_integration.md +++ /dev/null @@ -1,84 +0,0 @@ - -### IRSA Integration - -An [IAM role for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) module has been created to work in conjunction with this module. The [`iam-role-for-service-accounts`](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) module has a set of pre-defined IAM policies for common addons. Check [`policy.tf`](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/modules/iam-role-for-service-accounts-eks/policies.tf) for a list of the policies currently supported. 
One example of this integration is shown below, and more can be found in the [`iam-role-for-service-accounts`](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/examples/iam-role-for-service-accounts-eks/main.tf) example directory: - -```hcl -module "eks" { - source = "terraform-aws-modules/eks/aws" - - cluster_name = "example" - cluster_version = "1.27" - - cluster_addons = { - vpc-cni = { - resolve_conflicts = "OVERWRITE" - service_account_role_arn = module.vpc_cni_irsa.iam_role_arn - } - } - - vpc_id = "vpc-1234556abcdef" - subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] - - eks_managed_node_group_defaults = { - # We are using the IRSA created below for permissions - # However, we have to provision a new cluster with the policy attached FIRST - # before we can disable. Without this initial policy, - # the VPC CNI fails to assign IPs and nodes cannot join the new cluster - iam_role_attach_cni_policy = true - } - - eks_managed_node_groups = { - default = {} - } - - tags = { - Environment = "dev" - Terraform = "true" - } -} - -module "vpc_cni_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name = "vpc_cni" - attach_vpc_cni_policy = true - vpc_cni_enable_ipv4 = true - - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["kube-system:aws-node"] - } - } - - tags = { - Environment = "dev" - Terraform = "true" - } -} - -module "karpenter_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name = "karpenter_controller" - attach_karpenter_controller_policy = true - - karpenter_controller_cluster_id = module.eks.cluster_id - karpenter_controller_node_iam_role_arns = [ - module.eks.eks_managed_node_groups["default"].iam_role_arn - ] - - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = 
["karpenter:karpenter"] - } - } - - tags = { - Environment = "dev" - Terraform = "true" - } -} -``` diff --git a/docs/user_data.md b/docs/user_data.md index 7b236944ff..87a28a2b47 100644 --- a/docs/user_data.md +++ b/docs/user_data.md @@ -8,6 +8,7 @@ Users can see the various methods of using and providing user data through the [ - By default, any supplied user data is pre-pended to the user data supplied by the EKS Managed Node Group service - If users supply an `ami_id`, the service no longers supplies user data to bootstrap nodes; users can enable `enable_bootstrap_user_data` and use the module provided user data template, or provide their own user data template - `bottlerocket` platform user data must be in TOML format + - `windows` platform user data must be in powershell/PS1 script format - Self Managed Node Groups - `linux` platform (default) -> the user data template (bash/shell script) provided by the module is used as the default; users are able to provide their own user data template - `bottlerocket` platform -> the user data template (TOML file) provided by the module is used as the default; users are able to provide their own user data template diff --git a/examples/complete/README.md b/examples/complete/README.md deleted file mode 100644 index 0fd43130da..0000000000 --- a/examples/complete/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# Complete AWS EKS Cluster - -Configuration in this directory creates an AWS EKS cluster with a broad mix of various features and settings provided by this module: - -- AWS EKS cluster -- Disabled EKS cluster -- Self managed node group -- Externally attached self managed node group -- Disabled self managed node group -- EKS managed node group -- Externally attached EKS managed node group -- Disabled self managed node group -- Fargate profile -- Externally attached Fargate profile -- Disabled Fargate profile -- Cluster addons: CoreDNS, Kube-Proxy, and VPC-CNI -- IAM roles for service accounts - -## Usage - -To run this 
example you need to execute: - -```bash -$ terraform init -$ terraform plan -$ terraform apply -``` - -Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. - - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | - -## Providers - -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | >= 4.57 | - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a | -| [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | -| [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a | -| [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a | -| [eks](#module\_eks) | ../.. 
| n/a | -| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | -| [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a | -| [kms](#module\_kms) | terraform-aws-modules/kms/aws | ~> 1.5 | -| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | - -## Resources - -| Name | Type | -|------|------| -| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | - -## Inputs - -No inputs. 
- -## Outputs - -| Name | Description | -|------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | -| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | -| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | -| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | -| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster | -| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | -| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | -| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | -| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | -| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts | -| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | -| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | -| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | -| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | -| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | -| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | -| [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | -| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | -| [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | -| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | -| [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key | -| [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key | -| [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key | -| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | -| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` | -| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created | -| [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups | - diff --git a/examples/complete/main.tf b/examples/complete/main.tf deleted file mode 100644 index 184dba2228..0000000000 --- a/examples/complete/main.tf +++ /dev/null @@ -1,482 +0,0 @@ -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This 
requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - -data "aws_availability_zones" "available" {} -data "aws_caller_identity" "current" {} - -locals { - name = "ex-${replace(basename(path.cwd), "_", "-")}" - region = "eu-west-1" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Example = local.name - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } -} - -################################################################################ -# EKS Module -################################################################################ - -module "eks" { - source = "../.." - - cluster_name = local.name - cluster_endpoint_public_access = true - - cluster_addons = { - coredns = { - preserve = true - most_recent = true - - timeouts = { - create = "25m" - delete = "10m" - } - } - kube-proxy = { - most_recent = true - } - vpc-cni = { - most_recent = true - } - } - - # External encryption key - create_kms_key = false - cluster_encryption_config = { - resources = ["secrets"] - provider_key_arn = module.kms.key_arn - } - - iam_role_additional_policies = { - additional = aws_iam_policy.additional.arn - } - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets - control_plane_subnet_ids = module.vpc.intra_subnets - - # Extend cluster security group rules - cluster_security_group_additional_rules = { - ingress_nodes_ephemeral_ports_tcp = { - description = "Nodes on ephemeral ports" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "ingress" - source_node_security_group = true - } - # Test: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2319 - ingress_source_security_group_id = { - description = "Ingress from another computed security group" - protocol = "tcp" - from_port = 22 - to_port = 22 - type = "ingress" - source_security_group_id = 
aws_security_group.additional.id - } - } - - # Extend node-to-node security group rules - node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - # Test: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2319 - ingress_source_security_group_id = { - description = "Ingress from another computed security group" - protocol = "tcp" - from_port = 22 - to_port = 22 - type = "ingress" - source_security_group_id = aws_security_group.additional.id - } - } - - # Self Managed Node Group(s) - self_managed_node_group_defaults = { - vpc_security_group_ids = [aws_security_group.additional.id] - iam_role_additional_policies = { - additional = aws_iam_policy.additional.arn - } - - instance_refresh = { - strategy = "Rolling" - preferences = { - min_healthy_percentage = 66 - } - } - } - - self_managed_node_groups = { - spot = { - instance_type = "m5.large" - instance_market_options = { - market_type = "spot" - } - - pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar - EOT - - bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" - - post_bootstrap_user_data = <<-EOT - cd /tmp - sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm - sudo systemctl enable amazon-ssm-agent - sudo systemctl start amazon-ssm-agent - EOT - } - } - - # EKS Managed Node Group(s) - eks_managed_node_group_defaults = { - ami_type = "AL2_x86_64" - instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] - - attach_cluster_primary_security_group = true - vpc_security_group_ids = [aws_security_group.additional.id] - iam_role_additional_policies = { - additional = aws_iam_policy.additional.arn - } - } - - eks_managed_node_groups = { - blue = {} - green = { - min_size = 1 - max_size = 10 - desired_size = 1 - - 
instance_types = ["t3.large"] - capacity_type = "SPOT" - labels = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } - - taints = { - dedicated = { - key = "dedicated" - value = "gpuGroup" - effect = "NO_SCHEDULE" - } - } - - block_device_mappings = { - xvda = { - device_name = "/dev/xvda" - ebs = { - volume_size = 100 - volume_type = "gp3" - iops = 3000 - throughput = 150 - delete_on_termination = true - } - } - } - - update_config = { - max_unavailable_percentage = 33 # or set `max_unavailable` - } - - tags = { - ExtraTag = "example" - } - } - } - - # Fargate Profile(s) - fargate_profiles = { - default = { - name = "default" - selectors = [ - { - namespace = "kube-system" - labels = { - k8s-app = "kube-dns" - } - }, - { - namespace = "default" - } - ] - - tags = { - Owner = "test" - } - - timeouts = { - create = "20m" - delete = "20m" - } - } - } - - # Create a new cluster where both an identity provider and Fargate profile is created - # will result in conflicts since only one can take place at a time - # # OIDC Identity provider - # cluster_identity_providers = { - # sts = { - # client_id = "sts.amazonaws.com" - # } - # } - - # aws-auth configmap - manage_aws_auth_configmap = true - - aws_auth_node_iam_role_arns_non_windows = [ - module.eks_managed_node_group.iam_role_arn, - module.self_managed_node_group.iam_role_arn, - ] - aws_auth_fargate_profile_pod_execution_role_arns = [ - module.fargate_profile.fargate_profile_pod_execution_role_arn - ] - - aws_auth_roles = [ - { - rolearn = module.eks_managed_node_group.iam_role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - { - rolearn = module.self_managed_node_group.iam_role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - { - rolearn = module.fargate_profile.fargate_profile_pod_execution_role_arn - username = 
"system:node:{{SessionName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - "system:node-proxier", - ] - } - ] - - aws_auth_users = [ - { - userarn = "arn:aws:iam::66666666666:user/user1" - username = "user1" - groups = ["system:masters"] - }, - { - userarn = "arn:aws:iam::66666666666:user/user2" - username = "user2" - groups = ["system:masters"] - }, - ] - - aws_auth_accounts = [ - "777777777777", - "888888888888", - ] - - tags = local.tags -} - -################################################################################ -# Sub-Module Usage on Existing/Separate Cluster -################################################################################ - -module "eks_managed_node_group" { - source = "../../modules/eks-managed-node-group" - - name = "separate-eks-mng" - cluster_name = module.eks.cluster_name - cluster_version = module.eks.cluster_version - - subnet_ids = module.vpc.private_subnets - cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id - vpc_security_group_ids = [ - module.eks.cluster_security_group_id, - ] - - ami_type = "BOTTLEROCKET_x86_64" - platform = "bottlerocket" - - # this will get added to what AWS provides - bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" - - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" - EOT - - tags = merge(local.tags, { Separate = "eks-managed-node-group" }) -} - -module "self_managed_node_group" { - source = "../../modules/self-managed-node-group" - - name = "separate-self-mng" - cluster_name = module.eks.cluster_name - cluster_version = module.eks.cluster_version - cluster_endpoint = module.eks.cluster_endpoint - cluster_auth_base64 = module.eks.cluster_certificate_authority_data - - instance_type = "m5.large" - - subnet_ids = module.vpc.private_subnets - vpc_security_group_ids = [ - module.eks.cluster_primary_security_group_id, - module.eks.cluster_security_group_id, - ] - - tags = merge(local.tags, 
{ Separate = "self-managed-node-group" }) -} - -module "fargate_profile" { - source = "../../modules/fargate-profile" - - name = "separate-fargate-profile" - cluster_name = module.eks.cluster_name - - subnet_ids = module.vpc.private_subnets - selectors = [{ - namespace = "kube-system" - }] - - tags = merge(local.tags, { Separate = "fargate-profile" }) -} - -################################################################################ -# Disabled creation -################################################################################ - -module "disabled_eks" { - source = "../.." - - create = false -} - -module "disabled_fargate_profile" { - source = "../../modules/fargate-profile" - - create = false -} - -module "disabled_eks_managed_node_group" { - source = "../../modules/eks-managed-node-group" - - create = false -} - -module "disabled_self_managed_node_group" { - source = "../../modules/self-managed-node-group" - - create = false -} - -################################################################################ -# Supporting resources -################################################################################ - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] - intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] - - enable_nat_gateway = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} - -resource "aws_security_group" "additional" { - name_prefix = "${local.name}-additional" - vpc_id = module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - 
"192.168.0.0/16", - ] - } - - tags = merge(local.tags, { Name = "${local.name}-additional" }) -} - -resource "aws_iam_policy" "additional" { - name = "${local.name}-additional" - - policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Action = [ - "ec2:Describe*", - ] - Effect = "Allow" - Resource = "*" - }, - ] - }) -} - -module "kms" { - source = "terraform-aws-modules/kms/aws" - version = "~> 1.5" - - aliases = ["eks/${local.name}"] - description = "${local.name} cluster encryption key" - enable_default_policy = true - key_owners = [data.aws_caller_identity.current.arn] - - tags = local.tags -} diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf deleted file mode 100644 index c1020f3333..0000000000 --- a/examples/complete/outputs.tf +++ /dev/null @@ -1,192 +0,0 @@ -################################################################################ -# Cluster -################################################################################ - -output "cluster_arn" { - description = "The Amazon Resource Name (ARN) of the cluster" - value = module.eks.cluster_arn -} - -output "cluster_certificate_authority_data" { - description = "Base64 encoded certificate data required to communicate with the cluster" - value = module.eks.cluster_certificate_authority_data -} - -output "cluster_endpoint" { - description = "Endpoint for your Kubernetes API server" - value = module.eks.cluster_endpoint -} - -output "cluster_id" { - description = "The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts" - value = module.eks.cluster_id -} - -output "cluster_name" { - description = "The name of the EKS cluster" - value = module.eks.cluster_name -} - -output "cluster_oidc_issuer_url" { - description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = module.eks.cluster_oidc_issuer_url -} - -output "cluster_platform_version" { - description = "Platform version for the cluster" - value = module.eks.cluster_platform_version -} - -output "cluster_status" { - description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" - value = module.eks.cluster_status -} - -output "cluster_security_group_id" { - description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console" - value = module.eks.cluster_security_group_id -} - -################################################################################ -# KMS Key -################################################################################ - -output "kms_key_arn" { - description = "The Amazon Resource Name (ARN) of the key" - value = module.eks.kms_key_arn -} - -output "kms_key_id" { - description = "The globally unique identifier for the key" - value = module.eks.kms_key_id -} - -output "kms_key_policy" { - description = "The IAM resource policy set on the key" - value = module.eks.kms_key_policy -} - -################################################################################ -# Security Group -################################################################################ - -output "cluster_security_group_arn" { - description = "Amazon Resource Name (ARN) of the cluster security group" - value = module.eks.cluster_security_group_arn -} - 
-################################################################################ -# IRSA -################################################################################ - -output "oidc_provider" { - description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" - value = module.eks.oidc_provider -} - -output "oidc_provider_arn" { - description = "The ARN of the OIDC Provider if `enable_irsa = true`" - value = module.eks.oidc_provider_arn -} - -output "cluster_tls_certificate_sha1_fingerprint" { - description = "The SHA1 fingerprint of the public key of the cluster's certificate" - value = module.eks.cluster_tls_certificate_sha1_fingerprint -} - -################################################################################ -# IAM Role -################################################################################ - -output "cluster_iam_role_name" { - description = "IAM role name of the EKS cluster" - value = module.eks.cluster_iam_role_name -} - -output "cluster_iam_role_arn" { - description = "IAM role ARN of the EKS cluster" - value = module.eks.cluster_iam_role_arn -} - -output "cluster_iam_role_unique_id" { - description = "Stable and unique string identifying the IAM role" - value = module.eks.cluster_iam_role_unique_id -} - -################################################################################ -# EKS Addons -################################################################################ - -output "cluster_addons" { - description = "Map of attribute maps for all EKS cluster addons enabled" - value = module.eks.cluster_addons -} - -################################################################################ -# EKS Identity Provider -################################################################################ - -output "cluster_identity_providers" { - description = "Map of attribute maps for all EKS identity providers enabled" - value = module.eks.cluster_identity_providers -} - 
-################################################################################ -# CloudWatch Log Group -################################################################################ - -output "cloudwatch_log_group_name" { - description = "Name of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_name -} - -output "cloudwatch_log_group_arn" { - description = "Arn of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_arn -} - -################################################################################ -# Fargate Profile -################################################################################ - -output "fargate_profiles" { - description = "Map of attribute maps for all EKS Fargate Profiles created" - value = module.eks.fargate_profiles -} - -################################################################################ -# EKS Managed Node Group -################################################################################ - -output "eks_managed_node_groups" { - description = "Map of attribute maps for all EKS managed node groups created" - value = module.eks.eks_managed_node_groups -} - -output "eks_managed_node_groups_autoscaling_group_names" { - description = "List of the autoscaling group names created by EKS managed node groups" - value = module.eks.eks_managed_node_groups_autoscaling_group_names -} - -################################################################################ -# Self Managed Node Group -################################################################################ - -output "self_managed_node_groups" { - description = "Map of attribute maps for all self managed node groups created" - value = module.eks.self_managed_node_groups -} - -output "self_managed_node_groups_autoscaling_group_names" { - description = "List of the autoscaling group names created by self-managed node groups" - value = module.eks.self_managed_node_groups_autoscaling_group_names -} - 
-################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf deleted file mode 100644 index d2ec4a8f9d..0000000000 --- a/examples/complete/versions.tf +++ /dev/null @@ -1,14 +0,0 @@ -terraform { - required_version = ">= 1.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 4.57" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - } -} diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index c5278e07a0..103d133553 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -29,31 +29,31 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | ## Modules | Name | Source | Version | |------|--------|---------| -| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.5 | +| [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a | +| [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 2.1 | | [eks](#module\_eks) | ../.. 
| n/a | +| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | | [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | -| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.0 | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources | Name | Type | |------|------| -| [aws_autoscaling_group_tag.cluster_autoscaler_label_tags](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group_tag) | resource | | [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | @@ -70,7 +70,7 @@ No inputs. 
| Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index b8097221bc..0d17827fd8 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -2,18 +2,6 @@ provider "aws" { region = local.region } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - data "aws_caller_identity" "current" {} data "aws_availability_zones" "available" {} @@ -44,14 +32,7 @@ module "eks" { cluster_endpoint_public_access = true # IPV6 - cluster_ip_family = "ipv6" - - # We are using the IRSA created below for permissions - # However, we have to deploy with the policy attached FIRST (when creating a fresh cluster) - # and then turn this off after the cluster/node group is created. 
Without this initial policy, - # the VPC CNI fails to assign IPs and nodes cannot join the cluster - # See https://github.com/aws/containers-roadmap/issues/1666 for more context - # TODO - remove this policy once AWS releases a managed version similar to AmazonEKS_CNI_Policy (IPv4) + cluster_ip_family = "ipv6" create_cni_ipv6_iam_policy = true cluster_addons = { @@ -62,9 +43,8 @@ module "eks" { most_recent = true } vpc-cni = { - most_recent = true - before_compute = true - service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + most_recent = true + before_compute = true configuration_values = jsonencode({ env = { # Reference docs https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html @@ -79,18 +59,9 @@ module "eks" { subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets - manage_aws_auth_configmap = true - eks_managed_node_group_defaults = { ami_type = "AL2_x86_64" instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] - - # We are using the IRSA created below for permissions - # However, we have to deploy with the policy attached FIRST (when creating a fresh cluster) - # and then turn this off after the cluster/node group is created. 
Without this initial policy, - # the VPC CNI fails to assign IPs and nodes cannot join the cluster - # See https://github.com/aws/containers-roadmap/issues/1666 for more context - iam_role_attach_cni_policy = true } eks_managed_node_groups = { @@ -264,27 +235,6 @@ module "eks" { additional = aws_iam_policy.node_additional.arn } - schedules = { - scale-up = { - min_size = 2 - max_size = "-1" # Retains current max size - desired_size = 2 - start_time = "2023-03-05T00:00:00Z" - end_time = "2024-03-05T00:00:00Z" - time_zone = "Etc/GMT+0" - recurrence = "0 0 * * *" - }, - scale-down = { - min_size = 0 - max_size = "-1" # Retains current max size - desired_size = 0 - start_time = "2023-03-05T12:00:00Z" - end_time = "2024-03-05T12:00:00Z" - time_zone = "Etc/GMT+0" - recurrence = "0 12 * * *" - } - } - tags = { ExtraTag = "EKS managed node group complete example" } @@ -294,13 +244,59 @@ module "eks" { tags = local.tags } +module "disabled_eks" { + source = "../.." + + create = false +} + +################################################################################ +# Sub-Module Usage on Existing/Separate Cluster +################################################################################ + +module "eks_managed_node_group" { + source = "../../modules/eks-managed-node-group" + + name = "separate-eks-mng" + cluster_name = module.eks.cluster_name + cluster_version = module.eks.cluster_version + + subnet_ids = module.vpc.private_subnets + cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id + vpc_security_group_ids = [ + module.eks.cluster_security_group_id, + ] + + ami_type = "BOTTLEROCKET_x86_64" + platform = "bottlerocket" + + # this will get added to what AWS provides + bootstrap_extra_args = <<-EOT + # extra args added + [settings.kernel] + lockdown = "integrity" + + [settings.kubernetes.node-labels] + "label1" = "foo" + "label2" = "bar" + EOT + + tags = merge(local.tags, { Separate = "eks-managed-node-group" }) +} + +module 
"disabled_eks_managed_node_group" { + source = "../../modules/eks-managed-node-group" + + create = false +} + ################################################################################ # Supporting Resources ################################################################################ module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" + version = "~> 5.0" name = local.name cidr = local.vpc_cidr @@ -333,27 +329,9 @@ module "vpc" { tags = local.tags } -module "vpc_cni_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - version = "~> 5.0" - - role_name_prefix = "VPC-CNI-IRSA" - attach_vpc_cni_policy = true - vpc_cni_enable_ipv6 = true - - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["kube-system:aws-node"] - } - } - - tags = local.tags -} - module "ebs_kms_key" { source = "terraform-aws-modules/kms/aws" - version = "~> 1.5" + version = "~> 2.1" description = "Customer managed key to encrypt EKS managed node group volumes" @@ -458,52 +436,3 @@ data "aws_ami" "eks_default_bottlerocket" { values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"] } } - -################################################################################ -# Tags for the ASG to support cluster-autoscaler scale up from 0 -################################################################################ - -locals { - - # We need to lookup K8s taint effect from the AWS API value - taint_effects = { - NO_SCHEDULE = "NoSchedule" - NO_EXECUTE = "NoExecute" - PREFER_NO_SCHEDULE = "PreferNoSchedule" - } - - cluster_autoscaler_label_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for label_name, label_value in coalesce(group.node_group_labels, {}) : "${name}|label|${label_name}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/label/${label_name}", - value 
= label_value, - } - } - ]...) - - cluster_autoscaler_taint_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for taint in coalesce(group.node_group_taints, []) : "${name}|taint|${taint.key}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/taint/${taint.key}" - value = "${taint.value}:${local.taint_effects[taint.effect]}" - } - } - ]...) - - cluster_autoscaler_asg_tags = merge(local.cluster_autoscaler_label_tags, local.cluster_autoscaler_taint_tags) -} - -resource "aws_autoscaling_group_tag" "cluster_autoscaler_label_tags" { - for_each = local.cluster_autoscaler_asg_tags - - autoscaling_group_name = each.value.autoscaling_group - - tag { - key = each.value.key - value = each.value.value - - propagate_at_launch = false - } -} diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf index 43334ecc0a..0a8873f479 100644 --- a/examples/eks_managed_node_group/outputs.tf +++ b/examples/eks_managed_node_group/outputs.tf @@ -47,6 +47,15 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +209,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - 
-################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index d2ec4a8f9d..63e9319d92 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -1,14 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 5.34" } } } diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 9d41ed40b0..9ea3bf1871 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -19,23 +19,23 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | -| [helm](#requirement\_helm) | >= 2.7 | -| [null](#requirement\_null) | >= 3.0 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | ## Modules | Name | Source | Version | |------|--------|---------| +| [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a | | [eks](#module\_eks) | ../.. 
| n/a | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | +| [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources @@ -52,7 +52,7 @@ No inputs. | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index b67335c94e..0b3c6b46c2 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -6,7 +6,7 @@ data "aws_availability_zones" "available" {} locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.27" + cluster_version = "1.29" region = "eu-west-1" vpc_cidr = "10.0.0.0/16" @@ -54,59 +54,72 @@ module "eks" { } } - fargate_profiles = merge( - { - example = { - name = "example" - selectors = [ - { - namespace = "backend" - labels = { - Application = "backend" - } - }, - { - namespace = "app-*" - labels = { - Application = "app-wildcard" - } + fargate_profiles = { + example = { + name = "example" + selectors = [ + { + namespace = "backend" + labels = { + Application = "backend" + } + }, + { + namespace = "app-*" + labels = { + Application = "app-wildcard" } - ] - - # Using specific subnets instead of the subnets supplied for the cluster itself - subnet_ids = [module.vpc.private_subnets[1]] - - tags = { - Owner = 
"secondary" } + ] - timeouts = { - create = "20m" - delete = "20m" - } - } - }, - { for i in range(3) : - "kube-system-${element(split("-", local.azs[i]), 2)}" => { - selectors = [ - { namespace = "kube-system" } - ] - # We want to create a profile per AZ for high availability - subnet_ids = [element(module.vpc.private_subnets, i)] + # Using specific subnets instead of the subnets supplied for the cluster itself + subnet_ids = [module.vpc.private_subnets[1]] + + tags = { + Owner = "secondary" } } - ) + kube-system = { + selectors = [ + { namespace = "kube-system" } + ] + } + } tags = local.tags } +################################################################################ +# Sub-Module Usage on Existing/Separate Cluster +################################################################################ + +module "fargate_profile" { + source = "../../modules/fargate-profile" + + name = "separate-fargate-profile" + cluster_name = module.eks.cluster_name + + subnet_ids = module.vpc.private_subnets + selectors = [{ + namespace = "kube-system" + }] + + tags = merge(local.tags, { Separate = "fargate-profile" }) +} + +module "disabled_fargate_profile" { + source = "../../modules/fargate-profile" + + create = false +} + ################################################################################ # Supporting Resources ################################################################################ module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" + version = "~> 5.0" name = local.name cidr = local.vpc_cidr diff --git a/examples/fargate_profile/outputs.tf b/examples/fargate_profile/outputs.tf index 43334ecc0a..0a8873f479 100644 --- a/examples/fargate_profile/outputs.tf +++ b/examples/fargate_profile/outputs.tf @@ -47,6 +47,15 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +################################################################################ +# Access Entry 
+################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +209,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf index 10d7878df7..63e9319d92 100644 --- a/examples/fargate_profile/versions.tf +++ b/examples/fargate_profile/versions.tf @@ -1,18 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.7" - } - null = { - source = "hashicorp/null" - version = ">= 3.0" + version = ">= 5.34" } } } diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index 06b29e7bb4..b2c42931dd 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -41,6 +41,9 @@ kubectl delete node -l karpenter.sh/provisioner-name=default 2. 
Remove the resources created by Terraform ```bash +# Necessary to avoid removing Terraform's permissions too soon before it's finished +# cleaning up the resources it deployed inside the cluster +terraform state rm 'module.eks.aws_eks_access_entry.this["cluster_creator_admin"]' || true terraform destroy ``` @@ -51,21 +54,19 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | | [helm](#requirement\_helm) | >= 2.7 | -| [kubectl](#requirement\_kubectl) | >= 1.14 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | -| [null](#requirement\_null) | >= 3.0 | +| [kubectl](#requirement\_kubectl) | >= 2.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | -| [aws.virginia](#provider\_aws.virginia) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | +| [aws.virginia](#provider\_aws.virginia) | >= 5.34 | | [helm](#provider\_helm) | >= 2.7 | -| [kubectl](#provider\_kubectl) | >= 1.14 | +| [kubectl](#provider\_kubectl) | >= 2.0 | ## Modules @@ -73,6 +74,7 @@ Note that this example may create resources which cost money. Run `terraform des |------|--------|---------| | [eks](#module\_eks) | ../.. | n/a | | [karpenter](#module\_karpenter) | ../../modules/karpenter | n/a | +| [karpenter\_disabled](#module\_karpenter\_disabled) | ../../modules/karpenter | n/a | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources @@ -80,9 +82,9 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| | [helm_release.karpenter](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | -| [kubectl_manifest.karpenter_example_deployment](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | -| [kubectl_manifest.karpenter_node_class](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | -| [kubectl_manifest.karpenter_node_pool](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | +| [kubectl_manifest.karpenter_example_deployment](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource | +| [kubectl_manifest.karpenter_node_class](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource | +| [kubectl_manifest.karpenter_node_pool](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_ecrpublic_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecrpublic_authorization_token) | data source | @@ -94,7 +96,7 @@ No inputs. 
| Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -118,19 +120,19 @@ No inputs. | [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | | [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | | [karpenter\_event\_rules](#output\_karpenter\_event\_rules) | Map of the event rules created and their attributes | +| [karpenter\_iam\_role\_arn](#output\_karpenter\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the controller IAM role | +| [karpenter\_iam\_role\_name](#output\_karpenter\_iam\_role\_name) | The name of the controller IAM role | +| [karpenter\_iam\_role\_unique\_id](#output\_karpenter\_iam\_role\_unique\_id) | Stable and unique string identifying the controller IAM role | | [karpenter\_instance\_profile\_arn](#output\_karpenter\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile | | [karpenter\_instance\_profile\_id](#output\_karpenter\_instance\_profile\_id) | Instance profile's ID | | [karpenter\_instance\_profile\_name](#output\_karpenter\_instance\_profile\_name) | Name of the instance profile | | [karpenter\_instance\_profile\_unique](#output\_karpenter\_instance\_profile\_unique) | Stable and unique string identifying the 
IAM instance profile | -| [karpenter\_irsa\_arn](#output\_karpenter\_irsa\_arn) | The Amazon Resource Name (ARN) specifying the IAM role for service accounts | -| [karpenter\_irsa\_name](#output\_karpenter\_irsa\_name) | The name of the IAM role for service accounts | -| [karpenter\_irsa\_unique\_id](#output\_karpenter\_irsa\_unique\_id) | Stable and unique string identifying the IAM role for service accounts | +| [karpenter\_node\_iam\_role\_arn](#output\_karpenter\_node\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role | +| [karpenter\_node\_iam\_role\_name](#output\_karpenter\_node\_iam\_role\_name) | The name of the IAM role | +| [karpenter\_node\_iam\_role\_unique\_id](#output\_karpenter\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [karpenter\_queue\_arn](#output\_karpenter\_queue\_arn) | The ARN of the SQS queue | | [karpenter\_queue\_name](#output\_karpenter\_queue\_name) | The name of the created Amazon SQS queue | | [karpenter\_queue\_url](#output\_karpenter\_queue\_url) | The URL for the created Amazon SQS queue | -| [karpenter\_role\_arn](#output\_karpenter\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role | -| [karpenter\_role\_name](#output\_karpenter\_role\_name) | The name of the IAM role | -| [karpenter\_role\_unique\_id](#output\_karpenter\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group | | [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group | | [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index b8d7be97ff..cd36785893 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -7,18 +7,6 @@ provider 
"aws" { alias = "virginia" } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - provider "helm" { kubernetes { host = module.eks.cluster_endpoint @@ -54,7 +42,7 @@ data "aws_ecrpublic_authorization_token" "token" { locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.27" + cluster_version = "1.28" region = "eu-west-1" vpc_cidr = "10.0.0.0/16" @@ -78,9 +66,11 @@ module "eks" { cluster_version = local.cluster_version cluster_endpoint_public_access = true + # Gives Terraform identity admin access to cluster which will + # allow deploying resources (Karpenter) into the cluster + enable_cluster_creator_admin_permissions = true + cluster_addons = { - kube-proxy = {} - vpc-cni = {} coredns = { configuration_values = jsonencode({ computeType = "Fargate" @@ -106,6 +96,8 @@ module "eks" { } }) } + kube-proxy = {} + vpc-cni = {} } vpc_id = module.vpc.vpc_id @@ -116,19 +108,6 @@ module "eks" { create_cluster_security_group = false create_node_security_group = false - manage_aws_auth_configmap = true - aws_auth_roles = [ - # We need to add in the Karpenter node IAM role for nodes launched by Karpenter - { - rolearn = module.karpenter.role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - ] - fargate_profiles = { karpenter = { selectors = [ @@ -157,41 +136,51 @@ module "eks" { module "karpenter" { source = "../../modules/karpenter" - cluster_name = module.eks.cluster_name - irsa_oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name - # In v0.32.0/v1beta1, Karpenter now creates the IAM instance profile - 
# so we disable the Terraform creation and add the necessary permissions for Karpenter IRSA - enable_karpenter_instance_profile_creation = true + # EKS Fargate currently does not support Pod Identity + enable_irsa = true + irsa_oidc_provider_arn = module.eks.oidc_provider_arn # Used to attach additional IAM policies to the Karpenter node IAM role - iam_role_additional_policies = { + node_iam_role_additional_policies = { AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" } tags = local.tags } -resource "helm_release" "karpenter" { - namespace = "karpenter" - create_namespace = true +module "karpenter_disabled" { + source = "../../modules/karpenter" + + create = false +} +################################################################################ +# Karpenter Helm chart & manifests +# Not required; just to demonstrate functionality of the sub-module +################################################################################ + +resource "helm_release" "karpenter" { + namespace = "karpenter" + create_namespace = true name = "karpenter" repository = "oci://public.ecr.aws/karpenter" repository_username = data.aws_ecrpublic_authorization_token.token.user_name repository_password = data.aws_ecrpublic_authorization_token.token.password chart = "karpenter" - version = "v0.32.1" + version = "v0.33.1" + wait = false values = [ <<-EOT settings: clusterName: ${module.eks.cluster_name} clusterEndpoint: ${module.eks.cluster_endpoint} - interruptionQueueName: ${module.karpenter.queue_name} + interruptionQueue: ${module.karpenter.queue_name} serviceAccount: annotations: - eks.amazonaws.com/role-arn: ${module.karpenter.irsa_arn} + eks.amazonaws.com/role-arn: ${module.karpenter.iam_role_arn} EOT ] } @@ -204,7 +193,7 @@ resource "kubectl_manifest" "karpenter_node_class" { name: default spec: amiFamily: AL2 - role: ${module.karpenter.role_name} + role: ${module.karpenter.node_iam_role_name} subnetSelectorTerms: - tags: 
karpenter.sh/discovery: ${module.eks.cluster_name} diff --git a/examples/karpenter/outputs.tf b/examples/karpenter/outputs.tf index f0ad50bd6a..41fe733f87 100644 --- a/examples/karpenter/outputs.tf +++ b/examples/karpenter/outputs.tf @@ -47,6 +47,15 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # Security Group ################################################################################ @@ -183,31 +192,22 @@ output "self_managed_node_groups_autoscaling_group_names" { } ################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} - -################################################################################ -# IAM Role for Service Account (IRSA) +# Karpenter controller IAM Role ################################################################################ -output "karpenter_irsa_name" { - description = "The name of the IAM role for service accounts" - value = module.karpenter.irsa_name +output "karpenter_iam_role_name" { + description = "The name of the controller IAM role" + value = module.karpenter.iam_role_name } -output "karpenter_irsa_arn" { - description = "The Amazon Resource Name (ARN) specifying the IAM role for service accounts" - value = module.karpenter.irsa_arn +output 
"karpenter_iam_role_arn" { + description = "The Amazon Resource Name (ARN) specifying the controller IAM role" + value = module.karpenter.iam_role_arn } -output "karpenter_irsa_unique_id" { - description = "Stable and unique string identifying the IAM role for service accounts" - value = module.karpenter.irsa_unique_id +output "karpenter_iam_role_unique_id" { + description = "Stable and unique string identifying the controller IAM role" + value = module.karpenter.iam_role_unique_id } ################################################################################ @@ -242,19 +242,19 @@ output "karpenter_event_rules" { # Node IAM Role ################################################################################ -output "karpenter_role_name" { +output "karpenter_node_iam_role_name" { description = "The name of the IAM role" - value = module.karpenter.role_name + value = module.karpenter.node_iam_role_name } -output "karpenter_role_arn" { +output "karpenter_node_iam_role_arn" { description = "The Amazon Resource Name (ARN) specifying the IAM role" - value = module.karpenter.role_arn + value = module.karpenter.node_iam_role_arn } -output "karpenter_role_unique_id" { +output "karpenter_node_iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = module.karpenter.role_unique_id + value = module.karpenter.node_iam_role_unique_id } ################################################################################ diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf index f1f0ee477e..96d7b4ea19 100644 --- a/examples/karpenter/versions.tf +++ b/examples/karpenter/versions.tf @@ -1,26 +1,18 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 5.34" } helm = { source = "hashicorp/helm" version = ">= 2.7" } kubectl = { - 
source = "gavinbunney/kubectl" - version = ">= 1.14" - } - null = { - source = "hashicorp/null" - version = ">= 3.0" + source = "alekc/kubectl" + version = ">= 2.0" } } } diff --git a/examples/outposts/README.md b/examples/outposts/README.md index ff2542c4e9..30fffaf330 100644 --- a/examples/outposts/README.md +++ b/examples/outposts/README.md @@ -36,21 +36,28 @@ $ terraform apply Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. +```bash +# Necessary to avoid removing Terraform's permissions too soon before it's finished +# cleaning up the resources it deployed inside the cluster +terraform state rm 'module.eks.aws_eks_access_entry.this["cluster_creator_admin"]' || true +terraform destroy +``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | +| [kubernetes](#requirement\_kubernetes) | >= 2.20 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | -| [kubernetes](#provider\_kubernetes) | >= 2.10 | +| [aws](#provider\_aws) | >= 5.34 | +| [kubernetes](#provider\_kubernetes) | >= 2.20 | ## Modules @@ -80,7 +87,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf index fc50b53c89..4b13f52465 100644 --- a/examples/outposts/main.tf +++ b/examples/outposts/main.tf @@ -2,21 +2,9 @@ provider "aws" { region = var.region } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # Note: `cluster_id` is used with Outposts for auth - args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] - } -} - locals { name = "ex-${basename(path.cwd)}" - cluster_version = "1.27" # Required by EKS on Outposts + cluster_version = "1.29" outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) @@ -41,6 +29,10 @@ module "eks" { cluster_endpoint_public_access = false # Not available on Outpost cluster_endpoint_private_access = true + # Gives Terraform identity admin access to cluster which will + # allow deploying resources (EBS storage class) into the cluster + enable_cluster_creator_admin_permissions = true + vpc_id = data.aws_vpc.this.id subnet_ids = data.aws_subnets.this.ids @@ -49,9 +41,6 @@ 
module "eks" { outpost_arns = [local.outpost_arn] } - # Local clusters will automatically add the node group IAM role to the aws-auth configmap - manage_aws_auth_configmap = true - # Extend cluster security group rules cluster_security_group_additional_rules = { ingress_vpc_https = { diff --git a/examples/outposts/outputs.tf b/examples/outposts/outputs.tf index 43334ecc0a..0a8873f479 100644 --- a/examples/outposts/outputs.tf +++ b/examples/outposts/outputs.tf @@ -47,6 +47,15 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +209,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/outposts/prerequisites/main.tf b/examples/outposts/prerequisites/main.tf index 014418121d..66ab2a4e29 100644 --- a/examples/outposts/prerequisites/main.tf +++ b/examples/outposts/prerequisites/main.tf @@ -23,7 +23,7 @@ locals { module "ssm_bastion_ec2" { source = 
"terraform-aws-modules/ec2-instance/aws" - version = "~> 4.2" + version = "~> 5.5" name = "${local.name}-bastion" @@ -56,7 +56,7 @@ module "ssm_bastion_ec2" { rm terraform_${local.terraform_version}_linux_amd64.zip 2> /dev/null # Install kubectl - curl -LO https://dl.k8s.io/release/v1.27.0/bin/linux/amd64/kubectl + curl -LO https://dl.k8s.io/release/v1.29.0/bin/linux/amd64/kubectl install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl # Remove default awscli which is v1 - we want latest v2 @@ -80,7 +80,7 @@ module "ssm_bastion_ec2" { module "bastion_security_group" { source = "terraform-aws-modules/security-group/aws" - version = "~> 4.0" + version = "~> 5.0" name = "${local.name}-bastion" description = "Security group to allow provisioning ${local.name} EKS local cluster on Outposts" diff --git a/examples/outposts/prerequisites/versions.tf b/examples/outposts/prerequisites/versions.tf index 01d187af62..63e9319d92 100644 --- a/examples/outposts/prerequisites/versions.tf +++ b/examples/outposts/prerequisites/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.34" } } } diff --git a/examples/outposts/versions.tf b/examples/outposts/versions.tf index d2ec4a8f9d..dff26f6939 100644 --- a/examples/outposts/versions.tf +++ b/examples/outposts/versions.tf @@ -1,14 +1,14 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.34" } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 2.20" } } } diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index 5d849be0b9..c19999d9b9 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -25,24 +25,25 @@ Note that this example may create 
resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | ## Modules | Name | Source | Version | |------|--------|---------| -| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.5 | +| [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a | +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 2.0 | | [eks](#module\_eks) | ../.. | n/a | | [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | +| [kms](#module\_kms) | terraform-aws-modules/kms/aws | ~> 2.1 | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources @@ -62,7 +63,7 @@ No inputs. 
| Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index d359226f3c..dc125e1fbb 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -2,24 +2,12 @@ provider "aws" { region = local.region } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - data "aws_caller_identity" "current" {} data "aws_availability_zones" "available" {} locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.27" + cluster_version = "1.29" region = "eu-west-1" vpc_cidr = "10.0.0.0/16" @@ -59,9 +47,12 @@ module "eks" { subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets - # Self managed node groups will not automatically create the aws-auth configmap so we need to - create_aws_auth_configmap = true - manage_aws_auth_configmap = true + # External encryption key + create_kms_key = false + cluster_encryption_config = { + resources = ["secrets"] + 
provider_key_arn = module.kms.key_arn + } self_managed_node_group_defaults = { # enable discovery of autoscaling groups by cluster-autoscaler @@ -141,36 +132,6 @@ module "eks" { } } - efa = { - min_size = 1 - max_size = 2 - desired_size = 1 - - # aws ec2 describe-instance-types --region eu-west-1 --filters Name=network-info.efa-supported,Values=true --query "InstanceTypes[*].[InstanceType]" --output text | sort - instance_type = "c5n.9xlarge" - - post_bootstrap_user_data = <<-EOT - # Install EFA - curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz - tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer - ./efa_installer.sh -y --minimal - fi_info -p efa -t FI_EP_RDM - - # Disable ptrace - sysctl -w kernel.yama.ptrace_scope=0 - EOT - - network_interfaces = [ - { - description = "EFA interface example" - delete_on_termination = true - device_index = 0 - associate_public_ip_address = false - interface_type = "efa" - } - ] - } - # Complete complete = { name = "complete-self-mng" @@ -287,12 +248,6 @@ module "eks" { additional = aws_iam_policy.additional.arn } - timeouts = { - create = "80m" - update = "80m" - delete = "80m" - } - tags = { ExtraTag = "Self managed node group complete example" } @@ -302,13 +257,19 @@ module "eks" { tags = local.tags } +module "disabled_self_managed_node_group" { + source = "../../modules/self-managed-node-group" + + create = false +} + ################################################################################ # Supporting Resources ################################################################################ module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" + version = "~> 5.0" name = local.name cidr = local.vpc_cidr @@ -364,7 +325,7 @@ module "key_pair" { module "ebs_kms_key" { source = "terraform-aws-modules/kms/aws" - version = "~> 1.5" + version = "~> 2.0" description = "Customer managed key to encrypt EKS managed node group volumes" @@ -386,6 +347,18 @@ module 
"ebs_kms_key" { tags = local.tags } +module "kms" { + source = "terraform-aws-modules/kms/aws" + version = "~> 2.1" + + aliases = ["eks/${local.name}"] + description = "${local.name} cluster encryption key" + enable_default_policy = true + key_owners = [data.aws_caller_identity.current.arn] + + tags = local.tags +} + resource "aws_iam_policy" "additional" { name = "${local.name}-additional" description = "Example usage of node additional policy" diff --git a/examples/self_managed_node_group/outputs.tf b/examples/self_managed_node_group/outputs.tf index 43334ecc0a..0a8873f479 100644 --- a/examples/self_managed_node_group/outputs.tf +++ b/examples/self_managed_node_group/outputs.tf @@ -47,6 +47,15 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +209,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/self_managed_node_group/versions.tf 
b/examples/self_managed_node_group/versions.tf index d2ec4a8f9d..63e9319d92 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -1,14 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 5.34" } } } diff --git a/examples/user_data/README.md b/examples/user_data/README.md index cea7dce755..b2215a02bb 100644 --- a/examples/user_data/README.md +++ b/examples/user_data/README.md @@ -17,7 +17,7 @@ $ terraform apply | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | +| [terraform](#requirement\_terraform) | >= 1.3 | ## Providers @@ -35,6 +35,10 @@ No providers. | [eks\_mng\_linux\_custom\_ami](#module\_eks\_mng\_linux\_custom\_ami) | ../../modules/_user_data | n/a | | [eks\_mng\_linux\_custom\_template](#module\_eks\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a | | [eks\_mng\_linux\_no\_op](#module\_eks\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_additional](#module\_eks\_mng\_windows\_additional) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_custom\_ami](#module\_eks\_mng\_windows\_custom\_ami) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_custom\_template](#module\_eks\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_no\_op](#module\_eks\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a | | [self\_mng\_bottlerocket\_bootstrap](#module\_self\_mng\_bottlerocket\_bootstrap) | ../../modules/_user_data | n/a | | [self\_mng\_bottlerocket\_custom\_template](#module\_self\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a | | [self\_mng\_bottlerocket\_no\_op](#module\_self\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a | @@ -65,6 
+69,10 @@ No inputs. | [eks\_mng\_linux\_custom\_ami](#output\_eks\_mng\_linux\_custom\_ami) | Base64 decoded user data rendered for the provided inputs | | [eks\_mng\_linux\_custom\_template](#output\_eks\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs | | [eks\_mng\_linux\_no\_op](#output\_eks\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs | +| [eks\_mng\_windows\_additional](#output\_eks\_mng\_windows\_additional) | Base64 decoded user data rendered for the provided inputs | +| [eks\_mng\_windows\_custom\_ami](#output\_eks\_mng\_windows\_custom\_ami) | Base64 decoded user data rendered for the provided inputs | +| [eks\_mng\_windows\_custom\_template](#output\_eks\_mng\_windows\_custom\_template) | Base64 decoded user data rendered for the provided inputs | +| [eks\_mng\_windows\_no\_op](#output\_eks\_mng\_windows\_no\_op) | Base64 decoded user data rendered for the provided inputs | | [self\_mng\_bottlerocket\_bootstrap](#output\_self\_mng\_bottlerocket\_bootstrap) | Base64 decoded user data rendered for the provided inputs | | [self\_mng\_bottlerocket\_custom\_template](#output\_self\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs | | [self\_mng\_bottlerocket\_no\_op](#output\_self\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs | diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf index d7d513190e..ea08c8c6d0 100644 --- a/examples/user_data/main.tf +++ b/examples/user_data/main.tf @@ -121,6 +121,69 @@ module "eks_mng_bottlerocket_custom_template" { EOT } +# EKS managed node group - windows +module "eks_mng_windows_no_op" { + source = "../../modules/_user_data" + + platform = "windows" +} + +module "eks_mng_windows_additional" { + source = "../../modules/_user_data" + + platform = "windows" + + pre_bootstrap_user_data = <<-EOT + [string]$Something = 'IDoNotKnowAnyPowerShell 
¯\_(ツ)_/¯' + EOT +} + +module "eks_mng_windows_custom_ami" { + source = "../../modules/_user_data" + + platform = "windows" + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + + enable_bootstrap_user_data = true + + pre_bootstrap_user_data = <<-EOT + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT + # I don't know if this is the right way on Windows, but its just a string check here anyways + bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" + + post_bootstrap_user_data = <<-EOT + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT +} + +module "eks_mng_windows_custom_template" { + source = "../../modules/_user_data" + + platform = "windows" + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + + enable_bootstrap_user_data = true + + user_data_template_path = "${path.module}/templates/windows_custom.tpl" + + pre_bootstrap_user_data = <<-EOT + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT + # I don't know if this is the right way on Windows, but its just a string check here anyways + bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" + + post_bootstrap_user_data = <<-EOT + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT +} + # Self managed node group - linux module "self_mng_linux_no_op" { source = "../../modules/_user_data" @@ -247,7 +310,7 @@ module "self_mng_windows_bootstrap" { pre_bootstrap_user_data = <<-EOT [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT - # I don't know if this is the right way on WindowsOS, but its just a string check here anyways + # I don't know if this is the right way on Windows, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data 
= <<-EOT @@ -272,7 +335,7 @@ module "self_mng_windows_custom_template" { pre_bootstrap_user_data = <<-EOT [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT - # I don't know if this is the right way on WindowsOS, but its just a string check here anyways + # I don't know if this is the right way on Windows, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data = <<-EOT diff --git a/examples/user_data/outputs.tf b/examples/user_data/outputs.tf index dd2c3407e1..768d81c37a 100644 --- a/examples/user_data/outputs.tf +++ b/examples/user_data/outputs.tf @@ -40,6 +40,27 @@ output "eks_mng_bottlerocket_custom_template" { value = base64decode(module.eks_mng_bottlerocket_custom_template.user_data) } +# EKS managed node group - windows +output "eks_mng_windows_no_op" { + description = "Base64 decoded user data rendered for the provided inputs" + value = base64decode(module.eks_mng_windows_no_op.user_data) +} + +output "eks_mng_windows_additional" { + description = "Base64 decoded user data rendered for the provided inputs" + value = base64decode(module.eks_mng_windows_additional.user_data) +} + +output "eks_mng_windows_custom_ami" { + description = "Base64 decoded user data rendered for the provided inputs" + value = base64decode(module.eks_mng_windows_custom_ami.user_data) +} + +output "eks_mng_windows_custom_template" { + description = "Base64 decoded user data rendered for the provided inputs" + value = base64decode(module.eks_mng_windows_custom_template.user_data) +} + # Self managed node group - linux output "self_mng_linux_no_op" { description = "Base64 decoded user data rendered for the provided inputs" diff --git a/examples/user_data/versions.tf b/examples/user_data/versions.tf index 7117131f4c..696426845c 100644 --- a/examples/user_data/versions.tf +++ b/examples/user_data/versions.tf @@ -1,3 +1,3 @@ terraform { - required_version = ">= 1.0" + 
required_version = ">= 1.3" } diff --git a/main.tf b/main.tf index 6a25f3a63f..a934c0f38e 100644 --- a/main.tf +++ b/main.tf @@ -12,6 +12,8 @@ data "aws_iam_session_context" "current" { locals { create = var.create && var.putin_khuylo + partition = data.aws_partition.current.partition + cluster_role = try(aws_iam_role.this[0].arn, var.iam_role_arn) create_outposts_local_cluster = length(var.outpost_config) > 0 @@ -30,6 +32,17 @@ resource "aws_eks_cluster" "this" { version = var.cluster_version enabled_cluster_log_types = var.cluster_enabled_log_types + access_config { + authentication_mode = var.authentication_mode + + # See access entries below - this is a one time operation from the EKS API. + # Instead, we are hardcoding this to false and if users wish to achieve this + # same functionality, we will do that through an access entry which can be + # enabled or disabled at any time of their choosing using the variable + # var.enable_cluster_creator_admin_permissions + bootstrap_cluster_creator_admin_permissions = false + } + vpc_config { security_group_ids = compact(distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id]))) subnet_ids = coalescelist(var.control_plane_subnet_ids, var.subnet_ids) @@ -71,14 +84,15 @@ resource "aws_eks_cluster" "this" { } tags = merge( + { terraform-aws-modules = "eks" }, var.tags, var.cluster_tags, ) timeouts { - create = lookup(var.cluster_timeouts, "create", null) - update = lookup(var.cluster_timeouts, "update", null) - delete = lookup(var.cluster_timeouts, "delete", null) + create = try(var.cluster_timeouts.create, null) + update = try(var.cluster_timeouts.update, null) + delete = try(var.cluster_timeouts.delete, null) } depends_on = [ @@ -109,6 +123,7 @@ resource "aws_cloudwatch_log_group" "this" { name = "/aws/eks/${var.cluster_name}/cluster" retention_in_days = var.cloudwatch_log_group_retention_in_days kms_key_id = var.cloudwatch_log_group_kms_key_id + log_group_class = 
var.cloudwatch_log_group_class tags = merge( var.tags, @@ -117,6 +132,92 @@ resource "aws_cloudwatch_log_group" "this" { ) } +################################################################################ +# Access Entry +################################################################################ + +locals { + # This replaces the one time logic from the EKS API with something that can be + # better controlled by users through Terraform + bootstrap_cluster_creator_admin_permissions = { + cluster_creator = { + principal_arn = data.aws_iam_session_context.current.issuer_arn + type = "STANDARD" + + policy_associations = { + admin = { + policy_arn = "arn:${local.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" + access_scope = { + type = "cluster" + } + } + } + } + } + + # Merge the bootstrap behavior with the entries that users provide + merged_access_entries = merge( + { for k, v in local.bootstrap_cluster_creator_admin_permissions : k => v if var.enable_cluster_creator_admin_permissions }, + var.access_entries, + ) + + # Flatten out entries and policy associations so users can specify the policy + # associations within a single entry + flattened_access_entries = flatten([ + for entry_key, entry_val in local.merged_access_entries : [ + for pol_key, pol_val in lookup(entry_val, "policy_associations", {}) : + merge( + { + principal_arn = entry_val.principal_arn + kubernetes_groups = lookup(entry_val, "kubernetes_groups", []) + tags = lookup(entry_val, "tags", {}) + type = lookup(entry_val, "type", "STANDARD") + user_name = lookup(entry_val, "user_name", null) + }, + { for k, v in { + association_policy_arn = pol_val.policy_arn + association_access_scope_type = pol_val.access_scope.type + association_access_scope_namespaces = lookup(pol_val.access_scope, "namespaces", []) + } : k => v if !contains(["EC2_LINUX", "EC2_WINDOWS", "FARGATE_LINUX"], lookup(entry_val, "type", "STANDARD")) }, + { + entry_key = entry_key + pol_key = pol_key + } + ) + 
] + ]) +} + +resource "aws_eks_access_entry" "this" { + for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if local.create } + + cluster_name = aws_eks_cluster.this[0].name + kubernetes_groups = try(each.value.kubernetes_groups, []) + principal_arn = each.value.principal_arn + type = try(each.value.type, "STANDARD") + user_name = try(each.value.user_name, null) + + tags = merge(var.tags, try(each.value.tags, {})) +} + +resource "aws_eks_access_policy_association" "this" { + for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if local.create } + + access_scope { + namespaces = try(each.value.association_access_scope_namespaces, []) + type = each.value.association_access_scope_type + } + + cluster_name = aws_eks_cluster.this[0].name + + policy_arn = each.value.association_policy_arn + principal_arn = each.value.principal_arn + + depends_on = [ + aws_eks_access_entry.this, + ] +} + ################################################################################ # KMS Key ################################################################################ @@ -148,7 +249,10 @@ module "kms" { cluster = { name = "eks/${var.cluster_name}" } } - tags = var.tags + tags = merge( + { terraform-aws-modules = "eks" }, + var.tags, + ) } ################################################################################ @@ -239,7 +343,7 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" { # Not available on outposts count = local.create_oidc_provider ? 
1 : 0 - client_id_list = distinct(compact(concat(["sts.${local.dns_suffix}"], var.openid_connect_audiences))) + client_id_list = distinct(compact(concat(["sts.amazonaws.com"], var.openid_connect_audiences))) thumbprint_list = concat(local.oidc_root_ca_thumbprint, var.custom_oidc_thumbprints) url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer @@ -256,13 +360,9 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" { locals { create_iam_role = local.create && var.create_iam_role iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster") - iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" + iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy" cluster_encryption_policy_name = coalesce(var.cluster_encryption_policy_name, "${local.iam_role_name}-ClusterEncryption") - - # TODO - hopefully this can be removed once the AWS endpoint is named properly in China - # https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1904 - dns_suffix = coalesce(var.cluster_iam_role_dns_suffix, data.aws_partition.current.dns_suffix) } data "aws_iam_policy_document" "assume_role_policy" { @@ -274,7 +374,7 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = ["eks.${local.dns_suffix}"] + identifiers = ["eks.amazonaws.com"] } dynamic "principals" { @@ -283,7 +383,7 @@ data "aws_iam_policy_document" "assume_role_policy" { content { type = "Service" identifiers = [ - "ec2.${local.dns_suffix}", + "ec2.amazonaws.com", ] } } @@ -394,11 +494,12 @@ resource "aws_eks_addon" "this" { cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) - addon_version = coalesce(try(each.value.addon_version, null), data.aws_eks_addon_version.this[each.key].version) - configuration_values = try(each.value.configuration_values, null) - preserve = try(each.value.preserve, null) - resolve_conflicts = try(each.value.resolve_conflicts, 
"OVERWRITE") - service_account_role_arn = try(each.value.service_account_role_arn, null) + addon_version = coalesce(try(each.value.addon_version, null), data.aws_eks_addon_version.this[each.key].version) + configuration_values = try(each.value.configuration_values, null) + preserve = try(each.value.preserve, true) + resolve_conflicts_on_create = try(each.value.resolve_conflicts_on_create, "OVERWRITE") + resolve_conflicts_on_update = try(each.value.resolve_conflicts_on_update, "OVERWRITE") + service_account_role_arn = try(each.value.service_account_role_arn, null) timeouts { create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null) @@ -422,11 +523,12 @@ resource "aws_eks_addon" "before_compute" { cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) - addon_version = coalesce(try(each.value.addon_version, null), data.aws_eks_addon_version.this[each.key].version) - configuration_values = try(each.value.configuration_values, null) - preserve = try(each.value.preserve, null) - resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") - service_account_role_arn = try(each.value.service_account_role_arn, null) + addon_version = coalesce(try(each.value.addon_version, null), data.aws_eks_addon_version.this[each.key].version) + configuration_values = try(each.value.configuration_values, null) + preserve = try(each.value.preserve, true) + resolve_conflicts_on_create = try(each.value.resolve_conflicts_on_create, "OVERWRITE") + resolve_conflicts_on_update = try(each.value.resolve_conflicts_on_update, "OVERWRITE") + service_account_role_arn = try(each.value.service_account_role_arn, null) timeouts { create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null) @@ -468,111 +570,3 @@ resource "aws_eks_identity_provider_config" "this" { tags = var.tags } - -################################################################################ -# aws-auth configmap 
-################################################################################ - -locals { - node_iam_role_arns_non_windows = distinct( - compact( - concat( - [for group in module.eks_managed_node_group : group.iam_role_arn if group.platform != "windows"], - [for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"], - var.aws_auth_node_iam_role_arns_non_windows, - ) - ) - ) - - node_iam_role_arns_windows = distinct( - compact( - concat( - [for group in module.eks_managed_node_group : group.iam_role_arn if group.platform == "windows"], - [for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"], - var.aws_auth_node_iam_role_arns_windows, - ) - ) - ) - - fargate_profile_pod_execution_role_arns = distinct( - compact( - concat( - [for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn], - var.aws_auth_fargate_profile_pod_execution_role_arns, - ) - ) - ) - - aws_auth_configmap_data = { - mapRoles = yamlencode(concat( - [for role_arn in local.node_iam_role_arns_non_windows : { - rolearn = role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - } - ], - [for role_arn in local.node_iam_role_arns_windows : { - rolearn = role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "eks:kube-proxy-windows", - "system:bootstrappers", - "system:nodes", - ] - } - ], - # Fargate profile - [for role_arn in local.fargate_profile_pod_execution_role_arns : { - rolearn = role_arn - username = "system:node:{{SessionName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - "system:node-proxier", - ] - } - ], - var.aws_auth_roles - )) - mapUsers = yamlencode(var.aws_auth_users) - mapAccounts = yamlencode(var.aws_auth_accounts) - } -} - -resource "kubernetes_config_map" "aws_auth" { - count = var.create && var.create_aws_auth_configmap ? 
1 : 0 - - metadata { - name = "aws-auth" - namespace = "kube-system" - } - - data = local.aws_auth_configmap_data - - lifecycle { - # We are ignoring the data here since we will manage it with the resource below - # This is only intended to be used in scenarios where the configmap does not exist - ignore_changes = [data, metadata[0].labels, metadata[0].annotations] - } -} - -resource "kubernetes_config_map_v1_data" "aws_auth" { - count = var.create && var.manage_aws_auth_configmap ? 1 : 0 - - force = true - - metadata { - name = "aws-auth" - namespace = "kube-system" - } - - data = local.aws_auth_configmap_data - - depends_on = [ - # Required for instances where the configmap does not exist yet to avoid race condition - kubernetes_config_map.aws_auth, - ] -} diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md index 0853fd9e1a..4b50bfe35b 100644 --- a/modules/_user_data/README.md +++ b/modules/_user_data/README.md @@ -9,7 +9,7 @@ See [`examples/user_data/`](https://github.com/terraform-aws-modules/terraform-a | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | +| [terraform](#requirement\_terraform) | >= 1.3 | | [cloudinit](#requirement\_cloudinit) | >= 2.0 | ## Providers diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf index 8ace10539d..5d9e669538 100644 --- a/modules/_user_data/main.tf +++ b/modules/_user_data/main.tf @@ -38,9 +38,11 @@ locals { } windows = { - user_data = var.create && var.platform == "windows" && var.enable_bootstrap_user_data ? base64encode(templatefile( + user_data = var.create && var.platform == "windows" && (var.enable_bootstrap_user_data || var.user_data_template_path != "" || var.pre_bootstrap_user_data != "") ? 
base64encode(templatefile( coalesce(var.user_data_template_path, "${path.module}/../../templates/windows_user_data.tpl"), { + # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami + enable_bootstrap_user_data = var.enable_bootstrap_user_data # Required to bootstrap node cluster_name = var.cluster_name cluster_endpoint = var.cluster_endpoint diff --git a/modules/_user_data/versions.tf b/modules/_user_data/versions.tf index 2dbd12cdc0..00dcffe370 100644 --- a/modules/_user_data/versions.tf +++ b/modules/_user_data/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { cloudinit = { diff --git a/modules/aws-auth/README.md b/modules/aws-auth/README.md new file mode 100644 index 0000000000..d66aa9fca7 --- /dev/null +++ b/modules/aws-auth/README.md @@ -0,0 +1,81 @@ +# `aws-auth` Module + +Configuration in this directory creates/updates the `aws-auth` ConfigMap. + +```hcl +module "eks" { + source = "terraform-aws-modules/eks/aws//modules/aws-auth" + version = "~> 20.0" + + manage_aws_auth_configmap = true + + aws_auth_roles = [ + { + rolearn = "arn:aws:iam::66666666666:role/role1" + username = "role1" + groups = ["system:masters"] + }, + ] + + aws_auth_users = [ + { + userarn = "arn:aws:iam::66666666666:user/user1" + username = "user1" + groups = ["system:masters"] + }, + { + userarn = "arn:aws:iam::66666666666:user/user2" + username = "user2" + groups = ["system:masters"] + }, + ] + + aws_auth_accounts = [ + "777777777777", + "888888888888", + ] +} +``` + +## Usage + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [kubernetes](#requirement\_kubernetes) | >= 2.20 | + +## Providers + +| Name | Version | +|------|---------| +| [kubernetes](#provider\_kubernetes) | >= 2.20 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_auth\_accounts](#input\_aws\_auth\_accounts) | List of account maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [aws\_auth\_roles](#input\_aws\_auth\_roles) | List of role maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [aws\_auth\_users](#input\_aws\_auth\_users) | List of user maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [create](#input\_create) | Controls if resources should be created (affects all resources) | `bool` | `true` | no | +| [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | +| [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `true` | no | + +## Outputs + +No outputs. 
+ diff --git a/modules/aws-auth/main.tf b/modules/aws-auth/main.tf new file mode 100644 index 0000000000..2f7e9694a7 --- /dev/null +++ b/modules/aws-auth/main.tf @@ -0,0 +1,47 @@ + +################################################################################ +# aws-auth configmap +################################################################################ + +locals { + aws_auth_configmap_data = { + mapRoles = yamlencode(var.aws_auth_roles) + mapUsers = yamlencode(var.aws_auth_users) + mapAccounts = yamlencode(var.aws_auth_accounts) + } +} + +resource "kubernetes_config_map" "aws_auth" { + count = var.create && var.create_aws_auth_configmap ? 1 : 0 + + metadata { + name = "aws-auth" + namespace = "kube-system" + } + + data = local.aws_auth_configmap_data + + lifecycle { + # We are ignoring the data here since we will manage it with the resource below + # This is only intended to be used in scenarios where the configmap does not exist + ignore_changes = [data, metadata[0].labels, metadata[0].annotations] + } +} + +resource "kubernetes_config_map_v1_data" "aws_auth" { + count = var.create && var.manage_aws_auth_configmap ? 
1 : 0 + + force = true + + metadata { + name = "aws-auth" + namespace = "kube-system" + } + + data = local.aws_auth_configmap_data + + depends_on = [ + # Required for instances where the configmap does not exist yet to avoid race condition + kubernetes_config_map.aws_auth, + ] +} diff --git a/examples/complete/variables.tf b/modules/aws-auth/outputs.tf similarity index 100% rename from examples/complete/variables.tf rename to modules/aws-auth/outputs.tf diff --git a/modules/aws-auth/variables.tf b/modules/aws-auth/variables.tf new file mode 100644 index 0000000000..3aaeb023e3 --- /dev/null +++ b/modules/aws-auth/variables.tf @@ -0,0 +1,39 @@ +variable "create" { + description = "Controls if resources should be created (affects all resources)" + type = bool + default = true +} + +################################################################################ +# aws-auth ConfigMap +################################################################################ + +variable "create_aws_auth_configmap" { + description = "Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). 
Most users should use `manage_aws_auth_configmap`" + type = bool + default = false +} + +variable "manage_aws_auth_configmap" { + description = "Determines whether to manage the aws-auth configmap" + type = bool + default = true +} + +variable "aws_auth_roles" { + description = "List of role maps to add to the aws-auth configmap" + type = list(any) + default = [] +} + +variable "aws_auth_users" { + description = "List of user maps to add to the aws-auth configmap" + type = list(any) + default = [] +} + +variable "aws_auth_accounts" { + description = "List of account maps to add to the aws-auth configmap" + type = list(any) + default = [] +} diff --git a/modules/aws-auth/versions.tf b/modules/aws-auth/versions.tf new file mode 100644 index 0000000000..8ddb07fa57 --- /dev/null +++ b/modules/aws-auth/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.20" + } + } +} diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 69d33ae51e..ebae013b92 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -63,14 +63,14 @@ module "eks_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | ## Modules @@ -98,7 +98,7 @@ module "eks_managed_node_group" { |------|-------------|------|---------|:--------:| | [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance. If not supplied, EKS will use its own default image | `string` | `""` | no | | [ami\_release\_version](#input\_ami\_release\_version) | AMI version of the EKS Node Group. 
Defaults to latest version for Kubernetes version | `string` | `null` | no | -| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64` | `string` | `null` | no | +| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values | `string` | `null` | no | | [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no | | [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no | | [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `{}` | no | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index e4a59758b4..16ca010ae6 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -409,7 +409,7 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"] + identifiers = ["ec2.amazonaws.com"] } } } diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index 197cd28c95..ede9dc4c50 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -321,7 +321,7 @@ variable "use_name_prefix" { } variable "ami_type" { - 
description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64`" + description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values" type = string default = null } diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf index 01d187af62..63e9319d92 100644 --- a/modules/eks-managed-node-group/versions.tf +++ b/modules/eks-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.34" } } } diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index d841524ac4..8656a6f191 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -28,14 +28,14 @@ module "fargate_profile" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | ## Modules diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf index 01d187af62..63e9319d92 100644 --- a/modules/fargate-profile/versions.tf +++ b/modules/fargate-profile/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.34" } } } diff --git a/modules/karpenter/README.md b/modules/karpenter/README.md index 
7b813471ad..3262f71c63 100644 --- a/modules/karpenter/README.md +++ b/modules/karpenter/README.md @@ -7,30 +7,15 @@ Configuration in this directory creates the AWS resources required by Karpenter ### All Resources (Default) In the following example, the Karpenter module will create: -- An IAM role for service accounts (IRSA) with a narrowly scoped IAM policy for the Karpenter controller to utilize -- An IAM role and instance profile for the nodes created by Karpenter to utilize - - Note: This IAM role ARN will need to be added to the `aws-auth` configmap for nodes to join the cluster successfully -- An SQS queue and Eventbridge event rules for Karpenter to utilize for spot termination handling, capacity rebalancing, etc. - -This setup is great for running Karpenter on EKS Fargate: +- An IAM role for use with Pod Identity and a scoped IAM policy for the Karpenter controller +- A Node IAM role that Karpenter will use to create an Instance Profile for the nodes to receive IAM permissions +- An access entry for the Node IAM role to allow nodes to join the cluster +- SQS queue and EventBridge event rules for Karpenter to utilize for spot termination handling, capacity re-balancing, etc. ```hcl module "eks" { - source = "terraform-aws-modules/eks" + source = "terraform-aws-modules/eks/aws" - # Shown just for connection between cluster and Karpenter sub-module below - manage_aws_auth_configmap = true - aws_auth_roles = [ - # We need to add in the Karpenter node IAM role for nodes launched by Karpenter - { - rolearn = module.karpenter.role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - ] ... 
} @@ -39,11 +24,8 @@ module "karpenter" { cluster_name = module.eks.cluster_name - irsa_oidc_provider_arn = module.eks.oidc_provider_arn - irsa_namespace_service_accounts = ["karpenter:karpenter"] - # Attach additional IAM policies to the Karpenter node IAM role - iam_role_additional_policies = { + node_iam_role_additional_policies = { AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" } @@ -54,15 +36,13 @@ module "karpenter" { } ``` -### External Node IAM Role (Default) +### Re-Use Existing Node IAM Role In the following example, the Karpenter module will create: -- An IAM role for service accounts (IRSA) with a narrowly scoped IAM policy for the Karpenter controller to utilize -- An IAM instance profile for the nodes created by Karpenter to utilize - - Note: This setup will utilize the existing IAM role created by the EKS Managed Node group which means the role is already populated in the `aws-auth` configmap and no further updates are required. -- An SQS queue and Eventbridge event rules for Karpenter to utilize for spot termination handling, capacity rebalancing, etc. +- An IAM role for use with Pod Identity and a scoped IAM policy for the Karpenter controller +- SQS queue and EventBridge event rules for Karpenter to utilize for spot termination handling, capacity re-balancing, etc. 
-In this scenario, Karpenter would run atop the EKS Managed Node group and scale out nodes as needed from there: +In this scenario, Karpenter will re-use an existing Node IAM role from the EKS managed nodegroup which already has the necessary access entry permissions: ```hcl module "eks" { @@ -86,12 +66,12 @@ module "karpenter" { cluster_name = module.eks.cluster_name - irsa_oidc_provider_arn = module.eks.oidc_provider_arn - irsa_namespace_service_accounts = ["karpenter:karpenter"] - create_iam_role = false iam_role_arn = module.eks.eks_managed_node_groups["initial"].iam_role_arn + # Since the nodegroup role will already have an access entry + create_access_entry = false + tags = { Environment = "dev" Terraform = "true" @@ -104,14 +84,14 @@ module "karpenter" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | ## Modules @@ -123,61 +103,65 @@ No modules. 
|------|------| | [aws_cloudwatch_event_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | | [aws_cloudwatch_event_target.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_eks_access_entry.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource | | [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | -| [aws_iam_policy.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_role.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | -| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | -| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_iam_role_policy_attachment.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_iam_role_policy_attachment.irsa_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_policy.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | 
+| [aws_iam_role_policy_attachment.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.controller_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_sqs_queue.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | | [aws_sqs_queue_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_iam_policy_document.assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.irsa_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.controller_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| 
[aws_iam_policy_document.node_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.queue](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | +| [access\_entry\_type](#input\_access\_entry\_type) | Type of the access entry. `EC2_LINUX`, `FARGATE_LINUX`, or `EC2_WINDOWS`; defaults to `EC2_LINUX` | `string` | `"EC2_LINUX"` | no | +| [ami\_id\_ssm\_parameter\_arns](#input\_ami\_id\_ssm\_parameter\_arns) | List of SSM Parameter ARNs that Karpenter controller is allowed read access (for retrieving AMI IDs) | `list(string)` | `[]` | no | +| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. Note: If `ipv6` is specified, the `AmazonEKS_CNI_IPv6_Policy` must exist in the account. 
This policy is created by the EKS module with `create_cni_ipv6_iam_policy = true` | `string` | `null` | no | | [cluster\_name](#input\_cluster\_name) | The name of the EKS cluster | `string` | `""` | no | -| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no | -| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | -| [create\_instance\_profile](#input\_create\_instance\_profile) | Whether to create an IAM instance profile | `bool` | `true` | no | -| [create\_irsa](#input\_create\_irsa) | Determines whether an IAM role for service accounts is created | `bool` | `true` | no | -| [enable\_karpenter\_instance\_profile\_creation](#input\_enable\_karpenter\_instance\_profile\_creation) | Determines whether Karpenter will be allowed to create the IAM instance profile (v1beta1) or if Terraform will (v1alpha1) | `bool` | `false` | no | +| [create](#input\_create) | Controls if resources should be created (affects nearly all resources) | `bool` | `true` | no | +| [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the node IAM role | `bool` | `true` | no | +| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created | `bool` | `true` | no | +| [create\_instance\_profile](#input\_create\_instance\_profile) | Whether to create an IAM instance profile | `bool` | `false` | no | +| [create\_node\_iam\_role](#input\_create\_node\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | +| [enable\_irsa](#input\_enable\_irsa) | Determines whether to enable support IAM role for service account | `bool` | `false` | no | | [enable\_spot\_termination](#input\_enable\_spot\_termination) | Determines whether to enable native spot termination handling | `bool` | `true` | no | -| 
[iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | -| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no | -| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | -| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | +| [iam\_policy\_description](#input\_iam\_policy\_description) | IAM policy description | `string` | `"Karpenter controller IAM policy"` | no | +| [iam\_policy\_name](#input\_iam\_policy\_name) | Name of the IAM policy | `string` | `"KarpenterController"` | no | +| [iam\_policy\_path](#input\_iam\_policy\_path) | Path of the IAM policy | `string` | `"/"` | no | +| [iam\_policy\_use\_name\_prefix](#input\_iam\_policy\_use\_name\_prefix) | Determines whether the name of the IAM policy (`iam_policy_name`) is used as a prefix | `bool` | `true` | no | +| [iam\_role\_description](#input\_iam\_role\_description) | IAM role description | `string` | `"Karpenter controller IAM role"` | no | | [iam\_role\_max\_session\_duration](#input\_iam\_role\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no | -| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | -| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `"/"` | no | -| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions 
boundary for the IAM role | `string` | `null` | no | -| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no | -| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no | +| [iam\_role\_name](#input\_iam\_role\_name) | Name of the IAM role | `string` | `"KarpenterController"` | no | +| [iam\_role\_path](#input\_iam\_role\_path) | Path of the IAM role | `string` | `"/"` | no | +| [iam\_role\_permissions\_boundary\_arn](#input\_iam\_role\_permissions\_boundary\_arn) | Permissions boundary ARN to use for the IAM role | `string` | `null` | no | +| [iam\_role\_policies](#input\_iam\_role\_policies) | Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format | `map(string)` | `{}` | no | +| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add the the IAM role | `map(any)` | `{}` | no | +| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the name of the IAM role (`iam_role_name`) is used as a prefix | `bool` | `true` | no | | [irsa\_assume\_role\_condition\_test](#input\_irsa\_assume\_role\_condition\_test) | Name of the [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) to evaluate when assuming the role | `string` | `"StringEquals"` | no | -| [irsa\_description](#input\_irsa\_description) | IAM role for service accounts description | `string` | `"Karpenter IAM role for service account"` | no | -| [irsa\_max\_session\_duration](#input\_irsa\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no | -| [irsa\_name](#input\_irsa\_name) | Name of IAM role for service accounts | `string` | `null` | no | | 
[irsa\_namespace\_service\_accounts](#input\_irsa\_namespace\_service\_accounts) | List of `namespace:serviceaccount`pairs to use in trust policy for IAM role for service accounts | `list(string)` |
[
"karpenter:karpenter"
]
| no | | [irsa\_oidc\_provider\_arn](#input\_irsa\_oidc\_provider\_arn) | OIDC provider arn used in trust policy for IAM role for service accounts | `string` | `""` | no | -| [irsa\_path](#input\_irsa\_path) | Path of IAM role for service accounts | `string` | `"/"` | no | -| [irsa\_permissions\_boundary\_arn](#input\_irsa\_permissions\_boundary\_arn) | Permissions boundary ARN to use for IAM role for service accounts | `string` | `null` | no | -| [irsa\_policy\_name](#input\_irsa\_policy\_name) | Name of IAM policy for service accounts | `string` | `null` | no | -| [irsa\_ssm\_parameter\_arns](#input\_irsa\_ssm\_parameter\_arns) | List of SSM Parameter ARNs that contain AMI IDs launched by Karpenter | `list(string)` |
[
"arn:aws:ssm:*:*:parameter/aws/service/*"
]
| no | -| [irsa\_subnet\_account\_id](#input\_irsa\_subnet\_account\_id) | Account ID of where the subnets Karpenter will utilize resides. Used when subnets are shared from another account | `string` | `""` | no | -| [irsa\_tag\_key](#input\_irsa\_tag\_key) | Tag key (`{key = value}`) applied to resources launched by Karpenter through the Karpenter provisioner | `string` | `"karpenter.sh/discovery"` | no | -| [irsa\_tag\_values](#input\_irsa\_tag\_values) | Tag values (`{key = value}`) applied to resources launched by Karpenter through the Karpenter provisioner. Defaults to cluster name when not set. | `list(string)` | `[]` | no | -| [irsa\_tags](#input\_irsa\_tags) | A map of additional tags to add the the IAM role for service accounts | `map(any)` | `{}` | no | -| [irsa\_use\_name\_prefix](#input\_irsa\_use\_name\_prefix) | Determines whether the IAM role for service accounts name (`irsa_name`) is used as a prefix | `bool` | `true` | no | -| [policies](#input\_policies) | Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format | `map(string)` | `{}` | no | +| [node\_iam\_role\_additional\_policies](#input\_node\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | +| [node\_iam\_role\_arn](#input\_node\_iam\_role\_arn) | Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no | +| [node\_iam\_role\_attach\_cni\_policy](#input\_node\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | +| [node\_iam\_role\_description](#input\_node\_iam\_role\_description) | Description of the role | `string` | `null` | no | +| [node\_iam\_role\_max\_session\_duration](#input\_node\_iam\_role\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no | +| [node\_iam\_role\_name](#input\_node\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | +| [node\_iam\_role\_path](#input\_node\_iam\_role\_path) | IAM role path | `string` | `"/"` | no | +| [node\_iam\_role\_permissions\_boundary](#input\_node\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no | +| [node\_iam\_role\_tags](#input\_node\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no | +| [node\_iam\_role\_use\_name\_prefix](#input\_node\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no | | [queue\_kms\_data\_key\_reuse\_period\_seconds](#input\_queue\_kms\_data\_key\_reuse\_period\_seconds) | The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again | `number` | `null` | no | | [queue\_kms\_master\_key\_id](#input\_queue\_kms\_master\_key\_id) | The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK | `string` | `null` | no | | [queue\_managed\_sse\_enabled](#input\_queue\_managed\_sse\_enabled) | Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys | `bool` | `true` | no | @@ -190,17 +174,18 @@ No modules. 
| Name | Description | |------|-------------| | [event\_rules](#output\_event\_rules) | Map of the event rules created and their attributes | +| [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the controller IAM role | +| [iam\_role\_name](#output\_iam\_role\_name) | The name of the controller IAM role | +| [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the controller IAM role | | [instance\_profile\_arn](#output\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile | | [instance\_profile\_id](#output\_instance\_profile\_id) | Instance profile's ID | | [instance\_profile\_name](#output\_instance\_profile\_name) | Name of the instance profile | | [instance\_profile\_unique](#output\_instance\_profile\_unique) | Stable and unique string identifying the IAM instance profile | -| [irsa\_arn](#output\_irsa\_arn) | The Amazon Resource Name (ARN) specifying the IAM role for service accounts | -| [irsa\_name](#output\_irsa\_name) | The name of the IAM role for service accounts | -| [irsa\_unique\_id](#output\_irsa\_unique\_id) | Stable and unique string identifying the IAM role for service accounts | +| [node\_access\_entry\_arn](#output\_node\_access\_entry\_arn) | Amazon Resource Name (ARN) of the node Access Entry | +| [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role | +| [node\_iam\_role\_name](#output\_node\_iam\_role\_name) | The name of the node IAM role | +| [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the node IAM role | | [queue\_arn](#output\_queue\_arn) | The ARN of the SQS queue | | [queue\_name](#output\_queue\_name) | The name of the created Amazon SQS queue | | [queue\_url](#output\_queue\_url) | The URL for the created Amazon SQS queue | -| [role\_arn](#output\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role 
| -| [role\_name](#output\_role\_name) | The name of the IAM role | -| [role\_unique\_id](#output\_role\_unique\_id) | Stable and unique string identifying the IAM role | diff --git a/modules/karpenter/main.tf b/modules/karpenter/main.tf index 2ce9f95c16..dfd6042e34 100644 --- a/modules/karpenter/main.tf +++ b/modules/karpenter/main.tf @@ -1,207 +1,406 @@ +data "aws_region" "current" {} data "aws_partition" "current" {} data "aws_caller_identity" "current" {} locals { account_id = data.aws_caller_identity.current.account_id - partition = data.aws_partition.current.partition dns_suffix = data.aws_partition.current.dns_suffix + partition = data.aws_partition.current.partition + region = data.aws_region.current.name } ################################################################################ -# IAM Role for Service Account (IRSA) -# This is used by the Karpenter controller +# Karpenter controller IAM Role ################################################################################ locals { - create_irsa = var.create && var.create_irsa - irsa_name = coalesce(var.irsa_name, "KarpenterIRSA-${var.cluster_name}") - irsa_policy_name = coalesce(var.irsa_policy_name, local.irsa_name) - + create_iam_role = var.create && var.create_iam_role irsa_oidc_provider_url = replace(var.irsa_oidc_provider_arn, "/^(.*provider/)/", "") } -data "aws_iam_policy_document" "irsa_assume_role" { - count = local.create_irsa ? 1 : 0 +data "aws_iam_policy_document" "controller_assume_role" { + count = local.create_iam_role ? 
1 : 0 + # Pod Identity statement { - effect = "Allow" - actions = ["sts:AssumeRoleWithWebIdentity"] + actions = [ + "sts:AssumeRole", + "sts:TagSession", + ] principals { - type = "Federated" - identifiers = [var.irsa_oidc_provider_arn] + type = "Service" + identifiers = ["pods.eks.amazonaws.com"] } + } - condition { - test = var.irsa_assume_role_condition_test - variable = "${local.irsa_oidc_provider_url}:sub" - values = [for sa in var.irsa_namespace_service_accounts : "system:serviceaccount:${sa}"] - } + # IAM Roles for Service Accounts (IRSA) + dynamic "statement" { + for_each = var.enable_irsa ? [1] : [] - # https://aws.amazon.com/premiumsupport/knowledge-center/eks-troubleshoot-oidc-and-irsa/?nc1=h_ls - condition { - test = var.irsa_assume_role_condition_test - variable = "${local.irsa_oidc_provider_url}:aud" - values = ["sts.amazonaws.com"] + content { + actions = ["sts:AssumeRoleWithWebIdentity"] + + principals { + type = "Federated" + identifiers = [var.irsa_oidc_provider_arn] + } + + condition { + test = var.irsa_assume_role_condition_test + variable = "${local.irsa_oidc_provider_url}:sub" + values = [for sa in var.irsa_namespace_service_accounts : "system:serviceaccount:${sa}"] + } + + # https://aws.amazon.com/premiumsupport/knowledge-center/eks-troubleshoot-oidc-and-irsa/?nc1=h_ls + condition { + test = var.irsa_assume_role_condition_test + variable = "${local.irsa_oidc_provider_url}:aud" + values = ["sts.amazonaws.com"] + } } } } -resource "aws_iam_role" "irsa" { - count = local.create_irsa ? 1 : 0 +resource "aws_iam_role" "controller" { + count = local.create_iam_role ? 1 : 0 - name = var.irsa_use_name_prefix ? null : local.irsa_name - name_prefix = var.irsa_use_name_prefix ? "${local.irsa_name}-" : null - path = var.irsa_path - description = var.irsa_description + name = var.iam_role_use_name_prefix ? null : var.iam_role_name + name_prefix = var.iam_role_use_name_prefix ? 
"${var.iam_role_name}-" : null + path = var.iam_role_path + description = var.iam_role_description - assume_role_policy = data.aws_iam_policy_document.irsa_assume_role[0].json - max_session_duration = var.irsa_max_session_duration - permissions_boundary = var.irsa_permissions_boundary_arn + assume_role_policy = data.aws_iam_policy_document.controller_assume_role[0].json + max_session_duration = var.iam_role_max_session_duration + permissions_boundary = var.iam_role_permissions_boundary_arn force_detach_policies = true - tags = merge(var.tags, var.irsa_tags) + tags = merge(var.tags, var.iam_role_tags) } -locals { - irsa_tag_values = coalescelist(var.irsa_tag_values, [var.cluster_name]) -} +data "aws_iam_policy_document" "controller" { + count = local.create_iam_role ? 1 : 0 -data "aws_iam_policy_document" "irsa" { - count = local.create_irsa ? 1 : 0 + statement { + sid = "AllowScopedEC2InstanceActions" + resources = [ + "arn:${local.partition}:ec2:*::image/*", + "arn:${local.partition}:ec2:*::snapshot/*", + "arn:${local.partition}:ec2:*:*:spot-instances-request/*", + "arn:${local.partition}:ec2:*:*:security-group/*", + "arn:${local.partition}:ec2:*:*:subnet/*", + "arn:${local.partition}:ec2:*:*:launch-template/*", + ] + + actions = [ + "ec2:RunInstances", + "ec2:CreateFleet" + ] + } statement { + sid = "AllowScopedEC2InstanceActionsWithTags" + resources = [ + "arn:${local.partition}:ec2:*:*:fleet/*", + "arn:${local.partition}:ec2:*:*:instance/*", + "arn:${local.partition}:ec2:*:*:volume/*", + "arn:${local.partition}:ec2:*:*:network-interface/*", + "arn:${local.partition}:ec2:*:*:launch-template/*", + "arn:${local.partition}:ec2:*:*:spot-instances-request/*", + ] actions = [ - "ec2:CreateLaunchTemplate", + "ec2:RunInstances", "ec2:CreateFleet", - "ec2:CreateTags", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeImages", - "ec2:DescribeInstances", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeInstanceTypes", - 
"ec2:DescribeInstanceTypeOfferings", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeSpotPriceHistory", - "pricing:GetProducts", + "ec2:CreateLaunchTemplate" ] - resources = ["*"] + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.sh/nodepool" + values = ["*"] + } } statement { - actions = [ - "ec2:TerminateInstances", - "ec2:DeleteLaunchTemplate", + sid = "AllowScopedResourceCreationTagging" + resources = [ + "arn:${local.partition}:ec2:*:*:fleet/*", + "arn:${local.partition}:ec2:*:*:instance/*", + "arn:${local.partition}:ec2:*:*:volume/*", + "arn:${local.partition}:ec2:*:*:network-interface/*", + "arn:${local.partition}:ec2:*:*:launch-template/*", + "arn:${local.partition}:ec2:*:*:spot-instances-request/*", ] + actions = ["ec2:CreateTags"] - resources = ["*"] + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } condition { test = "StringEquals" - variable = "ec2:ResourceTag/${var.irsa_tag_key}" - values = local.irsa_tag_values + variable = "ec2:CreateAction" + values = [ + "RunInstances", + "CreateFleet", + "CreateLaunchTemplate", + ] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.sh/nodepool" + values = ["*"] } } statement { - actions = ["ec2:RunInstances"] + sid = "AllowScopedResourceTagging" + resources = ["arn:${local.partition}:ec2:*:*:instance/*"] + actions = ["ec2:CreateTags"] + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.sh/nodepool" + values = ["*"] + } + + condition { + test = "ForAllValues:StringEquals" + variable = "aws:TagKeys" + values = [ + "karpenter.sh/nodeclaim", + "Name", + ] + } + } + + statement { + 
sid = "AllowScopedDeletion" resources = [ - "arn:${local.partition}:ec2:*:${local.account_id}:launch-template/*", + "arn:${local.partition}:ec2:*:*:instance/*", + "arn:${local.partition}:ec2:*:*:launch-template/*" + ] + + actions = [ + "ec2:TerminateInstances", + "ec2:DeleteLaunchTemplate" ] condition { test = "StringEquals" - variable = "ec2:ResourceTag/${var.irsa_tag_key}" - values = local.irsa_tag_values + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.sh/nodepool" + values = ["*"] } } statement { - actions = ["ec2:RunInstances"] - resources = [ - "arn:${local.partition}:ec2:*::image/*", - "arn:${local.partition}:ec2:*::snapshot/*", - "arn:${local.partition}:ec2:*:${local.account_id}:instance/*", - "arn:${local.partition}:ec2:*:${local.account_id}:spot-instances-request/*", - "arn:${local.partition}:ec2:*:${local.account_id}:security-group/*", - "arn:${local.partition}:ec2:*:${local.account_id}:volume/*", - "arn:${local.partition}:ec2:*:${local.account_id}:network-interface/*", - "arn:${local.partition}:ec2:*:${coalesce(var.irsa_subnet_account_id, local.account_id)}:subnet/*", + sid = "AllowRegionalReadActions" + resources = ["*"] + actions = [ + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets" ] + + condition { + test = "StringEquals" + variable = "aws:RequestedRegion" + values = [local.region] + } } statement { + sid = "AllowSSMReadActions" + resources = coalescelist(var.ami_id_ssm_parameter_arns, ["arn:${local.partition}:ssm:${local.region}::parameter/aws/service/*"]) actions = ["ssm:GetParameter"] - resources = var.irsa_ssm_parameter_arns } statement { - actions = ["eks:DescribeCluster"] - resources = 
["arn:${local.partition}:eks:*:${local.account_id}:cluster/${var.cluster_name}"] + sid = "AllowPricingReadActions" + resources = ["*"] + actions = ["pricing:GetProducts"] } statement { + sid = "AllowInterruptionQueueActions" + resources = [aws_sqs_queue.this[0].arn] + actions = [ + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage" + ] + } + + statement { + sid = "AllowPassingInstanceRole" + resources = var.create_node_iam_role ? [aws_iam_role.node[0].arn] : [var.node_iam_role_arn] actions = ["iam:PassRole"] - resources = [var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = ["ec2.amazonaws.com"] + } } - dynamic "statement" { - for_each = local.enable_spot_termination ? [1] : [] + statement { + sid = "AllowScopedInstanceProfileCreationActions" + resources = ["*"] + actions = ["iam:CreateInstanceProfile"] - content { - actions = [ - "sqs:DeleteMessage", - "sqs:GetQueueUrl", - "sqs:GetQueueAttributes", - "sqs:ReceiveMessage", - ] - resources = [aws_sqs_queue.this[0].arn] + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:RequestTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] } } - # TODO - this will be replaced in v20.0 with the scoped policy provided by Karpenter - # https://github.com/aws/karpenter/blob/main/website/content/en/docs/upgrading/v1beta1-controller-policy.json - dynamic "statement" { - for_each = var.enable_karpenter_instance_profile_creation ? 
[1] : [] + statement { + sid = "AllowScopedInstanceProfileTagActions" + resources = ["*"] + actions = ["iam:TagInstanceProfile"] - content { - actions = [ - "iam:AddRoleToInstanceProfile", - "iam:CreateInstanceProfile", - "iam:DeleteInstanceProfile", - "iam:GetInstanceProfile", - "iam:RemoveRoleFromInstanceProfile", - "iam:TagInstanceProfile", - ] - resources = ["*"] + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] } } + + statement { + sid = "AllowScopedInstanceProfileActions" + resources = ["*"] + actions = [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ] + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + } + + statement { + sid = "AllowInstanceProfileReadActions" + resources = ["*"] + actions = ["iam:GetInstanceProfile"] + } + + statement { + sid = "AllowAPIServerEndpointDiscovery" + resources = 
["arn:${local.partition}:eks:${local.region}:${local.account_id}:cluster/${var.cluster_name}"] + actions = ["eks:DescribeCluster"] + } } -resource "aws_iam_policy" "irsa" { - count = local.create_irsa ? 1 : 0 +resource "aws_iam_policy" "controller" { + count = local.create_iam_role ? 1 : 0 - name_prefix = "${local.irsa_policy_name}-" - path = var.irsa_path - description = var.irsa_description - policy = data.aws_iam_policy_document.irsa[0].json + name = var.iam_policy_use_name_prefix ? null : var.iam_policy_name + name_prefix = var.iam_policy_use_name_prefix ? "${var.iam_policy_name}-" : null + path = var.iam_policy_path + description = var.iam_policy_description + policy = data.aws_iam_policy_document.controller[0].json tags = var.tags } -resource "aws_iam_role_policy_attachment" "irsa" { - count = local.create_irsa ? 1 : 0 +resource "aws_iam_role_policy_attachment" "controller" { + count = local.create_iam_role ? 1 : 0 - role = aws_iam_role.irsa[0].name - policy_arn = aws_iam_policy.irsa[0].arn + role = aws_iam_role.controller[0].name + policy_arn = aws_iam_policy.controller[0].arn } -resource "aws_iam_role_policy_attachment" "irsa_additional" { - for_each = { for k, v in var.policies : k => v if local.create_irsa } +resource "aws_iam_role_policy_attachment" "controller_additional" { + for_each = { for k, v in var.iam_role_policies : k => v if local.create_iam_role } - role = aws_iam_role.irsa[0].name + role = aws_iam_role.controller[0].name policy_arn = each.value } @@ -266,7 +465,7 @@ locals { detail-type = ["AWS Health Event"] } } - spot_interupt = { + spot_interrupt = { name = "SpotInterrupt" description = "Karpenter interrupt - EC2 spot instance interruption warning" event_pattern = { @@ -320,15 +519,15 @@ resource "aws_cloudwatch_event_target" "this" { ################################################################################ locals { - create_iam_role = var.create && var.create_iam_role + create_node_iam_role = var.create && var.create_node_iam_role 
- iam_role_name = coalesce(var.iam_role_name, "Karpenter-${var.cluster_name}") - iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy" - cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${local.partition}:iam::${local.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + node_iam_role_name = coalesce(var.node_iam_role_name, "Karpenter-${var.cluster_name}") + node_iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy" + cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${local.partition}:iam::${local.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.node_iam_role_policy_prefix}/AmazonEKS_CNI_Policy" } -data "aws_iam_policy_document" "assume_role" { - count = local.create_iam_role ? 1 : 0 +data "aws_iam_policy_document" "node_assume_role" { + count = local.create_node_iam_role ? 1 : 0 statement { sid = "EKSNodeAssumeRole" @@ -336,62 +535,83 @@ data "aws_iam_policy_document" "assume_role" { principals { type = "Service" - identifiers = ["ec2.${local.dns_suffix}"] + identifiers = ["ec2.amazonaws.com"] } } } -resource "aws_iam_role" "this" { - count = local.create_iam_role ? 1 : 0 +resource "aws_iam_role" "node" { + count = local.create_node_iam_role ? 1 : 0 - name = var.iam_role_use_name_prefix ? null : local.iam_role_name - name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null - path = var.iam_role_path - description = var.iam_role_description + name = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name + name_prefix = var.node_iam_role_use_name_prefix ? 
"${local.node_iam_role_name}-" : null + path = var.node_iam_role_path + description = var.node_iam_role_description - assume_role_policy = data.aws_iam_policy_document.assume_role[0].json - max_session_duration = var.iam_role_max_session_duration - permissions_boundary = var.iam_role_permissions_boundary + assume_role_policy = data.aws_iam_policy_document.node_assume_role[0].json + max_session_duration = var.node_iam_role_max_session_duration + permissions_boundary = var.node_iam_role_permissions_boundary force_detach_policies = true - tags = merge(var.tags, var.iam_role_tags) + tags = merge(var.tags, var.node_iam_role_tags) } # Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group -resource "aws_iam_role_policy_attachment" "this" { +resource "aws_iam_role_policy_attachment" "node" { for_each = { for k, v in toset(compact([ - "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", - "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", - var.iam_role_attach_cni_policy ? local.cni_policy : "", - ])) : k => v if local.create_iam_role } + "${local.node_iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", + "${local.node_iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", + var.node_iam_role_attach_cni_policy ? 
local.cni_policy : "", + ])) : k => v if local.create_node_iam_role } policy_arn = each.value - role = aws_iam_role.this[0].name + role = aws_iam_role.node[0].name } -resource "aws_iam_role_policy_attachment" "additional" { - for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role } +resource "aws_iam_role_policy_attachment" "node_additional" { + for_each = { for k, v in var.node_iam_role_additional_policies : k => v if local.create_node_iam_role } policy_arn = each.value - role = aws_iam_role.this[0].name + role = aws_iam_role.node[0].name +} + +################################################################################ +# Access Entry +################################################################################ + +resource "aws_eks_access_entry" "node" { + count = var.create && var.create_access_entry ? 1 : 0 + + cluster_name = var.cluster_name + principal_arn = var.create_node_iam_role ? aws_iam_role.node[0].arn : var.node_iam_role_arn + type = var.access_entry_type + + tags = var.tags + + depends_on = [ + # If we try to add this too quickly, it fails. So .... we wait + aws_sqs_queue_policy.this, + ] } ################################################################################ # Node IAM Instance Profile # This is used by the nodes launched by Karpenter +# Starting with Karpenter 0.32 this is no longer required as Karpenter will +# create the Instance Profile ################################################################################ locals { - external_role_name = try(replace(var.iam_role_arn, "/^(.*role/)/", ""), null) + external_role_name = try(replace(var.node_iam_role_arn, "/^(.*role/)/", ""), null) } resource "aws_iam_instance_profile" "this" { count = var.create && var.create_instance_profile ? 1 : 0 - name = var.iam_role_use_name_prefix ? null : local.iam_role_name - name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null - path = var.iam_role_path - role = var.create_iam_role ? 
aws_iam_role.this[0].name : local.external_role_name + name = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name + name_prefix = var.node_iam_role_use_name_prefix ? "${local.node_iam_role_name}-" : null + path = var.node_iam_role_path + role = var.create_node_iam_role ? aws_iam_role.node[0].name : local.external_role_name - tags = merge(var.tags, var.iam_role_tags) + tags = merge(var.tags, var.node_iam_role_tags) } diff --git a/modules/karpenter/migrations.tf b/modules/karpenter/migrations.tf new file mode 100644 index 0000000000..3e7ca59a9b --- /dev/null +++ b/modules/karpenter/migrations.tf @@ -0,0 +1,56 @@ +################################################################################ +# Migrations: v19.21 -> v20.0 +################################################################################ + +# Node IAM role +moved { + from = aws_iam_role.this + to = aws_iam_role.node +} + +moved { + from = aws_iam_policy.this + to = aws_iam_policy.node +} + +moved { + from = aws_iam_role_policy_attachment.this + to = aws_iam_role_policy_attachment.node +} + +moved { + from = aws_iam_role_policy_attachment.additional + to = aws_iam_role_policy_attachment.node_additional +} + +# Controller IAM role +moved { + from = aws_iam_role.irsa + to = aws_iam_role.controller +} + +moved { + from = aws_iam_policy.irsa + to = aws_iam_policy.controller +} + +moved { + from = aws_iam_role_policy_attachment.irsa + to = aws_iam_role_policy_attachment.controller +} + +moved { + from = aws_iam_role_policy_attachment.irsa_additional + to = aws_iam_role_policy_attachment.controller_additional +} + +# Spelling correction +moved { + from = aws_cloudwatch_event_target.this["spot_interupt"] + to = aws_cloudwatch_event_target.this["spot_interrupt"] +} + +moved { + from = aws_cloudwatch_event_rule.this["spot_interupt"] + to = aws_cloudwatch_event_rule.this["spot_interrupt"] +} diff --git a/modules/karpenter/outputs.tf b/modules/karpenter/outputs.tf index 947de39bfd..164baa142c 
100644 --- a/modules/karpenter/outputs.tf +++ b/modules/karpenter/outputs.tf @@ -1,20 +1,20 @@ ################################################################################ -# IAM Role for Service Account (IRSA) +# Karpenter controller IAM Role ################################################################################ -output "irsa_name" { - description = "The name of the IAM role for service accounts" - value = try(aws_iam_role.irsa[0].name, null) +output "iam_role_name" { + description = "The name of the controller IAM role" + value = try(aws_iam_role.controller[0].name, null) } -output "irsa_arn" { - description = "The Amazon Resource Name (ARN) specifying the IAM role for service accounts" - value = try(aws_iam_role.irsa[0].arn, null) +output "iam_role_arn" { + description = "The Amazon Resource Name (ARN) specifying the controller IAM role" + value = try(aws_iam_role.controller[0].arn, null) } -output "irsa_unique_id" { - description = "Stable and unique string identifying the IAM role for service accounts" - value = try(aws_iam_role.irsa[0].unique_id, null) +output "iam_role_unique_id" { + description = "Stable and unique string identifying the controller IAM role" + value = try(aws_iam_role.controller[0].unique_id, null) } ################################################################################ @@ -49,19 +49,28 @@ output "event_rules" { # Node IAM Role ################################################################################ -output "role_name" { - description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, null) +output "node_iam_role_name" { + description = "The name of the node IAM role" + value = try(aws_iam_role.node[0].name, null) } -output "role_arn" { - description = "The Amazon Resource Name (ARN) specifying the IAM role" - value = try(aws_iam_role.this[0].arn, var.iam_role_arn) +output "node_iam_role_arn" { + description = "The Amazon Resource Name (ARN) specifying the node IAM role" + value = 
try(aws_iam_role.node[0].arn, var.node_iam_role_arn) } -output "role_unique_id" { - description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, null) +output "node_iam_role_unique_id" { + description = "Stable and unique string identifying the node IAM role" + value = try(aws_iam_role.node[0].unique_id, null) +} + +################################################################################ +# Access Entry +################################################################################ + +output "node_access_entry_arn" { + description = "Amazon Resource Name (ARN) of the node Access Entry" + value = try(aws_eks_access_entry.node[0].access_entry_arn, null) } ################################################################################ diff --git a/modules/karpenter/variables.tf b/modules/karpenter/variables.tf index 4a8389671b..3af82d4fc6 100644 --- a/modules/karpenter/variables.tf +++ b/modules/karpenter/variables.tf @@ -1,5 +1,5 @@ variable "create" { - description = "Determines whether to create EKS managed node group or not" + description = "Controls if resources should be created (affects nearly all resources)" type = bool default = true } @@ -17,92 +17,101 @@ variable "cluster_name" { } ################################################################################ -# IAM Role for Service Account (IRSA) +# Karpenter controller IAM Role ################################################################################ -variable "create_irsa" { - description = "Determines whether an IAM role for service accounts is created" +variable "create_iam_role" { + description = "Determines whether an IAM role is created" type = bool default = true } -variable "irsa_name" { - description = "Name of IAM role for service accounts" - type = string - default = null -} - -variable "irsa_policy_name" { - description = "Name of IAM policy for service accounts" +variable "iam_role_name" { + description = "Name of the IAM 
role" type = string - default = null + default = "KarpenterController" } -variable "irsa_use_name_prefix" { - description = "Determines whether the IAM role for service accounts name (`irsa_name`) is used as a prefix" +variable "iam_role_use_name_prefix" { + description = "Determines whether the name of the IAM role (`iam_role_name`) is used as a prefix" type = bool default = true } -variable "irsa_path" { - description = "Path of IAM role for service accounts" +variable "iam_role_path" { + description = "Path of the IAM role" type = string default = "/" } -variable "irsa_description" { - description = "IAM role for service accounts description" +variable "iam_role_description" { + description = "IAM role description" type = string - default = "Karpenter IAM role for service account" + default = "Karpenter controller IAM role" } -variable "irsa_max_session_duration" { +variable "iam_role_max_session_duration" { description = "Maximum API session duration in seconds between 3600 and 43200" type = number default = null } -variable "irsa_permissions_boundary_arn" { - description = "Permissions boundary ARN to use for IAM role for service accounts" +variable "iam_role_permissions_boundary_arn" { + description = "Permissions boundary ARN to use for the IAM role" type = string default = null } -variable "irsa_tags" { - description = "A map of additional tags to add the the IAM role for service accounts" +variable "iam_role_tags" { + description = "A map of additional tags to add to the IAM role" type = map(any) default = {} } -variable "policies" { - description = "Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format" - type = map(string) - default = {} +variable "iam_policy_name" { + description = "Name of the IAM policy" + type = string + default = "KarpenterController" +} + +variable "iam_policy_use_name_prefix" { + description = "Determines whether the name of the IAM policy (`iam_policy_name`) is used as a prefix" + type = bool + default = 
true } -variable "irsa_tag_key" { - description = "Tag key (`{key = value}`) applied to resources launched by Karpenter through the Karpenter provisioner" +variable "iam_policy_path" { + description = "Path of the IAM policy" type = string - default = "karpenter.sh/discovery" + default = "/" } -variable "irsa_tag_values" { - description = "Tag values (`{key = value}`) applied to resources launched by Karpenter through the Karpenter provisioner. Defaults to cluster name when not set." - type = list(string) - default = [] +variable "iam_policy_description" { + description = "IAM policy description" + type = string + default = "Karpenter controller IAM policy" } -variable "irsa_ssm_parameter_arns" { - description = "List of SSM Parameter ARNs that contain AMI IDs launched by Karpenter" +variable "iam_role_policies" { + description = "Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format" + type = map(string) + default = {} +} + +variable "ami_id_ssm_parameter_arns" { + description = "List of SSM Parameter ARNs that Karpenter controller is allowed read access (for retrieving AMI IDs)" type = list(string) - # https://github.com/aws/karpenter/blob/ed9473a9863ca949b61b9846c8b9f33f35b86dbd/pkg/cloudprovider/aws/ami.go#L105-L123 - default = ["arn:aws:ssm:*:*:parameter/aws/service/*"] + default = [] } -variable "irsa_subnet_account_id" { - description = "Account ID of where the subnets Karpenter will utilize resides. 
Used when subnets are shared from another account" - type = string - default = "" +################################################################################ +# IAM Role for Service Account (IRSA) +################################################################################ + +variable "enable_irsa" { + description = "Determines whether to enable support for IAM roles for service accounts" + type = bool + default = false } variable "irsa_oidc_provider_arn" { @@ -123,12 +132,6 @@ variable "irsa_assume_role_condition_test" { default = "StringEquals" } -variable "enable_karpenter_instance_profile_creation" { - description = "Determines whether Karpenter will be allowed to create the IAM instance profile (v1beta1) or if Terraform will (v1alpha1)" - type = bool - default = false -} - ################################################################################ # Node Termination Queue ################################################################################ @@ -164,81 +167,97 @@ variable "queue_kms_data_key_reuse_period_seconds" { } ################################################################################ -# Node IAM Role & Instance Profile +# Node IAM Role ################################################################################ -variable "create_iam_role" { +variable "create_node_iam_role" { description = "Determines whether an IAM role is created or to use an existing IAM role" type = bool default = true } variable "cluster_ip_family" { - description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`" + description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. Note: If `ipv6` is specified, the `AmazonEKS_CNI_IPv6_Policy` must exist in the account. 
This policy is created by the EKS module with `create_cni_ipv6_iam_policy = true`" type = string default = null } -variable "iam_role_arn" { +variable "node_iam_role_arn" { description = "Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false`" type = string default = null } -variable "iam_role_name" { +variable "node_iam_role_name" { description = "Name to use on IAM role created" type = string default = null } -variable "iam_role_use_name_prefix" { +variable "node_iam_role_use_name_prefix" { description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix" type = bool default = true } -variable "iam_role_path" { +variable "node_iam_role_path" { description = "IAM role path" type = string default = "/" } -variable "iam_role_description" { +variable "node_iam_role_description" { description = "Description of the role" type = string default = null } -variable "iam_role_max_session_duration" { +variable "node_iam_role_max_session_duration" { description = "Maximum API session duration in seconds between 3600 and 43200" type = number default = null } -variable "iam_role_permissions_boundary" { +variable "node_iam_role_permissions_boundary" { description = "ARN of the policy that is used to set the permissions boundary for the IAM role" type = string default = null } -variable "iam_role_attach_cni_policy" { +variable "node_iam_role_attach_cni_policy" { description = "Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster" type = bool default = true } -variable "iam_role_additional_policies" { +variable "node_iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" type = map(string) default = {} } -variable "iam_role_tags" { +variable "node_iam_role_tags" { description = "A map of additional tags to add to the IAM role created" type = map(string) default = {} } +################################################################################ +# Access Entry +################################################################################ + +variable "create_access_entry" { + description = "Determines whether an access entry is created for the node IAM role" + type = bool + default = true +} + +variable "access_entry_type" { + description = "Type of the access entry. `EC2_LINUX`, `FARGATE_LINUX`, or `EC2_WINDOWS`; defaults to `EC2_LINUX`" + type = string + default = "EC2_LINUX" +} + ################################################################################ # Node IAM Instance Profile ################################################################################ @@ -246,7 +265,7 @@ variable "iam_role_tags" { variable "create_instance_profile" { description = "Whether to create an IAM instance profile" type = bool - default = true + default = false } ################################################################################ diff --git a/modules/karpenter/versions.tf b/modules/karpenter/versions.tf index 01d187af62..63e9319d92 100644 --- a/modules/karpenter/versions.tf +++ b/modules/karpenter/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.34" } } } diff --git a/modules/self-managed-node-group/README.md 
b/modules/self-managed-node-group/README.md index 8964144994..83e44d0bc5 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -42,14 +42,14 @@ module "self_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 5.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.34 | ## Modules @@ -63,6 +63,7 @@ module "self_managed_node_group" { |------|------| | [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource | | [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource | +| [aws_eks_access_entry.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource | | [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | @@ -93,6 +94,7 @@ module "self_managed_node_group" { | [context](#input\_context) | Reserved | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no | +| [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the nodegroup | `bool` 
| `true` | no | | [create\_autoscaling\_group](#input\_create\_autoscaling\_group) | Determines whether to create autoscaling group or not | `bool` | `true` | no | | [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no | | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no | @@ -116,6 +118,7 @@ module "self_managed_node_group" { | [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `{}` | no | | [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no | | [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | +| [iam\_role\_arn](#input\_iam\_role\_arn) | ARN of the IAM role used by the instance profile. Required when `create_access_entry = true` and `create_iam_instance_profile = false` | `string` | `null` | no | | [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | | [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | @@ -125,6 +128,7 @@ module "self_managed_node_group" { | [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no | | [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no | | [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no | +| [instance\_maintenance\_policy](#input\_instance\_maintenance\_policy) | If this block is configured, add a instance maintenance policy to the specified Auto Scaling group | `any` | `{}` | no | | [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `{}` | no | | [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` |
{
"preferences": {
"min_healthy_percentage": 66
},
"strategy": "Rolling"
}
| no | | [instance\_requirements](#input\_instance\_requirements) | The attribute requirements for the type of instance. If present then `instance_type` cannot be present | `any` | `{}` | no | @@ -178,6 +182,7 @@ module "self_managed_node_group" { | Name | Description | |------|-------------| +| [access\_entry\_arn](#output\_access\_entry\_arn) | Amazon Resource Name (ARN) of the Access Entry | | [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this autoscaling group | | [autoscaling\_group\_availability\_zones](#output\_autoscaling\_group\_availability\_zones) | The availability zones of the autoscaling group | | [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index 3b04db8b80..eb3192970b 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -438,6 +438,15 @@ resource "aws_autoscaling_group" "this" { } } + dynamic "instance_maintenance_policy" { + for_each = length(var.instance_maintenance_policy) > 0 ? [var.instance_maintenance_policy] : [] + + content { + min_healthy_percentage = instance_maintenance_policy.value.min_healthy_percentage + max_healthy_percentage = instance_maintenance_policy.value.max_healthy_percentage + } + } + dynamic "instance_refresh" { for_each = length(var.instance_refresh) > 0 ? 
[var.instance_refresh] : [] @@ -446,11 +455,14 @@ resource "aws_autoscaling_group" "this" { for_each = try([instance_refresh.value.preferences], []) content { - checkpoint_delay = try(preferences.value.checkpoint_delay, null) - checkpoint_percentages = try(preferences.value.checkpoint_percentages, null) - instance_warmup = try(preferences.value.instance_warmup, null) - min_healthy_percentage = try(preferences.value.min_healthy_percentage, null) - skip_matching = try(preferences.value.skip_matching, null) + checkpoint_delay = try(preferences.value.checkpoint_delay, null) + checkpoint_percentages = try(preferences.value.checkpoint_percentages, null) + instance_warmup = try(preferences.value.instance_warmup, null) + max_healthy_percentage = try(preferences.value.max_healthy_percentage, null) + min_healthy_percentage = try(preferences.value.min_healthy_percentage, null) + scale_in_protected_instances = try(preferences.value.scale_in_protected_instances, null) + skip_matching = try(preferences.value.skip_matching, null) + standby_instances = try(preferences.value.standby_instances, null) } } @@ -686,28 +698,6 @@ resource "aws_autoscaling_group" "this" { } } -################################################################################ -# Autoscaling group schedule -################################################################################ - -resource "aws_autoscaling_schedule" "this" { - for_each = { for k, v in var.schedules : k => v if var.create && var.create_schedule } - - scheduled_action_name = each.key - autoscaling_group_name = aws_autoscaling_group.this[0].name - - min_size = try(each.value.min_size, null) - max_size = try(each.value.max_size, null) - desired_capacity = try(each.value.desired_size, null) - start_time = try(each.value.start_time, null) - end_time = try(each.value.end_time, null) - time_zone = try(each.value.time_zone, null) - - # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] - # Cron examples: 
https://crontab.guru/examples.html - recurrence = try(each.value.recurrence, null) -} - ################################################################################ # IAM Role ################################################################################ @@ -727,7 +717,7 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"] + identifiers = ["ec2.amazonaws.com"] } } } @@ -780,3 +770,39 @@ resource "aws_iam_instance_profile" "this" { create_before_destroy = true } } + +################################################################################ +# Access Entry +################################################################################ + +resource "aws_eks_access_entry" "this" { + count = var.create && var.create_access_entry ? 1 : 0 + + cluster_name = var.cluster_name + principal_arn = var.create_iam_instance_profile ? aws_iam_role.this[0].arn : var.iam_role_arn + type = var.platform == "windows" ? 
"EC2_WINDOWS" : "EC2_LINUX" + + tags = var.tags +} + +################################################################################ +# Autoscaling group schedule +################################################################################ + +resource "aws_autoscaling_schedule" "this" { + for_each = { for k, v in var.schedules : k => v if var.create && var.create_schedule } + + scheduled_action_name = each.key + autoscaling_group_name = aws_autoscaling_group.this[0].name + + min_size = try(each.value.min_size, null) + max_size = try(each.value.max_size, null) + desired_capacity = try(each.value.desired_size, null) + start_time = try(each.value.start_time, null) + end_time = try(each.value.end_time, null) + time_zone = try(each.value.time_zone, null) + + # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] + # Cron examples: https://crontab.guru/examples.html + recurrence = try(each.value.recurrence, null) +} diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf index 5c83497218..aba5b13233 100644 --- a/modules/self-managed-node-group/outputs.tf +++ b/modules/self-managed-node-group/outputs.tf @@ -81,15 +81,6 @@ output "autoscaling_group_vpc_zone_identifier" { value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, null) } -################################################################################ -# Autoscaling Group Schedule -################################################################################ - -output "autoscaling_group_schedule_arns" { - description = "ARNs of autoscaling group schedules" - value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } -} - ################################################################################ # IAM Role ################################################################################ @@ -128,6 +119,24 @@ output "iam_instance_profile_unique" { value = try(aws_iam_instance_profile.this[0].unique_id, null) } 
+################################################################################ +# Access Entry +################################################################################ + +output "access_entry_arn" { + description = "Amazon Resource Name (ARN) of the Access Entry" + value = try(aws_eks_access_entry.this[0].access_entry_arn, null) +} + +################################################################################ +# Autoscaling Group Schedule +################################################################################ + +output "autoscaling_group_schedule_arns" { + description = "ARNs of autoscaling group schedules" + value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } +} + ################################################################################ # Additional ################################################################################ diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index 7e5d0cecb6..c6c5086a57 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -476,6 +476,12 @@ variable "initial_lifecycle_hooks" { default = [] } +variable "instance_maintenance_policy" { + description = "If this block is configured, add an instance maintenance policy to the specified Auto Scaling group" + type = any + default = {} +} + variable "instance_refresh" { description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated" type = any @@ -517,22 +523,6 @@ variable "autoscaling_group_tags" { default = {} } -################################################################################ -# Autoscaling group schedule -################################################################################ - -variable "create_schedule" { - description = "Determines whether to create autoscaling group schedule or not" - type = bool - default = true -} - -variable "schedules" { - 
description = "Map of autoscaling group schedule to create" - type = map(any) - default = {} -} - ################################################################################ # IAM Role ################################################################################ @@ -602,3 +592,35 @@ variable "iam_role_tags" { type = map(string) default = {} } + +################################################################################ +# Access Entry +################################################################################ + +variable "create_access_entry" { + description = "Determines whether an access entry is created for the IAM role used by the nodegroup" + type = bool + default = true +} + +variable "iam_role_arn" { + description = "ARN of the IAM role used by the instance profile. Required when `create_access_entry = true` and `create_iam_instance_profile = false`" + type = string + default = null +} + +################################################################################ +# Autoscaling group schedule +################################################################################ + +variable "create_schedule" { + description = "Determines whether to create autoscaling group schedule or not" + type = bool + default = true +} + +variable "schedules" { + description = "Map of autoscaling group schedule to create" + type = map(any) + default = {} +} diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf index 01d187af62..63e9319d92 100644 --- a/modules/self-managed-node-group/versions.tf +++ b/modules/self-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.34" } } } diff --git a/node_groups.tf b/node_groups.tf index db78861a81..ddfc20fa09 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -40,7 +40,6 @@ resource "time_sleep" "this" { 
################################################################################ # EKS IPV6 CNI Policy -# TODO - hopefully AWS releases a managed policy which can replace this # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy ################################################################################ @@ -62,7 +61,7 @@ data "aws_iam_policy_document" "cni_ipv6_policy" { statement { sid = "CreateTags" actions = ["ec2:CreateTags"] - resources = ["arn:${data.aws_partition.current.partition}:ec2:*:*:network-interface/*"] + resources = ["arn:${local.partition}:ec2:*:*:network-interface/*"] } } @@ -363,6 +362,7 @@ module "eks_managed_node_group" { # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.eks_managed_node_group_defaults, "iam_role_additional_policies", {})) + # Autoscaling group schedule create_schedule = try(each.value.create_schedule, var.eks_managed_node_group_defaults.create_schedule, true) schedules = try(each.value.schedules, var.eks_managed_node_group_defaults.schedules, {}) @@ -423,14 +423,12 @@ module "self_managed_node_group" { metrics_granularity = try(each.value.metrics_granularity, var.self_managed_node_group_defaults.metrics_granularity, null) service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_managed_node_group_defaults.service_linked_role_arn, null) - initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, []) - instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, local.default_instance_refresh) - use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false) - mixed_instances_policy = try(each.value.mixed_instances_policy, 
var.self_managed_node_group_defaults.mixed_instances_policy, null) - warm_pool = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {}) - - create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, true) - schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {}) + initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, []) + instance_maintenance_policy = try(each.value.instance_maintenance_policy, var.self_managed_node_group_defaults.instance_maintenance_policy, {}) + instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, local.default_instance_refresh) + use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false) + mixed_instances_policy = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null) + warm_pool = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {}) delete_timeout = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null) autoscaling_group_tags = try(each.value.autoscaling_group_tags, var.self_managed_node_group_defaults.autoscaling_group_tags, {}) @@ -499,6 +497,14 @@ module "self_managed_node_group" { # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.self_managed_node_group_defaults, "iam_role_additional_policies", {})) + # Access entry + create_access_entry = try(each.value.create_access_entry, var.self_managed_node_group_defaults.create_access_entry, true) + iam_role_arn = try(each.value.iam_role_arn, var.self_managed_node_group_defaults.iam_role_arn, null) + + # Autoscaling group schedule + create_schedule = 
try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, true) + schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {}) + # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, []))) cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.self_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null diff --git a/outputs.tf b/outputs.tf index ea02a3a8cc..adba15c10c 100644 --- a/outputs.tf +++ b/outputs.tf @@ -52,6 +52,15 @@ output "cluster_primary_security_group_id" { value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, null) } +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = aws_eks_access_entry.this +} + ################################################################################ # KMS Key ################################################################################ @@ -205,19 +214,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = compact([for group in module.self_managed_node_group : group.autoscaling_group_name]) } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "[DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in 
cluster node groups/fargate profiles" - value = templatefile("${path.module}/templates/aws_auth_cm.tpl", - { - eks_managed_role_arns = distinct(compact([for group in module.eks_managed_node_group : group.iam_role_arn])) - self_managed_role_arns = distinct(compact([for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"])) - win32_self_managed_role_arns = distinct(compact([for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"])) - fargate_profile_pod_execution_role_arns = distinct(compact([for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn])) - } - ) -} diff --git a/templates/aws_auth_cm.tpl b/templates/aws_auth_cm.tpl deleted file mode 100644 index 73a898e966..0000000000 --- a/templates/aws_auth_cm.tpl +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: aws-auth - namespace: kube-system -data: - mapRoles: | -%{ for role in eks_managed_role_arns ~} - - rolearn: ${role} - username: system:node:{{EC2PrivateDNSName}} - groups: - - system:bootstrappers - - system:nodes -%{ endfor ~} -%{ for role in self_managed_role_arns ~} - - rolearn: ${role} - username: system:node:{{EC2PrivateDNSName}} - groups: - - system:bootstrappers - - system:nodes -%{ endfor ~} -%{ for role in win32_self_managed_role_arns ~} - - rolearn: ${role} - username: system:node:{{EC2PrivateDNSName}} - groups: - - eks:kube-proxy-windows - - system:bootstrappers - - system:nodes -%{ endfor ~} -%{ for role in fargate_profile_pod_execution_role_arns ~} - - rolearn: ${role} - username: system:node:{{SessionName}} - groups: - - system:bootstrappers - - system:nodes - - system:node-proxier -%{ endfor ~} diff --git a/templates/windows_user_data.tpl b/templates/windows_user_data.tpl index 5000850604..9721d3cc33 100644 --- a/templates/windows_user_data.tpl +++ b/templates/windows_user_data.tpl @@ -1,5 +1,8 @@ +%{ if enable_bootstrap_user_data ~} +%{ endif ~} 
${pre_bootstrap_user_data ~} +%{ if enable_bootstrap_user_data ~} [string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS" [string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1' [string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName" @@ -7,3 +10,4 @@ ${pre_bootstrap_user_data ~} $LastError = if ($?) { 0 } else { $Error[0].Exception.HResult } ${post_bootstrap_user_data ~} +%{ endif ~} diff --git a/variables.tf b/variables.tf index 810cee2bda..83776d6f4f 100644 --- a/variables.tf +++ b/variables.tf @@ -1,5 +1,5 @@ variable "create" { - description = "Controls if EKS resources should be created (affects nearly all resources)" + description = "Controls if resources should be created (affects nearly all resources)" type = bool default = true } @@ -38,6 +38,12 @@ variable "cluster_enabled_log_types" { default = ["audit", "api", "authenticator"] } +variable "authentication_mode" { + description = "The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP`" + type = string + default = "API_AND_CONFIG_MAP" +} + variable "cluster_additional_security_group_ids" { description = "List of additional, externally created security group IDs to attach to the cluster control plane" type = list(string) @@ -130,6 +136,22 @@ variable "cluster_timeouts" { default = {} } +################################################################################ +# Access Entry +################################################################################ + +variable "access_entries" { + description = "Map of access entries to add to the cluster" + type = any + default = {} +} + +variable "enable_cluster_creator_admin_permissions" { + description = "Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry" + type = bool + default = false +} + ################################################################################ # KMS Key 
################################################################################ @@ -153,15 +175,15 @@ } variable "enable_kms_key_rotation" { - description = "Specifies whether key rotation is enabled. Defaults to `true`" + description = "Specifies whether key rotation is enabled" type = bool default = true } variable "kms_key_enable_default_policy" { - description = "Specifies whether to enable the default key policy. Defaults to `false`" + description = "Specifies whether to enable the default key policy" type = bool - default = false + default = true } variable "kms_key_owners" { @@ -228,6 +250,12 @@ variable "cloudwatch_log_group_kms_key_id" { default = null } +variable "cloudwatch_log_group_class" { + description = "Specifies the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS`" + type = string + default = null +} + variable "cloudwatch_log_group_tags" { description = "A map of additional tags to add to the cloudwatch log group created" type = map(string) default = {} @@ -428,14 +456,6 @@ variable "iam_role_additional_policies" { default = {} } -# TODO - hopefully this can be removed once the AWS endpoint is named properly in China -# https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1904 -variable "cluster_iam_role_dns_suffix" { - description = "Base DNS domain name for the current partition (e.g., amazonaws.com in AWS Commercial, amazonaws.com.cn in AWS China)" - type = string - default = null -} - variable "iam_role_tags" { description = "A map of additional tags to add to the IAM role created" type = map(string) default = {} @@ -557,55 +577,3 @@ variable "putin_khuylo" { type = bool default = true } - -################################################################################ -# aws-auth configmap -################################################################################ - -variable "manage_aws_auth_configmap" { - description = "Determines whether to manage the 
aws-auth configmap" - type = bool - default = false -} - -variable "create_aws_auth_configmap" { - description = "Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap`" - type = bool - default = false -} - -variable "aws_auth_node_iam_role_arns_non_windows" { - description = "List of non-Windows based node IAM role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_node_iam_role_arns_windows" { - description = "List of Windows based node IAM role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_fargate_profile_pod_execution_role_arns" { - description = "List of Fargate profile pod execution role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_roles" { - description = "List of role maps to add to the aws-auth configmap" - type = list(any) - default = [] -} - -variable "aws_auth_users" { - description = "List of user maps to add to the aws-auth configmap" - type = list(any) - default = [] -} - -variable "aws_auth_accounts" { - description = "List of account maps to add to the aws-auth configmap" - type = list(any) - default = [] -} diff --git a/versions.tf b/versions.tf index e3bdcbf6d7..a7084a7252 100644 --- a/versions.tf +++ b/versions.tf @@ -1,19 +1,15 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.34" } tls = { source = "hashicorp/tls" version = ">= 3.0" } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } time = { source = "hashicorp/time" version = ">= 0.9"