From d856d034636230130401a3bbc30105fc30ab1501 Mon Sep 17 00:00:00 2001 From: Tim Birkett Date: Wed, 11 Mar 2020 09:22:26 +0000 Subject: [PATCH] fix: Add wait_for_cluster to aws_auth module --- README.md | 2 +- modules/aws_auth/README.md | 3 +++ modules/aws_auth/aws_auth.tf | 20 ++++++++++++++++++++ modules/aws_auth/variables.tf | 6 ++++++ modules/aws_auth/versions.tf | 1 + modules/worker_groups/data.tf | 10 +++++----- variables.tf | 4 ++-- 7 files changed, 38 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 1acba21..feb273a 100644 --- a/README.md +++ b/README.md @@ -112,7 +112,7 @@ No provider. | subnets | A list of subnets to place the EKS cluster and workers within. | `list(string)` | n/a | yes | | tags | A map of tags to add to all resources. | `map(string)` | `{}` | no | | vpc\_id | VPC where the cluster and workers will be deployed. | `string` | n/a | yes | -| wait\_for\_cluster\_cmd | DEPREDATED: Unused variable, no longer required. Maintained for backwards compatibility with upstream. | `string` | `""` | no | +| wait\_for\_cluster\_cmd | Custom local-exec command to execute for determining if the eks cluster is healthy. Cluster endpoint will be available as an environment variable called ENDPOINT | `string` | `"until wget --no-check-certificate -O - -q $ENDPOINT/healthz \u003e/dev/null; do sleep 4; done"` | no | | worker\_additional\_security\_group\_ids | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no | | worker\_ami\_name\_filter | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no | | worker\_ami\_name\_filter\_windows | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. 
| `string` | `""` | no | diff --git a/modules/aws_auth/README.md b/modules/aws_auth/README.md index a9925f6..de15288 100644 --- a/modules/aws_auth/README.md +++ b/modules/aws_auth/README.md @@ -5,7 +5,9 @@ | Name | Version | |------|---------| +| aws | >= 2.44.0 | | kubernetes | >= 1.6.2 | +| null | >= 2.1 | | template | >= 2.1 | ## Inputs @@ -18,6 +20,7 @@ | map\_instances | IAM instance roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format. |
list(object({
instance_role_arn = string
platform = string
}))
| `[]` | no | | map\_roles | Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | | map\_users | Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format. |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | +| wait\_for\_cluster\_cmd | Custom local-exec command to execute for determining if the eks cluster is healthy. Cluster endpoint will be available as an environment variable called ENDPOINT | `string` | `"until wget --no-check-certificate -O - -q $ENDPOINT/healthz \u003e/dev/null; do sleep 4; done"` | no | ## Outputs diff --git a/modules/aws_auth/aws_auth.tf b/modules/aws_auth/aws_auth.tf index 62ff882..71a0ca0 100644 --- a/modules/aws_auth/aws_auth.tf +++ b/modules/aws_auth/aws_auth.tf @@ -5,9 +5,29 @@ data "template_file" "map_instances" { vars = var.map_instances[count.index] } +data "aws_eks_cluster" "this" { + name = var.cluster_name +} + +resource "null_resource" "wait_for_cluster" { + count = var.create_eks && var.manage_aws_auth ? 1 : 0 + + provisioner "local-exec" { + environment = { + ENDPOINT = data.aws_eks_cluster.this.endpoint + } + + command = var.wait_for_cluster_cmd + } +} + resource "kubernetes_config_map" "aws_auth" { count = var.create_eks && var.manage_aws_auth ? 1 : 0 + depends_on = [ + null_resource.wait_for_cluster + ] + metadata { name = "aws-auth" namespace = "kube-system" diff --git a/modules/aws_auth/variables.tf b/modules/aws_auth/variables.tf index 8f10bdc..4f91861 100644 --- a/modules/aws_auth/variables.tf +++ b/modules/aws_auth/variables.tf @@ -43,3 +43,9 @@ variable "map_users" { })) default = [] } + +variable "wait_for_cluster_cmd" { + description = "Custom local-exec command to execute for determining if the eks cluster is healthy. 
Cluster endpoint will be available as an environment variable called ENDPOINT" + type = string + default = "until wget --no-check-certificate -O - -q $ENDPOINT/healthz >/dev/null; do sleep 4; done" +} diff --git a/modules/aws_auth/versions.tf b/modules/aws_auth/versions.tf index f466cf7..4354fcf 100644 --- a/modules/aws_auth/versions.tf +++ b/modules/aws_auth/versions.tf @@ -3,6 +3,7 @@ terraform { required_providers { aws = ">= 2.44.0" + null = ">= 2.1" template = ">= 2.1" kubernetes = ">= 1.6.2" } diff --git a/modules/worker_groups/data.tf b/modules/worker_groups/data.tf index 6ad7ecc..d78e107 100644 --- a/modules/worker_groups/data.tf +++ b/modules/worker_groups/data.tf @@ -1,13 +1,13 @@ locals { - worker_ami_name_filter = var.worker_ami_name_filter != "" ? var.worker_ami_name_filter : "amazon-eks-node-${data.aws_eks_cluster.cluster.version}-v*" + worker_ami_name_filter = var.worker_ami_name_filter != "" ? var.worker_ami_name_filter : "amazon-eks-node-${data.aws_eks_cluster.this.version}-v*" # Windows nodes are available from k8s 1.14. If cluster version is less than 1.14, fix ami filter to some constant to not fail on 'terraform plan'. worker_ami_name_filter_windows = (var.worker_ami_name_filter_windows != "" ? - var.worker_ami_name_filter_windows : "Windows_Server-2019-English-Core-EKS_Optimized-${tonumber(data.aws_eks_cluster.cluster.version) >= 1.14 ? data.aws_eks_cluster.cluster.version : 1.14}-*" + var.worker_ami_name_filter_windows : "Windows_Server-2019-English-Core-EKS_Optimized-${tonumber(data.aws_eks_cluster.this.version) >= 1.14 ? 
data.aws_eks_cluster.this.version : 1.14}-*" ) } -data "aws_eks_cluster" "cluster" { +data "aws_eks_cluster" "this" { name = var.cluster_name } @@ -68,8 +68,8 @@ data "template_file" "launch_template_userdata" { vars = merge({ platform = each.value["platform"] cluster_name = var.cluster_name - endpoint = data.aws_eks_cluster.cluster.endpoint - cluster_auth_base64 = data.aws_eks_cluster.cluster.certificate_authority.0.data + endpoint = data.aws_eks_cluster.this.endpoint + cluster_auth_base64 = data.aws_eks_cluster.this.certificate_authority.0.data pre_userdata = each.value["pre_userdata"] additional_userdata = each.value["additional_userdata"] bootstrap_extra_args = each.value["bootstrap_extra_args"] diff --git a/variables.tf b/variables.tf index 964b465..09e3147 100644 --- a/variables.tf +++ b/variables.tf @@ -205,9 +205,9 @@ variable "cluster_delete_timeout" { } variable "wait_for_cluster_cmd" { - description = "DEPREDATED: Unused variable, no longer required. Maintained for backwards compatibility with upstream." + description = "Custom local-exec command to execute for determining if the eks cluster is healthy. Cluster endpoint will be available as an environment variable called ENDPOINT" type = string - default = "" + default = "until wget --no-check-certificate -O - -q $ENDPOINT/healthz >/dev/null; do sleep 4; done" } variable "cluster_create_security_group" {