diff --git a/.github/workflows/e2e-parallel-destroy.yml b/.github/workflows/e2e-parallel-destroy.yml index 615cff4163..e00efac3cc 100644 --- a/.github/workflows/e2e-parallel-destroy.yml +++ b/.github/workflows/e2e-parallel-destroy.yml @@ -9,7 +9,7 @@ on: default: "true" env: - DEFAULT_DEPLOY_ORDER: "module.e2e-test.module.aws_vpc,module.e2e-test.module.eks-blueprints,module.e2e-test.module.eks-blueprints-kubernetes-addons" + DEFAULT_DEPLOY_ORDER: "module.e2e_test.module.aws_vpc,module.e2e_test.module.eks_blueprints,module.e2e_test.module.eks_blueprints_kubernetes_addons" jobs: deploy: @@ -40,10 +40,10 @@ jobs: tenant_name: "private" deployment_order: [ - "module.e2e-test.module.aws_vpc", - "module.e2e-test.module.vpc_endpoint_gateway", - "module.e2e-test.module.vpc_endpoints", - "module.e2e-test.module.eks-blueprints", + "module.e2e_test.module.aws_vpc", + "module.e2e_test.module.vpc_endpoint_gateway", + "module.e2e_test.module.vpc_endpoints", + "module.e2e_test.module.eks_blueprints", ] - example_path: examples/game-tech/agones-game-controller tenant_name: "agones" @@ -51,11 +51,11 @@ jobs: tenant_name: "nginx" deployment_order: [ - "module.e2e-test.module.aws_vpc", - "module.e2e-test.module.eks-blueprints", - "module.e2e-test.module.eks-blueprints-kubernetes-addons", - "module.e2e-test.module.aws_load_balancer_controller", - "module.e2e-test.module.ingress_nginx", + "module.e2e_test.module.aws_vpc", + "module.e2e_test.module.eks_blueprints", + "module.e2e_test.module.eks_blueprints_kubernetes_addons", + "module.e2e_test.module.aws_load_balancer_controller", + "module.e2e_test.module.ingress_nginx", ] - example_path: examples/karpenter tenant_name: "karpenter" @@ -74,17 +74,17 @@ jobs: - name: Pre Setup id: pre-setup run: | - mkdir -p deploy/e2e/gh-e2e-test + mkdir -p deploy/e2e-test if [[ ${{ matrix.example_path }} == deploy/* ]] then echo "Skipping pre-setup for ${{ matrix.example_path }}" - cp -R ${{ matrix.example_path }}/* deploy/e2e/gh-e2e-test/ + cp -R ${{ matrix.example_path }}/* deploy/e2e-test/ else echo "Running pre-setup for ${{ matrix.example_path }}" - cp -R deploy/e2e/gh-e2e-template/* deploy/e2e/gh-e2e-test/ - sed -i "s!REPLACE_ME!${{ matrix.tenant_name }}!g" deploy/e2e/gh-e2e-test/base.tfvars - sed -i "s!TF_STATE_PATH!${{ matrix.example_path }}!g" deploy/e2e/gh-e2e-test/backend.conf - sed -i "s!EXAMPLE_PATH!${{ matrix.example_path }}!g" deploy/e2e/gh-e2e-test/main.tf + cp -R deploy/e2e-template/* deploy/e2e-test/ + sed -i "s!REPLACE_ME!${{ matrix.tenant_name }}!g" deploy/e2e-test/base.tfvars + sed -i "s!TF_STATE_PATH!${{ matrix.example_path }}!g" deploy/e2e-test/backend.conf + sed -i "s!EXAMPLE_PATH!${{ matrix.example_path }}!g" deploy/e2e-test/main.tf fi continue-on-error: false @@ -123,24 +123,24 @@ jobs: - name: Terraform Init id: init run: terraform init -backend-config backend.conf -reconfigure - working-directory: deploy/e2e/gh-e2e-test + working-directory: deploy/e2e-test continue-on-error: false - name: Terraform Validate id: validate - working-directory: deploy/e2e/gh-e2e-test + working-directory: deploy/e2e-test run: terraform validate -no-color continue-on-error: false - name: Terraform Plan Destroy id: plan-destroy - working-directory: deploy/e2e/gh-e2e-test + working-directory: deploy/e2e-test run: terraform plan -destroy -var-file base.tfvars -no-color continue-on-error: false - name: Terraform Destroy id: destroy - working-directory: deploy/e2e/gh-e2e-test + working-directory: deploy/e2e-test run: | reverse_array=$(echo ${{ env.DEPLOYMENT_ORDER }} | awk -F, 
'{for (i=NF; i>0; --i) printf "%s%s", $i, (i>1?",":"\n")}') for element in ${reverse_array//,/ } do terraform destroy -target="$element" -var-file base.tfvars -no-color -auto-approve done terraform destroy -var-file base.tfvars -no-color -auto-approve continue-on-error: false diff --git a/.github/workflows/vpc-test-cleanup.yml b/.github/workflows/vpc-test-cleanup.yml deleted file mode 100644 index e89bcdb4a0..0000000000 --- a/.github/workflows/vpc-test-cleanup.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: vpc-cleanup - -on: - workflow_dispatch: - -jobs: - deploy: - name: Destroy VPC - runs-on: ubuntu-latest - - # These permissions are needed to interact with GitHub's OIDC Token endpoint. - permissions: - id-token: write - contents: read - - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Configure AWS credentials from Test account - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.ROLE_TO_ASSUME }} - aws-region: us-west-2 - role-duration-seconds: 1800 - role-session-name: GithubActions-Session - - - name: Terraform Job - uses: hashicorp/setup-terraform@v1 - with: - terraform_version: 1.0.11 - - - name: Terraform Init - id: init - run: terraform init - working-directory: deploy/e2e/vpc - continue-on-error: false - - - name: Terraform Plan Destroy - id: plan-destroy - working-directory: deploy/e2e/vpc - run: terraform plan -no-color -destroy - continue-on-error: false - - - name: Terraform Destroy - id: destroy - working-directory: deploy/e2e/vpc - run: terraform destroy -no-color -auto-approve - continue-on-error: false diff --git a/.github/workflows/vpc-test.yml b/.github/workflows/vpc-test.yml deleted file mode 100644 index 48a0b3fe16..0000000000 --- a/.github/workflows/vpc-test.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: vpc-test - -on: - workflow_dispatch: - -jobs: - deploy: - name: Create VPC - runs-on: ubuntu-latest - - # These permissions are needed to interact with GitHub's OIDC Token endpoint. 
- permissions: - id-token: write - contents: read - - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Configure AWS credentials from Test account - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.ROLE_TO_ASSUME }} - aws-region: us-west-2 - role-duration-seconds: 1800 - role-session-name: GithubActions-Session - - - name: Terraform Job - uses: hashicorp/setup-terraform@v1 - with: - terraform_version: 1.0.11 - - - name: Terraform Fmt - id: fmt - run: terraform fmt -check -recursive -list -no-color - continue-on-error: false - - - name: Terraform Init - id: init - run: terraform init -backend-config backend.conf -reconfigure - working-directory: deploy/e2e/vpc - continue-on-error: false - - - name: Terraform Validate - id: validate - working-directory: deploy/e2e/vpc - run: terraform validate -no-color - continue-on-error: false - - - name: Terraform Plan - id: plan - working-directory: deploy/e2e/vpc - run: terraform plan -var-file base.tfvars -no-color - continue-on-error: false - - - name: Terraform Apply - id: apply - working-directory: deploy/e2e/vpc - run: terraform apply -var-file base.tfvars -no-color -auto-approve - continue-on-error: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 33da9fe0f6..a33bc906e2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,9 +10,26 @@ repos: - id: detect-aws-credentials args: ['--allow-missing-credentials'] - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.68.1 + rev: v1.70.1 hooks: - id: terraform_fmt + - id: terraform_docs + args: + - '--args=--lockfile=false' - id: terraform_validate + exclude: deploy - id: terraform_tflint -default_stages: [commit, push] + args: + - '--args=--only=terraform_deprecated_interpolation' + - '--args=--only=terraform_deprecated_index' + - '--args=--only=terraform_unused_declarations' + - '--args=--only=terraform_comment_syntax' + - '--args=--only=terraform_documented_outputs' + - '--args=--only=terraform_documented_variables' + - '--args=--only=terraform_typed_variables' + - '--args=--only=terraform_module_pinned_source' + - '--args=--only=terraform_naming_convention' + - '--args=--only=terraform_required_version' + - '--args=--only=terraform_required_providers' + - '--args=--only=terraform_standard_module_structure' + - '--args=--only=terraform_workspace_remote' diff --git a/.tflint.hcl b/.tflint.hcl deleted file mode 100644 index bfee7e8661..0000000000 --- a/.tflint.hcl +++ /dev/null @@ -1,66 +0,0 @@ -# https://github.com/terraform-linters/tflint/blob/master/docs/user-guide/module-inspection.md -# borrowed & modified indefinitely from https://github.com/ksatirli/building-infrastructure-you-can-mostly-trust/blob/main/.tflint.hcl - -plugin "aws" { - enabled = true - version = "0.13.3" - source = "github.com/terraform-linters/tflint-ruleset-aws" -} - -config { - module = false - force = false -} - -rule "terraform_required_providers" { - enabled = true -} - -rule "terraform_required_version" { - enabled = true -} - -rule "terraform_naming_convention" { - enabled = true - format = "snake_case" -} - -rule "terraform_typed_variables" { - enabled = true -} - -rule "terraform_unused_declarations" { - enabled = true -} - -rule "terraform_comment_syntax" { - enabled = true -} - -rule "terraform_deprecated_index" { - enabled = true -} - -rule "terraform_deprecated_interpolation" { - enabled = true -} - -rule "terraform_documented_outputs" { - enabled = true -} - -rule 
"terraform_documented_variables" { - enabled = true -} - -rule "terraform_module_pinned_source" { - enabled = true -} - -rule "terraform_standard_module_structure" { - enabled = true -} - -rule "terraform_workspace_remote" { - enabled = true -} diff --git a/README.md b/README.md index 6cd2568acf..740c3e7397 100644 --- a/README.md +++ b/README.md @@ -69,13 +69,13 @@ module "eks_blueprints_kubernetes_addons" { The code above will provision the following: -✅ A new EKS Cluster with a managed node group. -✅ Amazon EKS add-ons `vpc-cni`, `CoreDNS`, `kube-proxy`, and `aws-ebs-csi-driver`. -✅ `Cluster Autoscaler` and `Metrics Server` for scaling your workloads. -✅ `Fluent Bit` for routing logs. -✅ `AWS Load Balancer Controller` for distributing traffic. -✅ `Argocd` for declarative GitOps CD for Kubernetes. -✅ `Prometheus` for observability. +✅ A new EKS Cluster with a managed node group. +✅ Amazon EKS add-ons `vpc-cni`, `CoreDNS`, `kube-proxy`, and `aws-ebs-csi-driver`. +✅ `Cluster Autoscaler` and `Metrics Server` for scaling your workloads. +✅ `Fluent Bit` for routing logs. +✅ `AWS Load Balancer Controller` for distributing traffic. +✅ `Argocd` for declarative GitOps CD for Kubernetes. +✅ `Prometheus` for observability. ## Add-ons @@ -105,27 +105,28 @@ For architectural details, step-by-step instructions, and customization options, If you are interested in contributing to EKS Blueprints, see the [Contribution guide](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/CONTRIBUTING.md). --- - + + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | | [http](#requirement\_http) | 2.4.1 | -| [kubectl](#requirement\_kubectl) | >= 1.7.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | -| [local](#requirement\_local) | 2.1.0 | -| [null](#requirement\_null) | 3.1.0 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [local](#requirement\_local) | >= 2.1 | +| [null](#requirement\_null) | >= 3.1 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | | [http](#provider\_http) | 2.4.1 | -| [kubernetes](#provider\_kubernetes) | >= 2.7.1 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -222,6 +223,8 @@ If you are interested in contributing to EKS Blueprints, see the [Contribution g | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | EKS Control Plane Security Group ID | | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | +| [eks\_cluster\_certificate\_authority\_data](#output\_eks\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | +| [eks\_cluster\_endpoint](#output\_eks\_cluster\_endpoint) | Endpoint for your Kubernetes API server | | [eks\_cluster\_id](#output\_eks\_cluster\_id) | Amazon EKS Cluster Name | | [eks\_cluster\_status](#output\_eks\_cluster\_status) | Amazon EKS Cluster Name | | [eks\_oidc\_issuer\_url](#output\_eks\_oidc\_issuer\_url) | The URL on the EKS cluster OIDC Issuer | @@ -250,8 +253,7 @@ 
If you are interested in contributing to EKS Blueprints, see the [Contribution g | [windows\_node\_group\_aws\_auth\_config\_map](#output\_windows\_node\_group\_aws\_auth\_config\_map) | Windows node groups AWS auth map | | [worker\_node\_security\_group\_arn](#output\_worker\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the worker node shared security group | | [worker\_node\_security\_group\_id](#output\_worker\_node\_security\_group\_id) | ID of the worker node shared security group | - - + ## Security @@ -259,4 +261,4 @@ See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more inform ## License -This library is licensed under the MIT-0 License. See the LICENSE file. +Apache-2.0 Licensed. See [LICENSE](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/LICENSE). diff --git a/aws-auth-configmap.tf b/aws-auth-configmap.tf index 36833630e3..23f89ea76b 100644 --- a/aws-auth-configmap.tf +++ b/aws-auth-configmap.tf @@ -7,7 +7,7 @@ resource "kubernetes_config_map" "aws_auth" { labels = merge( { "app.kubernetes.io/managed-by" = "Terraform" - "terraform.io/module" = "terraform-eks-blueprints" + "terraform.io/module" = "terraform-aws-eks-blueprints" }, var.aws_auth_additional_labels ) diff --git a/deploy/e2e-template/README.md b/deploy/e2e-template/README.md new file mode 100644 index 0000000000..8f04b05adb --- /dev/null +++ b/deploy/e2e-template/README.md @@ -0,0 +1 @@ +# Usage diff --git a/deploy/e2e/gh-e2e-template/backend.conf b/deploy/e2e-template/backend.conf similarity index 100% rename from deploy/e2e/gh-e2e-template/backend.conf rename to deploy/e2e-template/backend.conf diff --git a/deploy/e2e/gh-e2e-template/base.tfvars b/deploy/e2e-template/base.tfvars similarity index 58% rename from deploy/e2e/gh-e2e-template/base.tfvars rename to deploy/e2e-template/base.tfvars index 53356446a6..d38e677a56 100644 --- a/deploy/e2e/gh-e2e-template/base.tfvars +++ b/deploy/e2e-template/base.tfvars @@ -1,6 +1,3 @@ tenant = "REPLACE_ME" environment = "preprod" zone = "test" -region = "us-west-2" - -cluster_version = "1.21" diff --git a/deploy/e2e/gh-e2e-template/main.tf b/deploy/e2e-template/main.tf similarity index 65% rename from deploy/e2e/gh-e2e-template/main.tf rename to deploy/e2e-template/main.tf index 8b1a03f081..7ec8235efb 100644 --- a/deploy/e2e/gh-e2e-template/main.tf +++ b/deploy/e2e-template/main.tf @@ -1,16 +1,11 @@ provider "aws" { - region = var.region + region = "us-west-2" } -terraform { - backend "s3" {} -} - -module "e2e-test" { +module "e2e_test" { source = "../../../EXAMPLE_PATH" tenant = var.tenant environment = var.environment zone = var.zone - } diff --git a/deploy/e2e-template/outputs.tf b/deploy/e2e-template/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deploy/e2e/vpc/variables.tf b/deploy/e2e-template/variables.tf similarity index 65% rename from deploy/e2e/vpc/variables.tf rename to deploy/e2e-template/variables.tf index f0b951d10d..0b273ac2f8 100644 --- a/deploy/e2e/vpc/variables.tf +++ b/deploy/e2e-template/variables.tf @@ -1,14 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - -variable "region" { - type = string - description = "AWS region" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/deploy/e2e-template/versions.tf b/deploy/e2e-template/versions.tf new file mode 100644 index 0000000000..e10a6b663e --- /dev/null +++ 
b/deploy/e2e-template/versions.tf @@ -0,0 +1,12 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } + + backend "s3" {} +} diff --git a/deploy/e2e/eks/README.md b/deploy/e2e/eks/README.md deleted file mode 100644 index fb8c255fce..0000000000 --- a/deploy/e2e/eks/README.md +++ /dev/null @@ -1,71 +0,0 @@ -## How to deploy the example - - git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git - - cd ~/eks-blueprints/deploy/e2e/eks - - terraform init -backend-config backend.conf -reconfigure - - terraform plan -var-file base.tfvars - - terraform apply -var-file base.tfvars -auto-approve - - -## How to Destroy the cluster - - terraform destroy -var-file base.tfvars -auto-approve - - - - -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -SPDX-License-Identifier: MIT-0 - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -## Requirements - -No requirements. - -## Providers - -No providers. - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [eks-cluster-with-import-vpc](#module\_eks-cluster-with-import-vpc) | ../../../examples/eks-cluster-with-import-vpc/eks | n/a | - -## Resources - -No resources. - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | -| [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | n/a | yes | -| [region](#input\_region) | AWS region | `string` | n/a | yes | -| [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | n/a | yes | -| [tf\_state\_vpc\_s3\_bucket](#input\_tf\_state\_vpc\_s3\_bucket) | Terraform state S3 Bucket Name | `string` | n/a | yes | -| [tf\_state\_vpc\_s3\_key](#input\_tf\_state\_vpc\_s3\_key) | Terraform state S3 Key path | `string` | n/a | yes | -| [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | n/a | yes | - -## Outputs - -No outputs. 
- - diff --git a/deploy/e2e/eks/backend.conf b/deploy/e2e/eks/backend.conf deleted file mode 100644 index 31d3af3749..0000000000 --- a/deploy/e2e/eks/backend.conf +++ /dev/null @@ -1,3 +0,0 @@ -bucket = "terraform-ssp-github-actions-state" -region = "us-west-2" -key = "e2e/eks/terraform-main.tfstate" diff --git a/deploy/e2e/eks/base.tfvars b/deploy/e2e/eks/base.tfvars deleted file mode 100644 index ba8e93260d..0000000000 --- a/deploy/e2e/eks/base.tfvars +++ /dev/null @@ -1,9 +0,0 @@ -tenant = "aws001" -environment = "preprod" -zone = "test" -region = "us-west-2" - -cluster_version = "1.21" - -tf_state_vpc_s3_bucket = "terraform-ssp-github-actions-state" -tf_state_vpc_s3_key = "e2e/vpc/terraform-main.tfstate" diff --git a/deploy/e2e/eks/main.tf b/deploy/e2e/eks/main.tf deleted file mode 100644 index dbde7f4b5e..0000000000 --- a/deploy/e2e/eks/main.tf +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: MIT-0 - * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this - * software and associated documentation files (the "Software"), to deal in the Software - * without restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, - * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -provider "aws" { - region = var.region -} - -terraform { - backend "s3" {} -} - -module "eks-cluster-with-import-vpc" { - source = "../../../examples/eks-cluster-with-import-vpc/eks" - - tenant = var.tenant - environment = var.environment - zone = var.zone - region = var.region - - # VPC S3 TF State - tf_state_vpc_s3_bucket = var.tf_state_vpc_s3_bucket - tf_state_vpc_s3_key = var.tf_state_vpc_s3_key - -} diff --git a/deploy/e2e/eks/variables.tf b/deploy/e2e/eks/variables.tf deleted file mode 100644 index def0a2b215..0000000000 --- a/deploy/e2e/eks/variables.tf +++ /dev/null @@ -1,35 +0,0 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - -variable "region" { - type = string - description = "AWS region" -} - -variable "tf_state_vpc_s3_bucket" { - type = string - description = "Terraform state S3 Bucket Name" -} - -variable "tf_state_vpc_s3_key" { - type = string - description = "Terraform state S3 Key path" -} - -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" -} - -variable "environment" { - type = string - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." -} diff --git a/deploy/e2e/gh-e2e-template/README.md b/deploy/e2e/gh-e2e-template/README.md deleted file mode 100644 index af07160877..0000000000 --- a/deploy/e2e/gh-e2e-template/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# Usage - - -## Requirements - -No requirements. - -## Providers - -No providers. 
- -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [e2e-test](#module\_e2e-test) | ../../../EXAMPLE_PATH | n/a | - -## Resources - -No resources. - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | -| [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | n/a | yes | -| [region](#input\_region) | AWS region | `string` | n/a | yes | -| [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | n/a | yes | -| [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | n/a | yes | - -## Outputs - -No outputs. - - diff --git a/deploy/e2e/gh-e2e-template/variables.tf b/deploy/e2e/gh-e2e-template/variables.tf deleted file mode 100644 index f0b951d10d..0000000000 --- a/deploy/e2e/gh-e2e-template/variables.tf +++ /dev/null @@ -1,25 +0,0 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - -variable "region" { - type = string - description = "AWS region" -} - -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" -} - -variable "environment" { - type = string - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." -} diff --git a/deploy/e2e/vpc/README.md b/deploy/e2e/vpc/README.md deleted file mode 100644 index 0a0c7550c1..0000000000 --- a/deploy/e2e/vpc/README.md +++ /dev/null @@ -1,72 +0,0 @@ -## How to deploy the example - - git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git - - cd ~/eks-blueprints/deploy/e2e/vpc - - terraform init -backend-config backend.conf -reconfigure - - terraform plan -var-file base.tfvars - - terraform apply -var-file base.tfvars -auto-approve - - -## How to Destroy the cluster - - terraform destroy -var-file base.tfvars -auto-approve - - - -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -SPDX-License-Identifier: MIT-0 - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -## Requirements - -No requirements. - -## Providers - -No providers. - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [eks-cluster-with-import-vpc](#module\_eks-cluster-with-import-vpc) | ../../../examples/eks-cluster-with-import-vpc/vpc | n/a | - -## Resources - -No resources. 
- -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | -| [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | n/a | yes | -| [region](#input\_region) | AWS region | `string` | n/a | yes | -| [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | n/a | yes | -| [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | n/a | yes | - -## Outputs - -| Name | Description | -|------|-------------| -| [private\_subnets](#output\_private\_subnets) | List of IDs of private subnets | -| [public\_subnets](#output\_public\_subnets) | List of IDs of public subnets | -| [vpc\_id](#output\_vpc\_id) | The ID of the VPC | - - diff --git a/deploy/e2e/vpc/backend.conf b/deploy/e2e/vpc/backend.conf deleted file mode 100644 index 7c701df9b2..0000000000 --- a/deploy/e2e/vpc/backend.conf +++ /dev/null @@ -1,3 +0,0 @@ -bucket = "terraform-ssp-github-actions-state" -region = "us-west-2" -key = "e2e/vpc/terraform-main.tfstate" diff --git a/deploy/e2e/vpc/base.tfvars b/deploy/e2e/vpc/base.tfvars deleted file mode 100644 index 8279019e3b..0000000000 --- a/deploy/e2e/vpc/base.tfvars +++ /dev/null @@ -1,6 +0,0 @@ -tenant = "aws001" -environment = "preprod" -zone = "test" -region = "us-west-2" - -cluster_version = "1.21" diff --git a/deploy/e2e/vpc/main.tf b/deploy/e2e/vpc/main.tf deleted file mode 100644 index d1c32b4c65..0000000000 --- a/deploy/e2e/vpc/main.tf +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: MIT-0 - * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this - * software and associated documentation files (the "Software"), to deal in the Software - * without restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, - * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -provider "aws" { - region = var.region -} - -terraform { - backend "s3" {} -} - -module "eks-cluster-with-import-vpc" { - source = "../../../examples/eks-cluster-with-import-vpc/vpc" - - tenant = var.tenant - environment = var.environment - zone = var.zone - region = var.region - -} diff --git a/deploy/e2e/vpc/outputs.tf b/deploy/e2e/vpc/outputs.tf deleted file mode 100644 index 66b6a288ae..0000000000 --- a/deploy/e2e/vpc/outputs.tf +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * SPDX-License-Identifier: MIT-0 - * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this - * software and associated documentation files (the "Software"), to deal in the Software - * without restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, - * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -output "vpc_id" { - description = "The ID of the VPC" - value = module.eks-cluster-with-import-vpc.vpc_id -} - -output "private_subnets" { - description = "List of IDs of private subnets" - value = module.eks-cluster-with-import-vpc.private_subnets -} - -output "public_subnets" { - description = "List of IDs of public subnets" - value = module.eks-cluster-with-import-vpc.public_subnets -} diff --git a/deploy/pr/README.md b/deploy/pr/README.md deleted file mode 100644 index 25a3f7c52d..0000000000 --- a/deploy/pr/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Usage - - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | -| [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | - -## Providers - -| Name | Version | -|------|---------| -| [terraform](#provider\_terraform) | n/a | - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [eks-cluster-with-import-vpc](#module\_eks-cluster-with-import-vpc) | ../../examples/complete-kubernetes-addons | n/a | - -## Resources - -| Name | Type | -|------|------| -| [terraform_remote_state.vpc_s3_backend](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | - -## Inputs - -No inputs. - -## Outputs - -No outputs. 
- - diff --git a/deploy/pr/main.tf b/deploy/pr/main.tf deleted file mode 100644 index cb1db11e9e..0000000000 --- a/deploy/pr/main.tf +++ /dev/null @@ -1,49 +0,0 @@ - -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.7.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } -} - -terraform { - backend "s3" { - bucket = "terraform-ssp-github-actions-state" - key = "pr/eks/terraform-main.tfstate" - region = "us-west-2" - } -} - -# Importing VPC remote state config -data "terraform_remote_state" "vpc_s3_backend" { - backend = "s3" - config = { - bucket = "terraform-ssp-github-actions-state" - key = "e2e/vpc/terraform-main.tfstate" - region = "us-west-2" - } -} - -module "eks-cluster-with-import-vpc" { - source = "../../examples/complete-kubernetes-addons" - - tenant = "aws" - environment = "preprod" - zone = "pr" - - vpc_id = data.terraform_remote_state.vpc_s3_backend.outputs.vpc_id - private_subnet_ids = data.terraform_remote_state.vpc_s3_backend.outputs.private_subnets - public_subnet_ids = data.terraform_remote_state.vpc_s3_backend.outputs.public_subnets -} diff --git a/docs/add-ons/aws-privateca-issuer.md b/docs/add-ons/aws-privateca-issuer.md index 25790a9b75..35f0041f40 100644 --- a/docs/add-ons/aws-privateca-issuer.md +++ b/docs/add-ons/aws-privateca-issuer.md @@ -91,14 +91,14 @@ aws001-preprod-dev-eks-clusterissuer Name: aws001-preprod-dev-eks-clusterissuer Namespace: default Labels: -Annotations: cert-manager.io/alt-names: +Annotations: cert-manager.io/alt-names: cert-manager.io/certificate-name: example cert-manager.io/common-name: example.com - cert-manager.io/ip-sans: + cert-manager.io/ip-sans: cert-manager.io/issuer-group: awspca.cert-manager.io cert-manager.io/issuer-kind: AWSPCAClusterIssuer cert-manager.io/issuer-name: aws001-preprod-dev-eks - cert-manager.io/uri-sans: + cert-manager.io/uri-sans: Type: kubernetes.io/tls @@ -107,4 +107,4 @@ Data ca.crt: 1785 bytes tls.crt: 1517 bytes tls.key: 1679 bytes -``` \ No newline at end of file +``` diff --git a/docs/advanced/cluster-upgrades.md b/docs/advanced/cluster-upgrades.md index dd962cc24e..ad7f2ff2da 100644 --- a/docs/advanced/cluster-upgrades.md +++ b/docs/advanced/cluster-upgrades.md @@ -24,7 +24,7 @@ This table shows the supported plugin versions for each EKS Kubernetes version 1. Change the version in Terraform to desired version under `base.tfvars`. See the example below ```hcl-terraform - cluster_version = "1.20" + cluster_version = "1.21" ``` 2. Apply the changes to the cluster with Terraform. 
This step will upgrade the Control Plane and Data Plane to the newer version, and it will roughly take 35 mins to 1 hour diff --git a/docs/advanced/ecr-instructions.md b/docs/advanced/ecr-instructions.md index c1e9fec479..cc23551caf 100644 --- a/docs/advanced/ecr-instructions.md +++ b/docs/advanced/ecr-instructions.md @@ -24,7 +24,7 @@ After the repo is created in ECR, tag your image so, you can push the image to t ``` $ docker tag <image>:<tag> <aws_account_id>.dkr.ecr.<region>.amazonaws.com/<repo_name>:<tag> ``` -Step6: Run the following command to push this image to your newly created AWS repository: +Step 6: Run the following command to push this image to your newly created AWS repository: ``` $ docker push <aws_account_id>.dkr.ecr.<region>.amazonaws.com/<repo_name>:<tag> ``` diff --git a/docs/getting-started.md b/docs/getting-started.md index 1b76ec2df2..dfc71a1355 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -14,12 +14,12 @@ First, ensure that you have installed the following tools locally. The following steps will walk you through the deployment of an [example blueprint](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/eks-cluster-with-new-vpc/main.tf). This example will deploy a new VPC, a private EKS cluster with public and private subnets, and one managed node group that will be placed in the private subnets. The example will also deploy the following add-ons into the EKS cluster: -✅ AWS Load Balancer Controller -✅ Cluster Autoscaler -✅ CoreDNS -✅ kube-proxy -✅ Metrics Server -✅ vpc-cni +✅ AWS Load Balancer Controller +✅ Cluster Autoscaler +✅ CoreDNS +✅ kube-proxy +✅ Metrics Server +✅ vpc-cni ### Clone the repo diff --git a/docs/index.md b/docs/index.md index a7e90fff8b..316102ac79 100644 --- a/docs/index.md +++ b/docs/index.md @@ -14,7 +14,7 @@ You can use EKS Blueprints to easily bootstrap an EKS cluster with Amazon EKS ad ## Examples -To view a library of examples for how you can leverage the terraform-eks-blueprints, please see our [examples](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/examples). +To view a library of examples for how you can leverage `terraform-aws-eks-blueprints`, please see our [examples](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/examples). ## Motivation @@ -26,8 +26,8 @@ AWS customers have asked for examples that demonstrate how to integrate the land Customers can use this solution to easily architect and deploy complete, opinionated EKS clusters. Specifically, customers can leverage the eks-blueprints module to: -✅ Deploy Well-Architected EKS clusters across any number of accounts and regions. -✅ Manage cluster configuration, including add-ons that run in each cluster, from a single Git repository. -✅ Define teams, namespaces, and their associated access permissions for your clusters. -✅ Leverage GitOps-based workflows for onboarding and managing workloads for your teams. -✅ Create Continuous Delivery (CD) pipelines that are responsible for deploying your infrastructure. +✅ Deploy Well-Architected EKS clusters across any number of accounts and regions. +✅ Manage cluster configuration, including add-ons that run in each cluster, from a single Git repository. +✅ Define teams, namespaces, and their associated access permissions for your clusters. +✅ Leverage GitOps-based workflows for onboarding and managing workloads for your teams. +✅ Create Continuous Delivery (CD) pipelines that are responsible for deploying your infrastructure. 
diff --git a/docs/node-groups.md b/docs/node-groups.md index c6e5e4bcbc..6a0998762b 100644 --- a/docs/node-groups.md +++ b/docs/node-groups.md @@ -18,7 +18,7 @@ module "eks_blueprints" { source = "github.com/aws-ia/terraform-aws-eks-blueprints" # EKS CLUSTER - cluster_version = "1.21" # EKS Cluster Version + cluster_version = "1.21" # EKS Cluster Version vpc_id = "" # Enter VPC ID private_subnet_ids = ["", "", ""] # Enter Private Subnet IDs diff --git a/examples/analytics/emr-on-eks/README.md b/examples/analytics/emr-on-eks/README.md index 5b1d09be91..098a1a021b 100644 --- a/examples/analytics/emr-on-eks/README.md +++ b/examples/analytics/emr-on-eks/README.md @@ -1,21 +1,24 @@ # EMR on EKS This example deploys the following resources - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - - Creates EKS Cluster Control plane with public endpoint (for demo purpose only) with one managed node group - - Deploys Metrics server, Cluster Autoscaler, Prometheus and EMR on EKS Addon - - Creates Amazon managed Prometheus and configures Prometheus addon to remote write metrics to AMP + +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Creates EKS Cluster Control plane with public endpoint (for demo purpose only) with one managed node group +- Deploys Metrics server, Cluster Autoscaler, Prometheus and EMR on EKS Addon +- Creates Amazon managed Prometheus and configures Prometheus addon to remote write metrics to AMP ## Prerequisites: + Ensure that you have installed the following tools on your machine. + 1. [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) _Note: Currently Amazon Prometheus supported only in selected regions. Please see this [userguide](https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html) for supported regions._ -## Step1: Deploy EKS Clusters with EMR on EKS feature +## Step 1: Deploy EKS Clusters with EMR on EKS feature Clone the repository @@ -33,9 +36,8 @@ terraform init Set AWS_REGION and Run Terraform plan to verify the resources created by this execution. ``` -export AWS_REGION="" +export AWS_REGION="" terraform plan - ``` Deploy the pattern @@ -46,7 +48,7 @@ terraform apply Enter `yes` to apply. -## Step3: Verify the resources +## Step 3: Verify the resources Let’s verify the resources created by Step 4. @@ -76,7 +78,7 @@ kubectl get pods --namespace=kube-system | grep metrics-server # Output shows M kubectl get pods --namespace=kube-system | grep cluster-autoscaler # Output shows Cluster Autoscaler pod ``` -## Step4: Create EMR Virtual Cluster for EKS +## Step 4: Create EMR Virtual Cluster for EKS We are using AWS CLI to create EMR on EKS Clusters. You can leverage Terraform Module once the [EMR on EKS TF provider](https://github.com/hashicorp/terraform-provider-aws/pull/20003) is available. @@ -86,8 +88,8 @@ vi examples/analytics/emr-on-eks/examples/create_emr_virtual_cluster_for_eks.sh Update the following variables. 
-Extract the cluster_name as **EKS_CLUSTER_ID** from Terraform Outputs (**Step1**) -**EMR_ON_EKS_NAMESPACE** is same as what you passed from **Step1** +Extract the cluster_name as **EKS_CLUSTER_ID** from Terraform Outputs (**Step 1**) +**EMR_ON_EKS_NAMESPACE** is same as what you passed from **Step 1** EKS_CLUSTER_ID='aws001-preprod-test-eks' EMR_ON_EKS_NAMESPACE='emr-data-team-a' @@ -99,7 +101,7 @@ cd examples/analytics/emr-on-eks/examples/ ./create_emr_virtual_cluster_for_eks.sh ``` -## Step5: Execute Spark job on EMR Virtual Cluster +## Step 5: Execute Spark job on EMR Virtual Cluster Execute the Spark job using the below shell script. @@ -119,7 +121,7 @@ Verify the job execution kubectl get pods --namespace=emr-data-team-a -w ``` -## Step5: Cleanup +## Step 5: Cleanup ### Delete EMR Virtual Cluster for EKS @@ -131,6 +133,7 @@ cd examples/analytics/emr-on-eks/examples/ ## Additional examples ### Node Placements example + Add these to `applicationConfiguration`.`properties` "spark.kubernetes.node.selector.topology.kubernetes.io/zone":"", @@ -141,7 +144,6 @@ Add these to `applicationConfiguration`.`properties` In this example we are connecting to mysql db, so mariadb-connector-java.jar needs to be passed with --jars option https://aws.github.io/aws-emr-containers-best-practices/metastore-integrations/docs/hive-metastore/ - "sparkSubmitJobDriver": { "entryPoint": "s3:///hivejdbc.py", "sparkSubmitParameters": "--jars s3:///mariadb-connector-java.jar @@ -176,6 +178,7 @@ Specifically, you can use persistent volume claims if the jobs require large shu ## Debugging ##### Issue1: Error: local-exec provisioner error + ```shell script Error: local-exec provisioner error \ with module.eks-blueprints.module.emr_on_eks["data_team_b"].null_resource.update_trust_policy,\ @@ -184,49 +187,46 @@ with module.eks-blueprints.module.emr_on_eks["data_team_b"].null_resource.update │ --cluster-name aws001-preprod-test-eks \│ --namespace emr-data-team-b \│ --role-name aws001-preprod-test-eks-emr-eks-data-team-b ``` -##### Solution : - - emr-containers not present in cli version 2.0.41 Python/3.7.4. For more [details](https://github.com/aws/aws-cli/issues/6162) -This is fixed in version 2.0.54. -- Action: aws cli version should be updated to 2.0.54 or later : Execute `pip install --upgrade awscliv2 ` +##### Solution : - +- emr-containers not present in cli version 2.0.41 Python/3.7.4. For more [details](https://github.com/aws/aws-cli/issues/6162) + This is fixed in version 2.0.54. +- Action: aws cli version should be updated to 2.0.54 or later : Execute `pip install --upgrade awscliv2 ` + + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. 
| n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"test"` | no | @@ -236,5 +236,4 @@ This is fixed in version 2.0.54. | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/analytics/emr-on-eks/examples/spark-execute/3-spark-job-script-in-s3-path.sh b/examples/analytics/emr-on-eks/examples/spark-execute/3-spark-job-script-in-s3-path.sh index 13ada57bb0..d34d2f9fbd 100755 --- a/examples/analytics/emr-on-eks/examples/spark-execute/3-spark-job-script-in-s3-path.sh +++ b/examples/analytics/emr-on-eks/examples/spark-execute/3-spark-job-script-in-s3-path.sh @@ -16,7 +16,7 @@ S3_BUCKET='s3://' # Create your own s3 CW_LOG_GROUP="/emr-on-eks-logs/${EMR_VIRTUAL_CLUSTER_NAME}/${EMR_ON_EKS_NAMESPACE}" # Create CW Log group if not exist SPARK_JOB_S3_PATH="${S3_BUCKET}/${EMR_VIRTUAL_CLUSTER_NAME}/${EMR_ON_EKS_NAMESPACE}/${JOB_NAME}" -# Step1: COPY POD TEMPLATES TO S3 Bucket +# Step 1: COPY POD TEMPLATES TO S3 Bucket aws s3 sync ./spark-scripts/ "${SPARK_JOB_S3_PATH}/" # FIND ROLE ARN and EMR VIRTUAL CLUSTER ID diff --git a/examples/analytics/emr-on-eks/examples/spark-execute/4-spark-job-with-pod-templates.sh b/examples/analytics/emr-on-eks/examples/spark-execute/4-spark-job-with-pod-templates.sh index 7e9274d5ba..c348a13ae8 100755 --- a/examples/analytics/emr-on-eks/examples/spark-execute/4-spark-job-with-pod-templates.sh +++ b/examples/analytics/emr-on-eks/examples/spark-execute/4-spark-job-with-pod-templates.sh @@ -13,7 +13,7 @@ S3_BUCKET='s3://' # Create your own s3 CW_LOG_GROUP="/emr-on-eks-logs/${EMR_VIRTUAL_CLUSTER_NAME}/${EMR_ON_EKS_NAMESPACE}" # Create CW Log group if not exist SPARK_JOB_S3_PATH="${S3_BUCKET}/${EMR_VIRTUAL_CLUSTER_NAME}/${EMR_ON_EKS_NAMESPACE}/${JOB_NAME}" -# Step1: COPY POD TEMPLATES TO S3 Bucket +# Step 1: COPY POD TEMPLATES TO S3 Bucket aws s3 sync ./spark-scripts/ "${SPARK_JOB_S3_PATH}/" # FIND 
ROLE ARN and EMR VIRTUAL CLUSTER ID diff --git a/examples/analytics/emr-on-eks/examples/spark-execute/6-spark-dynamic-resource-allocation.sh b/examples/analytics/emr-on-eks/examples/spark-execute/6-spark-dynamic-resource-allocation.sh index f7cfcff1c2..62fdc7afbd 100644 --- a/examples/analytics/emr-on-eks/examples/spark-execute/6-spark-dynamic-resource-allocation.sh +++ b/examples/analytics/emr-on-eks/examples/spark-execute/6-spark-dynamic-resource-allocation.sh @@ -13,7 +13,7 @@ S3_BUCKET='s3://' # Create your own s3 CW_LOG_GROUP="/emr-on-eks-logs/${EMR_VIRTUAL_CLUSTER_NAME}/${EMR_ON_EKS_NAMESPACE}" # Create CW Log group if not exist SPARK_JOB_S3_PATH="${S3_BUCKET}/${EMR_VIRTUAL_CLUSTER_NAME}/${EMR_ON_EKS_NAMESPACE}/${JOB_NAME}" -# Step1: COPY POD TEMPLATES TO S3 Bucket +# Step 1: COPY POD TEMPLATES TO S3 Bucket aws s3 sync ./spark-scripts/ "${SPARK_JOB_S3_PATH}/" # FIND ROLE ARN and EMR VIRTUAL CLUSTER ID diff --git a/examples/analytics/emr-on-eks/main.tf b/examples/analytics/emr-on-eks/main.tf index 4ab0391ff5..9b3b8c6dc0 100644 --- a/examples/analytics/emr-on-eks/main.tf +++ b/examples/analytics/emr-on-eks/main.tf @@ -1,62 +1,40 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } +provider "aws" { + region = local.region } -provider "aws" {} - provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } -data "aws_region" "current" {} - data "aws_availability_zones" "available" {} -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - cluster_version = var.cluster_version + tenant = var.tenant # AWS account name or unique id for tenant + environment = var.environment # Environment area eg., preprod or prod + zone = 
var.zone # Environment with in one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -68,7 +46,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -91,12 +69,12 @@ module "aws_vpc" { "kubernetes.io/cluster/${local.cluster_name}" = "shared" "kubernetes.io/role/internal-elb" = "1" } - } + #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." tenant = local.tenant @@ -109,7 +87,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -144,10 +122,10 @@ module "eks-blueprints" { enable_amazon_prometheus = true } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id #K8s Add-ons enable_metrics_server = true enable_cluster_autoscaler = true @@ -157,7 +135,7 @@ module "eks-blueprints-kubernetes-addons" { #--------------------------------------- # Amazon Prometheus Configuration to integrate with Prometheus Server Add-on enable_amazon_prometheus = true - amazon_prometheus_workspace_endpoint = module.eks-blueprints.amazon_prometheus_workspace_endpoint + amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint # Enabling Prometheus Server Add-on enable_prometheus = true @@ -187,10 +165,5 @@ module "eks-blueprints-kubernetes-addons" { values = [templatefile("${path.module}/helm_values/vpa-values.yaml", {})] } - depends_on = [module.eks-blueprints.managed_node_groups] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/analytics/emr-on-eks/outputs.tf b/examples/analytics/emr-on-eks/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/analytics/emr-on-eks/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/analytics/emr-on-eks/variables.tf b/examples/analytics/emr-on-eks/variables.tf index ade9248d41..b9ffe515bd 100644 --- a/examples/analytics/emr-on-eks/variables.tf +++ b/examples/analytics/emr-on-eks/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/analytics/emr-on-eks/versions.tf b/examples/analytics/emr-on-eks/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ 
b/examples/analytics/emr-on-eks/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/analytics/spark-k8s-operator/README.md b/examples/analytics/spark-k8s-operator/README.md index e0d40fd63e..a6f7c4bea5 100644 --- a/examples/analytics/spark-k8s-operator/README.md +++ b/examples/analytics/spark-k8s-operator/README.md @@ -1,22 +1,26 @@ # Spark on K8s Operator with EKS This example deploys an EKS Cluster running the Spark K8s operator into a new VPC. - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - - Creates EKS Cluster Control plane with public endpoint (for demo reasons only) with one managed node group - - Deploys Metrics server, Cluster Autoscaler, Spark-k8s-operator, Yunikorn and Prometheus - This will install the Kubernetes Operator for Apache Spark into the namespace spark-operator. - The operator by default watches and handles SparkApplications in all namespaces. - If you would like to limit the operator to watch and handle SparkApplications in a single namespace, e.g., default instead, add the following option to the helm install command: +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Creates EKS Cluster Control plane with public endpoint (for demo reasons only) with one managed node group +- Deploys Metrics server, Cluster Autoscaler, Spark-k8s-operator, Yunikorn and Prometheus + +This will install the Kubernetes Operator for Apache Spark into the namespace spark-operator. +The operator by default watches and handles SparkApplications in all namespaces. +If you would like to limit the operator to watch and handle SparkApplications in a single namespace, e.g., default instead, add the following option to the helm install command: ## Prerequisites + Ensure that you have installed the following tools on your machine. + 1. [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) + +## Step 1: Deploy EKS Cluster with Spark-K8s-Operator feature -## Step1: Deploy EKS Cluster with Spark-K8s-Operator feature Clone the repository ``` @@ -46,7 +50,8 @@ terraform apply Enter `yes` to apply. ## Execute Sample Spark Job on EKS Cluster with Spark-k8s-operator: - - Create Spark Namespace, Service Account and ClusterRole and ClusterRole Binding for the jobs + +- Create Spark Namespace, Service Account and ClusterRole and ClusterRole Binding for the jobs ```shell script cd examples/analytics/spark-k8s-operator/k8s-schedular @@ -68,39 +73,40 @@ Enter `yes` to apply. kubectl describe sparkapplication pyspark-pi -n spark-ns ``` - + ## Requirements -No requirements. 
+| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [helm](#requirement\_helm) | >= 2.4.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"spark"` | no | @@ -110,5 +116,4 @@ No requirements. 
| Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/analytics/spark-k8s-operator/main.tf b/examples/analytics/spark-k8s-operator/main.tf index 3cf66b5c00..1702618bbb 100644 --- a/examples/analytics/spark-k8s-operator/main.tf +++ b/examples/analytics/spark-k8s-operator/main.tf @@ -1,46 +1,40 @@ -provider "aws" {} +provider "aws" { + region = local.region +} provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } -data "aws_region" "current" {} - data "aws_availability_zones" "available" {} -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -terraform { - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = var.cluster_version + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -52,7 +46,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -75,12 +69,12 @@ module "aws_vpc" { "kubernetes.io/cluster/${local.cluster_name}" = "shared" "kubernetes.io/role/internal-elb" = "1" } - } + #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." 
tenant = local.tenant @@ -93,7 +87,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" #----------------------------------------------------------------------------------------------------------# # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks). @@ -148,9 +142,9 @@ module "eks-blueprints" { enable_amazon_prometheus = true } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id #K8s Add-ons enable_metrics_server = true @@ -161,7 +155,7 @@ module "eks-blueprints-kubernetes-addons" { #--------------------------------------- # Amazon Prometheus Configuration to integrate with Prometheus Server Add-on enable_amazon_prometheus = true - amazon_prometheus_workspace_endpoint = module.eks-blueprints.amazon_prometheus_workspace_endpoint + amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint #--------------------------------------- # COMMUNITY PROMETHEUS ENABLE @@ -205,10 +199,5 @@ module "eks-blueprints-kubernetes-addons" { values = [templatefile("${path.module}/helm_values/yunikorn-values.yaml", {})] } - depends_on = [module.eks-blueprints.managed_node_groups] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/analytics/spark-k8s-operator/outputs.tf b/examples/analytics/spark-k8s-operator/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/analytics/spark-k8s-operator/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/analytics/spark-k8s-operator/variables.tf b/examples/analytics/spark-k8s-operator/variables.tf index 6bf848a7d5..f9745d19b4 100644 --- a/examples/analytics/spark-k8s-operator/variables.tf +++ b/examples/analytics/spark-k8s-operator/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/analytics/spark-k8s-operator/versions.tf b/examples/analytics/spark-k8s-operator/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/analytics/spark-k8s-operator/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/aws-efs-csi-driver/README.md b/examples/aws-efs-csi-driver/README.md index 
26c02c3d61..7419b77b01 100644 --- a/examples/aws-efs-csi-driver/README.md +++ b/examples/aws-efs-csi-driver/README.md @@ -1,25 +1,32 @@ # EKS Cluster Deployment with new VPC and EFS + This example deploys the following Basic EKS Cluster with VPC - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - - Creates EKS Cluster Control plane with one managed node group and fargate profile - - Creates EFS file system for backing the dynamic provisioning of persistent volumes + +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Creates EKS Cluster Control plane with one managed node group and fargate profile +- Creates EFS file system for backing the dynamic provisioning of persistent volumes ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -27,7 +34,8 @@ cd examples/aws-efs-csi-driver/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -35,7 +43,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + To create resources ```shell script @@ -45,24 +54,25 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. 
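For example, the `configure_kubectl` output defined in this example's `outputs.tf` prints the exact `update-kubeconfig` command for the cluster; a minimal sketch of reading it back (this assumes `terraform apply` has already completed in this directory and that the output is a plain string):

```shell script
# Print the ready-made kubeconfig command exposed by the example's
# configure_kubectl output; requires a completed `terraform apply`
terraform output -raw configure_kubectl
```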
-#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command aws eks --region ${AWS_REGION} update-kubeconfig --name aws001-preprod-dev-eks -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below kubectl get nodes -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace kubectl get pods -n kube-system -#### Step8: Create a storage class to leverage the EFS file system +#### Step 8: Create a storage class to leverage the EFS file system Retrieve your Amazon EFS file system ID @@ -80,7 +90,7 @@ Deploy the storage class kubectl apply -f storageclass.yaml -#### Step9: Test automatic provisioning +#### Step 9: Test automatic provisioning Download a manifest that deploys a `Pod` and a `PersistentVolumeClaim` @@ -109,6 +119,7 @@ Confirm that the data is written to the volume Wed Feb 23 13:37:49 UTC 2022 ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -116,29 +127,29 @@ cd examples/aws-efs-csi-driver/ terraform destroy -auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. 
| n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | ## Resources @@ -148,15 +159,11 @@ terraform destroy -auto-approve | [aws_efs_mount_target.efs_mt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_mount_target) | resource | | [aws_security_group.efs_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"dev"` | no | @@ -167,5 +174,4 @@ terraform destroy -auto-approve |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | | [efs\_file\_system\_id](#output\_efs\_file\_system\_id) | ID of the EFS file system to use for creating a storage class | - - + diff --git a/examples/aws-efs-csi-driver/main.tf b/examples/aws-efs-csi-driver/main.tf index 3be13a2d72..ec88161d72 100644 --- a/examples/aws-efs-csi-driver/main.tf +++ b/examples/aws-efs-csi-driver/main.tf @@ -1,65 +1,40 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = 
base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - cluster_version = var.cluster_version + tenant = var.tenant # AWS account name or unique id for tenant + environment = var.environment # Environment area eg., preprod or prod + zone = var.zone # Environment with in one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -71,7 +46,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -97,9 +72,9 @@ module "aws_vpc" { } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../.." 
tenant = local.tenant @@ -112,7 +87,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -141,16 +116,16 @@ module "eks-blueprints" { additional_tags = { ExtraTag = "Fargate" } - }, + } } } #--------------------------------------------- # Deploy Kubernetes Add-ons with sub module #--------------------------------------------- -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id # EKS Managed Add-ons enable_amazon_eks_coredns = true @@ -162,7 +137,7 @@ module "eks-blueprints-kubernetes-addons" { enable_cluster_autoscaler = true enable_aws_efs_csi_driver = true - depends_on = [module.eks-blueprints.managed_node_groups] + depends_on = [module.eks_blueprints.managed_node_groups] } #-------------- @@ -192,13 +167,3 @@ resource "aws_security_group" "efs_sg" { protocol = "tcp" } } - -output "efs_file_system_id" { - description = "ID of the EFS file system to use for creating a storage class" - value = aws_efs_file_system.efs.id -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl -} diff --git a/examples/aws-efs-csi-driver/outputs.tf b/examples/aws-efs-csi-driver/outputs.tf new file mode 100644 index 0000000000..76e5057174 --- /dev/null +++ b/examples/aws-efs-csi-driver/outputs.tf @@ -0,0 +1,9 @@ +output "efs_file_system_id" { + description = "ID of the EFS file system to use for creating a storage class" + value = aws_efs_file_system.efs.id +} + +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/aws-efs-csi-driver/variables.tf b/examples/aws-efs-csi-driver/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/aws-efs-csi-driver/variables.tf +++ b/examples/aws-efs-csi-driver/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/aws-efs-csi-driver/versions.tf b/examples/aws-efs-csi-driver/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/aws-efs-csi-driver/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/ci-cd/gitlab-ci-cd/README.md b/examples/ci-cd/gitlab-ci-cd/README.md index 8b9a441f94..4bc425c2bf 100644 --- a/examples/ci-cd/gitlab-ci-cd/README.md +++ b/examples/ci-cd/gitlab-ci-cd/README.md @@ -1,9 +1,10 @@ # GitLab CI/CD example + This pattern demonstrates a GitOps approach with IaC using Gitlab CI/CD. 
This shows an example of how to automate the build and deployment of IaC code for provisioning an Amazon EKS Cluster using GitLab CI/CD. - Using Gitlab for Terraform state management which allows multiple engineers to work together to develop the infrastructure - Validation checks for the code - Note : This pattern needs Gitlab version 14.5 or above +Using GitLab for Terraform state management, which allows multiple engineers to work together to develop the infrastructure +Validation checks for the code +Note: This pattern needs GitLab version 14.5 or above ### Step 1: Clone this repo ``` git@github.com:aws-ia/terraform-aws-eks-blueprints.git ``` ## Step 2: Create a new git repo in your GitLab group and copy files from examples/advanced/gitlab-ci-cd folder to the root of your new GitLab repo + cd examples/ci-cd/gitlab-ci-cd cp . $YOUR_GITLAB_REPO_ROOT ## Step 3: Update project settings-> CI/CD ->Variables - - Login to the GitLab console, Open your repo and navigate to `settings->ci-cd->Variables` - - Update the following variables as Key Value pairs before triggering the pipeline - AWS_ACCESS_KEY_ID e.g., access key from devops admin iam role - AWS_SECRET_ACCESS_KEY e.g., secret key from devops admin iam role - AWS_REGION e.g., eu-west-1 +- Log in to the GitLab console, open your repo and navigate to `settings->ci-cd->Variables` +- Update the following variables as key-value pairs before triggering the pipeline + + AWS_ACCESS_KEY_ID e.g., access key from devops admin iam role + AWS_SECRET_ACCESS_KEY e.g., secret key from devops admin iam role + AWS_REGION e.g., eu-west-1 -## Step 4: Update variables in input.tfvars file - 1. Update tenant,environment,zone as per your requirement - 2. Update cluster_version to any version > "1.20" - 3. Update CIDR of your VPC, vpc_cidcr = "10.2.0.0/16" +## Step 4: Update variables in input.tfvars file +1. Update tenant, environment and zone as per your requirements +2. Update cluster_version to any version > "1.20" +3. Update the CIDR of your VPC, vpc_cidr = "10.2.0.0/16" + +## Step 5: Commit changes and push to verify the pipeline -## Step5: Commit changes and push to verify the pipeline Manually trigger the `tf-apply` stage to provision the resources -## Step6: Verify whether the state file update happened in your project (Infrastructure->Terraform-states) +## Step 6: Verify whether the state file update happened in your project (Infrastructure->Terraform-states) + +## Step 7: (Optional) Manually install, configure and run the GitLab Agent for Kubernetes (“Agent”, for short) -## Step7: (Optional) Manually Install, Configure and Run GitLab Agent for Kubernetes (“Agent”, for short) is your active in-cluster. The Agent is your active in-cluster component for connecting Kubernetes clusters to GitLab. Refer https://docs.gitlab.com/ee/user/clusters/agent/install/index.html -## Step8: Cleanup the deployed resources + +## Step 8: Clean up the deployed resources + Manually trigger the `tf-destroy` stage in the GitLab CI/CD pipeline to destroy your deployment. ## Troubleshooting: - ### 400 Error when creating resource - - If the error contains `{message: {environment_scope: [cannot add duplicated environment scope]}}`, it is likely that an existing Kubernetes integration with the same environment scope was not removed. Remove any Kubernetes clusters with the same environment scope from the GitLab group before redeploying.
+ - If the error contains `{message: {environment_scope: [cannot add duplicated environment scope]}}`, it is likely that an existing Kubernetes integration with the same environment scope was not removed. Remove any Kubernetes clusters with the same environment scope from the GitLab group before redeploying. - ### What's gitlab-terraform? - - `gitlab-terraform` is a thin wrapper around the `terraform` binary. as part of the [GitLab Terraform docker image](https://gitlab.com/gitlab-org/terraform-images) used in `.gitlab-ci.yml`. + - `gitlab-terraform` is a thin wrapper around the `terraform` binary. as part of the [GitLab Terraform docker image](https://gitlab.com/gitlab-org/terraform-images) used in `.gitlab-ci.yml`. + - ### In case your tf-apply stage is failed in between - - Correct the source code ,commit and push the code or ensure you manually trigger tf-destroy stage and cleanup the provisioned resources + - Correct the source code ,commit and push the code or ensure you manually trigger tf-destroy stage and cleanup the provisioned resources + --- - + + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [gitlab](#requirement\_gitlab) | 3.7.0 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | 3.11.3 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. 
| n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | n/a | `string` | n/a | yes | | [environment](#input\_environment) | Environment area eg., preprod or prod | `string` | n/a | yes | | [tenant](#input\_tenant) | AWS account name or unique id for tenant | `string` | n/a | yes | -| [terraform\_version](#input\_terraform\_version) | n/a | `string` | n/a | yes | -| [vpc\_cidr](#input\_vpc\_cidr) | n/a | `string` | n/a | yes | | [zone](#input\_zone) | Environment with in one sub\_tenant or business unit | `string` | n/a | yes | ## Outputs @@ -100,5 +104,4 @@ Manually trigger the `tf-destroy` stage in the GitLab Ci/CD pipeline to destroy | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/ci-cd/gitlab-ci-cd/dev.tfvars b/examples/ci-cd/gitlab-ci-cd/dev.tfvars index 6a5999cda9..c0502066ce 100644 --- a/examples/ci-cd/gitlab-ci-cd/dev.tfvars +++ b/examples/ci-cd/gitlab-ci-cd/dev.tfvars @@ -1,6 +1,3 @@ -tenant = "aws002" -environment = "preprod" -zone = "dev" -cluster_version = "1.21" -vpc_cidr = "10.2.0.0/16" -terraform_version = "Terraform v1.1.3" +tenant = "aws002" +environment = "preprod" +zone = "dev" diff --git a/examples/ci-cd/gitlab-ci-cd/main.tf b/examples/ci-cd/gitlab-ci-cd/main.tf index 73fd7de3d8..ec3527fb36 100644 --- a/examples/ci-cd/gitlab-ci-cd/main.tf +++ b/examples/ci-cd/gitlab-ci-cd/main.tf @@ -1,79 +1,59 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - gitlab = { - source = "gitlabhq/gitlab" - version = "3.7.0" - } - } - - # storing tfstate with GitLab-managed Terraform state, read more here: https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html - backend "http" { - } +provider "gitlab" { + # Configuration options - the GitLab token that this provider requires is pulled from the variables set in the CI/CD settings of the GitLab repository } provider "aws" { - region = data.aws_region.current.id - alias = "default" -} - -provider "gitlab" { - # Configuration options - the GitLab token that this provider requires is pulled from the variables set in the CI/CD settings of the GitLab repository + region = local.region } provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = 
base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } -data "aws_region" "current" {} - data "aws_availability_zones" "available" {} -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - locals { vpc_name = join("-", [var.tenant, var.environment, var.zone, "vpc"]) cluster_name = join("-", [var.tenant, var.environment, var.zone, "eks"]) + region = "us-west-2" + + vpc_cidr = "10.2.0.0/16" + + terraform_version = "Terraform v1.1.3" } module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "3.11.3" - name = local.vpc_name - cidr = var.vpc_cidr - azs = data.aws_availability_zones.available.names + version = "~> 3.0" - public_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(var.vpc_cidr, 8, k)] - private_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(var.vpc_cidr, 8, k + 10)] + name = local.vpc_name + cidr = local.vpc_cidr + azs = data.aws_availability_zones.available.names + + public_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k + 10)] enable_nat_gateway = true create_igw = true @@ -90,22 +70,24 @@ module "aws_vpc" { "kubernetes.io/role/internal-elb" = "1" } } + #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { - source = "../../.." +module "eks_blueprints" { + source = "../../.." 
+ tenant = var.tenant environment = var.environment zone = var.zone - terraform_version = var.terraform_version + terraform_version = local.terraform_version # EKS Cluster VPC and Subnet mandatory config vpc_id = module.aws_vpc.vpc_id private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = var.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -117,8 +99,3 @@ module "eks-blueprints" { } } } - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl -} diff --git a/examples/ci-cd/gitlab-ci-cd/outputs.tf b/examples/ci-cd/gitlab-ci-cd/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/ci-cd/gitlab-ci-cd/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/ci-cd/gitlab-ci-cd/variables.tf b/examples/ci-cd/gitlab-ci-cd/variables.tf index 3904209512..bb34a423e1 100644 --- a/examples/ci-cd/gitlab-ci-cd/variables.tf +++ b/examples/ci-cd/gitlab-ci-cd/variables.tf @@ -12,15 +12,3 @@ variable "zone" { type = string description = "Environment with in one sub_tenant or business unit" } - -variable "cluster_version" { - type = string -} - -variable "vpc_cidr" { - type = string -} - -variable "terraform_version" { - type = string -} diff --git a/examples/ci-cd/gitlab-ci-cd/versions.tf b/examples/ci-cd/gitlab-ci-cd/versions.tf new file mode 100644 index 0000000000..b2d0e4d8f5 --- /dev/null +++ b/examples/ci-cd/gitlab-ci-cd/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + gitlab = { + source = "gitlabhq/gitlab" + version = "3.7.0" + } + } + + # storing tfstate with GitLab-managed Terraform state, read more here: https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html + backend "http" { + } +} diff --git a/examples/complete-kubernetes-addons/README.md b/examples/complete-kubernetes-addons/README.md index 538335bf2e..5b86b56078 100644 --- a/examples/complete-kubernetes-addons/README.md +++ b/examples/complete-kubernetes-addons/README.md @@ -1,22 +1,27 @@ # Complete example with Kubernetes add-ons + This example deploys a new EKS Cluster with all node groups and add-ons with advanced configuration ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. 
[Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -24,7 +29,8 @@ cd examples/complete-kubernetes-addons/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -32,7 +38,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -42,24 +49,26 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. -#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region update-kubeconfig --name -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace $ kubectl get pods -n kube-system ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -67,37 +76,30 @@ cd examples/complete-kubernetes-addons terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +No providers. ## Modules | Name | Source | Version | |------|--------|---------| -| [eks-blueprints](#module\_eks-blueprints) | ../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../modules/kubernetes-addons | n/a | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | ## Resources -| Name | Type | -|------|------| -| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | +No resources. 
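Note that, unlike the VPC-creating examples, this one targets an existing VPC: the Inputs table below lists `vpc_id` and `private_subnet_ids` as required, with no defaults. A minimal sketch of supplying them on the command line (the `vpc-...`/`subnet-...` IDs are hypothetical placeholders):

```shell script
# Hypothetical IDs shown for illustration; substitute the VPC and
# private subnet IDs from your own account before running
terraform apply \
  -var 'vpc_id=vpc-0123456789abcdef0' \
  -var 'private_subnet_ids=["subnet-0aaa0aaa0aaa0aaa0","subnet-0bbb0bbb0bbb0bbb0","subnet-0ccc0ccc0ccc0ccc0"]'
```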
## Inputs @@ -105,7 +107,6 @@ terraform destroy --auto-approve |------|-------------|------|---------|:--------:| | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [private\_subnet\_ids](#input\_private\_subnet\_ids) | list of private subnets Id's for the Worker nodes | `list(string)` | n/a | yes | -| [public\_subnet\_ids](#input\_public\_subnet\_ids) | list of public subnets Id's for the Worker nodes | `list(string)` | `[]` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws"` | no | | [vpc\_id](#input\_vpc\_id) | VPC id | `string` | n/a | yes | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"test"` | no | @@ -115,5 +116,4 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/complete-kubernetes-addons/main.tf b/examples/complete-kubernetes-addons/main.tf index 11a351ffb0..c4bbdf29f6 100644 --- a/examples/complete-kubernetes-addons/main.tf +++ b/examples/complete-kubernetes-addons/main.tf @@ -1,77 +1,51 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } locals { - tenant = var.tenant - environment = var.environment - zone = var.zone + tenant = var.tenant + environment = var.environment + zone = var.zone + region = "us-west-2" + 
eks_cluster_id = join("-", [local.tenant, local.environment, local.zone, "eks"]) - cluster_version = "1.21" terraform_version = "Terraform v1.0.1" vpc_id = var.vpc_id private_subnet_ids = var.private_subnet_ids - public_subnet_ids = var.public_subnet_ids } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../.." tenant = local.tenant @@ -84,7 +58,7 @@ module "eks-blueprints" { private_subnet_ids = local.private_subnet_ids # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -142,12 +116,12 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id - eks_worker_security_group_id = module.eks-blueprints.worker_node_security_group_id - auto_scaling_group_names = module.eks-blueprints.self_managed_node_group_autoscaling_groups + eks_cluster_id = module.eks_blueprints.eks_cluster_id + eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id + auto_scaling_group_names = module.eks_blueprints.self_managed_node_group_autoscaling_groups # EKS Addons enable_amazon_eks_vpc_cni = true # default is false @@ -289,7 +263,7 @@ module "eks-blueprints-kubernetes-addons" { #--------------------------------------- # Amazon Prometheus Configuration to integrate with Prometheus Server Add-on enable_amazon_prometheus = true - amazon_prometheus_workspace_endpoint = module.eks-blueprints.amazon_prometheus_workspace_endpoint + amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint enable_prometheus = true # Optional Map value @@ -352,7 +326,7 @@ module "eks-blueprints-kubernetes-addons" { aws_for_fluentbit_cwlog_retention_in_days = 90 create_namespace = true values = [templatefile("${path.module}/helm_values/aws-for-fluentbit-values.yaml", { - region = data.aws_region.current.name, + region = local.region aws_for_fluent_bit_cw_log_group = "/${local.eks_cluster_id}/worker-fluentbit-logs" })] set = [ @@ -387,7 +361,7 @@ module "eks-blueprints-kubernetes-addons" { [OUTPUT] Name cloudwatch_logs Match * - region ${data.aws_region.current.name} + region ${local.region} log_group_name /${local.eks_cluster_id}/fargate-fluentbit-logs log_stream_prefix "fargate-logs-" auto_create_group true @@ -464,10 +438,5 @@ module "eks-blueprints-kubernetes-addons" { values = [templatefile("${path.module}/helm_values/yunikorn-values.yaml", {})] } - depends_on = [module.eks-blueprints.managed_node_groups] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/complete-kubernetes-addons/outputs.tf b/examples/complete-kubernetes-addons/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/complete-kubernetes-addons/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to 
update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/complete-kubernetes-addons/variables.tf b/examples/complete-kubernetes-addons/variables.tf index 721d4b3a85..53cf484e4a 100644 --- a/examples/complete-kubernetes-addons/variables.tf +++ b/examples/complete-kubernetes-addons/variables.tf @@ -26,9 +26,3 @@ variable "private_subnet_ids" { description = "list of private subnets Id's for the Worker nodes" type = list(string) } - -variable "public_subnet_ids" { - description = "list of public subnets Id's for the Worker nodes" - type = list(string) - default = [] -} diff --git a/examples/complete-kubernetes-addons/versions.tf b/examples/complete-kubernetes-addons/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/complete-kubernetes-addons/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/crossplane/README.md b/examples/crossplane/README.md index 8e5120da94..014bd3d256 100644 --- a/examples/crossplane/README.md +++ b/examples/crossplane/README.md @@ -1,11 +1,13 @@ # Crossplane Add-on + This example deploys the following Basic EKS Cluster with VPC - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - - Creates EKS Cluster Control plane with one managed node group - - Crossplane Add-on to EKS Cluster - - AWS Provider for Crossplane - - Terrajet AWS Provider for Crossplane + +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Creates EKS Cluster Control plane with one managed node group +- Crossplane Add-on to EKS Cluster +- AWS Provider for Crossplane +- Terrajet AWS Provider for Crossplane ## Crossplane Design @@ -35,20 +37,25 @@ graph TD; ``` ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. 
[Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -56,7 +63,8 @@ cd examples/crossplane/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -64,7 +72,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -74,10 +83,11 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. -#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command @@ -85,77 +95,85 @@ This following command used to update the `kubeconfig` in your local machine whe aws eks --region update-kubeconfig --name ``` -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below ```shell script kubectl get nodes ``` -#### Step7: List all the pods running in `crossplane` namespace +#### Step 7: List all the pods running in `crossplane` namespace ```shell script kubectl get pods -n crossplane ``` ### AWS Provider for Crossplane + This example shows how to deploy S3 bucket using Crossplane AWS provider - - Open the file below +- Open the file below ```shell script vi ~/examples/crossplane/crossplane-aws-examples/aws-provider-s3.yaml ``` - - Edit the below `aws-provider-s3.yaml` to update the new bucket name - - Enter the new `bucket name` and `region` in YAML file. Save the file using :wq! +- Edit the below `aws-provider-s3.yaml` to update the new bucket name + +- Enter the new `bucket name` and `region` in YAML file. Save the file using :wq! - - Apply the K8s manifest +- Apply the K8s manifest ```shell script cd ~/examples/crossplane/crossplane-aws-examples/ kubectl apply -f aws-provider-s3.yaml ``` - - Login to AWS Console and verify the new S3 bucket +- Login to AWS Console and verify the new S3 bucket To Delete the bucket + ```shell script cd ~/examples/crossplane/crossplane-aws-examples/ kubectl delete -f aws-provider-s3.yaml ``` + ### Terrajet AWS Provider for Crossplane + This example shows how to deploy S3 bucket using Crossplane Terrajet AWS Provider - - Open the file below +- Open the file below ```shell script vi ~/examples/crossplane/crossplane-aws-examples/jet-aws-provider-s3.yaml ``` - - Edit the below `jet-aws-provider-s3.yaml` to update the new bucket name - - Enter the new `bucket name` and `region` in YAML file. Save the file using :wq! +- Edit the below `jet-aws-provider-s3.yaml` to update the new bucket name - - Apply the K8s manifest +- Enter the new `bucket name` and `region` in YAML file. Save the file using :wq! 
+ +- Apply the K8s manifest ```shell script cd ~/examples/crossplane/crossplane-aws-examples/ kubectl apply -f jet-aws-provider-s3.yaml ``` - - Login to AWS Console and verify the new S3 bucket +- Login to AWS Console and verify the new S3 bucket To Delete the bucket + ```shell script cd ~/examples/crossplane/crossplane-aws-examples/ kubectl delete -f jet-aws-provider-s3.yaml ``` ## How to Destroy + The following command destroys the resources created by `terraform apply` -Step1: Delete resources created by Crossplane +Step 1: Delete resources created by Crossplane -Step2: Terraform Destroy +Step 2: Terraform Destroy ```shell script cd examples/crossplane @@ -164,45 +182,41 @@ terraform destroy --auto-approve --- - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubectl](#requirement\_kubectl) | >= 1.13.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... 
| `string` | `"dev"` | no | @@ -212,5 +226,4 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/crossplane/main.tf b/examples/crossplane/main.tf index da500a23f3..de2ba4567a 100644 --- a/examples/crossplane/main.tf +++ b/examples/crossplane/main.tf @@ -1,77 +1,54 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.13.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } provider "kubectl" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false apply_retry_count = 30 + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + load_config_file = false + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } +data "aws_availability_zones" "available" {} + locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # 
Environment with in one sub_tenant or business unit - cluster_version = var.cluster_version + tenant = var.tenant # AWS account name or unique id for tenant + environment = var.environment # Environment area eg., preprod or prod + zone = var.zone # Environment with in one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -83,7 +60,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -108,9 +85,9 @@ module "aws_vpc" { } } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../.." tenant = local.tenant @@ -123,7 +100,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -136,9 +113,9 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id # Refer to docs/add-ons/crossplane.md for advanced configuration enable_crossplane = true @@ -162,8 +139,3 @@ module "eks-blueprints-kubernetes-addons" { additional_irsa_policies = ["arn:aws:iam::aws:policy/AmazonS3FullAccess"] } } - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl -} diff --git a/examples/crossplane/outputs.tf b/examples/crossplane/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/crossplane/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/crossplane/variables.tf b/examples/crossplane/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/crossplane/variables.tf +++ b/examples/crossplane/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/crossplane/versions.tf b/examples/crossplane/versions.tf new file mode 100644 index 0000000000..2a74218fe2 --- /dev/null +++ b/examples/crossplane/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.14" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/eks-cluster-with-external-dns/README.md 
b/examples/eks-cluster-with-external-dns/README.md index 2fe4edbd1a..6fc99ada22 100644 --- a/examples/eks-cluster-with-external-dns/README.md +++ b/examples/eks-cluster-with-external-dns/README.md @@ -11,28 +11,29 @@ The pattern deploys the sample workloads that reside in the [EKS Blueprints Work #### Tools Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) #### AWS Resources This example requires the following AWS resources: -* A Route53 Hosted Zone for a domain that you own. -* A SSL/TLS certificate for your domain stored in AWS Certificate Manager (ACM). +- A Route53 Hosted Zone for a domain that you own. +- A SSL/TLS certificate for your domain stored in AWS Certificate Manager (ACM). For information on Route53 Hosted Zones, [see Route53 documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html). For instructions on requesting a SSL/TLS certificate for your domain, see [ACM docs](https://docs.aws.amazon.com/acm/latest/userguide/gs.html). ### Deployment Steps -#### Step1: Clone the repo +#### Step 1: Clone the repo ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Terraform INIT +#### Step 2: Terraform INIT Initialize a working directory with configuration files @@ -41,19 +42,20 @@ cd examples/eks-cluster-with-external-dns terraform init ``` -#### Step3: Replace placeholder values in terraform.tfvars +#### Step 3: Replace placeholder values in terraform.tfvars Both values in `terraform.tfvars` must be updated. -* `eks_cluster_domain` - the domain for your cluster. Value is used to look up a Route53 Hosted Zone that you own. DNS records created by `ExternalDNS` will be created in this Hosted Zone. -* `acm_certificate_domain` - the domain for a certificate in ACM that will be leveraged by `Ingress Nginx`. Value is used to look up an ACM certificate that will be used to terminate HTTPS connections. This value should likely be a wildcard cert for your `eks_cluster_domain`. +- `eks_cluster_domain` - the domain for your cluster. Value is used to look up a Route53 Hosted Zone that you own. DNS records created by `ExternalDNS` will be created in this Hosted Zone. +- `acm_certificate_domain` - the domain for a certificate in ACM that will be leveraged by `Ingress Nginx`. Value is used to look up an ACM certificate that will be used to terminate HTTPS connections. This value should likely be a wildcard cert for your `eks_cluster_domain`. ``` eks_cluster_domain = "example.com" acm_certificate_domain = "*.example.com" ``` -#### Step3: Terraform PLAN +#### Step 3: Terraform PLAN + Verify the resources created by this execution ```shell script @@ -61,7 +63,7 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Terraform APPLY +#### Step 4: Terraform APPLY ```shell script terraform apply @@ -69,21 +71,20 @@ terraform apply Enter `yes` to apply -#### Step5: Update local kubeconfig +#### Step 5: Update local kubeconfig `~/.kube/config` file gets updated with cluster details and certificate from the below command. 
$ aws eks --region <region> update-kubeconfig --name <cluster-name> -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace $ kubectl get pods -n kube-system - #### Step 8: Verify the Ingress resource was created for Team Riker $ kubectl get ingress -n team-riker @@ -98,29 +99,29 @@ The following command destroys the resources created by `terraform apply` terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | 3.2.0 | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../modules/kubernetes-addons | n/a | -| [eks\_cluster](#module\_eks\_cluster) | ../.. | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | ## Resources @@ -128,26 +129,17 @@ |------|------| | [aws_acm_certificate.issued](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/acm_certificate) | data source | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | -| [aws_route53_zone.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [acm\_certificate\_domain](#input\_acm\_certificate\_domain) | *.example.com | `string` | n/a | yes | -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [eks\_cluster\_domain](#input\_eks\_cluster\_domain) | Route53 domain for the cluster. | `string` | `"example.com"` | no | -| [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | -| [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | -| [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... 
| `string` | `"dev"` | no | ## Outputs | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/eks-cluster-with-external-dns/main.tf b/examples/eks-cluster-with-external-dns/main.tf index fee30af044..99df77d934 100644 --- a/examples/eks-cluster-with-external-dns/main.tf +++ b/examples/eks-cluster-with-external-dns/main.tf @@ -1,74 +1,45 @@ -terraform { - required_version = ">= 1.0.1" +provider "aws" { + region = local.region +} - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } +provider "kubernetes" { + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } +} - backend "local" { - path = "local_tf_state/terraform-main.tfstate" +provider "helm" { + kubernetes { + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } -data "aws_region" "current" {} - data "aws_availability_zones" "available" {} -data "aws_eks_cluster" "cluster" { - name = module.eks_cluster.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks_cluster.eks_cluster_id -} - data "aws_acm_certificate" "issued" { domain = var.acm_certificate_domain statuses = ["ISSUED"] } -data "aws_route53_zone" "selected" { - name = var.eks_cluster_domain -} - -provider "aws" { - region = data.aws_region.current.id - alias = "default" -} - -provider "kubernetes" { - experiments { - manifest_resource = true - } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token -} - -provider "helm" { - kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - } -} - locals { - tenant = "aws001" # AWS account name or unique id for tenant - environment = "preprod" # Environment area eg., preprod or prod - zone = "dev" # Environment with in one sub_tenant or business unit - cluster_version = "1.21" + tenant = "aws001" # AWS account name or unique id for tenant + environment = "preprod" # Environment area eg., preprod or prod + zone = "dev" # Environment with in one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -84,7 +55,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = 
local.vpc_cidr @@ -113,7 +84,7 @@ module "aws_vpc" { # Example to consume eks_cluster module #--------------------------------------------------------------- -module "eks_cluster" { +module "eks_blueprints" { source = "../.." tenant = local.tenant @@ -126,7 +97,7 @@ module "eks_cluster" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # Managed Node Group managed_node_groups = { @@ -139,14 +110,14 @@ module "eks_cluster" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" #--------------------------------------------------------------- # Globals #--------------------------------------------------------------- - eks_cluster_id = module.eks_cluster.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id eks_cluster_domain = var.eks_cluster_domain #--------------------------------------------------------------- @@ -157,7 +128,7 @@ module "eks-blueprints-kubernetes-addons" { argocd_applications = { workloads = { path = "envs/dev" repo_url = "https://github.com/aws-samples/eks-blueprints-workloads.git" values = { spec = { ingress = { @@ -189,11 +160,6 @@ module "eks-blueprints-kubernetes-addons" { depends_on = [ module.aws_vpc, - module.eks_cluster.managed_node_groups + module.eks_blueprints.managed_node_groups ] } - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_cluster.configure_kubectl -} diff --git a/examples/eks-cluster-with-external-dns/outputs.tf b/examples/eks-cluster-with-external-dns/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/eks-cluster-with-external-dns/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/eks-cluster-with-external-dns/variables.tf b/examples/eks-cluster-with-external-dns/variables.tf index 80d9b22f8f..29ec1c0aaa 100644 --- a/examples/eks-cluster-with-external-dns/variables.tf +++ b/examples/eks-cluster-with-external-dns/variables.tf @@ -1,27 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} - variable "eks_cluster_domain" { type = string description = "Route53 domain for the cluster." 
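Because this change renames the root module blocks (for example `module.eks_cluster` becomes `module.eks_blueprints`), Terraform would otherwise plan a destroy and recreate of an already-deployed example. One way to preserve an existing deployment is to move the state addresses before planning against the renamed configuration; a minimal sketch, assuming this external-dns example with its state already initialized (the addresses are illustrative and may differ per example):

```shell script
# Move state entries from the old module addresses to the new ones so the
# rename shows up as a no-op in the next plan (addresses are illustrative)
terraform state mv 'module.eks_cluster' 'module.eks_blueprints'
terraform state mv 'module.eks-blueprints-kubernetes-addons' 'module.eks_blueprints_kubernetes_addons'
```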
diff --git a/examples/eks-cluster-with-external-dns/versions.tf b/examples/eks-cluster-with-external-dns/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/eks-cluster-with-external-dns/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/eks-cluster-with-import-vpc/eks/README.md b/examples/eks-cluster-with-import-vpc/eks/README.md index e54cbfd52a..f01842888b 100644 --- a/examples/eks-cluster-with-import-vpc/eks/README.md +++ b/examples/eks-cluster-with-import-vpc/eks/README.md @@ -1,46 +1,42 @@ # EKS Cluster deployment with Import VPC state + This deployment imports VPC state file from S3 bucket and deploys EKS Cluster using the imported VPC ID and Subnet IDs. - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubectl](#requirement\_kubectl) | >= 1.13.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | | [terraform](#provider\_terraform) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | | [terraform_remote_state.vpc_s3_backend](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. 
prod or preprod | `string` | `"preprod"` | no | -| [region](#input\_region) | AWS region | `string` | n/a | yes | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws"` | no | | [tf\_state\_vpc\_s3\_bucket](#input\_tf\_state\_vpc\_s3\_bucket) | Terraform state S3 Bucket Name | `string` | n/a | yes | | [tf\_state\_vpc\_s3\_key](#input\_tf\_state\_vpc\_s3\_key) | Terraform state S3 Key path | `string` | n/a | yes | @@ -51,5 +47,4 @@ This deployment imports VPC state file from S3 bucket and deploys EKS Cluster us | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/eks-cluster-with-import-vpc/eks/main.tf b/examples/eks-cluster-with-import-vpc/eks/main.tf index 2c4b8e3732..86632c4426 100644 --- a/examples/eks-cluster-with-import-vpc/eks/main.tf +++ b/examples/eks-cluster-with-import-vpc/eks/main.tf @@ -1,61 +1,45 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.13.1" - } - } -} - provider "aws" { - region = var.region + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } provider "kubectl" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false apply_retry_count = 5 -} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + load_config_file = false -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + 
# This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } #--------------------------------------------------------------- @@ -66,7 +50,7 @@ data "terraform_remote_state" "vpc_s3_backend" { config = { bucket = var.tf_state_vpc_s3_bucket key = var.tf_state_vpc_s3_key - region = var.region + region = local.region } } @@ -74,16 +58,15 @@ locals { tenant = var.tenant environment = var.environment zone = var.zone + region = "us-west-2" - cluster_version = var.cluster_version terraform_version = "Terraform v1.0.1" vpc_id = data.terraform_remote_state.vpc_s3_backend.outputs.vpc_id private_subnet_ids = data.terraform_remote_state.vpc_s3_backend.outputs.private_subnets - public_subnet_ids = data.terraform_remote_state.vpc_s3_backend.outputs.public_subnets } -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." tenant = local.tenant @@ -96,7 +79,7 @@ module "eks-blueprints" { private_subnet_ids = local.private_subnet_ids # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -108,11 +91,11 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id - eks_worker_security_group_id = module.eks-blueprints.worker_node_security_group_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id + eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id # EKS Managed Add-ons enable_amazon_eks_coredns = true @@ -134,10 +117,5 @@ module "eks-blueprints-kubernetes-addons" { enable_kubernetes_dashboard = true enable_yunikorn = true - depends_on = [module.eks-blueprints.managed_node_groups] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/eks-cluster-with-import-vpc/eks/outputs.tf b/examples/eks-cluster-with-import-vpc/eks/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/eks-cluster-with-import-vpc/eks/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/eks-cluster-with-import-vpc/eks/variables.tf b/examples/eks-cluster-with-import-vpc/eks/variables.tf index f47bf3acd7..1c77e45b87 100644 --- a/examples/eks-cluster-with-import-vpc/eks/variables.tf +++ b/examples/eks-cluster-with-import-vpc/eks/variables.tf @@ -1,14 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - -variable "region" { - type = string - description = "AWS region" -} - variable "tf_state_vpc_s3_bucket" { type = string description = "Terraform state S3 Bucket Name" diff --git a/examples/eks-cluster-with-import-vpc/eks/versions.tf b/examples/eks-cluster-with-import-vpc/eks/versions.tf new file mode 100644 index 0000000000..41659b9847 --- /dev/null +++ b/examples/eks-cluster-with-import-vpc/eks/versions.tf @@ -0,0 +1,22 @@ +terraform { + 
required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.14" + } + } +} diff --git a/examples/eks-cluster-with-import-vpc/vpc/README.md b/examples/eks-cluster-with-import-vpc/vpc/README.md index f97b42795e..548647f34a 100644 --- a/examples/eks-cluster-with-import-vpc/vpc/README.md +++ b/examples/eks-cluster-with-import-vpc/vpc/README.md @@ -1,22 +1,28 @@ # EKS Cluster deployment with Import VPC state + This deployment creates VPC and three private subnets, three public subnets, NAT gateway and Internet gateway - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [helm](#requirement\_helm) | >= 2.4.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | ## Resources @@ -29,7 +35,6 @@ No requirements. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | -| [region](#input\_region) | AWS region | `string` | n/a | yes | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws"` | no | | [vpc\_cidr](#input\_vpc\_cidr) | VPC CIDR range | `string` | `"10.1.0.0/16"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"test"` | no | @@ -41,5 +46,4 @@ No requirements. 
| [private\_subnets](#output\_private\_subnets) | List of IDs of private subnets | | [public\_subnets](#output\_public\_subnets) | List of IDs of public subnets | | [vpc\_id](#output\_vpc\_id) | The ID of the VPC | - - + diff --git a/examples/eks-cluster-with-import-vpc/vpc/main.tf b/examples/eks-cluster-with-import-vpc/vpc/main.tf index 6fa6adcabd..b00c90e535 100644 --- a/examples/eks-cluster-with-import-vpc/vpc/main.tf +++ b/examples/eks-cluster-with-import-vpc/vpc/main.tf @@ -1,5 +1,5 @@ provider "aws" { - region = var.region + region = local.region } data "aws_availability_zones" "available" {} @@ -8,18 +8,17 @@ locals { tenant = var.tenant environment = var.environment zone = var.zone + region = "us-west-2" vpc_cidr = var.vpc_cidr vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) azs = slice(data.aws_availability_zones.available.names, 0, 3) eks_cluster_id = join("-", [local.tenant, local.environment, local.zone, "eks"]) - - terraform_version = "Terraform v1.0.1" } module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -42,5 +41,4 @@ module "aws_vpc" { "kubernetes.io/cluster/${local.eks_cluster_id}" = "shared" "kubernetes.io/role/internal-elb" = "1" } - } diff --git a/examples/eks-cluster-with-import-vpc/vpc/outputs.tf b/examples/eks-cluster-with-import-vpc/vpc/outputs.tf index 5ea54d2faf..f20b704088 100644 --- a/examples/eks-cluster-with-import-vpc/vpc/outputs.tf +++ b/examples/eks-cluster-with-import-vpc/vpc/outputs.tf @@ -1,21 +1,3 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: MIT-0 - * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this - * software and associated documentation files (the "Software"), to deal in the Software - * without restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, - * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - output "vpc_id" { description = "The ID of the VPC" value = module.aws_vpc.vpc_id diff --git a/examples/eks-cluster-with-import-vpc/vpc/variables.tf b/examples/eks-cluster-with-import-vpc/vpc/variables.tf index b032d177cd..ce218c055a 100644 --- a/examples/eks-cluster-with-import-vpc/vpc/variables.tf +++ b/examples/eks-cluster-with-import-vpc/vpc/variables.tf @@ -1,8 +1,3 @@ -variable "region" { - type = string - description = "AWS region" -} - variable "vpc_cidr" { type = string default = "10.1.0.0/16" diff --git a/examples/eks-cluster-with-import-vpc/vpc/versions.tf b/examples/eks-cluster-with-import-vpc/vpc/versions.tf new file mode 100644 index 0000000000..9ac174272e --- /dev/null +++ b/examples/eks-cluster-with-import-vpc/vpc/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } +} diff --git a/examples/eks-cluster-with-new-vpc/README.md b/examples/eks-cluster-with-new-vpc/README.md index 72680e18e8..2b739ee28e 100644 --- a/examples/eks-cluster-with-new-vpc/README.md +++ b/examples/eks-cluster-with-new-vpc/README.md @@ -1,24 +1,31 @@ # EKS Cluster Deployment with new VPC + This example deploys the following Basic EKS Cluster with VPC - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - - Creates EKS Cluster Control plane with one managed node group and fargate profile + +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Creates EKS Cluster Control plane with one managed node group and fargate profile ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -26,7 +33,8 @@ cd examples/eks-cluster-with-new-vpc/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -34,7 +42,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -44,24 +53,26 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. 
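Because this example exports both the region and the cluster name as Terraform outputs (see the `outputs.tf` changes later in this diff), the kubeconfig update in the next step can also be scripted; a sketch, assuming the stack has been applied from this directory:

```shell script
# Derive the region and cluster name from this example's Terraform outputs
aws eks update-kubeconfig \
  --region "$(terraform output -raw region)" \
  --name "$(terraform output -raw eks_cluster_id)"
```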
-#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region <region> update-kubeconfig --name <cluster-name> -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace $ kubectl get pods -n kube-system ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -69,27 +80,27 @@ cd examples/eks-cluster-with-new-vpc terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | | [eks\_blueprints](#module\_eks\_blueprints) | ../.. | n/a | | [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | @@ -98,16 +109,12 @@ terraform destroy --auto-approve | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | -| [region](#input\_region) | AWS Region | `string` | `"us-west-2"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... 
| `string` | `"dev"` | no | @@ -116,15 +123,14 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | -| [eks\_cluster\_id](#output\_eks\_cluster\_id) | n/a | -| [eks\_managed\_nodegroup\_arns](#output\_eks\_managed\_nodegroup\_arns) | Managed Node group id | -| [eks\_managed\_nodegroup\_ids](#output\_eks\_managed\_nodegroup\_ids) | Managed Node group id | -| [eks\_managed\_nodegroup\_role\_name](#output\_eks\_managed\_nodegroup\_role\_name) | Managed Node group role name | -| [eks\_managed\_nodegroup\_status](#output\_eks\_managed\_nodegroup\_status) | Managed Node group status | -| [eks\_managed\_nodegroups](#output\_eks\_managed\_nodegroups) | Managed Node group name | +| [eks\_cluster\_id](#output\_eks\_cluster\_id) | EKS cluster ID | +| [eks\_managed\_nodegroup\_arns](#output\_eks\_managed\_nodegroup\_arns) | EKS managed node group arns | +| [eks\_managed\_nodegroup\_ids](#output\_eks\_managed\_nodegroup\_ids) | EKS managed node group ids | +| [eks\_managed\_nodegroup\_role\_name](#output\_eks\_managed\_nodegroup\_role\_name) | EKS managed node group role name | +| [eks\_managed\_nodegroup\_status](#output\_eks\_managed\_nodegroup\_status) | EKS managed node group status | +| [eks\_managed\_nodegroups](#output\_eks\_managed\_nodegroups) | EKS managed node groups | | [region](#output\_region) | AWS region | -| [vpc\_cidr](#output\_vpc\_cidr) | n/a | -| [vpc\_private\_subnet\_cidr](#output\_vpc\_private\_subnet\_cidr) | n/a | -| [vpc\_public\_subnet\_cidr](#output\_vpc\_public\_subnet\_cidr) | n/a | - - +| [vpc\_cidr](#output\_vpc\_cidr) | VPC CIDR | +| [vpc\_private\_subnet\_cidr](#output\_vpc\_private\_subnet\_cidr) | VPC private subnet CIDR | +| [vpc\_public\_subnet\_cidr](#output\_vpc\_public\_subnet\_cidr) | VPC public subnet CIDR | + diff --git a/examples/eks-cluster-with-new-vpc/eks-cluster-with-new-vpc.tfvars b/examples/eks-cluster-with-new-vpc/eks-cluster-with-new-vpc.tfvars index 41359bdca7..d6a09c5ee4 100644 --- a/examples/eks-cluster-with-new-vpc/eks-cluster-with-new-vpc.tfvars +++ b/examples/eks-cluster-with-new-vpc/eks-cluster-with-new-vpc.tfvars @@ -1,5 +1,3 @@ -tenant = "aws" -environment = "terra" -zone = "test" -region = "us-west-2" -cluster_version = "1.21" +tenant = "aws" +environment = "terra" +zone = "test" diff --git a/examples/eks-cluster-with-new-vpc/main.tf b/examples/eks-cluster-with-new-vpc/main.tf index a52d159d80..7cb9223f1b 100644 --- a/examples/eks-cluster-with-new-vpc/main.tf +++ b/examples/eks-cluster-with-new-vpc/main.tf @@ -1,75 +1,50 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks_blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks_blueprints.eks_cluster_id -} - provider "aws" { - region = var.region - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate 
= base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - cluster_version = var.cluster_version + tenant = var.tenant # AWS account name or unique id for tenant + environment = var.environment # Environment area eg., preprod or prod + zone = var.zone # Environment with in one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) azs = slice(data.aws_availability_zones.available.names, 0, 3) cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - - terraform_version = "Terraform v1.0.1" } module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -99,17 +74,16 @@ module "aws_vpc" { module "eks_blueprints" { source = "../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + tenant = local.tenant + environment = local.environment + zone = local.zone # EKS Cluster VPC and Subnet mandatory config vpc_id = module.aws_vpc.vpc_id private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { diff --git a/examples/eks-cluster-with-new-vpc/outputs.tf b/examples/eks-cluster-with-new-vpc/outputs.tf index c0a64f7886..49503255b7 100644 --- a/examples/eks-cluster-with-new-vpc/outputs.tf +++ b/examples/eks-cluster-with-new-vpc/outputs.tf @@ -1,42 +1,46 @@ output "vpc_private_subnet_cidr" { - value = module.aws_vpc.private_subnets_cidr_blocks + description = "VPC private subnet CIDR" + value = module.aws_vpc.private_subnets_cidr_blocks } output "vpc_public_subnet_cidr" { - value = module.aws_vpc.public_subnets_cidr_blocks + description = "VPC public subnet CIDR" + value = module.aws_vpc.public_subnets_cidr_blocks } output "vpc_cidr" { - value = module.aws_vpc.vpc_cidr_block + description = "VPC CIDR" + value = module.aws_vpc.vpc_cidr_block } output "eks_cluster_id" { - value = module.eks_blueprints.eks_cluster_id + description = "EKS cluster ID" + value = module.eks_blueprints.eks_cluster_id } -# Managed Node group name output "eks_managed_nodegroups" { - value = module.eks_blueprints.managed_node_groups + description = "EKS managed node groups" + value = module.eks_blueprints.managed_node_groups } -# Managed Node group id output "eks_managed_nodegroup_ids" { - value = module.eks_blueprints.managed_node_groups_id + description = "EKS managed node group ids" + value = module.eks_blueprints.managed_node_groups_id } -# Managed Node group id output "eks_managed_nodegroup_arns" { - value = module.eks_blueprints.managed_node_group_arn + description = "EKS managed node group arns" + value = module.eks_blueprints.managed_node_group_arn } -# Managed Node group role name output "eks_managed_nodegroup_role_name" { - value = module.eks_blueprints.managed_node_group_iam_role_names + description = "EKS managed node group role name" + value = module.eks_blueprints.managed_node_group_iam_role_names } -# Managed Node group status output "eks_managed_nodegroup_status" { - value = module.eks_blueprints.managed_node_groups_status + description = "EKS managed node group status" + value = module.eks_blueprints.managed_node_groups_status } output "configure_kubectl" { @@ -46,6 +50,6 @@ output "configure_kubectl" { # Region used for Terratest output "region" { - value = var.region + value = local.region description = "AWS region" } diff --git a/examples/eks-cluster-with-new-vpc/variables.tf b/examples/eks-cluster-with-new-vpc/variables.tf index e8b1a3adbc..5a6eded04a 100644 --- a/examples/eks-cluster-with-new-vpc/variables.tf +++ b/examples/eks-cluster-with-new-vpc/variables.tf @@ -1,15 +1,3 @@ -variable "region" { - type = string - description = "AWS Region" - default = "us-west-2" -} - -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/eks-cluster-with-new-vpc/versions.tf b/examples/eks-cluster-with-new-vpc/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/eks-cluster-with-new-vpc/versions.tf @@ 
-0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/fully-private-eks-cluster/README.md b/examples/fully-private-eks-cluster/README.md index 71b22b9bc2..fa5757dd9c 100644 --- a/examples/fully-private-eks-cluster/README.md +++ b/examples/fully-private-eks-cluster/README.md @@ -1,28 +1,34 @@ # Fully Private EKS Cluster with VPC and Endpoints deployment This example deploys a fully private EKS Cluster into a new VPC. - - Creates a new VPC and 3 Private Subnets - - VPC Endpoints for various services and S3 VPC Endpoint gateway - - Creates EKS Cluster Control plane with one Managed node group - - EKS Cluster API endpoint that can be set to public and private, and then into private only. + +- Creates a new VPC and 3 Private Subnets +- VPC Endpoints for various services and S3 VPC Endpoint gateway +- Creates EKS Cluster Control plane with one Managed node group + - EKS Cluster API endpoint that can be set to public and private, and then into private only. Please see this [document](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html) for more details on configuring fully private EKS Clusters ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -30,7 +36,8 @@ cd examples/fully-private-eks-cluster/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -38,7 +45,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Terraform APPLY +#### Step 4: Terraform APPLY + to create resources ```shell script @@ -48,37 +56,38 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. 
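For this fully private example, the complete `update-kubeconfig` command is also exposed as the `configure_kubectl` Terraform output (added in `outputs.tf` later in this diff), so it can be run verbatim from a machine with network reach to the private endpoint, such as the bastion or Cloud9 host described below; a sketch, assuming the Terraform state is readable from that machine:

```shell script
# Evaluate the ready-made kubeconfig command exposed by the example's output
eval "$(terraform output -raw configure_kubectl)"
# Should only succeed from inside the VPC once the endpoint is private-only
kubectl get nodes
```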
-#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region <region> update-kubeconfig --name <cluster-name> -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace $ kubectl get pods -n kube-system ### Setting up private only API endpoint and accessing the cluster - To set the API endpoint to private only, on the `main.tf` file under the EKS Blueprints module: - - Set `eks_cluster_api_endpoint_public = false` - - Set `eks_cluster_api_endpoint_private = true` -- To access the private cluster, you need to access it from a machine that can access the VPC and the private subnets. Few ways to do this are: - - Create a bastion host in the VPC and then access the cluster from the bastion host - - Create a cloud9 instance in the VPC and then access the cluster from the cloud9 instance -These examples assume you do not have any other network infrastructure in place (e.g. direct connect(DX), VPN etc.). + - Set `eks_cluster_api_endpoint_public = false` + - Set `eks_cluster_api_endpoint_private = true` + +- To access the private cluster, you need to access it from a machine that can access the VPC and the private subnets. Few ways to do this are: - Create a bastion host in the VPC and then access the cluster from the bastion host - Create a cloud9 instance in the VPC and then access the cluster from the cloud9 instance + These examples assume you do not have any other network infrastructure in place (e.g. direct connect(DX), VPN etc.). Learn more about private EKS clusters [here](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html) ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -86,28 +95,28 @@ cd examples/fully-private-eks-cluster terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../.. | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. 
| n/a | | [vpc\_endpoint\_gateway](#module\_vpc\_endpoint\_gateway) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | v3.2.0 | | [vpc\_endpoints](#module\_vpc\_endpoints) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | v3.2.0 | @@ -116,16 +125,12 @@ terraform destroy --auto-approve | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | | [aws_security_group.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/security_group) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"dev"` | no | @@ -135,5 +140,4 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/fully-private-eks-cluster/main.tf b/examples/fully-private-eks-cluster/main.tf index 137d2ea804..bd7f0c62f7 100644 --- a/examples/fully-private-eks-cluster/main.tf +++ b/examples/fully-private-eks-cluster/main.tf @@ -1,58 +1,26 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } +data 
"aws_availability_zones" "available" {} + locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = var.cluster_version + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -64,7 +32,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -182,9 +150,9 @@ module "vpc_endpoints" { } } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../.." tenant = local.tenant @@ -197,7 +165,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # Step 1. Set cluster API endpoint both private and public cluster_endpoint_public_access = true @@ -215,10 +183,4 @@ module "eks-blueprints" { subnet_ids = module.aws_vpc.private_subnets } } - -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl } diff --git a/examples/fully-private-eks-cluster/outputs.tf b/examples/fully-private-eks-cluster/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/fully-private-eks-cluster/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/fully-private-eks-cluster/variables.tf b/examples/fully-private-eks-cluster/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/fully-private-eks-cluster/variables.tf +++ b/examples/fully-private-eks-cluster/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/fully-private-eks-cluster/versions.tf b/examples/fully-private-eks-cluster/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/fully-private-eks-cluster/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/game-tech/agones-game-controller/README.md b/examples/game-tech/agones-game-controller/README.md index 496c7cfad8..a95eb6fe04 100644 --- a/examples/game-tech/agones-game-controller/README.md +++ b/examples/game-tech/agones-game-controller/README.md @@ -1,34 +1,42 @@ # Amazon EKS Deployment with Agones Gaming Kubernetes Controller + This example shows how to deploy and run 
Gaming applications on Amazon EKS with Agones Kubernetes Controller - - Deploy Private VPC, Subnets and all the required VPC endpoints - - Deploy EKS Cluster with one managed node group in an VPC - - Deploy Agones Kubernetes Controller using Helm Providers - - Deploy a simple gaming server and test the application +- Deploy Private VPC, Subnets and all the required VPC endpoints +- Deploy EKS Cluster with one managed node group in a VPC +- Deploy Agones Kubernetes Controller using Helm Providers +- Deploy a simple gaming server and test the application # What is Agones + Agones is an open-source Kubernetes Controller with custom resource definitions that is used to create, run, manage and scale dedicated game server processes within Kubernetes clusters using standard Kubernetes tooling and APIs. This model also allows any matchmaker to interact directly with Agones via the Kubernetes API to provision a dedicated game server # What is GameLift + Amazon GameLift enables developers to deploy, operate, and scale dedicated, low-cost servers in the cloud for session-based, multiplayer games. Built on AWS global computing infrastructure, GameLift helps deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet worldwide player demand. ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools on your Mac or Windows laptop before you start working with this module and run Terraform plan and apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps - -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -36,7 +44,8 @@ cd examples/game-tech/agones-game-controller terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -44,7 +53,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + Run the following command to create the resources ```shell script @@ -54,24 +64,26 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + The EKS Cluster name and other details can be extracted from the terraform output or from the AWS Console. The following command updates the `kubeconfig` on your local machine so that you can run kubectl commands against your EKS Cluster.
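One change worth noting before you test the cluster: the Kubernetes and Helm providers in this example no longer read a token from the `aws_eks_cluster_auth` data source. They now authenticate through the AWS CLI exec plugin, wired directly to the blueprint module's outputs. The Kubernetes provider block below is copied from this example's updated `main.tf` (the Helm provider nests the same configuration under a `kubernetes` block):

```hcl
provider "kubernetes" {
  host                   = module.eks_blueprints.eks_cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1alpha1"
    command     = "aws"
    # This requires the awscli to be installed locally where Terraform is executed
    args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id]
  }
}
```

Because the token is fetched by the exec plugin each time the provider is configured, rather than captured once by a data source during refresh, the removed `aws_eks_cluster` and `aws_eks_cluster_auth` data sources are no longer needed.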
-#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region update-kubeconfig --name -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `agones-system` namespace +#### Step 7: List all the pods running in `agones-system` namespace $ kubectl get pods -n agones-system -## Step8: Install K9s +## Step 8: Install K9s + This step is to install K9s client tool to interact with EKS Cluster curl -sS https://webinstall.dev/k9s | bash @@ -82,7 +94,7 @@ Just type k9s after the installation and then you will see the output like this ![Alt Text](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/9c6f8ea3e710f7b0137be07835653a2bf4f9fdfe/images/k9s-agones-cluster.png "K9s") -## Step9: Add EKS Workshop IAM role as EKS Cluster Administrator +## Step 9: Add EKS Workshop IAM role as EKS Cluster Administrator $ aws sts get-caller-identity @@ -92,7 +104,7 @@ Just type k9s after the installation and then you will see the output like this kubectl patch configmap/aws-auth -n kube-system --patch "$(cat /tmp/aws-auth-patch.yml)" -## Step10: Deploying the Sample game server +## Step 10: Deploying the Sample game server kubectl create -f https://raw.githubusercontent.com/googleforgames/agones/release-1.15.0/examples/simple-game-server/gameserver.yaml @@ -103,7 +115,7 @@ Output looks like below NAME STATE ADDRESS PORT NODE AGE simple-game-server-7r6jr Ready 34.243.345.22 7902 ip-10-1-23-233.eu-west-1.compute.internal 11h -## Step11: Testing the Sample Game Server +## Step 11: Testing the Sample Game Server sudo yum install netcat @@ -130,6 +142,7 @@ Download the shell script and execute curl -O https://raw.githubusercontent.com/awslabs/fleetiq-adapter-for-agones/master/Agones_EKS_FleetIQ_Integration_Package%5BBETA%5D/quick_install/fleet_eks_agones_quickinstall.sh ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -137,44 +150,40 @@ cd examples/eks-cluster-with-new-vpc terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../..//modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. 
| n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../..//modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"dev"` | no | @@ -184,5 +193,4 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/game-tech/agones-game-controller/main.tf b/examples/game-tech/agones-game-controller/main.tf index 6221cc87b9..5a946146ca 100644 --- a/examples/game-tech/agones-game-controller/main.tf +++ b/examples/game-tech/agones-game-controller/main.tf @@ -1,66 +1,40 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + 
cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = var.cluster_version + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -72,7 +46,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -96,10 +70,11 @@ module "aws_vpc" { "kubernetes.io/role/internal-elb" = "1" } } + #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." tenant = local.tenant @@ -112,7 +87,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -157,10 +132,11 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { - source = "../../..//modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id - eks_worker_security_group_id = module.eks-blueprints.worker_node_security_group_id +module "eks_blueprints_kubernetes_addons" { + source = "../../..//modules/kubernetes-addons" + + eks_cluster_id = module.eks_blueprints.eks_cluster_id + eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id #K8s Add-ons enable_metrics_server = true @@ -186,10 +162,5 @@ module "eks-blueprints-kubernetes-addons" { })] } - depends_on = [module.eks-blueprints.managed_node_groups] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/game-tech/agones-game-controller/outputs.tf b/examples/game-tech/agones-game-controller/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/game-tech/agones-game-controller/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/game-tech/agones-game-controller/variables.tf b/examples/game-tech/agones-game-controller/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/game-tech/agones-game-controller/variables.tf +++ b/examples/game-tech/agones-game-controller/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name 
or unique account unique id e.g., apps or management or aws007" diff --git a/examples/game-tech/agones-game-controller/versions.tf b/examples/game-tech/agones-game-controller/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/game-tech/agones-game-controller/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/gitops/argocd/README.md b/examples/gitops/argocd/README.md index b5a27ec153..a43526fbcd 100644 --- a/examples/gitops/argocd/README.md +++ b/examples/gitops/argocd/README.md @@ -1,45 +1,40 @@ # Usage - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../../ | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../../ | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | -| [region](#input\_region) | Region in which to deploy the cluster | `string` | `"us-west-2"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | Zone, e.g. dev or qa or load or ops etc... 
| `string` | `"dev"` | no | @@ -48,5 +43,4 @@ | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/gitops/argocd/main.tf b/examples/gitops/argocd/main.tf index 37e32948ac..e02e2f363c 100644 --- a/examples/gitops/argocd/main.tf +++ b/examples/gitops/argocd/main.tf @@ -1,66 +1,40 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = var.region - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = var.cluster_version + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -96,7 +70,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -122,10 +96,10 @@ module "aws_vpc" { } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../../" tenant = local.tenant @@ -138,7 +112,7 @@ module "eks-blueprints" { private_subnet_ids = 
module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # Managed Node Group managed_node_groups = { @@ -154,10 +128,10 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id #--------------------------------------------------------------- # ARGO CD ADD-ON @@ -188,5 +162,5 @@ module "eks-blueprints-kubernetes-addons" { enable_yunikorn = true enable_argo_rollouts = true - depends_on = [module.eks-blueprints.managed_node_groups] + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/gitops/argocd/outputs.tf b/examples/gitops/argocd/outputs.tf index ddca97d9f8..55552d3138 100644 --- a/examples/gitops/argocd/outputs.tf +++ b/examples/gitops/argocd/outputs.tf @@ -1,4 +1,4 @@ output "configure_kubectl" { description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + value = module.eks_blueprints.configure_kubectl } diff --git a/examples/gitops/argocd/variables.tf b/examples/gitops/argocd/variables.tf index df45dc111c..e6bd6caeab 100644 --- a/examples/gitops/argocd/variables.tf +++ b/examples/gitops/argocd/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" @@ -21,9 +15,3 @@ variable "zone" { description = "Zone, e.g. dev or qa or load or ops etc..." 
default = "dev" } - -variable "region" { - type = string - description = "Region in which to deploy the cluster" - default = "us-west-2" -} diff --git a/examples/gitops/argocd/versions.tf b/examples/gitops/argocd/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/gitops/argocd/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/ingress-controllers/nginx/README.md b/examples/ingress-controllers/nginx/README.md index fc73744dfd..a401cb4398 100644 --- a/examples/ingress-controllers/nginx/README.md +++ b/examples/ingress-controllers/nginx/README.md @@ -21,13 +21,13 @@ Ensure that you have installed the following tools in your Mac or Windows Laptop ### Deployment Steps -#### Step1: Clone the repo using the command below +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT Initialize a working directory with configuration files @@ -36,7 +36,7 @@ cd examples/ingress-controllers/nginx terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN Verify the resources created by this execution @@ -45,14 +45,14 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY to create resources ```shell script terraform apply -target="module.aws_vpc" -terraform apply -target="module.eks-blueprints" -terraform apply -target="module.eks-blueprints-kubernetes-addons" +terraform apply -target="module.eks_blueprints" +terraform apply -target="module.eks_blueprints_kubernetes_addons" terraform apply -target="module.aws_load_balancer_controller" terraform apply -target="module.ingress_nginx" ``` @@ -64,23 +64,23 @@ Enter `yes` for each apply EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. 
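Since the targeted applies above reference each add-on module separately, it may help to see how one of them is wired up. Below is a minimal sketch of the `ingress_nginx` module consumption based on this example's `main.tf`; the chart overrides passed through `ingress_nginx_helm_config` are elided here, so treat this as an abridged excerpt rather than the full configuration.

```hcl
module "ingress_nginx" {
  source = "../../../modules/kubernetes-addons"

  eks_cluster_id = module.eks_blueprints.eks_cluster_id

  enable_ingress_nginx = true
  # Helm chart overrides go in ingress_nginx_helm_config; see this example's main.tf
}
```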
-#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command The `~/.kube/config` file gets updated with the cluster details and certificate by the command below -``` shell +```shell aws eks --region update-kubeconfig --name ``` -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below -``` shell +```shell kubectl get nodes ``` -#### Step7: List all the pods running in `nginx` namespace +#### Step 7: List all the pods running in `nginx` namespace -``` shell +```shell kubectl get pods -n nginx ``` @@ -97,30 +97,30 @@ terraform destroy -target="module.eks_blueprints" -auto-approve terraform destroy -target="module.aws_vpc" -auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| | [aws\_load\_balancer\_controller](#module\_aws\_load\_balancer\_controller) | ../../../modules/kubernetes-addons | n/a | -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | | [ingress\_nginx](#module\_ingress\_nginx) | ../../../modules/kubernetes-addons | n/a | ## Resources @@ -128,15 +128,11 @@ | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc...
| `string` | `"dev"` | no | @@ -146,8 +142,7 @@ terraform destroy -target="module.aws_vpc" -auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + ## Learn more diff --git a/examples/ingress-controllers/nginx/main.tf b/examples/ingress-controllers/nginx/main.tf index b817c235c2..2ca3cf876c 100644 --- a/examples/ingress-controllers/nginx/main.tf +++ b/examples/ingress-controllers/nginx/main.tf @@ -1,65 +1,40 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - cluster_version = var.cluster_version + tenant = var.tenant # AWS account name or unique id for tenant + environment = var.environment # Environment area eg., preprod or prod + zone = var.zone # Environment with in one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -71,7 +46,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -96,9 +71,9 @@ module "aws_vpc" { } } 
#--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." tenant = local.tenant @@ -111,7 +86,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -124,9 +99,9 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id # EKS Managed Add-ons enable_amazon_eks_coredns = true @@ -139,14 +114,14 @@ module "eks-blueprints-kubernetes-addons" { module "aws_load_balancer_controller" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id enable_aws_load_balancer_controller = true } module "ingress_nginx" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id enable_ingress_nginx = true ingress_nginx_helm_config = { diff --git a/examples/ingress-controllers/nginx/outputs.tf b/examples/ingress-controllers/nginx/outputs.tf index ddca97d9f8..55552d3138 100644 --- a/examples/ingress-controllers/nginx/outputs.tf +++ b/examples/ingress-controllers/nginx/outputs.tf @@ -1,4 +1,4 @@ output "configure_kubectl" { description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + value = module.eks_blueprints.configure_kubectl } diff --git a/examples/ingress-controllers/nginx/variables.tf b/examples/ingress-controllers/nginx/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/ingress-controllers/nginx/variables.tf +++ b/examples/ingress-controllers/nginx/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/ingress-controllers/nginx/versions.tf b/examples/ingress-controllers/nginx/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/ingress-controllers/nginx/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/ipv6-eks-cluster/README.md b/examples/ipv6-eks-cluster/README.md index c9a110b2e7..0e7f39713f 100644 --- a/examples/ipv6-eks-cluster/README.md +++ b/examples/ipv6-eks-cluster/README.md @@ -1,27 +1,33 @@ # IPv6 EKS Cluster This example deploys VPC, Subnets and EKS Cluster with IPv6 networking enabled - - Creates a new sample VPC with IPv6, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public 
Subnets and NAT Gateway for Private Subnets - - Creates EKS Cluster Control plane with one managed node group + +- Creates a new sample VPC with IPv6, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Creates EKS Cluster Control plane with one managed node group Check out the EKS documentation for more details about [IPv6](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools on your Mac or Windows laptop before you start working with this module and run Terraform plan and apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps - -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -29,7 +35,8 @@ cd examples/ipv6-eks-cluster/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -37,25 +44,28 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + Run the following command to create the resources ```shell script terraform apply ``` + Enter `yes` to apply -#### Step5: Verify EC2 instances running with IPv6 support +#### Step 5: Verify EC2 instances running with IPv6 support ```shell script aws ec2 describe-instances --filters "Name=tag:eks:cluster-name,Values=aws-preprod-cplane-eks" --query "Reservations[].Instances[? State.Name == 'running' ][].NetworkInterfaces[].Ipv6Addresses" --output table ``` ### Configure `kubectl` and test cluster + The EKS Cluster name and other details can be extracted from the terraform output or from the AWS Console. The following command updates the `kubeconfig` on your local machine so that you can run kubectl commands against your EKS Cluster.
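Before configuring `kubectl`, it is worth seeing the two settings that make this example IPv6-specific. Both appear in the example's `main.tf` and are excerpted, heavily abridged, below: the blueprint module sets the cluster IP family to IPv6, and the add-ons module attaches the additional VPC CNI policy that IPv6 address assignment requires.

```hcl
module "eks_blueprints" {
  source = "../.."
  # ... VPC, subnet, and managed node group settings elided ...

  cluster_ip_family = "ipv6"
}

module "eks_blueprints_kubernetes_addons" {
  source = "../../modules/kubernetes-addons"

  eks_cluster_id = module.eks_blueprints.eks_cluster_id
  enable_ipv6    = true # Attaches the IPv6 VPC CNI policy to the IRSA role
}
```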
-#### Step6: Run `update-kubeconfig` command +#### Step 6: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command @@ -63,11 +73,12 @@ This following command used to update the `kubeconfig` in your local machine whe aws eks --region update-kubeconfig --name ``` -#### Step7: List all the PODS running in `kube-system` and observe the **IP allocated** +#### Step 7: List all the PODS running in `kube-system` and observe the **IP allocated** ```shell script -kubectl get pods -n kube-system -o wide +kubectl get pods -n kube-system -o wide ``` + Output NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES @@ -80,8 +91,8 @@ Output kube-proxy-k992g 1/1 Running 0 3h1m 2a05:d018:434:7702:3784:d6b:fc0d:e156 ip-10-0-10-23.eu-west-1.compute.internal kube-proxy-nzfrq 1/1 Running 0 3h1m 2a05:d018:434:7703:b3eb:2aa:aa4a:c838 ip-10-0-11-186.eu-west-1.compute.internal - ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -89,39 +100,35 @@ cd examples/eks-cluster-with-new-vpc terraform destroy --auto-approve ``` - - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.13.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs @@ -132,5 +139,4 @@ No inputs. 
| Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/ipv6-eks-cluster/main.tf b/examples/ipv6-eks-cluster/main.tf index 71a4bea223..d4ae98014b 100644 --- a/examples/ipv6-eks-cluster/main.tf +++ b/examples/ipv6-eks-cluster/main.tf @@ -1,65 +1,40 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + locals { - tenant = "ipv6" # AWS account name or unique id for tenant - environment = "preprod" # Environment area eg., preprod or prod - zone = "dev" # Environment with in one sub_tenant or business unit - cluster_version = "1.21" + tenant = "ipv6" # AWS account name or unique id for tenant + environment = "preprod" # Environment area eg., preprod or prod + zone = "dev" # Environment with in one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -71,7 +46,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.13.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -103,9 +78,9 @@ module "aws_vpc" { } } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { 
+module "eks_blueprints" { source = "../.." tenant = local.tenant @@ -121,7 +96,7 @@ module "eks-blueprints" { cluster_ip_family = "ipv6" # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { @@ -136,9 +111,9 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id enable_ipv6 = true # Enable Ipv6 network. Attaches new VPC CNI policy to the IRSA role # EKS Managed Add-ons @@ -148,10 +123,5 @@ module "eks-blueprints-kubernetes-addons" { #K8s Add-ons enable_aws_load_balancer_controller = true - depends_on = [module.eks-blueprints.managed_node_groups] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/ipv6-eks-cluster/outputs.tf b/examples/ipv6-eks-cluster/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/ipv6-eks-cluster/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/ipv6-eks-cluster/variables.tf b/examples/ipv6-eks-cluster/variables.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/examples/ipv6-eks-cluster/versions.tf b/examples/ipv6-eks-cluster/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/ipv6-eks-cluster/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index 8795eda685..bfb61b7e74 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -4,14 +4,16 @@ Karpenter is an open-source node provisioning project built for Kubernetes. Karp This example shows how to deploy and leverage Karpenter for Autoscaling. The following resources will be deployed by this example. - - Creates a new VPC, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - - Creates EKS Cluster Control plane with one Self-managed node group with Max ASG of 1 - - Deploys Karpenter Helm Chart - - Deploys default Karpenter Provisioner +- Creates a new VPC, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Creates EKS Cluster Control plane with one Self-managed node group with Max ASG of 1 +- Deploys Karpenter Helm Chart +- Deploys default Karpenter Provisioner # How to Deploy + ## Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply 1. 
[aws cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) @@ -20,13 +22,15 @@ Ensure that you have installed the following tools in your Mac or Windows Laptop 4. [terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ## Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + to initialize a working directory with configuration files ```shell script @@ -34,7 +38,8 @@ cd examples/eks-cluster-with-karpenter/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + to verify the resources created by this execution ```shell script @@ -42,7 +47,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -52,19 +58,22 @@ terraform apply Enter `yes` to apply ### Configure kubectl and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. -#### Step5: Run update-kubeconfig command. +#### Step 5: Run update-kubeconfig command. + `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region update-kubeconfig --name -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below + You should see one Self-managed node up and running $ kubectl get nodes -#### Step7: List all the pods running in karpenter namespace +#### Step 7: List all the pods running in karpenter namespace $ kubectl get pods -n karpenter @@ -83,32 +92,32 @@ cd examples/eks-cluster-with-karpenter terraform destroy ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubectl](#requirement\_kubectl) | >= 1.13.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | -| [kubectl](#provider\_kubectl) | >= 1.13.1 | +| [aws](#provider\_aws) | >= 3.72 | +| [kubectl](#provider\_kubectl) | >= 1.14 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../modules/kubernetes-addons | n/a | -| [karpenter-launch-templates](#module\_karpenter-launch-templates) | ../../modules/launch-templates | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. 
| n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | +| [karpenter\_launch\_templates](#module\_karpenter\_launch\_templates) | ../../modules/launch-templates | n/a | ## Resources @@ -118,16 +127,12 @@ terraform destroy | [aws_ami.amazonlinux2eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | | [kubectl_path_documents.karpenter_provisioners](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/data-sources/path_documents) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... 
| `string` | `"dev"` | no | @@ -137,5 +142,4 @@ terraform destroy | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index e3c789167d..4e5c724fbc 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -1,41 +1,48 @@ -terraform { - required_version = ">= 1.0.1" +provider "aws" { + region = local.region +} - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.13.1" - } +provider "kubernetes" { + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } +} - backend "local" { - path = "local_tf_state/terraform-main.tfstate" +provider "helm" { + kubernetes { + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} +provider "kubectl" { + apply_retry_count = 10 + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + load_config_file = false -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} +data "aws_availability_zones" "available" {} data "aws_ami" "amazonlinux2eks" { most_recent = true @@ -55,57 +62,28 @@ data "aws_ami" "bottlerocket" { owners = ["amazon"] } -provider "aws" { - region = data.aws_region.current.id - alias = "default" -} - -provider "kubernetes" { - experiments { - manifest_resource = true - } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token -} - -provider "helm" { - kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - } -} - -provider "kubectl" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = 
data.aws_eks_cluster_auth.cluster.token - load_config_file = false - apply_retry_count = 10 -} - locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit + region = "us-west-2" azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_version = var.cluster_version + cluster_version = "1.21" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) node_group_name = "self-ondemand" - amazonlinux2eks = "amazon-eks-node-${var.cluster_version}-*" - bottlerocket = "bottlerocket-aws-k8s-${var.cluster_version}-x86_64-*" + amazonlinux2eks = "amazon-eks-node-${local.cluster_version}-*" + bottlerocket = "bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*" terraform_version = "Terraform v1.0.1" } module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -129,10 +107,11 @@ module "aws_vpc" { "kubernetes.io/role/internal-elb" = "1" } } + #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../.." tenant = local.tenant @@ -173,17 +152,17 @@ module "eks-blueprints" { # Creates Launch templates for Karpenter # Launch template outputs will be used in Karpenter Provisioners yaml files. Checkout this examples/karpenter/provisioners/default_provisioner_with_launch_templates.yaml -module "karpenter-launch-templates" { +module "karpenter_launch_templates" { source = "../../modules/launch-templates" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id tags = { Name = "karpenter" } launch_template_config = { linux = { ami = data.aws_ami.amazonlinux2eks.id launch_template_prefix = "karpenter" - iam_instance_profile = module.eks-blueprints.self_managed_node_group_iam_instance_profile_id[0] - vpc_security_group_ids = [module.eks-blueprints.worker_node_security_group_id] + iam_instance_profile = module.eks_blueprints.self_managed_node_group_iam_instance_profile_id[0] + vpc_security_group_ids = [module.eks_blueprints.worker_node_security_group_id] block_device_mappings = [ { device_name = "/dev/xvda" @@ -196,8 +175,8 @@ module "karpenter-launch-templates" { ami = data.aws_ami.bottlerocket.id launch_template_os = "bottlerocket" launch_template_prefix = "bottle" - iam_instance_profile = module.eks-blueprints.self_managed_node_group_iam_instance_profile_id[0] - vpc_security_group_ids = [module.eks-blueprints.worker_node_security_group_id] + iam_instance_profile = module.eks_blueprints.self_managed_node_group_iam_instance_profile_id[0] + vpc_security_group_ids = [module.eks_blueprints.worker_node_security_group_id] block_device_mappings = [ { device_name = "/dev/xvda" @@ -209,15 +188,15 @@ module "karpenter-launch-templates" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id # Deploys Karpenter add-on enable_karpenter = true - depends_on = 
[module.eks-blueprints.self_managed_node_groups] + depends_on = [module.eks_blueprints.self_managed_node_groups] } # Deploying default provisioner for Karpenter autoscaler @@ -239,10 +218,5 @@ resource "kubectl_manifest" "karpenter_provisioner" { for_each = toset(data.kubectl_path_documents.karpenter_provisioners.documents) yaml_body = each.value - depends_on = [module.eks-blueprints-kubernetes-addons] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints_kubernetes_addons] } diff --git a/examples/karpenter/outputs.tf b/examples/karpenter/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/karpenter/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/karpenter/variables.tf b/examples/karpenter/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/karpenter/variables.tf +++ b/examples/karpenter/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf new file mode 100644 index 0000000000..2a74218fe2 --- /dev/null +++ b/examples/karpenter/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.14" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/multi-tenancy-with-teams/README.md b/examples/multi-tenancy-with-teams/README.md index 8abede15b0..8bdef2ea89 100644 --- a/examples/multi-tenancy-with-teams/README.md +++ b/examples/multi-tenancy-with-teams/README.md @@ -11,19 +11,23 @@ This example deploys a new EKS Cluster with Teams to a new VPC. ## How to Deploy ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. 
[Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run `terraform init` +#### Step 2: Run `terraform init` + to initialize a working directory with configuration files ```shell script @@ -31,7 +35,8 @@ cd examples/multi-tenancy-with-teams/ terraform init ``` -#### Step3: Run `terraform plan` +#### Step 3: Run `terraform plan` + to verify the resources created by this execution ```shell script @@ -39,7 +44,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, `terraform apply` +#### Step 4: Finally, `terraform apply` + to create resources ```shell script @@ -49,51 +55,53 @@ terraform apply Enter `yes` to apply ### Configure kubectl and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. -#### Step5: Run update-kubeconfig command. +#### Step 5: Run update-kubeconfig command. `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region update-kubeconfig --name -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in kube-system namespace +#### Step 7: List all the pods running in kube-system namespace $ kubectl get pods -n kube-system ## How to Destroy + ```shell script cd examples/multi-tenancy-with-teams terraform destroy ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubectl](#requirement\_kubectl) | >= 1.7.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../.. | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. 
| n/a | ## Resources @@ -101,15 +109,11 @@ terraform destroy |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"teams"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"dev"` | no | @@ -121,5 +125,4 @@ terraform destroy | [application\_teams\_configure\_kubectl](#output\_application\_teams\_configure\_kubectl) | Configure kubectl for each Application Teams: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | | [eks\_blueprints\_output](#output\_eks\_blueprints\_output) | EKS Blueprints module outputs | | [platform\_teams\_configure\_kubectl](#output\_platform\_teams\_configure\_kubectl) | Configure kubectl for each Platform Team: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/multi-tenancy-with-teams/main.tf b/examples/multi-tenancy-with-teams/main.tf index 47249605b0..5f83968069 100644 --- a/examples/multi-tenancy-with-teams/main.tf +++ b/examples/multi-tenancy-with-teams/main.tf @@ -1,79 +1,55 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.7.0" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_caller_identity" "current" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = 
data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } provider "kubectl" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token + apply_retry_count = 10 + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) load_config_file = false + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } +data "aws_availability_zones" "available" {} +data "aws_caller_identity" "current" {} + locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = var.cluster_version + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -85,7 +61,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -108,14 +84,13 @@ module "aws_vpc" { "kubernetes.io/cluster/${local.cluster_name}" = "shared" "kubernetes.io/role/internal-elb" = "1" } - } #------------------------------------------------------------------------------- -# Example to consume eks-blueprints module with Teams (Application and Platform) +# Example to consume eks_blueprints module with Teams (Application and Platform) #------------------------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../.." 
tenant = local.tenant @@ -128,7 +103,7 @@ module "eks_blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUPS managed_node_groups = { diff --git a/examples/multi-tenancy-with-teams/outputs.tf b/examples/multi-tenancy-with-teams/outputs.tf index 4342e61a0b..56219ae80f 100644 --- a/examples/multi-tenancy-with-teams/outputs.tf +++ b/examples/multi-tenancy-with-teams/outputs.tf @@ -1,14 +1,14 @@ output "eks_blueprints_output" { description = "EKS Blueprints module outputs" - value = module.eks-blueprints + value = module.eks_blueprints } output "platform_teams_configure_kubectl" { description = "Configure kubectl for each Platform Team: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.teams[0].platform_teams_configure_kubectl + value = module.eks_blueprints.teams[0].platform_teams_configure_kubectl } output "application_teams_configure_kubectl" { description = "Configure kubectl for each Application Teams: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.teams[0].application_teams_configure_kubectl + value = module.eks_blueprints.teams[0].application_teams_configure_kubectl } diff --git a/examples/multi-tenancy-with-teams/variables.tf b/examples/multi-tenancy-with-teams/variables.tf index c799bbde81..e87940a03d 100644 --- a/examples/multi-tenancy-with-teams/variables.tf +++ b/examples/multi-tenancy-with-teams/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/multi-tenancy-with-teams/versions.tf b/examples/multi-tenancy-with-teams/versions.tf new file mode 100644 index 0000000000..2a74218fe2 --- /dev/null +++ b/examples/multi-tenancy-with-teams/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.14" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/node-groups/fargate-profiles/README.md b/examples/node-groups/fargate-profiles/README.md index 26f1b89c3b..1626b9ddf8 100644 --- a/examples/node-groups/fargate-profiles/README.md +++ b/examples/node-groups/fargate-profiles/README.md @@ -1,25 +1,31 @@ # EKS Cluster with Fargate Profiles This example deploys a new EKS Cluster into a new VPC and creates a Fargate profile.
- - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets - - Creates an EKS Cluster Control plane with one Fargate profile + +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets +- Creates an EKS Cluster Control plane with one Fargate profile ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -27,7 +33,8 @@ cd examples/node-groups/fargate-profiles/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -35,7 +42,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -45,29 +53,32 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. -#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region update-kubeconfig --name -#### Step6: Create a simple pod with respective match labels. +#### Step 6: Create a simple pod with respective match labels. 
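A pod is only scheduled onto Fargate when its namespace and labels match a selector on the Fargate profile. The sketch below shows the shape such a selector takes in the `fargate_profiles` input; the full block is elided from this diff, so the field names here follow the module's conventions but are an assumption. The `kubectl run` command that follows is written to match it.

```hcl
# Hypothetical selector for this example's fargate_profiles input: the pod
# created below must run in the "default" namespace and carry these labels
# to land on Fargate.
fargate_profiles = {
  default = {
    fargate_profile_name = "default"
    fargate_profile_namespaces = [{
      namespace = "default"
      k8s_labels = {
        Zone        = "dev"
        Environment = "preprod"
        env         = "fargate"
      }
    }]
    subnet_ids = module.aws_vpc.private_subnets
  }
}
```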
$ kubectl run test-pod --image=nginx --labels="Zone=dev,Environment=preprod,env=fargate" -#### Step7: List all the nodes by running the command below and verify the fargate nodes +#### Step 7: List all the nodes by running the command below and verify the fargate nodes $ kubectl get nodes -#### Step8: List all the pods running in `kube-system` namespace +#### Step 8: List all the pods running in `kube-system` namespace $ kubectl get pods -n kube-system Note : CoreDNS requires [additional setup](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html) if customers use only Fargate + ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -75,37 +86,34 @@ cd examples/node-groups/fargate-profiles terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs @@ -116,5 +124,4 @@ No inputs. 
| Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/node-groups/fargate-profiles/main.tf b/examples/node-groups/fargate-profiles/main.tf index 95ad486e0b..829b194113 100644 --- a/examples/node-groups/fargate-profiles/main.tf +++ b/examples/node-groups/fargate-profiles/main.tf @@ -1,57 +1,26 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } +data "aws_availability_zones" "available" {} + locals { tenant = "aws001" # AWS account name or unique id for tenant environment = "preprod" # Environment area eg., preprod or prod zone = "dev" # Environment with in one sub_tenant or business unit - - cluster_version = "1.21" + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -63,7 +32,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -88,9 +57,9 @@ module "aws_vpc" { } } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." 
tenant = local.tenant @@ -103,7 +72,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" #---------------------------------------------------------# # FARGATE PROFILES #---------------------------------------------------------# @@ -127,8 +96,3 @@ module "eks-blueprints" { }, } # END OF FARGATE PROFILES } - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl -} diff --git a/examples/node-groups/fargate-profiles/outputs.tf b/examples/node-groups/fargate-profiles/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/node-groups/fargate-profiles/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/node-groups/fargate-profiles/variables.tf b/examples/node-groups/fargate-profiles/variables.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/examples/node-groups/fargate-profiles/versions.tf b/examples/node-groups/fargate-profiles/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/node-groups/fargate-profiles/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/node-groups/managed-node-groups-tfvars/README.md b/examples/node-groups/managed-node-groups-tfvars/README.md index f54291b935..ea1e158c4b 100644 --- a/examples/node-groups/managed-node-groups-tfvars/README.md +++ b/examples/node-groups/managed-node-groups-tfvars/README.md @@ -2,10 +2,10 @@ This example deploys a new EKS Cluster with a Managed node group into a new VPC. -* Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets -* Creates an Internet gateway for the Public Subnets and a NAT Gateway for the +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets -* Creates an EKS Cluster Control plane with Managed node groups +- Creates an EKS Cluster Control plane with Managed node groups ## How to Deploy @@ -14,19 +14,19 @@ This example deploys a new EKS Cluster with a Managed node group into a new VPC. 
Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply -* [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -* [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -* [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +- [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +- [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +- [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT Initialize a working directory with configuration files @@ -35,7 +35,7 @@ cd examples/node-groups/managed-node-groups-tfvars/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN Verify the resources created by this execution @@ -44,7 +44,7 @@ export AWS_REGION=eu-central-1 # Select your own region terraform plan -var-file="variables.tfvars" ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY to create resources @@ -61,7 +61,7 @@ to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. -#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command Get the list of your clusters @@ -76,13 +76,13 @@ the below command aws eks --region "${AWS_REGION}" update-kubeconfig --name "aws-preprod-dev-eks" ``` -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below ```shell script kubectl get nodes ``` -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace ```shell script kubectl get pods -n kube-system @@ -99,37 +99,34 @@ terraform destroy --auto-approve --- - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 4.4.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.4.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.12.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. 
| n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs @@ -146,5 +143,4 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/node-groups/managed-node-groups-tfvars/data.tf b/examples/node-groups/managed-node-groups-tfvars/data.tf deleted file mode 100644 index 524663adea..0000000000 --- a/examples/node-groups/managed-node-groups-tfvars/data.tf +++ /dev/null @@ -1,14 +0,0 @@ -#------------------------------------------------------------------------ -# Data Resources -#------------------------------------------------------------------------ -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} diff --git a/examples/node-groups/managed-node-groups-tfvars/eks.tf b/examples/node-groups/managed-node-groups-tfvars/eks.tf index 1bdf5a289b..b8c60029f8 100644 --- a/examples/node-groups/managed-node-groups-tfvars/eks.tf +++ b/examples/node-groups/managed-node-groups-tfvars/eks.tf @@ -3,7 +3,7 @@ #------------------------------------------------------------------------ module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.12.0" + version = "~> 3.0" name = local.vpc_name cidr = var.vpc_cidr @@ -30,7 +30,7 @@ module "aws_vpc" { #------------------------------------------------------------------------ # AWS EKS Blueprints Module #------------------------------------------------------------------------ -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." 
# EKS Cluster VPC and Subnet mandatory config diff --git a/examples/node-groups/managed-node-groups-tfvars/main.tf b/examples/node-groups/managed-node-groups-tfvars/main.tf index 26e753eb6f..5f1394f0d6 100644 --- a/examples/node-groups/managed-node-groups-tfvars/main.tf +++ b/examples/node-groups/managed-node-groups-tfvars/main.tf @@ -1,51 +1,35 @@ -#------------------------------------------------------------------------ -# Terraform Provider Versions -#------------------------------------------------------------------------ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 4.4.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } +provider "aws" { + region = local.region } -#------------------------------------------------------------------------ -# Terraform Providers -#------------------------------------------------------------------------ -provider "aws" {} - provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + #------------------------------------------------------------------------ # Local Variables #------------------------------------------------------------------------ @@ -54,9 +38,5 @@ locals { azs = slice(data.aws_availability_zones.available.names, 0, local.count_availability_zone) vpc_name = join("-", [var.tenant, var.environment, var.zone, "vpc"]) cluster_name = join("-", [var.tenant, var.environment, var.zone, "eks"]) -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + region = "us-west-2" } diff --git a/examples/node-groups/managed-node-groups-tfvars/outputs.tf b/examples/node-groups/managed-node-groups-tfvars/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/node-groups/managed-node-groups-tfvars/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the 
correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/node-groups/managed-node-groups-tfvars/versions.tf b/examples/node-groups/managed-node-groups-tfvars/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/node-groups/managed-node-groups-tfvars/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/node-groups/managed-node-groups/README.md b/examples/node-groups/managed-node-groups/README.md index 0712e5ce5f..d736ea43c3 100644 --- a/examples/node-groups/managed-node-groups/README.md +++ b/examples/node-groups/managed-node-groups/README.md @@ -1,24 +1,31 @@ # EKS Cluster with Managed Node Group + This example deploys a new EKS Cluster with a Managed node group into a new VPC. - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets - - Creates an EKS Cluster Control plane with Managed node groups + +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets +- Creates an EKS Cluster Control plane with Managed node groups ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -26,7 +33,8 @@ cd examples/node-groups/managed-node-groups/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -34,7 +42,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -44,55 +53,58 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. 
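Since the example now exposes a `configure_kubectl` output (defined in `outputs.tf` above), you can also print the exact command instead of assembling it by hand; a minimal sketch, assuming you run it from this example's directory after `terraform apply`:

```shell script
# Prints the ready-made command, e.g.
# aws eks --region us-west-2 update-kubeconfig --name <tenant>-<environment>-<zone>-eks
terraform output -raw configure_kubectl
```

In a POSIX shell, `$(terraform output -raw configure_kubectl)` runs the printed command directly.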
-#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region update-kubeconfig --name -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace $ kubectl get pods -n kube-system ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script cd examples/node-groups/managed-node-groups terraform destroy --auto-approve ``` + --- - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | ## Resources @@ -100,15 +112,11 @@ terraform destroy --auto-approve |------|------| | [aws_ami.amazonlinux2eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"managed"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... 
| `string` | `"dev"` | no | @@ -118,5 +126,4 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/node-groups/managed-node-groups/main.tf b/examples/node-groups/managed-node-groups/main.tf index 10edd83b98..ebf7f43261 100644 --- a/examples/node-groups/managed-node-groups/main.tf +++ b/examples/node-groups/managed-node-groups/main.tf @@ -1,72 +1,43 @@ -#------------------------------------------------------------------------ -# Terraform Provider Versions -#------------------------------------------------------------------------ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } +provider "aws" { + region = local.region } -#------------------------------------------------------------------------ -# Terraform Providers -#------------------------------------------------------------------------ -provider "aws" {} - provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } -#------------------------------------------------------------------------ -# Data Resources -#------------------------------------------------------------------------ -data "aws_region" "current" {} - data "aws_availability_zones" "available" {} -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - data "aws_ami" "amazonlinux2eks" { most_recent = true + filter { name = "name" - values = [local.amazonlinux2eks] + values = ["amazon-eks-node-${local.cluster_version}-*"] } + owners = ["amazon"] } @@ -74,17 +45,17 @@ data "aws_ami" "amazonlinux2eks" { # Local Variables #------------------------------------------------------------------------ locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = 
var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - cluster_version = var.cluster_version + tenant = var.tenant # AWS account name or unique id for tenant + environment = var.environment # Environment area eg., preprod or prod + zone = var.zone # Environment within one sub_tenant or business unit + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) count_availability_zone = (length(data.aws_availability_zones.available.names) <= 3) ? length(data.aws_availability_zones.available.zone_ids) : 3 azs = slice(data.aws_availability_zones.available.names, 0, local.count_availability_zone) cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - amazonlinux2eks = "amazon-eks-node-${var.cluster_version}-*" + cluster_version = "1.21" terraform_version = "Terraform v1.0.1" } @@ -94,7 +65,7 @@ locals { #------------------------------------------------------------------------ module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -122,7 +93,7 @@ module "aws_vpc" { #------------------------------------------------------------------------ # AWS EKS Blueprints Module #------------------------------------------------------------------------ -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." tenant = local.tenant @@ -301,17 +272,12 @@ module "eks-blueprints" { #------------------------------------------------------------------------ # Kubernetes Add-on Module #------------------------------------------------------------------------ -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id enable_metrics_server = true enable_cluster_autoscaler = true } - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl -} diff --git a/examples/node-groups/managed-node-groups/outputs.tf b/examples/node-groups/managed-node-groups/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/node-groups/managed-node-groups/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/node-groups/managed-node-groups/variables.tf b/examples/node-groups/managed-node-groups/variables.tf index 9261d5699f..87d1355214 100644 --- a/examples/node-groups/managed-node-groups/variables.tf +++ b/examples/node-groups/managed-node-groups/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/node-groups/managed-node-groups/versions.tf b/examples/node-groups/managed-node-groups/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/node-groups/managed-node-groups/versions.tf @@ -0,0 +1,22 @@
+terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/node-groups/self-managed-node-groups/README.md b/examples/node-groups/self-managed-node-groups/README.md index 3ce4ffd8ea..21b1a74452 100644 --- a/examples/node-groups/self-managed-node-groups/README.md +++ b/examples/node-groups/self-managed-node-groups/README.md @@ -1,24 +1,31 @@ # EKS Cluster with Self-managed Node Group + This example deploys a new EKS Cluster with a [self-managed node group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) into a new VPC. - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets - - Creates an EKS Cluster Control plane with a self-managed node group + +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets +- Creates an EKS Cluster Control plane with a self-managed node group ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools on your Mac or Windows laptop before you start working with this module and running Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -26,7 +33,8 @@ cd examples/node-groups/self-managed-node-groups/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -34,7 +42,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -44,28 +53,30 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. The following command is used to update the `kubeconfig` on your local machine, where you run kubectl commands to interact with your EKS Cluster.
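For reference, a sketch of what the Step 5 command below resolves to when you keep this example's defaults (tenant `aws001`, environment `preprod`, zone `dev`, and the `us-west-2` region set in `main.tf`; substitute your own values otherwise):

```bash
# The cluster name follows the join("-", [tenant, environment, zone, "eks"])
# convention in main.tf, so the defaults resolve to aws001-preprod-dev-eks.
aws eks --region us-west-2 update-kubeconfig --name aws001-preprod-dev-eks
```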
-#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region <region> update-kubeconfig --name <cluster-name> -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `kube-system` namespace +#### Step 7: List all the pods running in `kube-system` namespace $ kubectl get pods -n kube-system -#### Step8: List the auto scaling group created for the self-managed node group +#### Step 8: List the auto scaling group created for the self-managed node group $ aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names aws001-preprod-dev-eks-self-managed-ondemand ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -73,43 +84,39 @@ cd examples/node-groups/self-managed-node-groups terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc...
| `string` | `"dev"` | no | @@ -119,5 +126,4 @@ terraform destroy --auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/node-groups/self-managed-node-groups/main.tf b/examples/node-groups/self-managed-node-groups/main.tf index 11a996891a..f2565af0f5 100644 --- a/examples/node-groups/self-managed-node-groups/main.tf +++ b/examples/node-groups/self-managed-node-groups/main.tf @@ -1,57 +1,26 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } +data "aws_availability_zones" "available" {} + locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = var.cluster_version + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -63,7 +32,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -88,9 +57,9 @@ module "aws_vpc" { } } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." 
tenant = local.tenant @@ -103,7 +72,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" self_managed_node_groups = { #---------------------------------------------------------# @@ -172,10 +141,5 @@ module "eks-blueprints" { subnet_type = "private" } }, - } # END OF SELF MANAGED NODE GROUPS -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + } } diff --git a/examples/node-groups/self-managed-node-groups/outputs.tf b/examples/node-groups/self-managed-node-groups/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/node-groups/self-managed-node-groups/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/node-groups/self-managed-node-groups/variables.tf b/examples/node-groups/self-managed-node-groups/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/node-groups/self-managed-node-groups/variables.tf +++ b/examples/node-groups/self-managed-node-groups/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/node-groups/self-managed-node-groups/versions.tf b/examples/node-groups/self-managed-node-groups/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/node-groups/self-managed-node-groups/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/node-groups/windows-node-groups/README.md b/examples/node-groups/windows-node-groups/README.md index 36ad299086..986b07b5d5 100644 --- a/examples/node-groups/windows-node-groups/README.md +++ b/examples/node-groups/windows-node-groups/README.md @@ -1,26 +1,32 @@ # EKS Cluster with Windows support This example deploys the following AWS resources. 
- - A new VPC, 3 AZs with private and public subnets - - Necessary VPC endpoints for node groups in private subnets - - An Internet gateway for the VPC and a NAT gateway in each public subnet - - An EKS cluster with an AWS-managed node group of spot Linux worker nodes and a self-managed node group of on-demand Windows worker nodes + +- A new VPC, 3 AZs with private and public subnets +- Necessary VPC endpoints for node groups in private subnets +- An Internet gateway for the VPC and a NAT gateway in each public subnet +- An EKS cluster with an AWS-managed node group of spot Linux worker nodes and a self-managed node group of on-demand Windows worker nodes # How to deploy + ## Prerequisites: + Ensure that you have installed the following tools on your Mac or Windows laptop before you start working with this module and running `terraform plan` and `terraform apply` + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ## Deployment steps -### Step1: Clone the repo using the command below + +### Step 1: Clone the repo using the command below ```bash git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` -### Step2: Run `terraform init` +### Step 2: Run `terraform init` + to initialize a working directory with configuration files ```bash @@ -28,7 +34,8 @@ cd examples/node-groups/windows-node-groups terraform init ``` -### Step3: Run `terraform plan` +### Step 3: Run `terraform plan` + to verify the resources created by this execution ```bash @@ -38,7 +45,8 @@ terraform plan If you want to use a region other than `us-west-2`, update the `aws_region` name and `aws_availability_zones` filter in the data sources in [main.tf](./main.tf) accordingly. -### Step4: Run `terraform apply` +### Step 4: Run `terraform apply` + to create resources ```bash @@ -46,18 +54,21 @@ terraform apply -auto-approve ``` ## Configure kubectl and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. The following command is used to update the `kubeconfig` on your local machine, where you run kubectl commands to interact with your EKS Cluster. -### Step5: Run `update-kubeconfig` command. +### Step 5: Run `update-kubeconfig` command. `~/.kube/config` file gets updated with EKS cluster context from the below command. Replace the region name and EKS cluster name with your cluster's name. (If you did not change the `tenant`, `environment`, and `zone` values in this example, the EKS cluster name will be `aws001-preprod-dev-eks`.) $ aws eks --region us-west-2 update-kubeconfig --name aws001-preprod-dev-eks -### Step6: (Optional) Deploy sample Windows and Linux workloads to verify support for both operating systems +### Step 6: (Optional) Deploy sample Windows and Linux workloads to verify support for both operating systems + When Windows support is enabled in the cluster, it is necessary to use one of the ways to assign pods to specific nodes, such as `nodeSelector` or `affinity`. See the [K8s documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more info. This example uses `nodeSelector`s to select nodes with the appropriate OS for pods, as sketched below.
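To make the `nodeSelector` approach concrete, here is a minimal, hypothetical deployment that pins its pods to Windows nodes via the built-in `kubernetes.io/os` node label (the names and image are illustrative, not the manifests shipped under `./k8s/`):

```bash
# Hypothetical manifest illustrating OS-based scheduling; assumes the
# "windows" namespace already exists. kubernetes.io/os is a standard node label.
kubectl apply -f - <<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sample-windows-app
  namespace: windows
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sample-windows-app
  template:
    metadata:
      labels:
        app: sample-windows-app
    spec:
      nodeSelector:
        kubernetes.io/os: windows   # schedule only onto Windows nodes
      containers:
        - name: servercore
          image: mcr.microsoft.com/windows/servercore:ltsc2019
          command: ["powershell", "-Command", "Start-Sleep -Seconds 3600"]
EOF
```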
#### Sample Windows deployment + ```bash cd examples/node-groups/windows-node-groups @@ -72,6 +83,7 @@ watch -n 1 "kubectl get po -n windows" # When the pod starts running, create a proxy to the K8s API kubectl proxy ``` + Now visit [http://127.0.0.1:8001/api/v1/namespaces/windows/services/aspnet/proxy/demo](http://127.0.0.1:8001/api/v1/namespaces/windows/services/aspnet/proxy/demo) in your browser. If everything went well, the page should display text "Hello, World!". Use Ctrl+C in your terminal to stop the `kubectl` proxy. Note: The `aspnet` service created by above example is a `LoadBalancer` service, so you can also visit the Network Load Balancer (NLB) endpoint in your browser instead of using `kubectl proxy` as mentioned above. To be able to access the NLB endpoint, update the security group attached to the Windows node where the `aspnet` pod is running to allow inbound access to port 80 from your IP address. You can grab the NLB endpoint from the service using the following command: @@ -81,6 +93,7 @@ kubectl get svc -n windows -o jsonpath="{.items[0].status.loadBalancer.ingress[0 ``` #### Sample Linux deployment + ```bash # Sample Linux deployment kubectl apply -f ./k8s/linux-nginx.yaml @@ -91,7 +104,7 @@ kubectl apply -f ./k8s/linux-nginx.yaml ```bash cd examples/node-groups/windows-node-groups -# If you deployed sample Windows & Linux workloads from Step6 +# If you deployed sample Windows & Linux workloads from Step 6 kubectl delete svc,deploy -n windows --all kubectl delete svc,deploy -n linux --all @@ -100,46 +113,43 @@ terraform destroy -auto-approve ``` # See also -* [EKS Windows support considerations](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) - +- [EKS Windows support considerations](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) + + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. 
| n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_version](#input\_cluster\_version) | Kubernetes Version | `string` | `"1.21"` | no | | [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"aws001"` | no | | [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"dev"` | no | @@ -149,5 +159,4 @@ terraform destroy -auto-approve | Name | Description | |------|-------------| | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | - - + diff --git a/examples/node-groups/windows-node-groups/main.tf b/examples/node-groups/windows-node-groups/main.tf index f1aee68af8..21a31751c3 100644 --- a/examples/node-groups/windows-node-groups/main.tf +++ b/examples/node-groups/windows-node-groups/main.tf @@ -1,78 +1,40 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" { - # Change this per your need - name = "us-west-2" -} - -data "aws_availability_zones" "available" { - state = "available" - # Specify AZs to avoid EKS cluster creation error due to reduced capacity in an AZ. - # Change the AZ names per capacity available in the region you selected. 
- # https://aws.amazon.com/premiumsupport/knowledge-center/eks-cluster-creation-errors/ - filter { - name = "zone-name" - values = ["us-west-2a", "us-west-2b", "us-west-2c"] - } -} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} + locals { tenant = var.tenant # AWS account name or unique id for tenant environment = var.environment # Environment area eg., preprod or prod zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = var.cluster_version + region = "us-west-2" vpc_cidr = "10.1.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -84,7 +46,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -110,9 +72,9 @@ module "aws_vpc" { } #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../../.." 
tenant = local.tenant @@ -125,7 +87,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + cluster_version = "1.21" # EKS MANAGED NODE GROUP # with Spot instances @@ -154,10 +116,10 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id # EKS Managed Add-ons enable_amazon_eks_coredns = true @@ -195,10 +157,5 @@ module "eks-blueprints-kubernetes-addons" { ] } - depends_on = [module.eks-blueprints.managed_node_groups] -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + depends_on = [module.eks_blueprints.managed_node_groups] } diff --git a/examples/node-groups/windows-node-groups/outputs.tf b/examples/node-groups/windows-node-groups/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/node-groups/windows-node-groups/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/node-groups/windows-node-groups/variables.tf b/examples/node-groups/windows-node-groups/variables.tf index d2c4833cb0..adb3fd6e26 100644 --- a/examples/node-groups/windows-node-groups/variables.tf +++ b/examples/node-groups/windows-node-groups/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" diff --git a/examples/node-groups/windows-node-groups/versions.tf b/examples/node-groups/windows-node-groups/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/node-groups/windows-node-groups/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/README.md b/examples/observability/eks-cluster-with-amp-amg-opensearch/README.md index 4ffbc9f26b..3d6a4b1979 100644 --- a/examples/observability/eks-cluster-with-amp-amg-opensearch/README.md +++ b/examples/observability/eks-cluster-with-amp-amg-opensearch/README.md @@ -6,16 +6,14 @@ Prometheus server collects these metrics and writes to remote Amazon Managed Pro AWS FluentBit Addon is configured to collect the container logs from EKS Cluster nodes and write to Amazon Open Search service. ---- **NOTE** For the sake of simplicity in this example, we store sensitive information and credentials in `dev.tfvars`. This should not be done in a production environment. 
Instead, use an external secret store such as AWS Secrets Manager and use the [aws_secretsmanager_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) data source to retrieve them. ---- - ## How to Deploy ### Prerequisites + - Terraform - An AWS Account - kubectl @@ -25,39 +23,50 @@ For the sake of simplicity in this example, we store sensitive information and c - As of this writing (February 3, 2022), the AWS Terraform Provider does not support Amazon Managed Grafana, so it must be manually created beforehand. Follow the instructions [here](https://docs.aws.amazon.com/grafana/latest/userguide/getting-started-with-AMG.html) to deploy an Amazon Managed Grafana workspace. #### Generate a Grafana API Key + - Give admin access to the SSO user you set up when creating the Amazon Managed Grafana Workspace: - - In the AWS Console, navigate to Amazon Grafana. In the left navigation bar, click __All workspaces__, then click on the workspace name you are using for this example. - - Under __Authentication__ within __AWS Single Sign-On (SSO)__, click __Configure users and user groups__ - - Check the box next to the SSO user you created and click __Make admin__ + - In the AWS Console, navigate to Amazon Grafana. In the left navigation bar, click **All workspaces**, then click on the workspace name you are using for this example. + - Under **Authentication** within **AWS Single Sign-On (SSO)**, click **Configure users and user groups** + - Check the box next to the SSO user you created and click **Make admin** - Navigate back to the Grafana Dashboard. If you don't see the gear icon in the left navigation bar, log out and log back in. -- Click on the gear icon, then click on the __API keys__ tab. -- Click __Add API key__, fill in the _Key name_ field and select _Admin_ as the Role. +- Click on the gear icon, then click on the **API keys** tab. +- Click **Add API key**, fill in the _Key name_ field and select _Admin_ as the Role. - Copy your API key into `dev.tfvars` under `grafana_api_key` ### Deployment Steps + - Clone this repository: + ``` git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git ``` + - Initialize a working directory + ``` cd examples/observability/eks-cluster-with-observability terraform init ``` + - Fill-in the values for the variables in `dev.tfvars` - The password for OpenSearch must be a minimum of eight characters with at least one uppercase, one lowercase, one digit, and one special character. - If the `AWSServiceRoleForAmazonElasticsearchService` role already exists in your account, set `create_iam_service_linked_role = false`. - Verify the resources created by this execution: + ``` export AWS_REGION= # Select your own region terraform validate terraform plan -var-file=dev.tfvars ``` + - Deploy resources: - ``` - terraform apply -var-file=dev.tfvars --auto-approve - ``` + +``` +terraform apply -var-file=dev.tfvars --auto-approve +``` + - Add the cluster to your kubeconfig: + ``` aws eks --region $AWS_REGION update-kubeconfig --name aws001-preprod-observability-eks ``` @@ -65,6 +74,7 @@ aws eks --region $AWS_REGION update-kubeconfig --name aws001-preprod-observabili `terraform apply` will provision a new EKS cluster with Fluent Bit, Prometheus, and a sample workload. It will also provision Amazon Managed Prometheus to ingest metrics from Prometheus, an Amazon OpenSearch service domain for ingesting logs from Fluent Bit, and a bastion host so we can test OpenSearch. 
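On the earlier note about keeping credentials out of `dev.tfvars`: one lightweight pattern is to export the secret as a `TF_VAR_` environment variable at deploy time instead of writing it to disk. A minimal sketch, assuming the API key is already stored in AWS Secrets Manager under the hypothetical name `grafana-api-key`:

```bash
# grafana-api-key is a hypothetical secret name; create the secret beforehand.
# TF_VAR_grafana_api_key populates var.grafana_api_key without editing dev.tfvars.
export TF_VAR_grafana_api_key=$(aws secretsmanager get-secret-value \
  --secret-id grafana-api-key \
  --query SecretString \
  --output text)
terraform plan -var-file=dev.tfvars
```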
--- + **NOTE** This example automatically generates a key-pair for you and saves the private key to your current directory to make the next steps simpler. In production workloads, it is best practice to use your own key-pair instead of using Terraform to generate one for you. @@ -76,41 +86,51 @@ This example automatically generates a key-pair - Check that the bastion host we use to test OpenSearch is running in the EC2 Console. - Check that the status of OpenSearch is green: -Navigate to Amazon OpenSearch in the AWS Console and select the __opensearch__ domain. Verify that *Cluster Health* under *General Information* lists Green. + Navigate to Amazon OpenSearch in the AWS Console and select the **opensearch** domain. Verify that _Cluster Health_ under _General Information_ lists Green. - Verify that Amazon Managed Prometheus workspace was created successfully: + - Check the status of Amazon Managed Prometheus workspace through the AWS console. - Check that Prometheus Server is healthy: + - The following command gets the pod that is running the Prometheus server and sets up port forwarding to http://localhost:8080 + ``` kubectl port-forward $(kubectl get pods --namespace=prometheus --selector='component=server' --output=name) 8080:9090 -n prometheus ``` + - Navigate to http://localhost:8080 and confirm that the dashboard webpage loads. - Press `CTRL+C` to stop port forwarding. - To check that Fluent Bit is working: - - Fluent Bit is provisioned properly if you see the option to add an index pattern while following the steps for the section below named __Set up an Index Pattern in OpenSearch to Explore Log Data__ + + - Fluent Bit is provisioned properly if you see the option to add an index pattern while following the steps for the section below named **Set up an Index Pattern in OpenSearch to Explore Log Data** - Check that the sample workload is running: - Run the command below, then navigate to http://localhost:4040 and confirm the webpage loads. + ``` kubectl port-forward svc/guestbook-ui -n team-riker 4040:80 ``` #### Map the Fluent Bit Role as a Backend Role in OpenSearch -OpenSearch roles are the core method for controlling access within your OpenSearch cluster. Backend roles are a method for mapping an external identity (such as an IAM role) to an OpenSearch role. Mapping the external identity to an OpenSearch role allows that identity to gain the permissions of that role. Here we map the Fluent Bit IAM role as a backend role to OpenSearch's *all_access* role. This gives the Fluent Bit IAM role permission to send logs to OpenSearch. Read more about OpenSearch roles [here](https://opensearch.org/docs/latest/security-plugin/access-control/users-roles/). + +OpenSearch roles are the core method for controlling access within your OpenSearch cluster. Backend roles are a method for mapping an external identity (such as an IAM role) to an OpenSearch role. Mapping the external identity to an OpenSearch role allows that identity to gain the permissions of that role. Here we map the Fluent Bit IAM role as a backend role to OpenSearch's _all_access_ role. This gives the Fluent Bit IAM role permission to send logs to OpenSearch. Read more about OpenSearch roles [here](https://opensearch.org/docs/latest/security-plugin/access-control/users-roles/).
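The mapping itself amounts to a single request against the security plugin's REST API, issued through the SSH tunnel configured in the steps below. A sketch under stated assumptions: `FLUENTBIT_ROLE_ARN` is a placeholder for the Fluent Bit IRSA role ARN, and a `PUT` replaces any existing mapping for `all_access`:

```bash
# Assumes the tunnel below is forwarding localhost:9200 to the domain and that
# FLUENTBIT_ROLE_ARN holds the Fluent Bit IAM role ARN (hypothetical variable).
curl --insecure -sS -u "${OS_DOMAIN_USER}:${OS_DOMAIN_PASSWORD}" \
  -X PUT "https://localhost:9200/_plugins/_security/api/rolesmapping/all_access" \
  -H 'Content-Type: application/json' \
  -d "{\"backend_roles\": [\"${FLUENTBIT_ROLE_ARN}\"]}"
```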
- In a different terminal window, navigate back to the example directory and establish and SSH tunnel from https://localhost:9200 to your OpenSearch Service domain through the bastion host: - Because we provisioned OpenSearch within our VPC, we connect to a bastion host with an SSH tunnel to test and access our OpenSearch endpoints. Refer to the [Amazon OpenSearch Developer Guide](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html#vpc-test) for more information. - For connecting with additional access control using Amazon Cognito, see this [page](https://aws.amazon.com/premiumsupport/knowledge-center/opensearch-outside-vpc-ssh/). + ``` export PRIVATE_KEY_FILE=bastion_host_private_key.pem export BASTION_HOST_IP=$(terraform output -raw bastion_host_public_ip) export OS_VPC_ENDPOINT=$(terraform output -raw opensearch_vpc_endpoint) ssh -i $PRIVATE_KEY_FILE ec2-user@$BASTION_HOST_IP -N -L "9200:${OS_VPC_ENDPOINT}:443" ``` + - Back in your first terminal window: + ``` export BASTION_HOST_IP=$(terraform output -raw bastion_host_public_ip) export OS_DOMAIN_USER=$(terraform output -raw opensearch_user) @@ -135,53 +155,57 @@ curl --insecure -sS -u "${OS_DOMAIN_USER}:${OS_DOMAIN_PASSWORD}" \ You must set up an index pattern before you can explore data in the OpenSearch Dashboards. An index pattern selects which data to use. Read more about index patterns [here](https://www.elastic.co/guide/en/kibana/current/index-patterns.html). - Follow the steps outlined in **Configure the SOCKS proxy** and **Create the SSH tunnel** sections of this [Knowledge Center](https://aws.amazon.com/premiumsupport/knowledge-center/opensearch-outside-vpc-ssh/) article to establish a SOCKS5 tunnel from localhost to OpenSearch via the bastion host. -- Log into the AWS console, navigate to Amazon OpenSearch Service, click on the "opensearch" domain and click on the link under __OpenSearch Dashboards URL__ to access the OpenSearch Dashboards. +- Log into the AWS console, navigate to Amazon OpenSearch Service, click on the "opensearch" domain and click on the link under **OpenSearch Dashboards URL** to access the OpenSearch Dashboards. - Log into the OpenSearch Dashboards with the credentials you set in `dev.tfvars` -- From the OpenSearch Dashboards Welcome screen select __Explore on my own__ -- On _Select your tenant_ screen, select Private and click __Confirm__ +- From the OpenSearch Dashboards Welcome screen select **Explore on my own** +- On _Select your tenant_ screen, select Private and click **Confirm** - On the next screen click on the _OpenSearch Dashboards_ tile -- Click __Add your data__ -- Click __Create index Pattern__ -- Add __\*fluent-bit\*__ as the Index pattern and click __Next step__ -- Select __@timestamp__ as the Time filter field name and close the Configuration window by clicking on __Create index pattern__ -- Select __Discover__ from the left panel and start exploring the logs +- Click **Add your data** +- Click **Create index Pattern** +- Add **\*fluent-bit\*** as the Index pattern and click **Next step** +- Select **@timestamp** as the Time filter field name and close the Configuration window by clicking on **Create index pattern** +- Select **Discover** from the left panel and start exploring the logs ## Cleanup - - Run `terraform destroy -var-file=dev.tfvars` to remove all resources except for your Amazon Managed Grafana workspace. - - Delete your Amazon Managed Grafana workspace through the AWS console. - - Delete the private key file: `bastion_host_private_key.pem`. 
+ +- Run `terraform destroy -var-file=dev.tfvars` to remove all resources except for your Amazon Managed Grafana workspace. +- Delete your Amazon Managed Grafana workspace through the AWS console. +- Delete the private key file: `bastion_host_private_key.pem`. ## Troubleshooting - - When running `terraform apply` or `terraform destroy`, the process will sometimes time-out. If that happens, run the command again and the operation will continue where it left off. - - If your connection times out when trying to establish an SSH tunnel with the bastion host, check that you are disconnected from any VPNs. - +- When running `terraform apply` or `terraform destroy`, the process will sometimes time-out. If that happens, run the command again and the operation will continue where it left off. +- If your connection times out when trying to establish an SSH tunnel with the bastion host, check that you are disconnected from any VPNs. + + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.73.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [grafana](#requirement\_grafana) | >= 1.13.3 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [local](#requirement\_local) | >= 2.1 | +| [tls](#requirement\_tls) | >= 3.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.73.0 | +| [aws](#provider\_aws) | >= 3.72 | | [grafana](#provider\_grafana) | >= 1.13.3 | -| [local](#provider\_local) | n/a | -| [tls](#provider\_tls) | n/a | +| [local](#provider\_local) | >= 2.1 | +| [tls](#provider\_tls) | >= 3.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.11.3 | -| [eks-blueprints](#module\_eks-blueprints) | ../../.. | n/a | -| [eks-blueprints-kubernetes-addons](#module\_eks-blueprints-kubernetes-addons) | ../../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../../.. 
| n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../../modules/kubernetes-addons | n/a | ## Resources @@ -189,7 +213,7 @@ You must set up an index pattern before you can explore data in the OpenSearch D |------|------| | [aws_elasticsearch_domain.opensearch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticsearch_domain) | resource | | [aws_elasticsearch_domain_policy.opensearch_access_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticsearch_domain_policy) | resource | -| [aws_iam_policy.fluentbit-opensearch-access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.fluentbit_opensearch_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_service_linked_role.opensearch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource | | [aws_instance.bastion_host](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource | | [aws_key_pair.bastion_host_key_pair](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | @@ -199,12 +223,8 @@ You must set up an index pattern before you can explore data in the OpenSearch D | [tls_private_key.bastion_host_private_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.amazon_linux_2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_elasticsearch_domain.opensearch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/elasticsearch_domain) | data source | -| [aws_iam_policy_document.fluentbit-opensearch-access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.fluentbit_opensearch_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.opensearch_access_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs @@ -212,19 +232,18 @@ You must set up an index pattern before you can explore data in the OpenSearch D |------|-------------|------|---------|:--------:| | [create\_iam\_service\_linked\_role](#input\_create\_iam\_service\_linked\_role) | Whether to create the AWSServiceRoleForAmazonElasticsearchService role used by the OpenSearch service | `bool` | `true` | no | | [grafana\_api\_key](#input\_grafana\_api\_key) | Api key for authorizing the Grafana provider to make changes to Amazon Managed Grafana | `string` | n/a | yes | -| [grafana\_endpoint](#input\_grafana\_endpoint) | n/a | `string` | n/a | yes | 
+| [grafana\_endpoint](#input\_grafana\_endpoint) | Grafana endpoint | `string` | n/a | yes | | [local\_computer\_ip](#input\_local\_computer\_ip) | IP Address of the computer you are running and testing this example from | `string` | n/a | yes | -| [opensearch\_dashboard\_pw](#input\_opensearch\_dashboard\_pw) | n/a | `string` | n/a | yes | -| [opensearch\_dashboard\_user](#input\_opensearch\_dashboard\_user) | n/a | `string` | n/a | yes | +| [opensearch\_dashboard\_pw](#input\_opensearch\_dashboard\_pw) | OpenSearch dashboard user password | `string` | n/a | yes | +| [opensearch\_dashboard\_user](#input\_opensearch\_dashboard\_user) | OpenSearch dashboard user | `string` | n/a | yes | ## Outputs | Name | Description | |------|-------------| -| [bastion\_host\_public\_ip](#output\_bastion\_host\_public\_ip) | n/a | +| [bastion\_host\_public\_ip](#output\_bastion\_host\_public\_ip) | Public IP address of the bastion host | | [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | | [opensearch\_pw](#output\_opensearch\_pw) | Amazon OpenSearch Service Domain password | | [opensearch\_user](#output\_opensearch\_user) | Amazon OpenSearch Service Domain username | | [opensearch\_vpc\_endpoint](#output\_opensearch\_vpc\_endpoint) | Amazon OpenSearch Service Domain-specific endpoint | - - + diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/data.tf b/examples/observability/eks-cluster-with-amp-amg-opensearch/data.tf index f81206b15c..cd0d95a720 100644 --- a/examples/observability/eks-cluster-with-amp-amg-opensearch/data.tf +++ b/examples/observability/eks-cluster-with-amp-amg-opensearch/data.tf @@ -1,16 +1,4 @@ -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_iam_policy_document" "fluentbit-opensearch-access" { +data "aws_iam_policy_document" "fluentbit_opensearch_access" { statement { sid = "OpenSearchAccess" effect = "Allow" @@ -40,7 +28,3 @@ data "aws_ami" "amazon_linux_2" { values = ["amzn2-ami-hvm*"] } } - -data "aws_elasticsearch_domain" "opensearch" { - domain_name = aws_elasticsearch_domain.opensearch.domain_name -} diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/dev.tfvars b/examples/observability/eks-cluster-with-amp-amg-opensearch/dev.tfvars index a4eafa6e55..9b84526b3d 100644 --- a/examples/observability/eks-cluster-with-amp-amg-opensearch/dev.tfvars +++ b/examples/observability/eks-cluster-with-amp-amg-opensearch/dev.tfvars @@ -1,6 +1,5 @@ -grafana_endpoint = "" -grafana_api_key = "" -opensearch_dashboard_user = "" -opensearch_dashboard_pw = "" # Password must be a minimum of eight characters with at least one uppercase, one lowercase, one digit, and one special character -local_computer_ip = "" # We configure a route table and security group that allows your computer to access an EC2 instance. 
-create_iam_service_linked_role = true # set this to false if yor account already has the AWSServiceRoleForAmazonElasticsearchService role created +grafana_endpoint = "" +grafana_api_key = "" +opensearch_dashboard_user = "" +opensearch_dashboard_pw = "" # Password must be a minimum of eight characters with at least one uppercase, one lowercase, one digit, and one special character +local_computer_ip = "" # We configure a route table and security group that allows your computer to access an EC2 instance. # set this to false if yor account already has the AWSServiceRoleForAmazonElasticsearchService role created diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/main.tf b/examples/observability/eks-cluster-with-amp-amg-opensearch/main.tf index dba33b746c..6da558a3c7 100644 --- a/examples/observability/eks-cluster-with-amp-amg-opensearch/main.tf +++ b/examples/observability/eks-cluster-with-amp-amg-opensearch/main.tf @@ -1,36 +1,45 @@ -terraform { - required_version = ">= 1.0.1" +provider "aws" { + region = local.region +} - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.73.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.7.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - grafana = { - source = "grafana/grafana" - version = ">= 1.13.3" - } +provider "kubernetes" { + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } +} - backend "local" { - path = "local_tf_state/terraform-main.tfstate" +provider "helm" { + kubernetes { + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +provider "grafana" { + url = var.grafana_endpoint + auth = var.grafana_api_key +} + +data "aws_availability_zones" "available" {} + locals { tenant = "aws001" # AWS account name or unique id for tenant environment = "preprod" # Environment area eg., preprod or prod zone = "observability" # Environment within one sub_tenant or business unit - - cluster_version = "1.21" + region = "us-west-2" vpc_cidr = "10.0.0.0/16" vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) @@ -45,8 +54,6 @@ locals { repo_url = "https://github.com/aws-samples/eks-blueprints-workloads.git" add_on_application = false } - - aws_iam_instance_profile_name = "bastion_host_profile" } #--------------------------------------------------------------- @@ -54,7 +61,7 @@ locals { #--------------------------------------------------------------- module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.11.3" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -82,7 +89,7 @@ module "aws_vpc" { #--------------------------------------------------------------- # Provision EKS and Helm Charts #--------------------------------------------------------------- -module "eks-blueprints" { +module 
"eks_blueprints" { source = "../../.." tenant = local.tenant @@ -95,7 +102,7 @@ module "eks-blueprints" { private_subnet_ids = module.aws_vpc.private_subnets # EKS Control Plane Variables - cluster_version = local.cluster_version + cluster_version = "1.21" managed_node_groups = { mg_4 = { @@ -110,9 +117,9 @@ module "eks-blueprints" { enable_amazon_prometheus = true } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id #K8s Add-ons enable_metrics_server = true @@ -124,10 +131,10 @@ module "eks-blueprints-kubernetes-addons" { # Fluentbit enable_aws_for_fluentbit = true - aws_for_fluentbit_irsa_policies = [aws_iam_policy.fluentbit-opensearch-access.arn] + aws_for_fluentbit_irsa_policies = [aws_iam_policy.fluentbit_opensearch_access.arn] aws_for_fluentbit_helm_config = { values = [templatefile("${path.module}/helm_values/aws-for-fluentbit-values.yaml", { - aws_region = data.aws_region.current.name, + aws_region = local.region host = aws_elasticsearch_domain.opensearch.endpoint })] } @@ -135,10 +142,10 @@ module "eks-blueprints-kubernetes-addons" { # Prometheus and Amazon Managed Prometheus integration enable_prometheus = true enable_amazon_prometheus = true - amazon_prometheus_workspace_endpoint = module.eks-blueprints.amazon_prometheus_workspace_endpoint + amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint depends_on = [ - module.eks-blueprints.managed_node_groups, + module.eks_blueprints.managed_node_groups, module.aws_vpc ] } @@ -150,12 +157,13 @@ resource "grafana_data_source" "prometheus" { type = "prometheus" name = "amp" is_default = true - url = module.eks-blueprints.amazon_prometheus_workspace_endpoint + url = module.eks_blueprints.amazon_prometheus_workspace_endpoint + json_data { http_method = "POST" sigv4_auth = true sigv4_auth_type = "workspace-iam-role" - sigv4_region = data.aws_region.current.name + sigv4_region = local.region } } @@ -213,10 +221,10 @@ resource "aws_iam_service_linked_role" "opensearch" { aws_service_name = "es.amazonaws.com" } -resource "aws_iam_policy" "fluentbit-opensearch-access" { - name = "fluentbit-opensearch-access" +resource "aws_iam_policy" "fluentbit_opensearch_access" { + name = "fluentbit_opensearch_access" description = "IAM policy to allow Fluentbit access to OpenSearch" - policy = data.aws_iam_policy_document.fluentbit-opensearch-access.json + policy = data.aws_iam_policy_document.fluentbit_opensearch_access.json } resource "aws_elasticsearch_domain_policy" "opensearch_access_policy" { diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/outputs.tf b/examples/observability/eks-cluster-with-amp-amg-opensearch/outputs.tf index 3e10d40dc0..64366b2224 100644 --- a/examples/observability/eks-cluster-with-amp-amg-opensearch/outputs.tf +++ b/examples/observability/eks-cluster-with-amp-amg-opensearch/outputs.tf @@ -15,10 +15,11 @@ output "opensearch_vpc_endpoint" { } output "bastion_host_public_ip" { - value = aws_instance.bastion_host.public_ip + description = "Public IP address of the bastion host" + value = aws_instance.bastion_host.public_ip } output "configure_kubectl" { description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl + value = 
module.eks_blueprints.configure_kubectl } diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/providers.tf b/examples/observability/eks-cluster-with-amp-amg-opensearch/providers.tf deleted file mode 100644 index 1c06cb829f..0000000000 --- a/examples/observability/eks-cluster-with-amp-amg-opensearch/providers.tf +++ /dev/null @@ -1,23 +0,0 @@ -provider "aws" {} - -provider "kubernetes" { - experiments { - manifest_resource = true - } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token -} - -provider "helm" { - kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - } -} - -provider "grafana" { - url = var.grafana_endpoint - auth = var.grafana_api_key -} diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/variables.tf b/examples/observability/eks-cluster-with-amp-amg-opensearch/variables.tf index 80bb2e433e..fd578d0f85 100644 --- a/examples/observability/eks-cluster-with-amp-amg-opensearch/variables.tf +++ b/examples/observability/eks-cluster-with-amp-amg-opensearch/variables.tf @@ -1,29 +1,32 @@ variable "grafana_endpoint" { - type = string + description = "Grafana endpoint" + type = string } variable "grafana_api_key" { + description = "Api key for authorizing the Grafana provider to make changes to Amazon Managed Grafana" type = string sensitive = true - description = "Api key for authorizing the Grafana provider to make changes to Amazon Managed Grafana" } variable "opensearch_dashboard_user" { - type = string + description = "OpenSearch dashboard user" + type = string } variable "opensearch_dashboard_pw" { - type = string - sensitive = true + description = "OpenSearch dashboard user password" + type = string + sensitive = true } variable "local_computer_ip" { - type = string description = "IP Address of the computer you are running and testing this example from" + type = string } variable "create_iam_service_linked_role" { - type = bool description = "Whether to create the AWSServiceRoleForAmazonElasticsearchService role used by the OpenSearch service" + type = bool default = true } diff --git a/examples/observability/eks-cluster-with-amp-amg-opensearch/versions.tf b/examples/observability/eks-cluster-with-amp-amg-opensearch/versions.tf new file mode 100644 index 0000000000..d3d795c4be --- /dev/null +++ b/examples/observability/eks-cluster-with-amp-amg-opensearch/versions.tf @@ -0,0 +1,34 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + grafana = { + source = "grafana/grafana" + version = ">= 1.13.3" + } + tls = { + source = "hashicorp/tls" + version = ">= 3.0" + } + local = { + source = "hashicorp/local" + version = ">= 2.1" + } + } + + backend "local" { + path = "local_tf_state/terraform-main.tfstate" + } +} diff --git a/examples/tls-with-aws-pca-issuer/README.md b/examples/tls-with-aws-pca-issuer/README.md index da12c8b16b..d20e053e28 100644 --- a/examples/tls-with-aws-pca-issuer/README.md +++ b/examples/tls-with-aws-pca-issuer/README.md @@ -1,28 +1,35 @@ # TLS with AWS PCA Issuer -This example deploys the following + +This 
example deploys the following + - Basic EKS Cluster with VPC - - Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets - - Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - - Enables cert-manager module - - Enables aws-privateca-issuer module +- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets +- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets +- Enables cert-manager module +- Enables aws-privateca-issuer module - Creates AWS Certificate Manager Private Certificate Authority, enables and activates it - Creates the CRDs to fetch `tls.crt`, `tls.key` and `ca.crt`, which will be available as a Kubernetes Secret. Now you may mount the secret in the application for end-to-end TLS. ## How to Deploy + ### Prerequisites: + Ensure that you have installed the following tools on your Mac or Windows laptop before you start working with this module and running Terraform Plan and Apply + 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -3. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -4. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) ### Deployment Steps -#### Step1: Clone the repo using the command below + +#### Step 1: Clone the repo using the command below ```shell script git clone https://github.com/aws-samples/aws-eks-accelerator-for-terraform.git ``` -#### Step2: Run Terraform INIT +#### Step 2: Run Terraform INIT + Initialize a working directory with configuration files ```shell script @@ -30,7 +37,8 @@ cd examples/tls-with-aws-pca-issuer/ terraform init ``` -#### Step3: Run Terraform PLAN +#### Step 3: Run Terraform PLAN + Verify the resources created by this execution ```shell script @@ -38,7 +46,8 @@ export AWS_REGION= # Select your own region terraform plan ``` -#### Step4: Finally, Terraform APPLY +#### Step 4: Finally, Terraform APPLY + to create resources ```shell script @@ -48,29 +57,31 @@ terraform apply Enter `yes` to apply ### Configure `kubectl` and test cluster + EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. The following command is used to update the `kubeconfig` on your local machine, where you run kubectl commands to interact with your EKS Cluster. -#### Step5: Run `update-kubeconfig` command +#### Step 5: Run `update-kubeconfig` command `~/.kube/config` file gets updated with cluster details and certificate from the below command $ aws eks --region <region> update-kubeconfig --name <cluster-name> -#### Step6: List all the worker nodes by running the command below +#### Step 6: List all the worker nodes by running the command below $ kubectl get nodes -#### Step7: List all the pods running in `aws-privateca-issuer` and `cert-manager` namespace +#### Step 7: List all the pods running in `aws-privateca-issuer` and `cert-manager` namespace $ kubectl get pods -n aws-privateca-issuer $ kubectl get pods -n cert-manager -#### Step8: View the `Certificate` status. It should be in 'Ready' state. +#### Step 8: View the `Certificate` status. It should be in 'Ready' state.
$ kubectl get Certificate ## How to Destroy + The following command destroys the resources created by `terraform apply` ```shell script @@ -78,45 +89,56 @@ cd examples/tls-with-aws-pca-issuer terraform destroy --auto-approve ``` - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.1 | -| [aws](#requirement\_aws) | >= 3.66.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | | [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws-eks-accelerator-for-terraform](#module\_aws-eks-accelerator-for-terraform) | ../.. | n/a | -| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | v3.2.0 | -| [kubernetes-addons](#module\_kubernetes-addons) | ../../modules/kubernetes-addons | n/a | +| [aws\_vpc](#module\_aws\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | +| [eks\_blueprints](#module\_eks\_blueprints) | ../.. | n/a | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | ../../modules/kubernetes-addons | n/a | ## Resources | Name | Type | |------|------| +| [aws_acmpca_certificate.example](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/acmpca_certificate) | resource | +| [aws_acmpca_certificate_authority.example](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/acmpca_certificate_authority) | resource | +| [aws_acmpca_certificate_authority_certificate.example](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/acmpca_certificate_authority_certificate) | resource | +| [kubernetes_manifest.cluster_pca_issuer](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_manifest.example_pca_certificate](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | ## Inputs -No inputs. +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [certificate\_dns](#input\_certificate\_dns) | CommonName used in the Certificate, usually DNS | `string` | `"example.com"` | no | +| [certificate\_name](#input\_certificate\_name) | name for the certificate | `string` | `"example"` | no | +| [environment](#input\_environment) | Environment area, e.g. 
prod or preprod | `string` | `"preprod"` | no | +| [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | `"pca001"` | no | +| [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"dev"` | no | ## Outputs -No outputs. - - \ No newline at end of file +| Name | Description | +|------|-------------| +| [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | + diff --git a/examples/tls-with-aws-pca-issuer/main.tf b/examples/tls-with-aws-pca-issuer/main.tf index 32c88988c3..229c679e06 100644 --- a/examples/tls-with-aws-pca-issuer/main.tf +++ b/examples/tls-with-aws-pca-issuer/main.tf @@ -1,65 +1,44 @@ -terraform { - required_version = ">= 1.0.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.6.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks-blueprints.eks_cluster_id -} - provider "aws" { - region = data.aws_region.current.id - alias = "default" + region = local.region } provider "kubernetes" { - experiments { - manifest_resource = true + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] } - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token } provider "helm" { kubernetes { - host = data.aws_eks_cluster.cluster.endpoint - token = data.aws_eks_cluster_auth.cluster.token - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } } } +data "aws_availability_zones" "available" {} +data "aws_partition" "current" {} + locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - cluster_version = var.cluster_version + tenant = var.tenant # AWS account name or unique id for tenant + environment = var.environment # Environment area eg., preprod or prod + zone = var.zone # Environment with in one sub_tenant or business unit + + cluster_version = "1.21" + region = "us-west-2" + certificate_name = var.certificate_name certificate_dns = var.certificate_dns @@ -73,7 
+52,7 @@ locals { module "aws_vpc" { source = "terraform-aws-modules/vpc/aws" - version = "v3.2.0" + version = "~> 3.0" name = local.vpc_name cidr = local.vpc_cidr @@ -97,10 +76,11 @@ module "aws_vpc" { "kubernetes.io/role/internal-elb" = "1" } } + #--------------------------------------------------------------- -# Example to consume eks-blueprints module +# Example to consume eks_blueprints module #--------------------------------------------------------------- -module "eks-blueprints" { +module "eks_blueprints" { source = "../.." tenant = local.tenant @@ -126,9 +106,9 @@ module "eks-blueprints" { } } -module "eks-blueprints-kubernetes-addons" { +module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks-blueprints.eks_cluster_id + eks_cluster_id = module.eks_blueprints.eks_cluster_id aws_privateca_acmca_arn = aws_acmpca_certificate_authority.example.arn # EKS Managed Add-ons @@ -140,10 +120,9 @@ module "eks-blueprints-kubernetes-addons" { enable_cert_manager = true enable_aws_privateca_issuer = true - depends_on = [module.eks-blueprints.managed_node_groups] + depends_on = [module.eks_blueprints.managed_node_groups] } - #------------------------------- # This resource creates a AWS Certificate Manager Private Certificate Authority (ACM PCA) #------------------------------- @@ -173,8 +152,8 @@ resource "aws_acmpca_certificate" "example" { } #------------------------------- -# Associates a certificate with an AWS Certificate Manager Private Certificate Authority (ACM PCA Certificate Authority). -# An ACM PCA Certificate Authority is unable to issue certificates until it has a certificate associated with it. +# Associates a certificate with an AWS Certificate Manager Private Certificate Authority (ACM PCA Certificate Authority). +# An ACM PCA Certificate Authority is unable to issue certificates until it has a certificate associated with it. # A root level ACM PCA Certificate Authority is able to self-sign its own root certificate. 
#------------------------------- @@ -191,28 +170,26 @@ resource "aws_acmpca_certificate_authority" "example" { } } -data "aws_partition" "current" {} - - #------------------------------- # This resource creates a CRD of AWSPCAClusterIssuer Kind, which then represents the ACM PCA in K8 #------------------------------- -resource "kubernetes_manifest" "cluster-pca-issuer" { +resource "kubernetes_manifest" "cluster_pca_issuer" { manifest = { apiVersion = "awspca.cert-manager.io/v1beta1" kind = "AWSPCAClusterIssuer" metadata = { - name = module.eks-blueprints.eks_cluster_id + name = module.eks_blueprints.eks_cluster_id } spec = { arn = aws_acmpca_certificate_authority.example.arn - region : data.aws_region.current.id + region : local.region } } - depends_on = [module.eks-blueprints-kubernetes-addons] + + depends_on = [module.eks_blueprints_kubernetes_addons] } #------------------------------- @@ -236,7 +213,7 @@ resource "kubernetes_manifest" "example_pca_certificate" { issuerRef = { group = "awspca.cert-manager.io" kind = "AWSPCAClusterIssuer" - name : module.eks-blueprints.eks_cluster_id + name : module.eks_blueprints.eks_cluster_id } renewBefore = "360h0m0s" secretName = join("-", [local.certificate_name, "clusterissuer"]) # This is the name with which the K8 Secret will be available @@ -251,12 +228,8 @@ resource "kubernetes_manifest" "example_pca_certificate" { } } - depends_on = [module.eks-blueprints-kubernetes-addons, kubernetes_manifest.cluster-pca-issuer] + depends_on = [ + module.eks_blueprints_kubernetes_addons, + kubernetes_manifest.cluster_pca_issuer, + ] } - - - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks-blueprints.configure_kubectl -} \ No newline at end of file diff --git a/examples/tls-with-aws-pca-issuer/outputs.tf b/examples/tls-with-aws-pca-issuer/outputs.tf new file mode 100644 index 0000000000..55552d3138 --- /dev/null +++ b/examples/tls-with-aws-pca-issuer/outputs.tf @@ -0,0 +1,4 @@ +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = module.eks_blueprints.configure_kubectl +} diff --git a/examples/tls-with-aws-pca-issuer/variables.tf b/examples/tls-with-aws-pca-issuer/variables.tf index 438f6df80d..74fe16c2d3 100644 --- a/examples/tls-with-aws-pca-issuer/variables.tf +++ b/examples/tls-with-aws-pca-issuer/variables.tf @@ -1,9 +1,3 @@ -variable "cluster_version" { - type = string - description = "Kubernetes Version" - default = "1.21" -} - variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" @@ -32,4 +26,4 @@ variable "certificate_dns" { type = string description = "CommonName used in the Certificate, usually DNS " default = "example.com" -} \ No newline at end of file +} diff --git a/examples/tls-with-aws-pca-issuer/versions.tf b/examples/tls-with-aws-pca-issuer/versions.tf new file mode 100644 index 0000000000..df9b71a0f0 --- /dev/null +++ b/examples/tls-with-aws-pca-issuer/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } + + backend "local" { + path = 
"local_tf_state/terraform-main.tfstate" + } +} diff --git a/modules/aws-eks-fargate-profiles/README.md b/modules/aws-eks-fargate-profiles/README.md index 7b662d709f..7c9730d60a 100644 --- a/modules/aws-eks-fargate-profiles/README.md +++ b/modules/aws-eks-fargate-profiles/README.md @@ -4,18 +4,21 @@ The Fargate profile allows you to declare which pods run on Fargate for Amazon EKS Cluster. This declaration is done through the profile’s selectors. Each profile can have up to five selectors that contain a namespace and optional labels. You must define a namespace for every selector. The label field consists of multiple optional key-value pairs -Checkout the usage docs for Fargate Profiles [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/node-groups/) +Checkout the usage docs for Fargate Profiles [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/node-groups/) - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -46,5 +49,4 @@ No modules. |------|-------------| | [eks\_fargate\_profile\_id](#output\_eks\_fargate\_profile\_id) | EKS Cluster name and EKS Fargate Profile name separated by a colon | | [eks\_fargate\_profile\_role\_name](#output\_eks\_fargate\_profile\_role\_name) | Name of the EKS Fargate Profile IAM role | - - + diff --git a/modules/aws-eks-fargate-profiles/versions.tf b/modules/aws-eks-fargate-profiles/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/aws-eks-fargate-profiles/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/aws-eks-managed-node-groups/README.md b/modules/aws-eks-managed-node-groups/README.md index 23719946c6..1a23e1a1e3 100644 --- a/modules/aws-eks-managed-node-groups/README.md +++ b/modules/aws-eks-managed-node-groups/README.md @@ -6,22 +6,26 @@ Amazon EKS Managed Node Groups lets you create, update, scale, and terminate wor This module allows you to create ON-DEMAND, SPOT and BOTTLEROCKET(with custom ami) managed nodegroups. You can instantiate the module once with map of node group values to create multiple node groups. -*NOTE*: - - You can't create managed nodes in an AWS Region where you have AWS Outposts, AWS Wavelength, or AWS Local Zones enabled. - - You can create self-managed nodes in an AWS Region where you have AWS Outposts, AWS Wavelength, or AWS Local Zones enabled +_NOTE_: -Checkout the usage docs for Managed Node groups [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/node-groups/) +- You can't create managed nodes in an AWS Region where you have AWS Outposts, AWS Wavelength, or AWS Local Zones enabled. +- You can create self-managed nodes in an AWS Region where you have AWS Outposts, AWS Wavelength, or AWS Local Zones enabled - +Checkout the usage docs for Managed Node groups [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/node-groups/) + + ## Requirements -No requirements. 
+| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -63,5 +67,4 @@ No modules. | [managed\_nodegroup\_launch\_template\_id](#output\_managed\_nodegroup\_launch\_template\_id) | Launch Template ID for EKS Managed Node Group | | [managed\_nodegroup\_launch\_template\_latest\_version](#output\_managed\_nodegroup\_launch\_template\_latest\_version) | Launch Template version for EKS Managed Node Group | | [managed\_nodegroup\_status](#output\_managed\_nodegroup\_status) | EKS Managed Node Group status | - - + diff --git a/modules/aws-eks-managed-node-groups/iam.tf b/modules/aws-eks-managed-node-groups/iam.tf index 5482fe8e91..fad85023db 100644 --- a/modules/aws-eks-managed-node-groups/iam.tf +++ b/modules/aws-eks-managed-node-groups/iam.tf @@ -20,21 +20,29 @@ resource "aws_iam_instance_profile" "managed_ng" { } } +# TODO - fix at next breaking change +# tflint-ignore: terraform_naming_convention resource "aws_iam_role_policy_attachment" "managed_ng_AmazonEKSWorkerNodePolicy" { policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy" role = aws_iam_role.managed_ng.name } +# TODO - fix at next breaking change +# tflint-ignore: terraform_naming_convention resource "aws_iam_role_policy_attachment" "managed_ng_AmazonEKS_CNI_Policy" { policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" role = aws_iam_role.managed_ng.name } +# TODO - fix at next breaking change +# tflint-ignore: terraform_naming_convention resource "aws_iam_role_policy_attachment" "managed_ng_AmazonEC2ContainerRegistryReadOnly" { policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly" role = aws_iam_role.managed_ng.name } +# TODO - fix at next breaking change +# tflint-ignore: terraform_naming_convention resource "aws_iam_role_policy_attachment" "managed_ng_AmazonSSMManagedInstanceCore" { policy_arn = "${local.policy_arn_prefix}/AmazonSSMManagedInstanceCore" role = aws_iam_role.managed_ng.name diff --git a/modules/aws-eks-managed-node-groups/versions.tf b/modules/aws-eks-managed-node-groups/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/aws-eks-managed-node-groups/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/aws-eks-self-managed-node-groups/README.md b/modules/aws-eks-self-managed-node-groups/README.md index 36224171e5..84c460b900 100644 --- a/modules/aws-eks-self-managed-node-groups/README.md +++ b/modules/aws-eks-self-managed-node-groups/README.md @@ -1,22 +1,26 @@ # AWS Self-Managed Node Groups # Introduction + Amazon EKS Self Managed Node Groups lets you create, update, scale, and terminate worker nodes for your EKS cluster. All Self managed nodes are provisioned as part of an Amazon EC2 Auto Scaling group that's managed for you by this module. Moreover, all resources including Amazon EC2 instances and Auto Scaling groups run within your AWS account. This module allows you to create on-demand or spot self managed Linux or Windows nodegroups. You can instantiate the module once with map of node group values to create multiple self managed node groups. 
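For illustration, a minimal sketch of that map-based pattern when consuming the parent module; the attribute names below are assumptions for illustration only, not taken from this change set, so check the module's variables for the exact schema:

```terraform
# Illustrative sketch only: two self-managed node groups declared as one map.
# Attribute names are assumed from the module docs, not from this diff.
module "eks_blueprints" {
  source = "../.."

  # ... cluster name, version, VPC, and subnet settings elided ...

  self_managed_node_groups = {
    on_demand = {
      node_group_name = "self-managed-ondemand"
      instance_type   = "m5.large"
      max_size        = 3
    }
    windows = {
      node_group_name    = "self-managed-windows"
      instance_type      = "m5.large"
      launch_template_os = "windows" # assumed value; see module docs
      max_size           = 2
    }
  }
}
```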
By default, the module uses the latest available version of Amazon-provided EKS-optimized AMIs for Amazon Linux 2, Bottlerocket, or Windows 2019 Server Core operating systems. You can override the image via the custom_ami_id input variable. -Checkout the usage docs for Self-managed Node groups [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/node-groups/) +Checkout the usage docs for Self-managed Node groups [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/node-groups/) - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -58,5 +62,4 @@ No requirements. | [self\_managed\_nodegroup\_iam\_instance\_profile\_id](#output\_self\_managed\_nodegroup\_iam\_instance\_profile\_id) | IAM Instance Profile ID for EKS Self Managed Node Group | | [self\_managed\_nodegroup\_iam\_role\_arns](#output\_self\_managed\_nodegroup\_iam\_role\_arns) | Self managed groups IAM role arns | | [self\_managed\_nodegroup\_name](#output\_self\_managed\_nodegroup\_name) | EKS Self Managed node group id | - - + diff --git a/modules/aws-eks-self-managed-node-groups/versions.tf b/modules/aws-eks-self-managed-node-groups/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/aws-eks-self-managed-node-groups/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/aws-eks-teams/README.md b/modules/aws-eks-teams/README.md index 771e32949a..908dea7f35 100644 --- a/modules/aws-eks-teams/README.md +++ b/modules/aws-eks-teams/README.md @@ -113,21 +113,23 @@ aws eks update-kubeconfig --name ${eks_cluster_id} --region ${AWS_REGION} --role Make sure to replace the `${eks_cluster_id}`, `${AWS_REGION}` and `${TEAM_ROLE_ARN}` with the actual values. - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13 | -| [kubectl](#requirement\_kubectl) | >= 1.7.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | -| [kubectl](#provider\_kubectl) | >= 1.7.0 | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 3.72 | +| [kubectl](#provider\_kubectl) | >= 1.14 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -177,5 +179,4 @@ No modules. 
| [platform\_teams\_configure\_kubectl](#output\_platform\_teams\_configure\_kubectl) | Configure kubectl for each Platform Team: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | | [platform\_teams\_iam\_role\_arn](#output\_platform\_teams\_iam\_role\_arn) | IAM role ARN for Platform Teams | | [team\_sa\_irsa\_iam\_role\_arn](#output\_team\_sa\_irsa\_iam\_role\_arn) | IAM role ARN for Teams EKS Service Account (IRSA) | - - + diff --git a/modules/aws-eks-teams/versions.tf b/modules/aws-eks-teams/versions.tf index 8b8c281fba..c91b6fba83 100644 --- a/modules/aws-eks-teams/versions.tf +++ b/modules/aws-eks-teams/versions.tf @@ -1,10 +1,18 @@ terraform { - required_version = ">= 0.13" + required_version = ">= 1.0.0" required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } kubectl = { source = "gavinbunney/kubectl" - version = ">= 1.7.0" + version = ">= 1.14" } } } diff --git a/modules/aws-kms/README.md b/modules/aws-kms/README.md index 7866c5dfc4..5320fa721c 100644 --- a/modules/aws-kms/README.md +++ b/modules/aws-kms/README.md @@ -1,27 +1,10 @@ - # AWS KMS module Terraform module that creates an AWS KMS key and assigns it an alias, policy, and tags. -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -SPDX-License-Identifier: MIT-0 - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ## Usage -``` terraform +```terraform module "kms" { source = "./modules/aws-kms" @@ -30,19 +13,21 @@ module "kms" { policy = data.aws_iam_policy_document.key.json tags = local.tags } - ``` - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -72,5 +57,4 @@ No modules. |------|-------------| | [key\_arn](#output\_key\_arn) | The Amazon Resource Name (ARN) of the key. | | [key\_id](#output\_key\_id) | The globally unique identifier for the key. 
| - - + diff --git a/modules/aws-kms/versions.tf b/modules/aws-kms/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/aws-kms/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/aws-managed-prometheus/README.md b/modules/aws-managed-prometheus/README.md index d230774dd7..d8315cf939 100644 --- a/modules/aws-managed-prometheus/README.md +++ b/modules/aws-managed-prometheus/README.md @@ -1,17 +1,18 @@ # AWS Managed Prometheus - - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -37,5 +38,4 @@ No modules. | [amazon\_prometheus\_workspace\_arn](#output\_amazon\_prometheus\_workspace\_arn) | Amazon Managed Prometheus Workspace ARN | | [amazon\_prometheus\_workspace\_endpoint](#output\_amazon\_prometheus\_workspace\_endpoint) | Amazon Managed Prometheus Workspace Endpoint | | [amazon\_prometheus\_workspace\_id](#output\_amazon\_prometheus\_workspace\_id) | Amazon Managed Prometheus Workspace ID | - - + diff --git a/modules/aws-managed-prometheus/versions.tf b/modules/aws-managed-prometheus/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/aws-managed-prometheus/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/aws-resource-tags/README.md b/modules/aws-resource-tags/README.md index 662f8f77a2..722008cb54 100644 --- a/modules/aws-resource-tags/README.md +++ b/modules/aws-resource-tags/README.md @@ -1,7 +1,9 @@ - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | ## Providers @@ -32,5 +34,4 @@ No resources. |------|-------------| | [id](#output\_id) | aws resource id | | [tags](#output\_tags) | aws resource tags | - - + diff --git a/modules/aws-resource-tags/variables.tf b/modules/aws-resource-tags/variables.tf index 7020701bce..494bd7df65 100644 --- a/modules/aws-resource-tags/variables.tf +++ b/modules/aws-resource-tags/variables.tf @@ -1,28 +1,30 @@ -/*----------------------------------------------------------------*/ -//NAMING FIELDS -/*----------------------------------------------------------------*/ variable "org" { type = string description = "tenant, which could be your organization name, e.g. aws'" default = "" } + variable "tenant" { type = string description = "Account Name or unique account unique id e.g., apps or management or aws007" } + variable "environment" { type = string description = "zone, e.g. 'prod', 'preprod' " } + variable "zone" { type = string description = "Environment, e.g. 'load', 'zone', 'dev', 'uat'" } + variable "resource" { type = string description = "Solution name, e.g. 
'app' or 'cluster'" default = "" } + variable "tags" { type = map(string) default = {} diff --git a/modules/aws-resource-tags/versions.tf b/modules/aws-resource-tags/versions.tf new file mode 100644 index 0000000000..429c0b36d0 --- /dev/null +++ b/modules/aws-resource-tags/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 1.0.0" +} diff --git a/modules/emr-on-eks/README.md b/modules/emr-on-eks/README.md index 8e04c1de4d..8e3d4d759a 100644 --- a/modules/emr-on-eks/README.md +++ b/modules/emr-on-eks/README.md @@ -1,18 +1,23 @@ # EMR on EKS Module Amazon EMR on EKS provides a deployment option for Amazon EMR that allows you to run open-source big data frameworks on Amazon Elastic Kubernetes Service (Amazon EKS). With this deployment option, you can focus on running analytics workloads while Amazon EMR on EKS builds, configures, and manages containers for open-source applications. - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [null](#requirement\_null) | >= 3.1 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | -| [kubernetes](#provider\_kubernetes) | n/a | -| [null](#provider\_null) | n/a | +| [aws](#provider\_aws) | >= 3.72 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | +| [null](#provider\_null) | >= 3.1 | ## Modules @@ -50,5 +55,4 @@ No modules. |------|-------------| | [emr\_on\_eks\_role\_arn](#output\_emr\_on\_eks\_role\_arn) | IAM execution role ARN for EMR on EKS | | [emr\_on\_eks\_role\_id](#output\_emr\_on\_eks\_role\_id) | IAM execution role ID for EMR on EKS | - - + diff --git a/modules/emr-on-eks/versions.tf b/modules/emr-on-eks/versions.tf new file mode 100644 index 0000000000..8620e6f3c4 --- /dev/null +++ b/modules/emr-on-eks/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + null = { + source = "hashicorp/null" + version = ">= 3.1" + } + } +} diff --git a/modules/irsa/README.md b/modules/irsa/README.md index 48b615a798..75fa1cf3e2 100644 --- a/modules/irsa/README.md +++ b/modules/irsa/README.md @@ -2,23 +2,35 @@ This Terraform module creates the following resources - 1. Kubernetes Namespace for Kubernetes Addon - 2. Service Account for Kubernetes Addon - 3. IAM Role for Service Account with OIDC assume role policy - 4. Creates default policy required for Addon - 5. Attaches the additional IAM policies provided by consumer module +1. Kubernetes Namespace for Kubernetes Addon +2. Service Account for Kubernetes Addon +3. IAM Role for Service Account with OIDC assume role policy +4. Creates default policy required for Addon +5. Attaches the additional IAM policies provided by consumer module - +## Learn more + +## Blogs + +- [Introducing fine-grained IAM roles for service accounts](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) +- [Cross account IAM roles for Kubernetes service accounts](https://aws.amazon.com/blogs/containers/cross-account-iam-roles-for-kubernetes-service-accounts/) +- [Enabling cross-account access to Amazon EKS cluster resources](https://aws.amazon.com/blogs/containers/enabling-cross-account-access-to-amazon-eks-cluster-resources/) + + ## Requirements -No requirements. 
+| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 3.72 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -50,13 +62,4 @@ No modules. |------|-------------| | [irsa\_iam\_role\_arn](#output\_irsa\_iam\_role\_arn) | IAM role ARN for your service account | | [irsa\_iam\_role\_name](#output\_irsa\_iam\_role\_name) | IAM role name for your service account | - - - -## Learn more - -## Blogs - -* [Introducing fine-grained IAM roles for service accounts](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) -* [Cross account IAM roles for Kubernetes service accounts](https://aws.amazon.com/blogs/containers/cross-account-iam-roles-for-kubernetes-service-accounts/) -* [Enabling cross-account access to Amazon EKS cluster resources](https://aws.amazon.com/blogs/containers/enabling-cross-account-access-to-amazon-eks-cluster-resources/) + diff --git a/modules/irsa/main.tf b/modules/irsa/main.tf index abbad4e593..52f2c80dd1 100644 --- a/modules/irsa/main.tf +++ b/modules/irsa/main.tf @@ -4,7 +4,7 @@ resource "kubernetes_namespace_v1" "irsa" { name = var.kubernetes_namespace labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } @@ -16,7 +16,7 @@ resource "kubernetes_service_account_v1" "irsa" { namespace = var.kubernetes_namespace annotations = var.irsa_iam_policies != null ? { "eks.amazonaws.com/role-arn" : aws_iam_role.irsa[0].arn } : null labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } @@ -52,7 +52,7 @@ resource "aws_iam_role" "irsa" { tags = merge( { "Name" = format("%s-%s-%s", var.addon_context.eks_cluster_id, trim(var.kubernetes_service_account, "-*"), "irsa"), - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" }, var.addon_context.tags ) diff --git a/modules/irsa/versions.tf b/modules/irsa/versions.tf new file mode 100644 index 0000000000..d2ddf87cc2 --- /dev/null +++ b/modules/irsa/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/README.md b/modules/kubernetes-addons/README.md index 18090a84d0..09823abb83 100644 --- a/modules/kubernetes-addons/README.md +++ b/modules/kubernetes-addons/README.md @@ -1,20 +1,18 @@ - +# Kubernetes Addons + + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.66.0 | -| [helm](#requirement\_helm) | >= 2.4.1 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | -| [local](#requirement\_local) | 2.1.0 | -| [null](#requirement\_null) | 3.1.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.66.0 | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -31,6 +29,7 @@ | [aws\_kube\_proxy](#module\_aws\_kube\_proxy) | ./aws-kube-proxy | n/a | | 
[aws\_load\_balancer\_controller](#module\_aws\_load\_balancer\_controller) | ./aws-load-balancer-controller | n/a | | [aws\_node\_termination\_handler](#module\_aws\_node\_termination\_handler) | ./aws-node-termination-handler | n/a | +| [aws\_privateca\_issuer](#module\_aws\_privateca\_issuer) | ./aws-privateca-issuer | n/a | | [aws\_vpc\_cni](#module\_aws\_vpc\_cni) | ./aws-vpc-cni | n/a | | [cert\_manager](#module\_cert\_manager) | ./cert-manager | n/a | | [cluster\_autoscaler](#module\_cluster\_autoscaler) | ./cluster-autoscaler | n/a | @@ -87,6 +86,9 @@ | [aws\_load\_balancer\_controller\_helm\_config](#input\_aws\_load\_balancer\_controller\_helm\_config) | AWS Load Balancer Controller Helm Chart config | `any` | `{}` | no | | [aws\_node\_termination\_handler\_helm\_config](#input\_aws\_node\_termination\_handler\_helm\_config) | AWS Node Termination Handler Helm Chart config | `any` | `{}` | no | | [aws\_node\_termination\_handler\_irsa\_policies](#input\_aws\_node\_termination\_handler\_irsa\_policies) | Additional IAM policies for a IAM role for service accounts | `list(string)` | `[]` | no | +| [aws\_privateca\_acmca\_arn](#input\_aws\_privateca\_acmca\_arn) | ARN of AWS ACM PCA | `string` | `""` | no | +| [aws\_privateca\_issuer\_helm\_config](#input\_aws\_privateca\_issuer\_helm\_config) | PCA Issuer Helm Chart config | `any` | `{}` | no | +| [aws\_privateca\_issuer\_irsa\_policies](#input\_aws\_privateca\_issuer\_irsa\_policies) | IAM policy ARNs for AWS ACM PCA IRSA | `list(string)` | `[]` | no | | [cert\_manager\_helm\_config](#input\_cert\_manager\_helm\_config) | Cert Manager Helm Chart config | `any` | `{}` | no | | [cluster\_autoscaler\_helm\_config](#input\_cluster\_autoscaler\_helm\_config) | Cluster Autoscaler Helm Chart config | `any` | `{}` | no | | [crossplane\_aws\_provider](#input\_crossplane\_aws\_provider) | AWS Provider config for Crossplane |
<pre>object({<br>    enable                   = bool<br>    provider_aws_version     = string<br>    additional_irsa_policies = list(string)<br>  })</pre> | <pre>{<br>  "additional_irsa_policies": [],<br>  "enable": false,<br>  "provider_aws_version": "v0.24.1"<br>}</pre>
| no | @@ -108,6 +110,7 @@ | [enable\_aws\_for\_fluentbit](#input\_enable\_aws\_for\_fluentbit) | Enable AWS for FluentBit add-on | `bool` | `false` | no | | [enable\_aws\_load\_balancer\_controller](#input\_enable\_aws\_load\_balancer\_controller) | Enable AWS Load Balancer Controller add-on | `bool` | `false` | no | | [enable\_aws\_node\_termination\_handler](#input\_enable\_aws\_node\_termination\_handler) | Enable AWS Node Termination Handler add-on | `bool` | `false` | no | +| [enable\_aws\_privateca\_issuer](#input\_enable\_aws\_privateca\_issuer) | Enable PCA Issuer | `bool` | `false` | no | | [enable\_cert\_manager](#input\_enable\_cert\_manager) | Enable Cert Manager add-on | `bool` | `false` | no | | [enable\_cluster\_autoscaler](#input\_enable\_cluster\_autoscaler) | Enable Cluster autoscaler add-on | `bool` | `false` | no | | [enable\_crossplane](#input\_enable\_crossplane) | Enable Crossplane add-on | `bool` | `false` | no | @@ -139,9 +142,7 @@ | [keda\_helm\_config](#input\_keda\_helm\_config) | KEDA Event-based autoscaler add-on config | `any` | `{}` | no | | [keda\_irsa\_policies](#input\_keda\_irsa\_policies) | Additional IAM policies for a IAM role for service accounts | `list(string)` | `[]` | no | | [kubernetes\_dashboard\_helm\_config](#input\_kubernetes\_dashboard\_helm\_config) | Kubernetes Dashboard Helm Chart config | `any` | `null` | no | -| [kubernetes\_dashboard\_irsa\_policies](#input\_kubernetes\_dashboard\_irsa\_policies) | IAM policy ARNs for Kubernetes Dashboard IRSA | `list(string)` | `[]` | no | | [metrics\_server\_helm\_config](#input\_metrics\_server\_helm\_config) | Metrics Server Helm Chart config | `any` | `{}` | no | -| [node\_groups\_iam\_role\_arn](#input\_node\_groups\_iam\_role\_arn) | Node Groups IAM role ARNs | `list(string)` | `[]` | no | | [ondat\_admin\_password](#input\_ondat\_admin\_password) | Password for Ondat admin user | `string` | `"storageos"` | no | | [ondat\_admin\_username](#input\_ondat\_admin\_username) | Username for Ondat admin user | `string` | `"storageos"` | no | | [ondat\_create\_cluster](#input\_ondat\_create\_cluster) | Create cluster resources | `bool` | `true` | no | @@ -168,10 +169,8 @@ | [vault\_helm\_config](#input\_vault\_helm\_config) | HashiCorp Vault Helm Chart config | `any` | `null` | no | | [vpa\_helm\_config](#input\_vpa\_helm\_config) | VPA Helm Chart config | `any` | `null` | no | | [yunikorn\_helm\_config](#input\_yunikorn\_helm\_config) | YuniKorn Helm Chart config | `any` | `null` | no | -| [yunikorn\_irsa\_policies](#input\_yunikorn\_irsa\_policies) | IAM policy ARNs for Yunikorn IRSA | `list(string)` | `[]` | no | ## Outputs No outputs. - - + diff --git a/modules/kubernetes-addons/agones/README.md b/modules/kubernetes-addons/agones/README.md index 1da2c83169..4e5f629fda 100644 --- a/modules/kubernetes-addons/agones/README.md +++ b/modules/kubernetes-addons/agones/README.md @@ -1,33 +1,39 @@ # Agones Helm Chart ## Agones + Agones is an Open source Kubernetes Controller with custom resource definitions and is used to create, run, manage and scale dedicated game server processes within Kubernetes clusters using standard Kubernetes tooling and APIs. This model also allows any matchmaker to interact directly with Agones via the Kubernetes API to provision a dedicated game server ## GameLift -Amazon GameLift enables developers to deploy, operate, and scale dedicated, low-cost servers in the cloud for session-based, multiplayer games. 
Built on AWS global computing infrastructure, GameLift helps deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet worldwide player demand. - https://github.com/googleforgames/agones/tree/main/install/helm/agones - https://artifacthub.io/packages/helm/agones/agones - -DOCKER IMAGES +Amazon GameLift enables developers to deploy, operate, and scale dedicated, low-cost servers in the cloud for session-based, multiplayer games. Built on AWS global computing infrastructure, GameLift helps deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet worldwide player demand. - gcr.io/agones-images/agones-controller:1.15.0-rc - gcr.io/agones-images/agones-ping:1.15.0-rc - gcr.io/agones-images/agones-allocator:1.15.0-rc +https://github.com/googleforgames/agones/tree/main/install/helm/agones +https://artifacthub.io/packages/helm/agones/agones +### Docker Images +``` +gcr.io/agones-images/agones-controller:1.15.0-rc +gcr.io/agones-images/agones-ping:1.15.0-rc +gcr.io/agones-images/agones-allocator:1.15.0-rc +``` - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 3.72 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -57,5 +63,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/agones/main.tf b/modules/kubernetes-addons/agones/main.tf index 332d4f1914..05a4771edf 100644 --- a/modules/kubernetes-addons/agones/main.tf +++ b/modules/kubernetes-addons/agones/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "this" { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/agones/versions.tf b/modules/kubernetes-addons/agones/versions.tf new file mode 100644 index 0000000000..d2ddf87cc2 --- /dev/null +++ b/modules/kubernetes-addons/agones/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/argo-rollouts/README.md b/modules/kubernetes-addons/argo-rollouts/README.md index 711b8d8aae..6079ac7af5 100644 --- a/modules/kubernetes-addons/argo-rollouts/README.md +++ b/modules/kubernetes-addons/argo-rollouts/README.md @@ -6,16 +6,19 @@ Argo Rollouts (optionally) integrates with ingress controllers and service meshes, leveraging their traffic shaping abilities to gradually shift traffic to the new version during an update. Additionally, Rollouts can query and interpret metrics from various providers to verify key KPIs and drive automated promotion or rollback during an update. - + ## Requirements -No requirements. 
+| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -42,5 +45,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/argo-rollouts/main.tf b/modules/kubernetes-addons/argo-rollouts/main.tf index ccb30fdb6f..f622028c10 100644 --- a/modules/kubernetes-addons/argo-rollouts/main.tf +++ b/modules/kubernetes-addons/argo-rollouts/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "this" { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/argo-rollouts/versions.tf b/modules/kubernetes-addons/argo-rollouts/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/argo-rollouts/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/argocd/README.md b/modules/kubernetes-addons/argocd/README.md index 3586932ac4..9aa7ee5ad5 100644 --- a/modules/kubernetes-addons/argocd/README.md +++ b/modules/kubernetes-addons/argocd/README.md @@ -6,19 +6,23 @@ Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes. Application definitions, configurations, and environments should be declarative and version controlled. Application deployment and lifecycle management should be automated, auditable, and easy to understand. - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [helm](#requirement\_helm) | >= 2.4.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | -| [helm](#provider\_helm) | n/a | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 3.72 | +| [helm](#provider\_helm) | >= 2.4.1 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -51,5 +55,4 @@ No requirements. ## Outputs No outputs. 
- - + diff --git a/modules/kubernetes-addons/argocd/main.tf b/modules/kubernetes-addons/argocd/main.tf index 1063be5d19..ff55f4fddc 100644 --- a/modules/kubernetes-addons/argocd/main.tf +++ b/modules/kubernetes-addons/argocd/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "this" { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/argocd/outputs.tf b/modules/kubernetes-addons/argocd/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/argocd/versions.tf b/modules/kubernetes-addons/argocd/versions.tf new file mode 100644 index 0000000000..9ac174272e --- /dev/null +++ b/modules/kubernetes-addons/argocd/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } +} diff --git a/modules/kubernetes-addons/aws-cloudwatch-metrics/README.md b/modules/kubernetes-addons/aws-cloudwatch-metrics/README.md index 10e1408a08..64a2773ffd 100644 --- a/modules/kubernetes-addons/aws-cloudwatch-metrics/README.md +++ b/modules/kubernetes-addons/aws-cloudwatch-metrics/README.md @@ -2,10 +2,15 @@ This add-on configures [AWS CloudWatch Agent](https://github.com/aws/eks-charts/tree/master/stable/aws-cloudwatch-metrics) used for CloudWatch [Container Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights.html). Use CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects. - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [helm](#requirement\_helm) | >= 2.4.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers @@ -35,5 +40,4 @@ No resources. 
| Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/aws-cloudwatch-metrics/versions.tf b/modules/kubernetes-addons/aws-cloudwatch-metrics/versions.tf new file mode 100644 index 0000000000..9ac174272e --- /dev/null +++ b/modules/kubernetes-addons/aws-cloudwatch-metrics/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } +} diff --git a/modules/kubernetes-addons/aws-coredns/README.md b/modules/kubernetes-addons/aws-coredns/README.md index da9181872a..562b5677dd 100644 --- a/modules/kubernetes-addons/aws-coredns/README.md +++ b/modules/kubernetes-addons/aws-coredns/README.md @@ -1,17 +1,21 @@ # coredns + [coredns](https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html) CoreDNS is a flexible, extensible DNS server that can serve as the Kubernetes cluster DNS. The CoreDNS Pods provide name resolution for all Pods in the cluster. The CoreDNS Pods can be deployed to Fargate nodes as well. - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -33,5 +37,4 @@ No modules. ## Outputs No outputs. - - + diff --git a/modules/kubernetes-addons/aws-coredns/outputs.tf b/modules/kubernetes-addons/aws-coredns/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/aws-coredns/versions.tf b/modules/kubernetes-addons/aws-coredns/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-coredns/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-ebs-csi-driver/README.md b/modules/kubernetes-addons/aws-ebs-csi-driver/README.md index bd62bbd1b5..2e533aafcd 100644 --- a/modules/kubernetes-addons/aws-ebs-csi-driver/README.md +++ b/modules/kubernetes-addons/aws-ebs-csi-driver/README.md @@ -3,16 +3,19 @@ [aws-ebs-csi-driver](https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html) The EBS CSI driver provides a CSI interface used by container orchestrators to manage the lifecycle of Amazon EBS volumes. Availability in EKS add-ons in preview enables a simple experience for attaching persistent storage to an EKS cluster. The EBS CSI driver can now be installed, managed, and updated directly through the EKS console, CLI, and API - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -38,5 +41,4 @@ No requirements. ## Outputs No outputs. 
- - + diff --git a/modules/kubernetes-addons/aws-ebs-csi-driver/outputs.tf b/modules/kubernetes-addons/aws-ebs-csi-driver/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/aws-ebs-csi-driver/versions.tf b/modules/kubernetes-addons/aws-ebs-csi-driver/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-ebs-csi-driver/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-efs-csi-driver/README.md b/modules/kubernetes-addons/aws-efs-csi-driver/README.md index 500002563c..a79f98a35a 100644 --- a/modules/kubernetes-addons/aws-efs-csi-driver/README.md +++ b/modules/kubernetes-addons/aws-efs-csi-driver/README.md @@ -1,15 +1,18 @@ # AWS EFS CSI driver Helm Chart - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -38,5 +41,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/aws-efs-csi-driver/versions.tf b/modules/kubernetes-addons/aws-efs-csi-driver/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-efs-csi-driver/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-for-fluentbit/README.md b/modules/kubernetes-addons/aws-for-fluentbit/README.md index fb9b941c00..e87593f816 100644 --- a/modules/kubernetes-addons/aws-for-fluentbit/README.md +++ b/modules/kubernetes-addons/aws-for-fluentbit/README.md @@ -1,21 +1,24 @@ # AWS for Fluent Bit + Fluent Bit is an open source Log Processor and Forwarder which allows you to collect any data like metrics and logs from different sources, enrich them with filters and send them to multiple destinations. AWS provides a Fluent Bit image with plugins for CloudWatch Logs, Kinesis Data Firehose, Kinesis Data Stream and Amazon OpenSearch Service. This add-on is configured to stream the worker node logs to CloudWatch Logs by default. It can be configured to stream the logs to additional destinations like Kinesis Data Firehose, Kinesis Data Streams and Amazon OpenSearch Service by passing the custom `values.yaml`. See this [Helm Chart](https://github.com/aws/eks-charts/tree/master/stable/aws-for-fluent-bit) for more details. - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -53,5 +56,4 @@ No requirements. 
| [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | | [cw\_log\_group\_arn](#output\_cw\_log\_group\_arn) | AWS Fluent Bit CloudWatch Log Group ARN | | [cw\_log\_group\_name](#output\_cw\_log\_group\_name) | AWS Fluent Bit CloudWatch Log Group Name | - - + diff --git a/modules/kubernetes-addons/aws-for-fluentbit/versions.tf b/modules/kubernetes-addons/aws-for-fluentbit/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-for-fluentbit/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-kube-proxy/README.md b/modules/kubernetes-addons/aws-kube-proxy/README.md index 0c59184e18..bacfcdd010 100644 --- a/modules/kubernetes-addons/aws-kube-proxy/README.md +++ b/modules/kubernetes-addons/aws-kube-proxy/README.md @@ -1,17 +1,21 @@ # kube-proxy + [kube-proxy](https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html) Kube-proxy maintains network rules on each Amazon EC2 node. It enables network communication to your pods. Kube-proxy is not deployed to Fargate nodes - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -33,5 +37,4 @@ No modules. ## Outputs No outputs. - - + diff --git a/modules/kubernetes-addons/aws-kube-proxy/outputs.tf b/modules/kubernetes-addons/aws-kube-proxy/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/aws-kube-proxy/versions.tf b/modules/kubernetes-addons/aws-kube-proxy/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-kube-proxy/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-load-balancer-controller/README.md b/modules/kubernetes-addons/aws-load-balancer-controller/README.md index 4d9ba04cf8..41fdcd2b03 100644 --- a/modules/kubernetes-addons/aws-load-balancer-controller/README.md +++ b/modules/kubernetes-addons/aws-load-balancer-controller/README.md @@ -4,8 +4,8 @@ AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. -* It satisfies Kubernetes Ingress resources by provisioning Application Load Balancers. -* It satisfies Kubernetes Service resources by provisioning Network Load Balancers. +- It satisfies Kubernetes Ingress resources by provisioning Application Load Balancers. +- It satisfies Kubernetes Service resources by provisioning Network Load Balancers. 
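As a quick reference, here is a hedged sketch of enabling this controller through the `kubernetes-addons` module updated in this change set; the enable flag and Helm config input mirror the variables documented above, while the chart version shown is purely illustrative:

```terraform
# Sketch, not a definitive configuration: wire the controller through the
# kubernetes-addons module. Only add-on-related inputs are shown.
module "eks_blueprints_kubernetes_addons" {
  source = "../../modules/kubernetes-addons"

  eks_cluster_id = module.eks_blueprints.eks_cluster_id

  enable_aws_load_balancer_controller = true

  # Optional Helm overrides; the version pin below is an illustrative value
  aws_load_balancer_controller_helm_config = {
    version = "1.3.1"
  }
}
```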
# Helm Chart @@ -19,54 +19,55 @@ Add Helm repo for LB Ingress Controller https://artifacthub.io/packages/helm/aws/aws-load-balancer-controller - # Docker Image for LB ingress controller ###### Instructions to upload LB ingress controller Docker image to AWS ECR -Step1: Get the latest docker image from this link +Step 1: Get the latest docker image from this link https://github.com/aws/eks-charts/blob/master/stable/aws-load-balancer-controller/values.yaml -Step2: Download the docker image to your local Mac/Laptop +Step 2: Download the docker image to your local Mac/Laptop $ aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 602401143452.dkr.ecr.us-west-2.amazonaws.com $ docker pull 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller:v2.2.1 -Step3: Retrieve an authentication token and authenticate your Docker client to your registry. Use the AWS CLI: +Step 3: Retrieve an authentication token and authenticate your Docker client to your registry. Use the AWS CLI: $ aws ecr get-login-password --region eu-west-1 | docker login --username AWS --password-stdin .dkr.ecr.eu-west-1.amazonaws.com -Step4: Create an ECR repo for LB ingress controller if you don't have one +Step 4: Create an ECR repo for LB ingress controller if you don't have one $ aws ecr create-repository \ --repository-name amazon/aws-load-balancer-controller \ --image-scanning-configuration scanOnPush=true -Step5: After the build completes, tag your image so, you can push the image to this repository: +Step 5: After the build completes, tag your image so you can push the image to this repository: $ docker tag 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller:v2.2.1 .dkr.ecr.eu-west-1.amazonaws.com/amazon/aws-load-balancer-controller:v2.2.1 -Step6: Run the following command to push this image to your newly created AWS repository: +Step 6: Run the following command to push this image to your newly created AWS repository: $ docker push .dkr.ecr.eu-west-1.amazonaws.com/amazon/aws-load-balancer-controller:v2.2.1 - #### AWS Service annotations for LB Ingress Controller -Here is the link to get the AWS ELB [service annotations](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/) for LB Ingress controller +Here is the link to get the AWS ELB [service annotations](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/) for LB Ingress controller - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -96,5 +97,4 @@ No requirements.
| [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | | [ingress\_name](#output\_ingress\_name) | AWS LoadBalancer Controller Ingress Name | | [ingress\_namespace](#output\_ingress\_namespace) | AWS LoadBalancer Controller Ingress Namespace | - - + diff --git a/modules/kubernetes-addons/aws-load-balancer-controller/versions.tf b/modules/kubernetes-addons/aws-load-balancer-controller/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-load-balancer-controller/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-node-termination-handler/README.md b/modules/kubernetes-addons/aws-node-termination-handler/README.md index 01a656f754..4378b0bb79 100644 --- a/modules/kubernetes-addons/aws-node-termination-handler/README.md +++ b/modules/kubernetes-addons/aws-node-termination-handler/README.md @@ -1,20 +1,23 @@ # AWS Node Termination handler Helm Chart ## What is aws-node-termination-handler + [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) This project ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as EC2 maintenance events, EC2 Spot interruptions, ASG Scale-In, ASG AZ Rebalance, and EC2 Instance Termination via the API or Console. If not handled, your application code may not stop gracefully, take longer to recover full availability, or accidentally schedule work to nodes that are going down. - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -48,5 +51,4 @@ No requirements. ## Outputs No outputs. - - + diff --git a/modules/kubernetes-addons/aws-node-termination-handler/outputs.tf b/modules/kubernetes-addons/aws-node-termination-handler/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/aws-node-termination-handler/versions.tf b/modules/kubernetes-addons/aws-node-termination-handler/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-node-termination-handler/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-privateca-issuer/README.md b/modules/kubernetes-addons/aws-privateca-issuer/README.md index 739e0c3a2b..57e4029d04 100644 --- a/modules/kubernetes-addons/aws-privateca-issuer/README.md +++ b/modules/kubernetes-addons/aws-privateca-issuer/README.md @@ -1,25 +1,26 @@ # AWS PCA issuer Helm Chart -# Introduction +## Introduction AWS ACM Private CA is a module of the [AWS Certificate Manager](https://aws.amazon.com/certificate-manager/) that can setup and manage private CAs. `cert-manager` is a Kubernetes add-on to automate the management and issuance of TLS certificates from various issuing sources. It will ensure certificates are valid and up to date periodically, and attempt to renew certificates at an appropriate time before expiry. 
The current module `aws-privateca-issuer` is an add-on for `cert-manager` that issues certificates using AWS ACM PCA. -# Helm Chart - -### Instructions to use the Helm Chart +## Helm Chart See the [aws-pca-issuer documentation](https://cert-manager.github.io/aws-privateca-issuer/). - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -31,16 +32,17 @@ No requirements. | Name | Type | |------|------| -| [aws_iam_policy.aws_pca_issuer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_policy_document.aws_pca_issuer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy.aws_privateca_issuer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy_document.aws_privateca_issuer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [addon\_context](#input\_addon\_context) | Input configuration for the addon |
object({
aws_caller_identity_account_id = string
aws_caller_identity_arn = string
aws_eks_cluster_endpoint = string
aws_partition_id = string
aws_region_name = string
eks_cluster_id = string
eks_oidc_issuer_url = string
eks_oidc_provider_arn = string
tags = map(string)
})
| n/a | yes | -| [helm\_config](#input\_helm\_config) | Cluster Autoscaler Helm Config | `any` | `{}` | no | -| [irsa\_iam\_permissions\_boundary](#input\_irsa\_iam\_permissions\_boundary) | IAM Policy ARN for IRSA IAM role permissions boundary | `string` | `""` | no | +| [addon\_context](#input\_addon\_context) | Input configuration for the addon |
object({
aws_caller_identity_account_id = string
aws_caller_identity_arn = string
aws_eks_cluster_endpoint = string
aws_partition_id = string
aws_region_name = string
eks_cluster_id = string
eks_oidc_issuer_url = string
eks_oidc_provider_arn = string
tags = map(string)
irsa_iam_role_path = string
irsa_iam_permissions_boundary = string
})
| n/a | yes | +| [aws\_privateca\_acmca\_arn](#input\_aws\_privateca\_acmca\_arn) | ARN of AWS ACM PCA | `string` | n/a | yes | +| [helm\_config](#input\_helm\_config) | AWS PCA Issuer Helm Config | `any` | `{}` | no | +| [irsa\_policies](#input\_irsa\_policies) | Additional IAM policies for a IAM role for service accounts | `list(string)` | `[]` | no | | [manage\_via\_gitops](#input\_manage\_via\_gitops) | Determines if the add-on should be managed via GitOps. | `bool` | `false` | no | ## Outputs @@ -48,5 +50,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - \ No newline at end of file + diff --git a/modules/kubernetes-addons/aws-privateca-issuer/data.tf b/modules/kubernetes-addons/aws-privateca-issuer/data.tf deleted file mode 100644 index e8979326be..0000000000 --- a/modules/kubernetes-addons/aws-privateca-issuer/data.tf +++ /dev/null @@ -1,12 +0,0 @@ -data "aws_iam_policy_document" "aws_privateca_issuer" { - statement { - sid = "" - effect = "Allow" - resources = ["${var.aws_privateca_acmca_arn}"] - actions = [ - "acm-pca:DescribeCertificateAuthority", - "acm-pca:GetCertificate", - "acm-pca:IssueCertificate" - ] - } -} \ No newline at end of file diff --git a/modules/kubernetes-addons/aws-privateca-issuer/locals.tf b/modules/kubernetes-addons/aws-privateca-issuer/locals.tf index 6bb3e3ccab..66ebb069cd 100644 --- a/modules/kubernetes-addons/aws-privateca-issuer/locals.tf +++ b/modules/kubernetes-addons/aws-privateca-issuer/locals.tf @@ -43,4 +43,4 @@ locals { enable = true serviceAccountName = local.service_account_name } -} \ No newline at end of file +} diff --git a/modules/kubernetes-addons/aws-privateca-issuer/main.tf b/modules/kubernetes-addons/aws-privateca-issuer/main.tf index 0bb032624b..b14d2476e7 100644 --- a/modules/kubernetes-addons/aws-privateca-issuer/main.tf +++ b/modules/kubernetes-addons/aws-privateca-issuer/main.tf @@ -7,9 +7,22 @@ module "helm_addon" { addon_context = var.addon_context } +data "aws_iam_policy_document" "aws_privateca_issuer" { + statement { + sid = "" + effect = "Allow" + resources = ["${var.aws_privateca_acmca_arn}"] + actions = [ + "acm-pca:DescribeCertificateAuthority", + "acm-pca:GetCertificate", + "acm-pca:IssueCertificate" + ] + } +} + resource "aws_iam_policy" "aws_privateca_issuer" { description = "AWS PCA issuer IAM policy" name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa" policy = data.aws_iam_policy_document.aws_privateca_issuer.json tags = var.addon_context.tags -} \ No newline at end of file +} diff --git a/modules/kubernetes-addons/aws-privateca-issuer/outputs.tf b/modules/kubernetes-addons/aws-privateca-issuer/outputs.tf index c5df07850f..b30c86b380 100644 --- a/modules/kubernetes-addons/aws-privateca-issuer/outputs.tf +++ b/modules/kubernetes-addons/aws-privateca-issuer/outputs.tf @@ -1,4 +1,4 @@ output "argocd_gitops_config" { description = "Configuration used for managing the add-on with ArgoCD" value = var.manage_via_gitops ? 
local.argocd_gitops_config : null -} \ No newline at end of file +} diff --git a/modules/kubernetes-addons/aws-privateca-issuer/variables.tf b/modules/kubernetes-addons/aws-privateca-issuer/variables.tf index b84669e6d6..af80a85b1e 100644 --- a/modules/kubernetes-addons/aws-privateca-issuer/variables.tf +++ b/modules/kubernetes-addons/aws-privateca-issuer/variables.tf @@ -36,4 +36,4 @@ variable "irsa_policies" { type = list(string) description = "Additional IAM policies for a IAM role for service accounts" default = [] -} \ No newline at end of file +} diff --git a/modules/kubernetes-addons/aws-privateca-issuer/versions.tf b/modules/kubernetes-addons/aws-privateca-issuer/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-privateca-issuer/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/aws-vpc-cni/README.md b/modules/kubernetes-addons/aws-vpc-cni/README.md index bbcb9ba407..2ece8bb18b 100644 --- a/modules/kubernetes-addons/aws-vpc-cni/README.md +++ b/modules/kubernetes-addons/aws-vpc-cni/README.md @@ -1,17 +1,21 @@ # vpc-cni + [vpc-cni](https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html) The Amazon VPC CNI plugin for Kubernetes is the networking plugin for pod networking in Amazon EKS clusters. The plugin is responsible for allocating VPC IP addresses to Kubernetes nodes and configuring the necessary networking for pods on each node. - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -38,5 +42,4 @@ No requirements. ## Outputs No outputs. - - + diff --git a/modules/kubernetes-addons/aws-vpc-cni/outputs.tf b/modules/kubernetes-addons/aws-vpc-cni/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/aws-vpc-cni/versions.tf b/modules/kubernetes-addons/aws-vpc-cni/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/aws-vpc-cni/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/cert-manager/README.md b/modules/kubernetes-addons/cert-manager/README.md index 750cf3b687..e15fc8b953 100644 --- a/modules/kubernetes-addons/cert-manager/README.md +++ b/modules/kubernetes-addons/cert-manager/README.md @@ -10,24 +10,25 @@ Cert Manager adds certificates and certificate issuers as resource types in Kube See the [cert-manager documentation](https://cert-manager.io/docs/installation/helm/). - # Docker Image for Cert Manager cert-manager docker image is available at this repo: https://quay.io/repository/jetstack/cert-manager-controller?tag=latest&tab=tags - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [helm](#requirement\_helm) | >= 2.4.1 | ## Providers | Name | Version | |------|---------| -| [helm](#provider\_helm) | n/a | +| [helm](#provider\_helm) | >= 2.4.1 | ## Modules @@ -54,5 +55,4 @@ No requirements. 
| Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/cert-manager/versions.tf b/modules/kubernetes-addons/cert-manager/versions.tf new file mode 100644 index 0000000000..278a4fbb4d --- /dev/null +++ b/modules/kubernetes-addons/cert-manager/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } +} diff --git a/modules/kubernetes-addons/cluster-autoscaler/README.md b/modules/kubernetes-addons/cluster-autoscaler/README.md index c9c48de5c1..7154ca66df 100644 --- a/modules/kubernetes-addons/cluster-autoscaler/README.md +++ b/modules/kubernetes-addons/cluster-autoscaler/README.md @@ -1,17 +1,18 @@ # Cluster Autoscaler Helm Chart - - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -39,5 +40,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/cluster-autoscaler/versions.tf b/modules/kubernetes-addons/cluster-autoscaler/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/cluster-autoscaler/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/crossplane/README.md b/modules/kubernetes-addons/crossplane/README.md index 1050c20f15..86be4d63fd 100644 --- a/modules/kubernetes-addons/crossplane/README.md +++ b/modules/kubernetes-addons/crossplane/README.md @@ -1,14 +1,16 @@ # Crossplane + Crossplane is an open source Kubernetes add-on that enables platform teams to assemble infrastructure from multiple vendors, and expose higher level self-service APIs for application teams to consume, without having to write any code. - - Crossplane is a control plane - - Allow engineers to model their infrastructure as declarative configuration - - Support managing a myriad of diverse infrastructure using "provider" plugins - - It's an open source tool with strong communities +- Crossplane is a control plane +- Allow engineers to model their infrastructure as declarative configuration +- Support managing a myriad of diverse infrastructure using "provider" plugins +- It's an open source tool with strong communities Please find more details from [Crossplane](https://crossplane.io/) ## Usage + Crossplane Add-on can be deployed as follows ```hcl @@ -17,28 +19,30 @@ Crossplane Add-on can be deployed as follows This module allows you to deploy the following AWS providers for Crossplane. These providers are disabled by default. - - [AWS Provider](https://github.com/crossplane/provider-aws) - - [Provider Jet AWS](https://github.com/crossplane-contrib/provider-jet-aws) +- [AWS Provider](https://github.com/crossplane/provider-aws) +- [Provider Jet AWS](https://github.com/crossplane-contrib/provider-jet-aws) Refer to [docs](../../../docs/add-ons/crossplane.md) on how to deploy AWS Providers.
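As a usage illustration for the add-on described above, a minimal consumer sketch follows. It assumes the `kubernetes-addons` module follows the `enable_<addon>` / `<addon>_helm_config` input convention visible elsewhere in this diff; the exact Crossplane input names and the `module.eks_blueprints` label are assumptions, not confirmed by this change:

```hcl
# Hypothetical consumer configuration enabling the Crossplane add-on.
# Input names follow the module's enable_<addon> convention; verify them
# against modules/kubernetes-addons/variables.tf before use.
module "kubernetes_addons" {
  source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons"

  eks_cluster_id = module.eks_blueprints.eks_cluster_id # root output shown in this diff; module label assumed

  enable_crossplane = true # assumed flag name

  crossplane_helm_config = {
    version = "1.6.3" # illustrative chart version only
  }
}
```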
-___ - + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [kubectl](#requirement\_kubectl) | >= 1.13.1 | +| [aws](#requirement\_aws) | >= 3.72 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [time](#requirement\_time) | >= 0.7 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | -| [kubectl](#provider\_kubectl) | >= 1.13.1 | -| [kubernetes](#provider\_kubernetes) | n/a | -| [time](#provider\_time) | n/a | +| [aws](#provider\_aws) | >= 3.72 | +| [kubectl](#provider\_kubectl) | >= 1.14 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | +| [time](#provider\_time) | >= 0.7 | ## Modules @@ -78,5 +82,4 @@ ___ ## Outputs No outputs. - - + diff --git a/modules/kubernetes-addons/crossplane/main.tf b/modules/kubernetes-addons/crossplane/main.tf index d001dcd7b8..0f8568520a 100644 --- a/modules/kubernetes-addons/crossplane/main.tf +++ b/modules/kubernetes-addons/crossplane/main.tf @@ -3,7 +3,7 @@ resource "kubernetes_namespace_v1" "crossplane" { name = local.namespace labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/crossplane/outputs.tf b/modules/kubernetes-addons/crossplane/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/crossplane/versions.tf b/modules/kubernetes-addons/crossplane/versions.tf index 1141ad38b0..96ea58d622 100644 --- a/modules/kubernetes-addons/crossplane/versions.tf +++ b/modules/kubernetes-addons/crossplane/versions.tf @@ -2,9 +2,21 @@ terraform { required_version = ">= 1.0.0" required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } kubectl = { source = "gavinbunney/kubectl" - version = ">= 1.13.1" + version = ">= 1.14" + } + time = { + source = "hashicorp/time" + version = ">= 0.7" } } } diff --git a/modules/kubernetes-addons/external-dns/README.md b/modules/kubernetes-addons/external-dns/README.md index 04fa88f9fa..6e283a2c78 100644 --- a/modules/kubernetes-addons/external-dns/README.md +++ b/modules/kubernetes-addons/external-dns/README.md @@ -6,16 +6,19 @@ For complete project documentation, please visit the [ExternalDNS Github repository](https://github.com/kubernetes-sigs/external-dns). - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -47,5 +50,4 @@ No requirements. 
|------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with GitOps | | [zone\_filter\_ids](#output\_zone\_filter\_ids) | Zone Filter Ids for the add-on | - - + diff --git a/modules/kubernetes-addons/external-dns/main.tf b/modules/kubernetes-addons/external-dns/main.tf index 4a70c1a358..095e4d3e0d 100644 --- a/modules/kubernetes-addons/external-dns/main.tf +++ b/modules/kubernetes-addons/external-dns/main.tf @@ -1,6 +1,6 @@ -//------------------------------------- -// Helm Add-on -//------------------------------------- +#------------------------------------- +# Helm Add-on +#------------------------------------- module "helm_addon" { source = "../helm-addon" @@ -11,9 +11,9 @@ module "helm_addon" { manage_via_gitops = var.manage_via_gitops } -//------------------------------------ -// IAM Policy -//------------------------------------ +#------------------------------------ +# IAM Policy +#------------------------------------ resource "aws_iam_policy" "external_dns" { description = "External DNS IAM policy." diff --git a/modules/kubernetes-addons/external-dns/versions.tf b/modules/kubernetes-addons/external-dns/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/external-dns/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/fargate-fluentbit/README.md b/modules/kubernetes-addons/fargate-fluentbit/README.md index 4107599021..c5b760ceeb 100644 --- a/modules/kubernetes-addons/fargate-fluentbit/README.md +++ b/modules/kubernetes-addons/fargate-fluentbit/README.md @@ -18,6 +18,7 @@ You can stream logs from Fargate directly to `Amazon CloudWatch`, `Amazon OpenSe You can also stream logs to destinations such as `Amazon S3`, `Amazon Kinesis Data Streams`, and partner tools through Amazon Kinesis Data Firehose. ## Fluent Bit CloudWatch Config + Please find the updated configuration from [AWS Docs](https://docs.aws.amazon.com/eks/latest/userguide/fargate-logging.html) ```hcl @@ -58,16 +59,19 @@ Please find the updated configuration from [AWS Docs](https://docs.aws.amazon.co } ``` - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -90,5 +94,4 @@ No modules. ## Outputs No outputs. 
- - + diff --git a/modules/kubernetes-addons/fargate-fluentbit/main.tf b/modules/kubernetes-addons/fargate-fluentbit/main.tf index 8b750bd590..ae52e0035e 100755 --- a/modules/kubernetes-addons/fargate-fluentbit/main.tf +++ b/modules/kubernetes-addons/fargate-fluentbit/main.tf @@ -8,7 +8,7 @@ resource "kubernetes_namespace" "aws_observability" { labels = { aws-observability = "enabled" - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/fargate-fluentbit/outputs.tf b/modules/kubernetes-addons/fargate-fluentbit/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/fargate-fluentbit/versions.tf b/modules/kubernetes-addons/fargate-fluentbit/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/fargate-fluentbit/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/helm-addon/README.md b/modules/kubernetes-addons/helm-addon/README.md index 79ee72cb84..6ac3627d6d 100644 --- a/modules/kubernetes-addons/helm-addon/README.md +++ b/modules/kubernetes-addons/helm-addon/README.md @@ -7,16 +7,19 @@ Helm Addon module can be used to provision a generic Helm Chart as an Add-On for 1. Create an IAM role for Service Accounts with the provided configuration for the [`irsa`](./../../irsa) module. 2. If `manage_via_gitops` is set to `false`, provision the helm chart for the add-on based on the configuration provided for the `helm_config` as defined in the [helm provider](https://registry.terraform.io/providers/hashicorp/helm/latest/docs) documentation. - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [helm](#requirement\_helm) | >= 2.4.1 | ## Providers | Name | Version | |------|---------| -| [helm](#provider\_helm) | n/a | +| [helm](#provider\_helm) | >= 2.4.1 | ## Modules @@ -44,5 +47,4 @@ No requirements. ## Outputs No outputs. - - + diff --git a/modules/kubernetes-addons/helm-addon/locals.tf b/modules/kubernetes-addons/helm-addon/locals.tf deleted file mode 100644 index a0ca0b851a..0000000000 --- a/modules/kubernetes-addons/helm-addon/locals.tf +++ /dev/null @@ -1,4 +0,0 @@ -terraform { - experiments = [module_variable_optional_attrs] -} - diff --git a/modules/kubernetes-addons/helm-addon/outputs.tf b/modules/kubernetes-addons/helm-addon/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/helm-addon/versions.tf b/modules/kubernetes-addons/helm-addon/versions.tf new file mode 100644 index 0000000000..b07efcd9f6 --- /dev/null +++ b/modules/kubernetes-addons/helm-addon/versions.tf @@ -0,0 +1,12 @@ +terraform { + required_version = ">= 1.0.0" + + experiments = [module_variable_optional_attrs] + + required_providers { + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } +} diff --git a/modules/kubernetes-addons/ingress-nginx/README.md b/modules/kubernetes-addons/ingress-nginx/README.md index 222f422158..aef01113f1 100644 --- a/modules/kubernetes-addons/ingress-nginx/README.md +++ b/modules/kubernetes-addons/ingress-nginx/README.md @@ -2,21 +2,22 @@ ## Introduction - Nginx Ingress Controller is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease. 
- For more details [Ingress-Nginx can be found here](https://kubernetes.github.io/ingress-nginx/) +Nginx Ingress Controller is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease. +For more details, see the [Ingress-Nginx documentation](https://kubernetes.github.io/ingress-nginx/) -### AWS Service annotations for Nginx Ingress Controller - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -43,5 +44,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/ingress-nginx/main.tf b/modules/kubernetes-addons/ingress-nginx/main.tf index 0c99a7d1be..76463356db 100644 --- a/modules/kubernetes-addons/ingress-nginx/main.tf +++ b/modules/kubernetes-addons/ingress-nginx/main.tf @@ -1,6 +1,6 @@ -//------------------------------------- -// Helm Add-on -//------------------------------------- +#------------------------------------- +# Helm Add-on +#------------------------------------- module "helm_addon" { source = "../helm-addon" @@ -12,16 +12,16 @@ module "helm_addon" { depends_on = [kubernetes_namespace_v1.this] } -//------------------------------------- -// Helm Namespace -//------------------------------------- +#------------------------------------- +# Helm Namespace +#------------------------------------- resource "kubernetes_namespace_v1" "this" { metadata { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/ingress-nginx/versions.tf b/modules/kubernetes-addons/ingress-nginx/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/ingress-nginx/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/karpenter/README.md b/modules/kubernetes-addons/karpenter/README.md index c2b46a3f35..feee2e1da7 100644 --- a/modules/kubernetes-addons/karpenter/README.md +++ b/modules/kubernetes-addons/karpenter/README.md @@ -4,17 +4,19 @@ Karpenter is an open-source node provisioning project built for Kubernetes. Karp For more details, check out the [Karpenter](https://karpenter.sh/docs/getting-started/) docs - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -44,5 +46,4 @@ No requirements.
| Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/karpenter/versions.tf b/modules/kubernetes-addons/karpenter/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/karpenter/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/keda/README.md b/modules/kubernetes-addons/keda/README.md index ee89b45225..927cb7517b 100644 --- a/modules/kubernetes-addons/keda/README.md +++ b/modules/kubernetes-addons/keda/README.md @@ -1,12 +1,13 @@ # KEDA Helm Chart ## What is Keda + [KEDA](https://keda.sh/) is a Kubernetes-based Event Driven Autoscaler. With KEDA, you can drive the scaling of any container in Kubernetes based on the number of events needing to be processed. KEDA is a single-purpose and lightweight component that can be added into any Kubernetes cluster. KEDA works alongside standard Kubernetes components like the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) and can extend functionality without overwriting or duplication. With KEDA you can explicitly map the apps you want to use event-driven scale, with other apps continuing to function. This makes KEDA a flexible and safe option to run alongside any number of any other Kubernetes applications or frameworks - ## WARNING + Sometimes kubernetes namespace `keda-ns` created by this helm chart failed to delete due to the [defect](https://github.com/kedacore/keda/issues/1231) Workaround is to run the following command manually to clean up the namespace and rerun the `terraform apply` @@ -15,17 +16,19 @@ Workaround is to run the following command manually to clean up the namespace an kubectl delete apiservice v1beta1.external.metrics.k8s.io ``` - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -54,5 +57,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/keda/versions.tf b/modules/kubernetes-addons/keda/versions.tf new file mode 100644 index 0000000000..f92f41b9e7 --- /dev/null +++ b/modules/kubernetes-addons/keda/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + } +} diff --git a/modules/kubernetes-addons/kubernetes-dashboard/README.md b/modules/kubernetes-addons/kubernetes-dashboard/README.md index f958f0fc8f..e9ed049d75 100644 --- a/modules/kubernetes-addons/kubernetes-dashboard/README.md +++ b/modules/kubernetes-addons/kubernetes-dashboard/README.md @@ -6,19 +6,19 @@ This add-on bootstraps the Kubernetes Dashboard on the EKS cluster using a [helm chart](https://artifacthub.io/packages/helm/k8s-dashboard/kubernetes-dashboard) with the default configuration. 
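For illustration, a sketch of consuming this add-on follows. `kubernetes_dashboard_helm_config` does appear as an input in this change, while the `enable_kubernetes_dashboard` flag and the `module.eks_blueprints` label are assumed from the module's naming conventions rather than confirmed here:

```hcl
# Hypothetical consumer snippet for the Kubernetes Dashboard add-on.
module "kubernetes_addons" {
  source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons"

  eks_cluster_id = module.eks_blueprints.eks_cluster_id # module label assumed

  enable_kubernetes_dashboard = true # assumed flag name

  kubernetes_dashboard_helm_config = {
    # Illustrative Helm override; the values file name is hypothetical
    values = [file("${path.module}/dashboard-values.yaml")]
  }
}
```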
- + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [kubectl](#requirement\_kubectl) | >= 1.13.1 | +| [kubectl](#requirement\_kubectl) | >= 1.14 | ## Providers | Name | Version | |------|---------| -| [kubectl](#provider\_kubectl) | >= 1.13.1 | +| [kubectl](#provider\_kubectl) | >= 1.14 | ## Modules @@ -45,5 +45,4 @@ This add-on bootstraps the Kubernetes Dashboard on the EKS cluster using a [helm | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/kubernetes-dashboard/versions.tf b/modules/kubernetes-addons/kubernetes-dashboard/versions.tf index 1141ad38b0..5e3b002b42 100644 --- a/modules/kubernetes-addons/kubernetes-dashboard/versions.tf +++ b/modules/kubernetes-addons/kubernetes-dashboard/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { kubectl = { source = "gavinbunney/kubectl" - version = ">= 1.13.1" + version = ">= 1.14" } } } diff --git a/modules/kubernetes-addons/main.tf b/modules/kubernetes-addons/main.tf index 9a88aa6a42..4b1bf27b56 100644 --- a/modules/kubernetes-addons/main.tf +++ b/modules/kubernetes-addons/main.tf @@ -277,7 +277,6 @@ module "yunikorn" { count = var.enable_yunikorn ? 1 : 0 source = "./yunikorn" helm_config = var.yunikorn_helm_config - irsa_policies = var.yunikorn_irsa_policies manage_via_gitops = var.argocd_manage_add_ons addon_context = local.addon_context } diff --git a/modules/kubernetes-addons/metrics-server/README.md b/modules/kubernetes-addons/metrics-server/README.md index 4707773f7c..de106e1f99 100644 --- a/modules/kubernetes-addons/metrics-server/README.md +++ b/modules/kubernetes-addons/metrics-server/README.md @@ -1,15 +1,18 @@ # Metrics Server Helm Chart - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -36,5 +39,4 @@ No requirements. 
| Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/metrics-server/main.tf b/modules/kubernetes-addons/metrics-server/main.tf index 665de9a0c3..c61ed3526b 100644 --- a/modules/kubernetes-addons/metrics-server/main.tf +++ b/modules/kubernetes-addons/metrics-server/main.tf @@ -14,7 +14,7 @@ resource "kubernetes_namespace_v1" "this" { metadata { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/metrics-server/versions.tf b/modules/kubernetes-addons/metrics-server/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/metrics-server/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/outputs.tf b/modules/kubernetes-addons/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/kubernetes-addons/prometheus/README.md b/modules/kubernetes-addons/prometheus/README.md index 371313b091..89b3fb6047 100644 --- a/modules/kubernetes-addons/prometheus/README.md +++ b/modules/kubernetes-addons/prometheus/README.md @@ -1,12 +1,12 @@ # Prometheus Helm Chart -###### Instructions to upload Prometheus Docker image to AWS ECR +## Instructions to upload Prometheus Docker image to AWS ECR -Step1: Get the latest docker image from this link +Step 1: Get the latest docker image from this link https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus/values.yaml -Step2: Download the docker image to your local Mac/Laptop +Step 2: Download the docker image to your local Mac/Laptop $ docker pull quay.io/prometheus/prometheus:v2.31.1 $ docker pull quay.io/prometheus/alertmanager:v0.23.0 @@ -14,12 +14,11 @@ Step2: Download the docker image to your local Mac/Laptop $ docker pull quay.io/prometheus/node-exporter:v1.3.0 $ docker pull prom/pushgateway:v1.4.2 - -Step3: Retrieve an authentication token and authenticate your Docker client to your registry. Use the AWS CLI: +Step 3: Retrieve an authentication token and authenticate your Docker client to your registry. Use the AWS CLI: $ aws ecr get-login-password --region eu-west-1 | docker login --username AWS --password-stdin .dkr.ecr.eu-west-1.amazonaws.com -Step4: Create an ECR repo for each image mentioned in Step2 with the same in ECR. See example for +Step 4: Create an ECR repo for each image mentioned in Step 2, using the same name in ECR.
See the example below: $ aws ecr create-repository \ --repository-name quay.io/prometheus/prometheus \ @@ -27,36 +26,33 @@ Step4: Create an ECR repo for each image mentioned in Step2 with the same in ECR Repeat the above steps for the other 4 images -Step5: After the build completes, tag your image so, you can push the image to this repository: +Step 5: After the build completes, tag your image so you can push the image to this repository: $ docker tag quay.io/prometheus/prometheus:v2.31.1 .dkr.ecr.eu-west-1.amazonaws.com/quay.io/prometheus/prometheus:v2.31.1 Repeat the above steps for the other 4 images -Step6: Run the following command to push this image to your newly created AWS repository: +Step 6: Run the following command to push this image to your newly created AWS repository: $ docker push .dkr.ecr.eu-west-1.amazonaws.com/quay.io/prometheus/prometheus:v2.31.1 Repeat the above steps for the other 4 images -### Instructions to download Helm Charts - -Helm Chart - - https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus/values.yaml - - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 3.72 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -91,5 +87,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/prometheus/locals.tf b/modules/kubernetes-addons/prometheus/locals.tf index 21507673d5..7d0f25cf54 100644 --- a/modules/kubernetes-addons/prometheus/locals.tf +++ b/modules/kubernetes-addons/prometheus/locals.tf @@ -15,10 +15,6 @@ locals { var.helm_config ) - default_helm_values = [templatefile("${path.module}/values.yaml", { - operating_system = "linux", - })] - amazon_prometheus_workspace_url = var.amazon_prometheus_workspace_endpoint != null ? "${var.amazon_prometheus_workspace_endpoint}api/v1/remote_write" : "" amazon_prometheus_ingest_iam_role_arn = var.enable_amazon_prometheus ?
module.irsa_amp_ingest[0].irsa_iam_role_arn : "" amazon_prometheus_ingest_service_account = "amp-ingest" diff --git a/modules/kubernetes-addons/prometheus/main.tf b/modules/kubernetes-addons/prometheus/main.tf index bab3eef3c6..8771963918 100644 --- a/modules/kubernetes-addons/prometheus/main.tf +++ b/modules/kubernetes-addons/prometheus/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "prometheus" { metadata { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/prometheus/versions.tf b/modules/kubernetes-addons/prometheus/versions.tf new file mode 100644 index 0000000000..d2ddf87cc2 --- /dev/null +++ b/modules/kubernetes-addons/prometheus/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/spark-k8s-operator/README.md b/modules/kubernetes-addons/spark-k8s-operator/README.md index c84f2a5a60..ab713a9468 100644 --- a/modules/kubernetes-addons/spark-k8s-operator/README.md +++ b/modules/kubernetes-addons/spark-k8s-operator/README.md @@ -1,20 +1,21 @@ # Spark on K8s Operator + The Kubernetes Operator for Apache Spark aims to make specifying and running Spark applications as easy and idiomatic as running other workloads on Kubernetes. It uses Kubernetes custom resources for specifying, running, and surfacing status of Spark applications. - -#### AWS Service annotations for Nginx Ingress Controller - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -41,5 +42,4 @@ No requirements. 
| Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/spark-k8s-operator/main.tf b/modules/kubernetes-addons/spark-k8s-operator/main.tf index ccb30fdb6f..f622028c10 100644 --- a/modules/kubernetes-addons/spark-k8s-operator/main.tf +++ b/modules/kubernetes-addons/spark-k8s-operator/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "this" { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/spark-k8s-operator/versions.tf b/modules/kubernetes-addons/spark-k8s-operator/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/spark-k8s-operator/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/traefik/README.md b/modules/kubernetes-addons/traefik/README.md index 6216fb2a6b..8f199e3ce5 100644 --- a/modules/kubernetes-addons/traefik/README.md +++ b/modules/kubernetes-addons/traefik/README.md @@ -2,19 +2,21 @@ ## Introduction - Traefik is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease. Fore more detials about [Traefik can be found here](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) +Traefik is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease. For more details, see the [Traefik Kubernetes ingress provider documentation](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -41,5 +43,4 @@ No requirements.
| Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/traefik/main.tf b/modules/kubernetes-addons/traefik/main.tf index ccb30fdb6f..f622028c10 100644 --- a/modules/kubernetes-addons/traefik/main.tf +++ b/modules/kubernetes-addons/traefik/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "this" { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/traefik/versions.tf b/modules/kubernetes-addons/traefik/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/traefik/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/variables.tf b/modules/kubernetes-addons/variables.tf index b2e6d6a056..999e53f857 100644 --- a/modules/kubernetes-addons/variables.tf +++ b/modules/kubernetes-addons/variables.tf @@ -21,12 +21,6 @@ variable "auto_scaling_group_names" { type = list(string) } -variable "node_groups_iam_role_arn" { - type = list(string) - default = [] - description = "Node Groups IAM role ARNs" -} - variable "tags" { type = map(string) default = {} @@ -622,12 +616,6 @@ variable "kubernetes_dashboard_helm_config" { description = "Kubernetes Dashboard Helm Chart config" } -variable "kubernetes_dashboard_irsa_policies" { - type = list(string) - default = [] - description = "IAM policy ARNs for Kubernetes Dashboard IRSA" -} - #-----------HashiCorp Vault------------- variable "enable_vault" { type = bool @@ -667,12 +655,6 @@ variable "yunikorn_helm_config" { description = "YuniKorn Helm Chart config" } -variable "yunikorn_irsa_policies" { - type = list(string) - default = [] - description = "IAM policy ARNs for Yunikorn IRSA" -} - #-----------AWS PCA ISSUER------------- variable "enable_aws_privateca_issuer" { type = bool @@ -696,4 +678,4 @@ variable "aws_privateca_issuer_irsa_policies" { type = list(string) default = [] description = "IAM policy ARNs for AWS ACM PCA IRSA" -} \ No newline at end of file +} diff --git a/modules/kubernetes-addons/versions.tf b/modules/kubernetes-addons/versions.tf index 25108d7bd5..f92f41b9e7 100644 --- a/modules/kubernetes-addons/versions.tf +++ b/modules/kubernetes-addons/versions.tf @@ -1,25 +1,10 @@ terraform { + required_version = ">= 1.0.0" + required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.66.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.7.1" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - local = { - source = "hashicorp/local" - version = "2.1.0" - } - null = { - source = "hashicorp/null" - version = "3.1.0" + version = ">= 3.72" } } - required_version = ">= 1.0.0" } diff --git a/modules/kubernetes-addons/vpa/README.md b/modules/kubernetes-addons/vpa/README.md index 9c1cf5f4fd..258d3626cd 100644 --- a/modules/kubernetes-addons/vpa/README.md +++ b/modules/kubernetes-addons/vpa/README.md @@ -1,23 +1,27 @@ # Vertical Pod Autoscaling (VPA) ## What is VPA + [VPA](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) Vertical Pod Autoscaler (VPA) frees the users from necessity of setting up-to-date resource limits 
and requests for the containers in their pods. When configured, it will set the requests automatically based on usage and thus allow proper scheduling onto nodes so that appropriate resource amount is available for each pod. It will also maintain ratios between limits and requests that were specified in initial containers configuration. [VPA Helm Chart Repo](https://github.com/FairwindsOps/charts/blob/master/stable/vpa/values.yaml) ## Prerequisites - - Metrics Server Helm chart installed +- Metrics Server Helm chart installed - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -44,5 +48,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/vpa/main.tf b/modules/kubernetes-addons/vpa/main.tf index 8f3f050d4b..b84e6ded4f 100644 --- a/modules/kubernetes-addons/vpa/main.tf +++ b/modules/kubernetes-addons/vpa/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "vpa" { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/vpa/versions.tf b/modules/kubernetes-addons/vpa/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/vpa/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/kubernetes-addons/yunikorn/README.md b/modules/kubernetes-addons/yunikorn/README.md index ac5cb57c47..5d7bed5d42 100644 --- a/modules/kubernetes-addons/yunikorn/README.md +++ b/modules/kubernetes-addons/yunikorn/README.md @@ -1,23 +1,26 @@ # YuniKorn Helm Chart ## What is YuniKorn + [YuniKorn](https://yunikorn.apache.org/) Resource Scheduler for K8s. Fully K8s compatible, an alternative of the default K8s scheduler, but more powerful. Transparent for the existing K8s applications. [Apache YuniKorn (Incubating)](https://yunikorn.apache.org/) is a new Apache incubator project that offers rich scheduling capabilities on Kubernetes. It fills the scheduling gap while running Big Data workloads on Kubernetes, with a ton of useful features such as hierarchical queues, elastic queue quotas, resource fairness, and job ordering. -Helm Chart Repo https://github.com/apache/incubator-yunikorn-release/tree/master/helm-charts - +Helm Chart Repo https://github.com/apache/incubator-yunikorn-release/tree/master/helm-charts - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [kubernetes](#provider\_kubernetes) | n/a | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules @@ -37,7 +40,6 @@ No requirements. |------|-------------|------|---------|:--------:| | [addon\_context](#input\_addon\_context) | Input configuration for the addon |
object({
aws_caller_identity_account_id = string
aws_caller_identity_arn = string
aws_eks_cluster_endpoint = string
aws_partition_id = string
aws_region_name = string
eks_cluster_id = string
eks_oidc_issuer_url = string
eks_oidc_provider_arn = string
tags = map(string)
irsa_iam_role_path = string
irsa_iam_permissions_boundary = string
})
| n/a | yes | | [helm\_config](#input\_helm\_config) | Helm provider config for Yunikorn | `any` | `{}` | no | -| [irsa\_policies](#input\_irsa\_policies) | IAM Policy ARN list for any IRSA policies | `list(string)` | `[]` | no | | [manage\_via\_gitops](#input\_manage\_via\_gitops) | Determines if the add-on should be managed via GitOps | `bool` | `false` | no | ## Outputs @@ -45,5 +47,4 @@ No requirements. | Name | Description | |------|-------------| | [argocd\_gitops\_config](#output\_argocd\_gitops\_config) | Configuration used for managing the add-on with ArgoCD | - - + diff --git a/modules/kubernetes-addons/yunikorn/main.tf b/modules/kubernetes-addons/yunikorn/main.tf index 91f7462a07..0e11d4c556 100644 --- a/modules/kubernetes-addons/yunikorn/main.tf +++ b/modules/kubernetes-addons/yunikorn/main.tf @@ -13,7 +13,7 @@ resource "kubernetes_namespace_v1" "yunikorn" { name = local.helm_config["namespace"] labels = { - "app.kubernetes.io/managed-by" = "terraform-eks-blueprints" + "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" } } } diff --git a/modules/kubernetes-addons/yunikorn/variables.tf b/modules/kubernetes-addons/yunikorn/variables.tf index f3095d2cee..6d43696b77 100644 --- a/modules/kubernetes-addons/yunikorn/variables.tf +++ b/modules/kubernetes-addons/yunikorn/variables.tf @@ -10,12 +10,6 @@ variable "manage_via_gitops" { description = "Determines if the add-on should be managed via GitOps" } -variable "irsa_policies" { - type = list(string) - default = [] - description = "IAM Policy ARN list for any IRSA policies" -} - variable "addon_context" { type = object({ aws_caller_identity_account_id = string diff --git a/modules/kubernetes-addons/yunikorn/versions.tf b/modules/kubernetes-addons/yunikorn/versions.tf new file mode 100644 index 0000000000..55fba733ab --- /dev/null +++ b/modules/kubernetes-addons/yunikorn/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/modules/launch-templates/README.md b/modules/launch-templates/README.md index 01dc9b56f9..900fc92fed 100644 --- a/modules/launch-templates/README.md +++ b/modules/launch-templates/README.md @@ -1,7 +1,9 @@ # AWS Launch Templates + This module used to create Launch Templates for Node groups or Karpenter. ## Usage + This example shows how to consume the `launch-templates` module. See this full [example](../../examples/karpenter/main.tf). ```hcl @@ -90,17 +92,19 @@ module "launch_templates" { } ``` - - + ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 3.72 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | n/a | +| [aws](#provider\_aws) | >= 3.72 | ## Modules @@ -112,7 +116,6 @@ No modules. |------|------| | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | | [aws_eks_cluster.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | ## Inputs @@ -132,5 +135,4 @@ No modules. 
diff --git a/modules/launch-templates/data.tf b/modules/launch-templates/data.tf
index 7385f5d0e0..81be7d557f 100644
--- a/modules/launch-templates/data.tf
+++ b/modules/launch-templates/data.tf
@@ -1,7 +1,3 @@
data "aws_eks_cluster" "eks" {
  name = var.eks_cluster_id
}
-
-data "aws_eks_cluster_auth" "eks" {
-  name = var.eks_cluster_id
-}
diff --git a/modules/launch-templates/versions.tf b/modules/launch-templates/versions.tf
new file mode 100644
index 0000000000..f92f41b9e7
--- /dev/null
+++ b/modules/launch-templates/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.0.0"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 3.72"
+    }
+  }
+}
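With the unused `aws_eks_cluster_auth` data source removed, token-based authentication for the `kubernetes` provider has to come from elsewhere. One common pattern, shown as a sketch only, is to wire the provider to the new cluster outputs added in `outputs.tf` below plus an `exec` block that shells out to `aws eks get-token`; the module reference name `eks_blueprints` and the exec-based flow are assumptions, not taken from this diff:

```hcl
# Sketch: configures the kubernetes provider from the cluster outputs
# introduced in outputs.tf below. "eks_blueprints" is an assumed module name.
provider "kubernetes" {
  host                   = module.eks_blueprints.eks_cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # Fetch a short-lived token instead of the removed
    # aws_eks_cluster_auth data source.
    args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id]
  }
}
```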
diff --git a/outputs.tf b/outputs.tf
index 1cfcfc05ea..7b5edee18a 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -3,51 +3,60 @@
#-------------------------------
output "eks_cluster_id" {
  description = "Amazon EKS Cluster Name"
-  value       = try(module.aws_eks.cluster_id, "EKS Cluster not enabled")
+  value       = module.aws_eks.cluster_id
+}
+
+output "eks_cluster_certificate_authority_data" {
+  description = "Base64 encoded certificate data required to communicate with the cluster"
+  value       = module.aws_eks.cluster_certificate_authority_data
+}
+
+output "eks_cluster_endpoint" {
+  description = "Endpoint for your Kubernetes API server"
+  value       = module.aws_eks.cluster_endpoint
}

output "eks_oidc_issuer_url" {
  description = "The URL on the EKS cluster OIDC Issuer"
-  value       = try(split("//", module.aws_eks.cluster_oidc_issuer_url)[1], "EKS Cluster not enabled")
+  value       = try(split("//", module.aws_eks.cluster_oidc_issuer_url)[1], "EKS Cluster not enabled") # TODO - remove `split()` since `oidc_provider` covers https:// removal
}

output "oidc_provider" {
  description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
-  value       = try(module.aws_eks.oidc_provider, "EKS Cluster not enabled")
+  value       = module.aws_eks.oidc_provider
}

output "eks_oidc_provider_arn" {
  description = "The ARN of the OIDC Provider if `enable_irsa = true`."
-  value       = try(module.aws_eks.oidc_provider_arn, "EKS Cluster not enabled")
+  value       = module.aws_eks.oidc_provider_arn
}

output "configure_kubectl" {
  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
-  value       = try("aws eks --region ${local.context.aws_region_name} update-kubeconfig --name ${module.aws_eks.cluster_id}", "EKS Cluster not enabled")
+  value       = "aws eks --region ${local.context.aws_region_name} update-kubeconfig --name ${module.aws_eks.cluster_id}"
}

output "eks_cluster_status" {
  description = "Amazon EKS Cluster Status"
-  value       = try(module.aws_eks.cluster_status, "EKS Cluster not enabled")
+  value       = module.aws_eks.cluster_status
}
-
#-------------------------------
# Cluster Security Group
#-------------------------------
output "cluster_primary_security_group_id" {
  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
-  value       = try(module.aws_eks.cluster_primary_security_group_id, "EKS Cluster not enabled")
+  value       = module.aws_eks.cluster_primary_security_group_id
}

output "cluster_security_group_id" {
  description = "EKS Control Plane Security Group ID"
-  value       = try(module.aws_eks.cluster_security_group_id, "EKS Cluster not enabled")
+  value       = module.aws_eks.cluster_security_group_id
}

output "cluster_security_group_arn" {
  description = "Amazon Resource Name (ARN) of the cluster security group"
-  value       = try(module.aws_eks.cluster_security_group_arn, "EKS Cluster not enabled")
+  value       = module.aws_eks.cluster_security_group_arn
}

#-------------------------------
@@ -95,6 +104,7 @@ output "windows_node_group_aws_auth_config_map" {
  description = "Windows node groups AWS auth map"
  value       = local.windows_node_group_aws_auth_config_map.*
}
+
#-------------------------------
# Managed Node Groups Outputs
#-------------------------------
diff --git a/test/README.md b/test/README.md
index f62852d511..927f159a04 100644
--- a/test/README.md
+++ b/test/README.md
@@ -1,19 +1,20 @@
# Terratest for EKS Blueprints

## Configure and run Terratest
+
The following steps can be used to configure Golang and run Terratest locally (Mac/Windows machines).

-### Step1: Install
+### Step 1: Install [golang](https://go.dev/doc/install) (for macOS you can use brew)

-### Step2: Change directory into the test folder.
+### Step 2: Change directory into the test folder.

```shell
cd test
```

-### Step3: Initialize your test
+### Step 3: Initialize your test

```shell
go mod init github.com/aws-ia/terraform-aws-eks-blueprints
@@ -21,7 +22,7 @@
go mod tidy -go=1.17
```

-### Step4: Build and Run E2E Test
+### Step 4: Build and Run E2E Test

```shell
cd src
diff --git a/versions.tf b/versions.tf
index 1d02db5247..aa47e5554d 100644
--- a/versions.tf
+++ b/versions.tf
@@ -1,12 +1,14 @@
terraform {
+  required_version = ">= 1.0.0"
+
  required_providers {
    aws = {
      source  = "hashicorp/aws"
-      version = ">= 3.66.0"
+      version = ">= 3.72"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
-      version = ">= 2.7.1"
+      version = ">= 2.10"
    }
    helm = {
      source = "hashicorp/helm"
@@ -14,11 +16,11 @@ terraform {
    local = {
      source  = "hashicorp/local"
-      version = "2.1.0"
+      version = ">= 2.1"
    }
    null = {
      source  = "hashicorp/null"
-      version = "3.1.0"
+      version = ">= 3.1"
    }
    http = {
      source = "terraform-aws-modules/http"
@@ -26,9 +28,7 @@ terraform {
    kubectl = {
      source  = "gavinbunney/kubectl"
-      version = ">= 1.7.0"
+      version = ">= 1.14"
    }
  }
-
-  required_version = ">= 1.0.0"
}
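A note on the loosened pins above (`"2.1.0"` becoming `">= 2.1"`): Terraform selects a provider version that satisfies the intersection of every constraint declared across the root module and all child modules, so a consumer can still pin harder without forking the blueprints. A minimal sketch, where the upper bound is a hypothetical consumer choice and not part of this diff:

```hcl
# Hypothetical root module consuming the blueprints: Terraform picks an aws
# provider version satisfying BOTH this constraint and the module's ">= 3.72".
terraform {
  required_version = ">= 1.0.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.72, < 4.0" # upper bound is an illustrative consumer pin
    }
  }
}
```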