diff --git a/.github/workflows/readme.yml b/.github/workflows/readme.yml
new file mode 100644
index 0000000..3cbe527
--- /dev/null
+++ b/.github/workflows/readme.yml
@@ -0,0 +1,54 @@
+name: 'Create README.md file'
+on:
+  push:
+    branches:
+      - master
+
+jobs:
+  readme-create:
+    name: 'readme-create'
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Checkout'
+        uses: actions/checkout@master
+
+      - name: Set up Python 3.x
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.x'
+
+      - name: 'create readme'
+        uses: 'clouddrove/github-actions@v8.0'
+        with:
+          actions_subcommand: 'readme'
+          github_token: '${{ secrets.GITHUB }}'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+
+      - name: pre-commit check errors
+        uses: pre-commit/action@v2.0.0
+        continue-on-error: true
+
+      - name: pre-commit fix errors
+        uses: pre-commit/action@v2.0.0
+        continue-on-error: true
+
+      - name: 'push readme'
+        uses: 'clouddrove/github-actions@v8.0'
+        continue-on-error: true
+        with:
+          actions_subcommand: 'push'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: 'Slack Notification'
+        uses: clouddrove/action-slack@v2
+        with:
+          status: ${{ job.status }}
+          fields: repo,author
+          author_name: 'CloudDrove'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} # required
+        if: always()
diff --git a/.github/workflows/terraform.yml b/.github/workflows/terraform.yml
new file mode 100644
index 0000000..0b18e51
--- /dev/null
+++ b/.github/workflows/terraform.yml
@@ -0,0 +1,112 @@
+name: 'Terraform GitHub Actions'
+on:
+  pull_request:
+    branches:
+      - master
+jobs:
+  fmt:
+    name: 'terraform fmt'
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Checkout'
+        uses: actions/checkout@v2.3.4
+
+      - name: 'Terraform Format'
+        uses: 'clouddrove/github-actions@v7.0'
+        with:
+          actions_subcommand: 'fmt'
+
+  auto-scaling:
+    name: 'auto-scaling'
+    needs: fmt
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Checkout'
+        uses: actions/checkout@v2.3.4
+
+      
- name: 'Configure AWS Credentials' + uses: clouddrove/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.TEST_AWS_ACCESS_KEY }} + aws-secret-access-key: ${{ secrets.TEST_AWS_ACCESS_SECRET_KEY }} + aws-region: us-east-2 + + - name: 'Terraform init for Auto Scale' + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'init' + tf_actions_working_dir: ./_example/auto-scaling + + - name: 'Terraform validate for Auto Scale' + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'validate' + tf_actions_working_dir: ./_example/auto-scaling + + - name: 'Terraform plan for Auto Scale' + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'plan' + tf_actions_working_dir: ./_example/auto-scaling + fargate: + name: 'fargate' + needs: fmt + runs-on: ubuntu-latest + steps: + + - name: 'Checkout' + uses: actions/checkout@v2.3.4 + + - name: 'Configure AWS Credentials' + uses: clouddrove/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.TEST_AWS_ACCESS_KEY }} + aws-secret-access-key: ${{ secrets.TEST_AWS_ACCESS_SECRET_KEY }} + aws-region: us-east-2 + + - name: 'Terraform init for fargate' + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'init' + tf_actions_working_dir: ./_example/fargate + + - name: 'Terraform validate for fargate' + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'validate' + tf_actions_working_dir: ./_example/fargate + + - name: 'Terraform plan for fargate' + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'plan' + tf_actions_working_dir: ./_example/fargate + + pre-commit: + name: 'Pre-Commit' + needs: + - fmt + - auto-scaling + - fargate + runs-on: ubuntu-latest + steps: + - name: 'Checkout' + uses: actions/checkout@v2.3.4 + + - name: 'Install Tflint' + run: | + curl https://raw.githubusercontent.com/terraform-linters/tflint/master/install_linux.sh | bash + - name: 'Pre-Commit 🔎' + uses: pre-commit/action@v2.0.3 
+ continue-on-error: true + + - name: 'Slack Notification' + uses: clouddrove/action-slack@v2 + with: + status: ${{ job.status }} + fields: repo,author + author_name: 'CloudDrove' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} # required + if: always() diff --git a/.github/workflows/terratest.yml b/.github/workflows/terratest.yml new file mode 100644 index 0000000..5e41e28 --- /dev/null +++ b/.github/workflows/terratest.yml @@ -0,0 +1,51 @@ +name: 'Terratest GitHub Actions' +on: + pull_request: + branches: + - master + types: [labeled] + +jobs: + terraform: + name: 'Terraform' + runs-on: ubuntu-latest + steps: + + - name: 'Checkout' + uses: actions/checkout@master + + - name: Configure AWS Credentials + uses: clouddrove/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.TEST_AWS_ACCESS_KEY }} + aws-secret-access-key: ${{ secrets.TEST_AWS_ACCESS_SECRET_KEY }} + aws-region: us-east-2 + + - name: 'Terratest auto-scaling' + if: ${{ github.event.label.name == 'terratest' }} + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'terratest' + tf_actions_working_dir: '_test/auto-scaling' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: 'Terratest fargate' + if: ${{ github.event.label.name == 'terratest' }} + uses: 'clouddrove/github-actions@v7.0' + with: + actions_subcommand: 'terratest' + tf_actions_working_dir: '_test/fargate' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: 'Slack Notification' + uses: clouddrove/action-slack@v2 + with: + status: ${{ job.status }} + fields: repo,author + author_name: 'CloudDrove' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} # required + if: always() diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c78b7d1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,42 @@ +eks-admin-cluster-role-binding.yaml 
+eks-admin-service-account.yaml
+config-map-aws-auth*.yaml
+kubeconfig_*
+.idea
+
+#################################################################
+# Default .gitignore content for all terraform-aws-modules below
+#################################################################
+
+.DS_Store
+
+# Local .terraform directories
+**/.terraform/*
+
+# Terraform lockfile
+.terraform.lock.hcl
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+*.tfplan
+
+# Crash log files
+crash.log
+
+# Exclude all .tfvars files, which are likely to contain sensitive data, such as
+# password, private keys, and other secrets. These should not be part of version
+# control as they are data points which are potentially sensitive and subject
+# to change depending on the environment.
+*.tfvars
+
+# Ignore override files as they are usually used to override resources locally and so
+# are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Ignore CLI configuration files
+.terraformrc
+terraform.rc
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..c207dca
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,28 @@
+repos:
+  - repo: https://github.com/antonbabenko/pre-commit-terraform
+    rev: v1.58.0
+    hooks:
+      - id: terraform_fmt
+      # - id: terraform_validate
+      - id: terraform_docs
+        args:
+          - '--args=--lockfile=false'
+      - id: terraform_tflint
+        args:
+          - '--args=--only=terraform_deprecated_interpolation'
+          - '--args=--only=terraform_deprecated_index'
+          - '--args=--only=terraform_unused_declarations'
+          - '--args=--only=terraform_comment_syntax'
+          - '--args=--only=terraform_documented_outputs'
+          - '--args=--only=terraform_documented_variables'
+          - '--args=--only=terraform_typed_variables'
+          - '--args=--only=terraform_module_pinned_source'
+          # - '--args=--only=terraform_naming_convention'
+          - '--args=--only=terraform_required_version'
+          - '--args=--only=terraform_required_providers'
+          - 
'--args=--only=terraform_standard_module_structure' + - '--args=--only=terraform_workspace_remote' + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-merge-conflict diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..342f610 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Cloud Drove + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..e69de29 diff --git a/README.md b/README.md new file mode 100644 index 0000000..e69de29 diff --git a/README.yaml b/README.yaml new file mode 100644 index 0000000..d220895 --- /dev/null +++ b/README.yaml @@ -0,0 +1,175 @@ +--- +# +# This is the canonical configuration for the `README.md` +# Run `make readme` to rebuild the `README.md` +# + +# Name of this project +name: Terraform AWS EKS + +# License of this project +license: "MIT" + +# Canonical GitHub repo +github_repo: clouddrove/terraform-aws-eks + +# Badges to display +badges: + - name: "Terraform" + image: "https://img.shields.io/badge/Terraform-v0.13-green" + url: "https://www.terraform.io" + - name: "Licence" + image: "https://img.shields.io/badge/License-MIT-blue.svg" + url: "LICENSE.md" + +# Prerequesties to display +prerequesties: + - name: "Kubectl" + url: "https://kubernetes.io/docs/tasks/tools/install-kubectl/" + - name: "AWS IAM Authenticator" + url: "https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html" + + +# description of this project +description: |- + Terraform module will be created Autoscaling, Workers, EKS, Node Groups. + +# extra content +include: + - "terraform.md" + +# How to use this project +usage : |- + ### Sample example + Here is an example of how you can use this module in your inventory structure: + ```hcl +module "eks" { + source = "../.." 
+ + name = "eks" + environment = "test" + label_order = ["environment", "name"] + enabled = true + + kubernetes_version = "1.21" + endpoint_private_access = true + endpoint_public_access = true + enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + oidc_provider_enabled = true + + # Network + vpc_id = module.vpc.vpc_id + subnet_ids = module.subnets.private_subnet_id + allowed_security_groups = [module.ssh.security_group_ids] + allowed_cidr_blocks = ["0.0.0.0/0"] + + # Node Groups Defaults Values It will Work all Node Groups + self_node_group_defaults = { + subnet_ids = module.subnets.private_subnet_id + key_name = module.keypair.name + propagate_tags = [{ + key = "aws-node-termination-handler/managed" + value = true + propagate_at_launch = true + }, + { + key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" + value = "owned" + propagate_at_launch = true + } + ] + + block_device_mappings = { + xvda = { + device_name = "/dev/xvda" + ebs = { + volume_size = 50 + volume_type = "gp3" + iops = 3000 + throughput = 150 + } + } + } + } + + + self_node_groups = { + tools = { + name = "tools" + min_size = 1 + max_size = 7 + desired_size = 2 + bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" + instance_type = "t3a.medium" + } + + spot = { + name = "spot" + instance_market_options = { + market_type = "spot" + } + min_size = 1 + max_size = 7 + desired_size = 1 + bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" + instance_type = "m5.large" + } + } + + # Node Groups Defaults Values It will Work all Node Groups + managed_node_group_defaults = { + subnet_ids = module.subnets.private_subnet_id + key_name = module.keypair.name + nodes_additional_security_group_ids = [module.ssh.security_group_ids] + tags = { + Example = "test" + } + + block_device_mappings = { + xvda = { + device_name = "/dev/xvda" + ebs = { + volume_size = 50 + volume_type = "gp3" + 
iops = 3000 + throughput = 150 + } + } + } + } + + managed_node_group = { + test = { + min_size = 1 + max_size = 7 + desired_size = 2 + instance_types = ["t3a.medium"] + } + + spot = { + name = "spot" + capacity_type = "SPOT" + + min_size = 1 + max_size = 7 + desired_size = 1 + force_update_version = true + instance_types = ["t3.medium", "t3a.medium"] + } + } + + apply_config_map_aws_auth = true + map_additional_iam_users = [ + { + userarn = "arn:aws:iam::xxxxxx:user/nikita@clouddrove.com" + username = "nikita@clouddrove.com" + groups = ["system:masters"] + }, + { + userarn = "arn:aws:iam::xxxxxx:user/sohan@clouddrove.com" + username = "sohan@clouddrove.com" + groups = ["system:masters"] + } + ] + } + ``` diff --git a/_aws_auth.tf b/_aws_auth.tf new file mode 100644 index 0000000..6739a9f --- /dev/null +++ b/_aws_auth.tf @@ -0,0 +1,114 @@ + + +# The EKS service does not provide a cluster-level API parameter or resource to automatically configure the underlying Kubernetes cluster +# to allow worker nodes to join the cluster via AWS IAM role authentication. 
+ +# NOTE: To automatically apply the Kubernetes configuration to the cluster (which allows the worker nodes to join the cluster), +# the requirements outlined here must be met: +# https://learn.hashicorp.com/terraform/aws/eks-intro#preparation +# https://learn.hashicorp.com/terraform/aws/eks-intro#configuring-kubectl-for-eks +# https://learn.hashicorp.com/terraform/aws/eks-intro#required-kubernetes-configuration-to-join-worker-nodes + +# Additional links +# https://learn.hashicorp.com/terraform/aws/eks-intro +# https://itnext.io/how-does-client-authentication-work-on-amazon-eks-c4f2b90d943b +# https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html +# https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html +# https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html +# https://docs.aws.amazon.com/en_pv/eks/latest/userguide/create-kubeconfig.html +# https://itnext.io/kubernetes-authorization-via-open-policy-agent-a9455d9d5ceb +# http://marcinkaszynski.com/2018/07/12/eks-auth.html +# https://cloud.google.com/kubernetes-engine/docs/concepts/configmap +# http://yaml-multiline.info +# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/216 +# https://www.terraform.io/docs/cloud/run/install-software.html +# https://stackoverflow.com/questions/26123740/is-it-possible-to-install-aws-cli-package-without-root-permission +# https://stackoverflow.com/questions/58232731/kubectl-missing-form-terraform-cloud +# https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html +# https://docs.aws.amazon.com/cli/latest/userguide/install-cliv1.html + + +locals { + certificate_authority_data_list = coalescelist(aws_eks_cluster.default.*.certificate_authority, [[{ data : "" }]]) + certificate_authority_data_list_internal = local.certificate_authority_data_list[0] + certificate_authority_data_map = local.certificate_authority_data_list_internal[0] + certificate_authority_data = 
local.certificate_authority_data_map["data"] + + # Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap + # Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically + map_worker_roles = [ + { + rolearn : join("", aws_iam_role.node_groups.*.arn) + username : "system:node:{{EC2PrivateDNSName}}" + groups : [ + "system:bootstrappers", + "system:nodes" + ] + } + ] +} + +data "template_file" "kubeconfig" { + count = var.enabled ? 1 : 0 + template = file("${path.module}/_kubeconfig.tpl") + + vars = { + server = join("", aws_eks_cluster.default.*.endpoint) + certificate_authority_data = local.certificate_authority_data + cluster_name = module.labels.id + } +} + +resource "null_resource" "wait_for_cluster" { + count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0 + depends_on = [aws_eks_cluster.default[0]] + + provisioner "local-exec" { + command = var.wait_for_cluster_command + interpreter = var.local_exec_interpreter + environment = { + ENDPOINT = aws_eks_cluster.default[0].endpoint + } + } +} + +data "aws_eks_cluster" "eks" { + count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0 + name = join("", aws_eks_cluster.default.*.id) +} + +# Get an authentication token to communicate with the EKS cluster. +# By default (before other roles are added to the Auth ConfigMap), you can authenticate to EKS cluster only by assuming the role that created the cluster. +# `aws_eks_cluster_auth` uses IAM credentials from the AWS provider to generate a temporary token. +# If the AWS provider assumes an IAM role, `aws_eks_cluster_auth` will use the same IAM role to get the auth token. +# https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html +data "aws_eks_cluster_auth" "eks" { + count = var.enabled && var.apply_config_map_aws_auth ? 
1 : 0 + name = join("", aws_eks_cluster.default.*.id) +} + +provider "kubernetes" { + token = join("", data.aws_eks_cluster_auth.eks.*.token) + host = join("", data.aws_eks_cluster.eks.*.endpoint) + cluster_ca_certificate = base64decode(join("", data.aws_eks_cluster.eks.*.certificate_authority.0.data)) +} + +resource "kubernetes_config_map" "aws_auth_ignore_changes" { + count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0 + depends_on = [null_resource.wait_for_cluster[0]] + + metadata { + name = "aws-auth" + namespace = "kube-system" + } + + data = { + mapRoles = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))) + mapUsers = yamlencode(var.map_additional_iam_users) + mapAccounts = yamlencode(var.map_additional_aws_accounts) + } + + lifecycle { + ignore_changes = [data["mapRoles"]] + } +} diff --git a/_aws_node_groups.tf b/_aws_node_groups.tf new file mode 100644 index 0000000..71c802c --- /dev/null +++ b/_aws_node_groups.tf @@ -0,0 +1,77 @@ +module "eks_managed_node_group" { + source = "./node_group/aws_managed" + + for_each = { for k, v in var.managed_node_group : k => v if var.enabled } + + enabled = try(each.value.enabled, true) + + cluster_name = join("", aws_eks_cluster.default.*.name) + cluster_version = var.kubernetes_version + vpc_security_group_ids = compact( + concat( + aws_security_group.node_group.*.id, + aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id, + var.nodes_additional_security_group_ids + + ) + ) + # EKS Managed Node Group + name = try(each.value.name, each.key) + environment = var.environment + repository = var.repository + subnet_ids = try(each.value.subnet_ids, var.managed_node_group_defaults.subnet_ids, var.subnet_ids) + + min_size = try(each.value.min_size, var.managed_node_group_defaults.min_size, 1) + max_size = try(each.value.max_size, var.managed_node_group_defaults.max_size, 3) + desired_size = try(each.value.desired_size, var.managed_node_group_defaults.desired_size, 1) + + 
ami_id = try(each.value.ami_id, var.managed_node_group_defaults.ami_id, "") + ami_type = try(each.value.ami_type, var.managed_node_group_defaults.ami_type, null) + ami_release_version = try(each.value.ami_release_version, var.managed_node_group_defaults.ami_release_version, null) + + capacity_type = try(each.value.capacity_type, var.managed_node_group_defaults.capacity_type, null) + disk_size = try(each.value.disk_size, var.managed_node_group_defaults.disk_size, null) + force_update_version = try(each.value.force_update_version, var.managed_node_group_defaults.force_update_version, null) + instance_types = try(each.value.instance_types, var.managed_node_group_defaults.instance_types, null) + labels = try(each.value.labels, var.managed_node_group_defaults.labels, null) + + remote_access = try(each.value.remote_access, var.managed_node_group_defaults.remote_access, {}) + taints = try(each.value.taints, var.managed_node_group_defaults.taints, {}) + update_config = try(each.value.update_config, var.managed_node_group_defaults.update_config, {}) + timeouts = try(each.value.timeouts, var.managed_node_group_defaults.timeouts, {}) + + # Launch Template + launch_template_description = try(each.value.launch_template_description, var.managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group") + launch_template_tags = try(each.value.launch_template_tags, var.managed_node_group_defaults.launch_template_tags, {}) + + ebs_optimized = try(each.value.ebs_optimized, var.managed_node_group_defaults.ebs_optimized, null) + key_name = try(each.value.key_name, var.managed_node_group_defaults.key_name, null) + kms_key_id = try(each.value.kms_key_id, var.managed_node_group_defaults.ebs_optimized, null) + + launch_template_default_version = try(each.value.launch_template_default_version, var.managed_node_group_defaults.launch_template_default_version, null) + update_launch_template_default_version = 
try(each.value.update_launch_template_default_version, var.managed_node_group_defaults.update_launch_template_default_version, true) + disable_api_termination = try(each.value.disable_api_termination, var.managed_node_group_defaults.disable_api_termination, null) + kernel_id = try(each.value.kernel_id, var.managed_node_group_defaults.kernel_id, null) + ram_disk_id = try(each.value.ram_disk_id, var.managed_node_group_defaults.ram_disk_id, null) + + block_device_mappings = try(each.value.block_device_mappings, var.managed_node_group_defaults.block_device_mappings, {}) + capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.managed_node_group_defaults.capacity_reservation_specification, null) + cpu_options = try(each.value.cpu_options, var.managed_node_group_defaults.cpu_options, null) + credit_specification = try(each.value.credit_specification, var.managed_node_group_defaults.credit_specification, null) + elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, var.managed_node_group_defaults.elastic_gpu_specifications, null) + elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.managed_node_group_defaults.elastic_inference_accelerator, null) + enclave_options = try(each.value.enclave_options, var.managed_node_group_defaults.enclave_options, null) + license_specifications = try(each.value.license_specifications, var.managed_node_group_defaults.license_specifications, null) + metadata_options = try(each.value.metadata_options, var.managed_node_group_defaults.metadata_options, local.metadata_options) + enable_monitoring = try(each.value.enable_monitoring, var.managed_node_group_defaults.enable_monitoring, true) + network_interfaces = try(each.value.network_interfaces, var.managed_node_group_defaults.network_interfaces, []) + placement = try(each.value.placement, var.managed_node_group_defaults.placement, null) + + # IAM role + iam_role_arn = join("", aws_iam_role.node_groups.*.arn) + + 
tags = merge(var.tags, try(each.value.tags, var.managed_node_group_defaults.tags, {})) +} + + + diff --git a/_data.tf b/_data.tf new file mode 100644 index 0000000..d9ea54c --- /dev/null +++ b/_data.tf @@ -0,0 +1,4 @@ +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} + diff --git a/_example/aws_managed/main.tf b/_example/aws_managed/main.tf new file mode 100644 index 0000000..1fab4f3 --- /dev/null +++ b/_example/aws_managed/main.tf @@ -0,0 +1,179 @@ +provider "aws" { + region = "eu-west-1" +} + +locals { + tags = { + "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" + } +} + +################################################################################ +# VPC +################################################################################ + +module "vpc" { + source = "clouddrove/vpc/aws" + version = "0.15.0" + + name = "vpc" + environment = "test" + label_order = ["environment", "name"] + vpc_enabled = true + + cidr_block = "10.10.0.0/16" +} + +################################################################################ +# Subnets +################################################################################ + +module "subnets" { + source = "clouddrove/subnet/aws" + version = "0.15.0" + + name = "subnets" + environment = "test" + label_order = ["environment", "name"] + tags = local.tags + enabled = true + + nat_gateway_enabled = true + availability_zones = ["eu-west-1a", "eu-west-1b"] + vpc_id = module.vpc.vpc_id + cidr_block = module.vpc.vpc_cidr_block + ipv6_cidr_block = module.vpc.ipv6_cidr_block + type = "public-private" + igw_id = module.vpc.igw_id +} + +################################################################################ +# Keypair +################################################################################ + +module "keypair" { + source = "clouddrove/keypair/aws" + version = "0.15.0" + + name = "key" + environment = "test" + label_order = ["name", "environment"] 
+ + enable_key_pair = true + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDc4AjHFctUATtd5of4u9bJtTgkh9bKogSDjxc9QqbylRORxUa422jO+t1ldTVdyqDRKltxQCJb4v23HZc2kssU5uROxpiF2fzgiHXRduL+RtyOtY2J+rNUdCRmHz4WQySblYpgteIJZpVo2smwdek8xSpjoHXhgxxa9hb4pQQwyjtVGEdH8vdYwtxgPZgPVaJgHVeJgVmhjTf2VGTATaeR9txzHsEPxhe/n1y34mQjX0ygEX8x0RZzlGziD1ih3KPaIHcpTVSYYk4LOoMK38vEI67SIMomskKn4yU043s+t9ZriJwk2V9+oU6tJU/5E1rd0SskXUhTypc3/Znc/rkYtLe8s6Uy26LOrBFzlhnCT7YH1XbCv3rEO+Nn184T4BSHeW2up8UJ1SOEd+WzzynXczdXoQcBN2kaz4dYFpRXchsAB6ejZrbEq7wyZvutf11OiS21XQ67+30lEL2WAO4i95e4sI8AdgwJgzrqVcicr3ImE+BRDkndMn5k1LhNGqwMD3Iuoel84xvinPAcElDLiFmL3BJVA/53bAlUmWqvUGW9SL5JpLUmZgE6kp+Tps7D9jpooGGJKmqgJLkJTzAmTSJh0gea/rT5KwI4j169TQD9xl6wFqns4BdQ4dMKHQCgDx8LbEd96l9F9ruWwQ8EAZBe4nIEKTV9ri+04JVhSQ== sohan@clouddrove.com" +} + +################################################################################ +# SSH +################################################################################ + +module "ssh" { + source = "clouddrove/security-group/aws" + version = "0.15.0" + + name = "ssh" + environment = "test" + label_order = ["environment", "name"] + + vpc_id = module.vpc.vpc_id + allowed_ip = [module.vpc.vpc_cidr_block] + allowed_ports = [22] +} + +################################################################################ +# EKS Module +################################################################################ + +module "eks" { + source = "../.." 
+ + name = "eks" + environment = "test" + label_order = ["environment", "name"] + enabled = true + + # EKS + kubernetes_version = "1.21" + endpoint_private_access = true + endpoint_public_access = true + enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + oidc_provider_enabled = true + # Networking + vpc_id = module.vpc.vpc_id + subnet_ids = module.subnets.private_subnet_id + allowed_security_groups = [module.ssh.security_group_ids] + allowed_cidr_blocks = ["10.0.0.0/16"] + + ################################################################################ + # AWS Managed Node Group + ################################################################################ + # Node Groups Defaults Values It will Work all Node Groups + managed_node_group_defaults = { + subnet_ids = module.subnets.private_subnet_id + key_name = module.keypair.name + nodes_additional_security_group_ids = [module.ssh.security_group_ids] + tags = { + Example = "test" + } + block_device_mappings = { + xvda = { + device_name = "/dev/xvda" + ebs = { + volume_size = 50 + volume_type = "gp3" + iops = 3000 + throughput = 150 + } + } + } + } + managed_node_group = { + tools = { + min_size = 1 + max_size = 7 + desired_size = 2 + instance_types = ["t3a.medium"] + } + + spot = { + name = "spot" + capacity_type = "SPOT" + + min_size = 1 + max_size = 7 + desired_size = 1 + force_update_version = true + instance_types = ["t3.medium", "t3a.medium"] + } + } + apply_config_map_aws_auth = true + map_additional_iam_users = [ + { + userarn = "arn:aws:iam::924144197303:user/nikita@clouddrove.com" + username = "nikita@clouddrove.com" + groups = ["system:masters"] + }, + { + userarn = "arn:aws:iam::924144197303:user/sohan@clouddrove.com" + username = "sohan@clouddrove.com" + groups = ["system:masters"] + } + ] +} + +################################################################################ +# Kubernetes provider configuration 
+################################################################################ + +data "aws_eks_cluster" "this" { + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "this" { + name = module.eks.cluster_certificate_authority_data +} +provider "kubernetes" { + host = data.aws_eks_cluster.this.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.this.token +} diff --git a/_example/aws_managed/output.tf b/_example/aws_managed/output.tf new file mode 100644 index 0000000..e69de29 diff --git a/_example/complete/main.tf b/_example/complete/main.tf new file mode 100644 index 0000000..60c612b --- /dev/null +++ b/_example/complete/main.tf @@ -0,0 +1,240 @@ +provider "aws" { + region = "eu-west-1" +} +locals { + tags = { + "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" + } +} + +################################################################################ +# VPC +################################################################################ + +module "vpc" { + source = "clouddrove/vpc/aws" + version = "0.15.0" + + name = "vpc" + environment = "test" + label_order = ["environment", "name"] + vpc_enabled = true + + cidr_block = "10.10.0.0/16" +} + +################################################################################ +# Subnets +################################################################################ + +module "subnets" { + source = "clouddrove/subnet/aws" + version = "0.15.0" + + name = "subnets" + environment = "test" + label_order = ["environment", "name"] + tags = local.tags + enabled = true + + nat_gateway_enabled = true + availability_zones = ["eu-west-1a", "eu-west-1b"] + vpc_id = module.vpc.vpc_id + cidr_block = module.vpc.vpc_cidr_block + ipv6_cidr_block = module.vpc.ipv6_cidr_block + type = "public-private" + igw_id = module.vpc.igw_id +} + +################################################################################ +# 
Keypair +################################################################################ + +module "keypair" { + source = "clouddrove/keypair/aws" + version = "0.15.0" + + name = "key" + environment = "test" + label_order = ["name", "environment"] + + enable_key_pair = true + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDc4AjHFctUATtd5of4u9bJtTgkh9bKogSDjxc9QqbylRORxUa422jO+t1ldTVdyqDRKltxQCJb4v23HZc2kssU5uROxpiF2fzgiHXRduL+RtyOtY2J+rNUdCRmHz4WQySblYpgteIJZpVo2smwdek8xSpjoHXhgxxa9hb4pQQwyjtVGEdH8vdYwtxgPZgPVaJgHVeJgVmhjTf2VGTATaeR9txzHsEPxhe/n1y34mQjX0ygEX8x0RZzlGziD1ih3KPaIHcpTVSYYk4LOoMK38vEI67SIMomskKn4yU043s+t9ZriJwk2V9+oU6tJU/5E1rd0SskXUhTypc3/Znc/rkYtLe8s6Uy26LOrBFzlhnCT7YH1XbCv3rEO+Nn184T4BSHeW2up8UJ1SOEd+WzzynXczdXoQcBN2kaz4dYFpRXchsAB6ejZrbEq7wyZvutf11OiS21XQ67+30lEL2WAO4i95e4sI8AdgwJgzrqVcicr3ImE+BRDkndMn5k1LhNGqwMD3Iuoel84xvinPAcElDLiFmL3BJVA/53bAlUmWqvUGW9SL5JpLUmZgE6kp+Tps7D9jpooGGJKmqgJLkJTzAmTSJh0gea/rT5KwI4j169TQD9xl6wFqns4BdQ4dMKHQCgDx8LbEd96l9F9ruWwQ8EAZBe4nIEKTV9ri+04JVhSQ== sohan@clouddrove.com" +} + +################################################################################ +# SSH +################################################################################ + +module "ssh" { + source = "clouddrove/security-group/aws" + version = "0.15.0" + + name = "ssh" + environment = "test" + label_order = ["environment", "name"] + + vpc_id = module.vpc.vpc_id + allowed_ip = [module.vpc.vpc_cidr_block] + allowed_ports = [22] +} + +################################################################################ +# EKS Module +################################################################################ + +module "eks" { + source = "../.." 
+ + name = "eks" + environment = "test" + label_order = ["environment", "name"] + enabled = true + + kubernetes_version = "1.21" + endpoint_private_access = true + endpoint_public_access = true + enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + oidc_provider_enabled = true + + # Networking + vpc_id = module.vpc.vpc_id + subnet_ids = module.subnets.private_subnet_id + allowed_security_groups = [module.ssh.security_group_ids] + allowed_cidr_blocks = ["10.0.0.0/16"] + + ################################################################################ + # Self Managed Node Group + ################################################################################ + # Node Groups Defaults Values It will Work all Node Groups + self_node_group_defaults = { + subnet_ids = module.subnets.private_subnet_id + key_name = module.keypair.name + propagate_tags = [{ + key = "aws-node-termination-handler/managed" + value = true + propagate_at_launch = true + }, + { + key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" + value = "owned" + propagate_at_launch = true + + } + ] + + block_device_mappings = { + xvda = { + device_name = "/dev/xvda" + ebs = { + volume_size = 50 + volume_type = "gp3" + iops = 3000 + throughput = 150 + } + } + } + } + + + self_node_groups = { + tools = { + name = "tools" + min_size = 1 + max_size = 7 + desired_size = 2 + bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" + instance_type = "t3a.medium" + } + + spot = { + name = "spot" + instance_market_options = { + market_type = "spot" + } + min_size = 1 + max_size = 7 + desired_size = 1 + bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" + instance_type = "m5.large" + } + + } + + + ################################################################################ + # AWS Managed Node Group + ################################################################################ + # 
Node Groups Defaults Values It will Work all Node Groups
+  managed_node_group_defaults = {
+    subnet_ids = module.subnets.private_subnet_id
+    key_name = module.keypair.name
+    nodes_additional_security_group_ids = [module.ssh.security_group_ids]
+    tags = {
+      Example = "test"
+    }
+
+    block_device_mappings = {
+      xvda = {
+        device_name = "/dev/xvda"
+        ebs = {
+          volume_size = 50
+          volume_type = "gp3"
+          iops = 3000
+          throughput = 150
+        }
+      }
+    }
+  }
+
+  managed_node_group = {
+    test = {
+      min_size = 1
+      max_size = 7
+      desired_size = 2
+      instance_types = ["t3a.medium"]
+    }
+
+    spot = {
+      name = "spot"
+      capacity_type = "SPOT"
+
+      min_size = 1
+      max_size = 7
+      desired_size = 1
+      force_update_version = true
+      instance_types = ["t3.medium", "t3a.medium"]
+    }
+  }
+
+  apply_config_map_aws_auth = true
+  map_additional_iam_users = [
+    {
+      userarn = "arn:aws:iam::924144197303:user/nikita@clouddrove.com"
+      username = "nikita@clouddrove.com"
+      groups = ["system:masters"]
+    },
+    {
+      userarn = "arn:aws:iam::924144197303:user/sohan@clouddrove.com"
+      username = "sohan@clouddrove.com"
+      groups = ["system:masters"]
+    }
+  ]
+}
+
+################################################################################
+# Kubernetes provider configuration
+################################################################################
+
+data "aws_eks_cluster" "this" {
+  name = module.eks.cluster_id
+}
+
+data "aws_eks_cluster_auth" "this" {
+  name = module.eks.cluster_id
+}
+provider "kubernetes" {
+  host = data.aws_eks_cluster.this.endpoint
+  cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
+  token = data.aws_eks_cluster_auth.this.token
+}
diff --git a/_example/complete/output.tf b/_example/complete/output.tf
new file mode 100644
index 0000000..2f34d1b
--- /dev/null
+++ b/_example/complete/output.tf
@@ -0,0 +1,11 @@
+output "eks_name" {
+  value = module.eks.cluster_id
+}
+
+output "node_iam_role_name" {
+  value = 
module.eks.node_group_iam_role_name +} + +output "tags" { + value = module.eks.tags +} \ No newline at end of file diff --git a/_example/self_managed/main.tf b/_example/self_managed/main.tf new file mode 100644 index 0000000..82d8933 --- /dev/null +++ b/_example/self_managed/main.tf @@ -0,0 +1,180 @@ +provider "aws" { + region = "eu-west-1" +} +locals { + tags = { + "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" + } +} + +################################################################################ +# VPC +################################################################################ + +module "vpc" { + source = "clouddrove/vpc/aws" + version = "0.15.0" + + name = "vpc" + environment = "test" + label_order = ["environment", "name"] + vpc_enabled = true + + cidr_block = "10.10.0.0/16" +} + +################################################################################ +# Subnets +################################################################################ + +module "subnets" { + source = "clouddrove/subnet/aws" + version = "0.15.0" + + name = "subnets" + environment = "test" + label_order = ["environment", "name"] + tags = local.tags + enabled = true + + nat_gateway_enabled = true + availability_zones = ["eu-west-1a", "eu-west-1b"] + vpc_id = module.vpc.vpc_id + cidr_block = module.vpc.vpc_cidr_block + ipv6_cidr_block = module.vpc.ipv6_cidr_block + type = "public-private" + igw_id = module.vpc.igw_id +} + +################################################################################ +# Keypair +################################################################################ + +module "keypair" { + source = "clouddrove/keypair/aws" + version = "0.15.0" + + name = "key" + environment = "test" + label_order = ["name", "environment"] + + enable_key_pair = true + public_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDc4AjHFctUATtd5of4u9bJtTgkh9bKogSDjxc9QqbylRORxUa422jO+t1ldTVdyqDRKltxQCJb4v23HZc2kssU5uROxpiF2fzgiHXRduL+RtyOtY2J+rNUdCRmHz4WQySblYpgteIJZpVo2smwdek8xSpjoHXhgxxa9hb4pQQwyjtVGEdH8vdYwtxgPZgPVaJgHVeJgVmhjTf2VGTATaeR9txzHsEPxhe/n1y34mQjX0ygEX8x0RZzlGziD1ih3KPaIHcpTVSYYk4LOoMK38vEI67SIMomskKn4yU043s+t9ZriJwk2V9+oU6tJU/5E1rd0SskXUhTypc3/Znc/rkYtLe8s6Uy26LOrBFzlhnCT7YH1XbCv3rEO+Nn184T4BSHeW2up8UJ1SOEd+WzzynXczdXoQcBN2kaz4dYFpRXchsAB6ejZrbEq7wyZvutf11OiS21XQ67+30lEL2WAO4i95e4sI8AdgwJgzrqVcicr3ImE+BRDkndMn5k1LhNGqwMD3Iuoel84xvinPAcElDLiFmL3BJVA/53bAlUmWqvUGW9SL5JpLUmZgE6kp+Tps7D9jpooGGJKmqgJLkJTzAmTSJh0gea/rT5KwI4j169TQD9xl6wFqns4BdQ4dMKHQCgDx8LbEd96l9F9ruWwQ8EAZBe4nIEKTV9ri+04JVhSQ== sohan@clouddrove.com" +} + +################################################################################ +# SSH +################################################################################ + +module "ssh" { + source = "clouddrove/security-group/aws" + version = "0.15.0" + + name = "ssh" + environment = "test" + label_order = ["environment", "name"] + + vpc_id = module.vpc.vpc_id + allowed_ip = [module.vpc.vpc_cidr_block] + allowed_ports = [22] +} + +################################################################################ +# EKS Module +################################################################################ + +module "eks" { + source = "../.." 
+ + name = "eks" + environment = "test" + label_order = ["environment", "name"] + enabled = true + + # EKS + kubernetes_version = "1.21" + endpoint_private_access = true + endpoint_public_access = true + enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + oidc_provider_enabled = true + # Networking + vpc_id = module.vpc.vpc_id + subnet_ids = module.subnets.private_subnet_id + allowed_security_groups = [module.ssh.security_group_ids] + allowed_cidr_blocks = ["10.0.0.0/16"] + + + ################################################################################ + # Self Managed Node Group + ################################################################################ + # Node Groups Defaults Values It will Work all Node Groups + self_node_group_defaults = { + subnet_ids = module.subnets.private_subnet_id + key_name = module.keypair.name + propagate_tags = [{ + key = "aws-node-termination-handler/managed" + value = true + propagate_at_launch = true + }, + { + key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" + value = "owned" + propagate_at_launch = true + + } + ] + + block_device_mappings = { + xvda = { + device_name = "/dev/xvda" + ebs = { + volume_size = 50 + volume_type = "gp3" + iops = 3000 + throughput = 150 + } + } + } + } + + + self_node_groups = { + tools = { + name = "tools" + min_size = 1 + max_size = 7 + desired_size = 2 + bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" + instance_type = "t3a.medium" + } + + spot = { + name = "spot" + instance_market_options = { + market_type = "spot" + } + min_size = 1 + max_size = 7 + desired_size = 1 + bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" + instance_type = "m5.large" + } + } +} + +################################################################################ +# Kubernetes provider configuration 
+################################################################################
+data "aws_eks_cluster" "this" {
+  name = module.eks.cluster_id
+}
+
+data "aws_eks_cluster_auth" "this" {
+  name = module.eks.cluster_id
+}
+#
+provider "kubernetes" {
+  host = data.aws_eks_cluster.this.endpoint
+  cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
+  token = data.aws_eks_cluster_auth.this.token
+}
\ No newline at end of file
diff --git a/_example/self_managed/output.tf b/_example/self_managed/output.tf
new file mode 100644
index 0000000..e69de29
diff --git a/_example/self_managed/versions.tf b/_example/self_managed/versions.tf
new file mode 100644
index 0000000..d07da4c
--- /dev/null
+++ b/_example/self_managed/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 0.14"
+
+  required_providers {
+    aws = {
+      source = "hashicorp/aws"
+      version = ">= 3.72"
+    }
+    cloudinit = {
+      source = "hashicorp/cloudinit"
+      version = ">= 2.0"
+    }
+  }
+}
diff --git a/_iam.tf b/_iam.tf
new file mode 100644
index 0000000..f774e49
--- /dev/null
+++ b/_iam.tf
@@ -0,0 +1,164 @@
+
+data "aws_iam_policy_document" "assume_role" {
+  count = var.enabled ? 1 : 0
+
+  statement {
+    effect = "Allow"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type = "Service"
+      identifiers = ["eks.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "default" {
+  count = var.enabled ? 1 : 0
+
+  name = module.labels.id
+  assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
+  permissions_boundary = var.permissions_boundary
+
+  tags = module.labels.tags
+}
+
+resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
+  count = var.enabled ? 
1 : 0 + policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", join("", data.aws_partition.current.*.partition)) + role = join("", aws_iam_role.default.*.name) +} + +resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" { + count = var.enabled ? 1 : 0 + policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", join("", data.aws_partition.current.*.partition)) + role = join("", aws_iam_role.default.*.name) +} + +data "aws_iam_policy_document" "service_role" { + count = var.enabled ? 1 : 0 + + statement { + effect = "Allow" + actions = [ + "ec2:DescribeInternetGateways", + "elasticloadbalancing:SetIpAddressType", + "elasticloadbalancing:SetSubnets", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + ] + resources = ["*"] + } +} + + +resource "aws_iam_role_policy" "service_role" { + count = var.enabled ? 1 : 0 + role = join("", aws_iam_role.default.*.name) + policy = join("", data.aws_iam_policy_document.service_role.*.json) + + name = module.labels.id + +} + + +#-------------------------------------------------------IAM FOR node Group---------------------------------------------- + +#Module : IAM ROLE +#Description : Provides an IAM role. +resource "aws_iam_role" "node_groups" { + count = var.enabled ? 1 : 0 + name = "${module.labels.id}-node_group" + assume_role_policy = join("", data.aws_iam_policy_document.node_group.*.json) + tags = module.labels.tags +} + +#Module : IAM ROLE POLICY ATTACHMENT CNI +#Description : Attaches a Managed IAM Policy to an IAM role. +resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" { + count = var.enabled ? 1 : 0 + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + role = join("", aws_iam_role.node_groups.*.name) +} + +#Module : IAM ROLE POLICY ATTACHMENT EC2 CONTAINER REGISTRY READ ONLY +#Description : Attaches a Managed IAM Policy to an IAM role. +resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" { + count = var.enabled ? 
1 : 0 + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + role = join("", aws_iam_role.node_groups.*.name) +} + +resource "aws_iam_policy" "amazon_eks_node_group_autoscaler_policy" { + count = var.enabled ? 1 : 0 + name = format("%s-node-group-policy", module.labels.id) + policy = join("", data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy.*.json) +} + +resource "aws_iam_role_policy_attachment" "amazon_eks_node_group_autoscaler_policy" { + count = var.enabled ? 1 : 0 + policy_arn = join("", aws_iam_policy.amazon_eks_node_group_autoscaler_policy.*.arn) + role = join("", aws_iam_role.node_groups.*.name) +} + +resource "aws_iam_policy" "amazon_eks_worker_node_autoscaler_policy" { + count = var.enabled ? 1 : 0 + name = "${module.labels.id}-autoscaler" + path = "/" + policy = join("", data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy.*.json) +} + +resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_autoscaler_policy" { + count = var.enabled ? 1 : 0 + policy_arn = join("", aws_iam_policy.amazon_eks_worker_node_autoscaler_policy.*.arn) + role = join("", aws_iam_role.node_groups.*.name) +} + +resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" { + count = var.enabled ? 1 : 0 + policy_arn = format("%s/%s", local.aws_policy_prefix, "AmazonEKSWorkerNodePolicy") + role = join("", aws_iam_role.node_groups.*.name) +} + +data "aws_iam_policy_document" "node_group" { + count = var.enabled ? 1 : 0 + + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +# Autoscaler policy for node group +data "aws_iam_policy_document" "amazon_eks_node_group_autoscaler_policy" { + count = var.enabled ? 
1 : 0 + + statement { + effect = "Allow" + actions = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:DescribeLaunchTemplateVersions", + "ecr:*" + ] + resources = ["*"] + } +} + +#Module : IAM INSTANCE PROFILE +#Description : Provides an IAM instance profile. +resource "aws_iam_instance_profile" "default" { + count = var.enabled ? 1 : 0 + name = format("%s-instance-profile", module.labels.id) + role = join("", aws_iam_role.node_groups.*.name) +} \ No newline at end of file diff --git a/_kms.tf b/_kms.tf new file mode 100644 index 0000000..69ab2e4 --- /dev/null +++ b/_kms.tf @@ -0,0 +1,60 @@ +data "aws_iam_policy_document" "cloudwatch" { + policy_id = "key-policy-cloudwatch" + statement { + sid = "Enable IAM User Permissions" + actions = [ + "kms:*", + ] + effect = "Allow" + principals { + type = "AWS" + identifiers = [ + format( + "arn:%s:iam::%s:root", + join("", data.aws_partition.current.*.partition), + data.aws_caller_identity.current.account_id + ) + ] + } + resources = ["*"] + } + statement { + sid = "AllowCloudWatchLogs" + actions = [ + "kms:Encrypt*", + "kms:Decrypt*", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:Describe*" + ] + effect = "Allow" + principals { + type = "Service" + identifiers = [ + format( + "logs.%s.amazonaws.com", + data.aws_region.current.name + ) + ] + } + resources = ["*"] + } +} + +resource "aws_kms_key" "cluster" { + count = var.enabled && var.cluster_encryption_config_enabled ? 
1 : 0 + description = "EKS Cluster ${module.labels.id} Encryption Config KMS Key" + enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation + deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days + policy = var.cluster_encryption_config_kms_key_policy + tags = module.labels.tags +} + +resource "aws_kms_key" "cloudwatch_log" { + count = var.enabled && var.cluster_encryption_config_enabled ? 1 : 0 + description = "CloudWatch log group ${module.labels.id} Encryption Config KMS Key" + enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation + deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days + policy = data.aws_iam_policy_document.cloudwatch.json + tags = module.labels.tags +} \ No newline at end of file diff --git a/_kubeconfig.tpl b/_kubeconfig.tpl new file mode 100644 index 0000000..aaa2909 --- /dev/null +++ b/_kubeconfig.tpl @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Config +preferences: {} +clusters: + - cluster: + server: ${server} + certificate-authority-data: ${certificate_authority_data} + name: ${cluster_name} +contexts: + - context: + cluster: ${cluster_name} + user: ${cluster_name} + name: ${cluster_name} +current-context: ${cluster_name} +users: + - name: ${cluster_name} + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + command: aws-iam-authenticator + args: + - "token" + - "-i" + - "${cluster_name}" \ No newline at end of file diff --git a/_locals.tf b/_locals.tf new file mode 100644 index 0000000..3c6e261 --- /dev/null +++ b/_locals.tf @@ -0,0 +1,12 @@ +locals { + # Encryption + cluster_encryption_config = { + resources = var.cluster_encryption_config_resources + provider_key_arn = var.enabled ? 
join("", aws_kms_key.cluster.*.arn) : null + } + aws_policy_prefix = format("arn:%s:iam::aws:policy", join("", data.aws_partition.current.*.partition)) + +} + + + diff --git a/_security_groups.tf b/_security_groups.tf new file mode 100644 index 0000000..99615ae --- /dev/null +++ b/_security_groups.tf @@ -0,0 +1,66 @@ + +#Module : SECURITY GROUP +#Description : Provides a security group resource. +resource "aws_security_group" "node_group" { + count = var.enabled ? 1 : 0 + name = "${module.labels.id}-node-group" + description = "Security Group for nodes Groups" + vpc_id = var.vpc_id + tags = module.labels.tags +} + +#Module : SECURITY GROUP RULE EGRESS +#Description : Provides a security group rule resource. Represents a single egress group rule, +# which can be added to external Security Groups. +resource "aws_security_group_rule" "node_group" { + count = var.enabled ? 1 : 0 + description = "Allow all egress traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = join("", aws_security_group.node_group.*.id) + type = "egress" +} + +#Module : SECURITY GROUP RULE INGRESS SELF +#Description : Provides a security group rule resource. Represents a single ingress group rule, +# which can be added to external Security Groups. +resource "aws_security_group_rule" "ingress_self" { + count = var.enabled ? 1 : 0 + description = "Allow nodes to communicate with each other" + from_port = 0 + to_port = 65535 + protocol = "-1" + security_group_id = join("", aws_security_group.node_group.*.id) + source_security_group_id = join("", aws_security_group.node_group.*.id) + type = "ingress" +} + +#Module : SECURITY GROUP +#Description : Provides a security group rule resource. Represents a single ingress group rule, +# which can be added to external Security Groups. +resource "aws_security_group_rule" "ingress_security_groups_node_group" { + count = var.enabled ? 
length(var.allowed_security_groups) : 0 + description = "Allow inbound traffic from existing Security Groups" + from_port = 0 + to_port = 65535 + protocol = "-1" + source_security_group_id = element(var.allowed_security_groups, count.index) + security_group_id = join("", aws_security_group.node_group.*.id) + type = "ingress" +} + +#Module : SECURITY GROUP RULE CIDR BLOCK +#Description : Provides a security group rule resource. Represents a single ingress group rule, +# which can be added to external Security Groups. +resource "aws_security_group_rule" "ingress_cidr_blocks_node_group" { + count = var.enabled && length(var.allowed_cidr_blocks) > 0 ? 1 : 0 + description = "Allow inbound traffic from CIDR blocks" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = var.allowed_cidr_blocks + security_group_id = join("", aws_security_group.node_group.*.id) + type = "ingress" +} \ No newline at end of file diff --git a/_self_node_groups.tf b/_self_node_groups.tf new file mode 100644 index 0000000..4b6ff77 --- /dev/null +++ b/_self_node_groups.tf @@ -0,0 +1,115 @@ +locals { + metadata_options = { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 2 + } +} + +################################################################################ +# Self Managed Node Group +################################################################################ + +module "self_managed_node_group" { + source = "./node_group/self_managed" + + for_each = { for k, v in var.self_node_groups : k => v if var.enabled } + + enabled = try(each.value.enabled, true) + + cluster_name = join("", aws_eks_cluster.default.*.name) + security_group_ids = compact( + concat( + aws_security_group.node_group.*.id, + aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id + ) + ) + + iam_instance_profile_arn = join("", aws_iam_instance_profile.default.*.arn) + + # Autoscaling Group + name = try(each.value.name, each.key) + environment = var.environment + 
repository = var.repository + + + availability_zones = try(each.value.availability_zones, var.self_node_group_defaults.availability_zones, null) + subnet_ids = try(each.value.subnet_ids, var.self_node_group_defaults.subnet_ids, var.subnet_ids) + key_name = try(each.value.key_name, var.self_node_group_defaults.key_name, null) + + min_size = try(each.value.min_size, var.self_node_group_defaults.min_size, 0) + max_size = try(each.value.max_size, var.self_node_group_defaults.max_size, 3) + desired_size = try(each.value.desired_size, var.self_node_group_defaults.desired_size, 1) + capacity_rebalance = try(each.value.capacity_rebalance, var.self_node_group_defaults.capacity_rebalance, null) + min_elb_capacity = try(each.value.min_elb_capacity, var.self_node_group_defaults.min_elb_capacity, null) + wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, var.self_node_group_defaults.wait_for_elb_capacity, null) + wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, var.self_node_group_defaults.wait_for_capacity_timeout, null) + default_cooldown = try(each.value.default_cooldown, var.self_node_group_defaults.default_cooldown, null) + protect_from_scale_in = try(each.value.protect_from_scale_in, var.self_node_group_defaults.protect_from_scale_in, null) + + target_group_arns = try(each.value.target_group_arns, var.self_node_group_defaults.target_group_arns, null) + placement_group = try(each.value.placement_group, var.self_node_group_defaults.placement_group, null) + health_check_type = try(each.value.health_check_type, var.self_node_group_defaults.health_check_type, null) + health_check_grace_period = try(each.value.health_check_grace_period, var.self_node_group_defaults.health_check_grace_period, null) + + force_delete = try(each.value.force_delete, var.self_node_group_defaults.force_delete, null) + termination_policies = try(each.value.termination_policies, var.self_node_group_defaults.termination_policies, null) + suspended_processes = 
try(each.value.suspended_processes, var.self_node_group_defaults.suspended_processes, null) + max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_node_group_defaults.max_instance_lifetime, null) + + enabled_metrics = try(each.value.enabled_metrics, var.self_node_group_defaults.enabled_metrics, null) + metrics_granularity = try(each.value.metrics_granularity, var.self_node_group_defaults.metrics_granularity, null) + service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_node_group_defaults.service_linked_role_arn, null) + + initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_node_group_defaults.initial_lifecycle_hooks, []) + instance_refresh = try(each.value.instance_refresh, var.self_node_group_defaults.instance_refresh, null) + use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_node_group_defaults.use_mixed_instances_policy, false) + mixed_instances_policy = try(each.value.mixed_instances_policy, var.self_node_group_defaults.mixed_instances_policy, null) + warm_pool = try(each.value.warm_pool, var.self_node_group_defaults.warm_pool, null) + + create_schedule = try(each.value.create_schedule, var.self_node_group_defaults.create_schedule, false) + schedules = try(each.value.schedules, var.self_node_group_defaults.schedules, null) + + delete_timeout = try(each.value.delete_timeout, var.self_node_group_defaults.delete_timeout, null) + + # User data + cluster_endpoint = try(aws_eks_cluster.default[0].endpoint, "") + cluster_auth_base64 = try(aws_eks_cluster.default[0].certificate_authority[0].data, "") + pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.self_node_group_defaults.pre_bootstrap_user_data, "") + post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_node_group_defaults.post_bootstrap_user_data, "") + bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.self_node_group_defaults.bootstrap_extra_args, "") + + # 
Launch Template
+
+
+  ebs_optimized = try(each.value.ebs_optimized, var.self_node_group_defaults.ebs_optimized, true)
+  kubernetes_version = try(each.value.kubernetes_version, var.self_node_group_defaults.cluster_version, var.kubernetes_version)
+  instance_type = try(each.value.instance_type, var.self_node_group_defaults.instance_type, "m6i.large")
+  kms_key_id = try(each.value.kms_key_id, var.self_node_group_defaults.kms_key_id, null)
+
+  disable_api_termination = try(each.value.disable_api_termination, var.self_node_group_defaults.disable_api_termination, null)
+  instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_node_group_defaults.instance_initiated_shutdown_behavior, null)
+  kernel_id = try(each.value.kernel_id, var.self_node_group_defaults.kernel_id, null)
+  ram_disk_id = try(each.value.ram_disk_id, var.self_node_group_defaults.ram_disk_id, null)
+
+  block_device_mappings = try(each.value.block_device_mappings, var.self_node_group_defaults.block_device_mappings, [])
+  capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_node_group_defaults.capacity_reservation_specification, null)
+  cpu_options = try(each.value.cpu_options, var.self_node_group_defaults.cpu_options, null)
+  credit_specification = try(each.value.credit_specification, var.self_node_group_defaults.credit_specification, null)
+  elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, var.self_node_group_defaults.elastic_gpu_specifications, null)
+  elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.self_node_group_defaults.elastic_inference_accelerator, null)
+  enclave_options = try(each.value.enclave_options, var.self_node_group_defaults.enclave_options, null)
+  hibernation_options = try(each.value.hibernation_options, var.self_node_group_defaults.hibernation_options, null)
+  instance_market_options = try(each.value.instance_market_options, 
var.self_node_group_defaults.instance_market_options, null) + license_specifications = try(each.value.license_specifications, var.self_node_group_defaults.license_specifications, null) + metadata_options = try(each.value.metadata_options, var.self_node_group_defaults.metadata_options, local.metadata_options) + enable_monitoring = try(each.value.enable_monitoring, var.self_node_group_defaults.enable_monitoring, false) + # network_interfaces = try(each.value.network_interfaces, var.self_node_group_defaults.network_interfaces, []) + placement = try(each.value.placement, var.self_node_group_defaults.placement, null) + + tags = merge(var.tags, try(each.value.tags, var.self_node_group_defaults.tags, {})) + propagate_tags = try(each.value.propagate_tags, var.self_node_group_defaults.propagate_tags, []) + +} + + diff --git a/main.tf b/main.tf new file mode 100644 index 0000000..f1130b0 --- /dev/null +++ b/main.tf @@ -0,0 +1,93 @@ + +#Module : label +#Description : Terraform module to create consistent naming for multiple names. +module "labels" { + source = "clouddrove/labels/aws" + version = "0.15.0" + + name = var.name + repository = var.repository + environment = var.environment + managedby = var.managedby + attributes = compact(concat(var.attributes, ["cluster"])) + extra_tags = var.tags + label_order = var.label_order +} + +#Cloudwatch: Logs for Eks cluster +resource "aws_cloudwatch_log_group" "default" { + count = var.enabled && length(var.enabled_cluster_log_types) > 0 ? 1 : 0 + name = "/aws/eks/${module.labels.id}/cluster" + retention_in_days = var.cluster_log_retention_period + tags = module.labels.tags + kms_key_id = join("", aws_kms_key.cloudwatch_log.*.arn) +} + + +resource "aws_eks_cluster" "default" { + count = var.enabled ? 
1 : 0 + name = module.labels.id + role_arn = join("", aws_iam_role.default.*.arn) + version = var.kubernetes_version + enabled_cluster_log_types = var.enabled_cluster_log_types + tags = module.labels.tags + + + vpc_config { + subnet_ids = var.subnet_ids + endpoint_private_access = var.endpoint_private_access + endpoint_public_access = var.endpoint_public_access + public_access_cidrs = var.public_access_cidrs + security_group_ids = var.eks_additional_security_group_ids + } + + dynamic "encryption_config" { + for_each = var.cluster_encryption_config_enabled ? [local.cluster_encryption_config] : [] + content { + resources = lookup(encryption_config.value, "resources") + provider { + key_arn = lookup(encryption_config.value, "provider_key_arn") + } + } + } + + timeouts { + create = var.cluster_create_timeout + delete = var.cluster_delete_timeout + update = var.cluster_update_timeout + } + + depends_on = [ + aws_iam_role_policy_attachment.amazon_eks_cluster_policy, + aws_iam_role_policy_attachment.amazon_eks_service_policy, + aws_cloudwatch_log_group.default, + + ] +} + +data "tls_certificate" "cluster" { + count = var.enabled && var.oidc_provider_enabled ? 1 : 0 + url = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer) +} + +resource "aws_iam_openid_connect_provider" "default" { + count = var.enabled && var.oidc_provider_enabled ? 1 : 0 + url = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer) + + client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences))) + thumbprint_list = [join("", data.tls_certificate.cluster.*.certificates.0.sha1_fingerprint)] + tags = module.labels.tags +} + +resource "aws_eks_addon" "cluster" { + for_each = var.enabled ? 
{ for addon in var.addons : addon.addon_name => addon } : {} + + cluster_name = join("", aws_eks_cluster.default.*.name) + addon_name = each.key + addon_version = lookup(each.value, "addon_version", null) + resolve_conflicts = lookup(each.value, "resolve_conflicts", null) + service_account_role_arn = lookup(each.value, "service_account_role_arn", null) + + tags = module.labels.tags +} + diff --git a/node_group/aws_managed/main.tf b/node_group/aws_managed/main.tf new file mode 100644 index 0000000..10689f1 --- /dev/null +++ b/node_group/aws_managed/main.tf @@ -0,0 +1,274 @@ +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +#Module : label +#Description : Terraform module to create consistent naming for multiple names. +module "labels" { + source = "clouddrove/labels/aws" + version = "0.15.0" + + name = var.name + repository = var.repository + environment = var.environment + managedby = var.managedby + extra_tags = var.tags + attributes = compact(concat(var.attributes, ["nodes"])) + label_order = var.label_order +} + + +################################################################################ +# Launch template +################################################################################ + + + +resource "aws_launch_template" "this" { + count = var.enabled ? 
1 : 0 + name = module.labels.id + description = var.launch_template_description + + ebs_optimized = var.ebs_optimized + image_id = var.ami_id + # # Set on node group instead + # instance_type = var.launch_template_instance_type + key_name = var.key_name + user_data = var.before_cluster_joining_userdata + vpc_security_group_ids = var.vpc_security_group_ids + + disable_api_termination = var.disable_api_termination + kernel_id = var.kernel_id + ram_disk_id = var.ram_disk_id + + dynamic "block_device_mappings" { + for_each = var.block_device_mappings + content { + device_name = block_device_mappings.value.device_name + no_device = lookup(block_device_mappings.value, "no_device", null) + virtual_name = lookup(block_device_mappings.value, "virtual_name", null) + + dynamic "ebs" { + for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) + content { + delete_on_termination = true + encrypted = true + kms_key_id = var.kms_key_id + iops = lookup(ebs.value, "iops", null) + throughput = lookup(ebs.value, "throughput", null) + snapshot_id = lookup(ebs.value, "snapshot_id", null) + volume_size = lookup(ebs.value, "volume_size", null) + volume_type = lookup(ebs.value, "volume_type", null) + } + } + } + } + + dynamic "capacity_reservation_specification" { + for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : [] + content { + capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) + + dynamic "capacity_reservation_target" { + for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", []) + content { + capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) + } + } + } + } + + dynamic "cpu_options" { + for_each = var.cpu_options != null ? 
[var.cpu_options] : [] +    content { +      core_count       = cpu_options.value.core_count +      threads_per_core = cpu_options.value.threads_per_core +    } +  } + +  dynamic "credit_specification" { +    for_each = var.credit_specification != null ? [var.credit_specification] : [] +    content { +      cpu_credits = credit_specification.value.cpu_credits +    } +  } + +  dynamic "elastic_gpu_specifications" { +    for_each = var.elastic_gpu_specifications != null ? [var.elastic_gpu_specifications] : [] +    content { +      type = elastic_gpu_specifications.value.type +    } +  } + +  dynamic "elastic_inference_accelerator" { +    for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : [] +    content { +      type = elastic_inference_accelerator.value.type +    } +  } + +  dynamic "enclave_options" { +    for_each = var.enclave_options != null ? [var.enclave_options] : [] +    content { +      enabled = enclave_options.value.enabled +    } +  } + + +  dynamic "license_specification" { +    for_each = var.license_specifications != null ? [var.license_specifications] : [] +    content { +      license_configuration_arn = license_specification.value.license_configuration_arn +    } +  } + +  dynamic "metadata_options" { +    for_each = var.metadata_options != null ? [var.metadata_options] : [] +    content { +      http_endpoint               = lookup(metadata_options.value, "http_endpoint", null) +      http_tokens                 = lookup(metadata_options.value, "http_tokens", null) +      http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) +      http_protocol_ipv6          = lookup(metadata_options.value, "http_protocol_ipv6", null) +      instance_metadata_tags      = lookup(metadata_options.value, "instance_metadata_tags", null) +    } +  } + +  dynamic "monitoring" { +    for_each = var.enable_monitoring != null ? 
[1] : [] + content { + enabled = var.enable_monitoring + } + } + + dynamic "network_interfaces" { + for_each = var.network_interfaces + content { + associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null) + associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null) + delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null) + description = lookup(network_interfaces.value, "description", null) + device_index = lookup(network_interfaces.value, "device_index", null) + ipv4_addresses = lookup(network_interfaces.value, "ipv4_addresses", null) != null ? network_interfaces.value.ipv4_addresses : [] + ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null) + ipv6_addresses = lookup(network_interfaces.value, "ipv6_addresses", null) != null ? network_interfaces.value.ipv6_addresses : [] + ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null) + network_interface_id = lookup(network_interfaces.value, "network_interface_id", null) + private_ip_address = lookup(network_interfaces.value, "private_ip_address", null) + security_groups = lookup(network_interfaces.value, "security_groups", null) != null ? network_interfaces.value.security_groups : [] + # Set on EKS managed node group, will fail if set here + # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics + # subnet_id = lookup(network_interfaces.value, "subnet_id", null) + } + } + + dynamic "placement" { + for_each = var.placement != null ? 
[var.placement] : [] + content { + affinity = lookup(placement.value, "affinity", null) + availability_zone = lookup(placement.value, "availability_zone", null) + group_name = lookup(placement.value, "group_name", null) + host_id = lookup(placement.value, "host_id", null) + spread_domain = lookup(placement.value, "spread_domain", null) + tenancy = lookup(placement.value, "tenancy", null) + partition_number = lookup(placement.value, "partition_number", null) + } + } + + dynamic "tag_specifications" { + for_each = toset(["instance", "volume", "network-interface"]) + content { + resource_type = tag_specifications.key + tags = merge( + module.labels.tags, + { Name = module.labels.id }) + } + } + + + lifecycle { + create_before_destroy = true + } + + tags = module.labels.tags +} + +################################################################################ +# Node Group +################################################################################ + +resource "aws_eks_node_group" "this" { + count = var.enabled ? 1 : 0 + + # Required + cluster_name = var.cluster_name + node_role_arn = var.iam_role_arn + subnet_ids = var.subnet_ids + + scaling_config { + min_size = var.min_size + max_size = var.max_size + desired_size = var.desired_size + } + + # Optional + node_group_name = module.labels.id + + # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami + ami_type = var.ami_id != "" ? null : var.ami_type + release_version = var.ami_id != "" ? null : var.ami_release_version + version = var.ami_id != "" ? null : var.cluster_version + + capacity_type = var.capacity_type + disk_size = var.disk_size + force_update_version = var.force_update_version + instance_types = var.instance_types + labels = var.labels + + dynamic "launch_template" { + for_each = var.enabled ? 
[1] : [] + content { + name = try(aws_launch_template.this[0].name) + version = try(aws_launch_template.this[0].latest_version) + } + } + + dynamic "remote_access" { + for_each = length(var.remote_access) > 0 ? [var.remote_access] : [] + content { + ec2_ssh_key = try(remote_access.value.ec2_ssh_key, null) + source_security_group_ids = try(remote_access.value.source_security_group_ids, []) + } + } + + dynamic "taint" { + for_each = var.taints + content { + key = taint.value.key + value = lookup(taint.value, "value") + effect = taint.value.effect + } + } + + dynamic "update_config" { + for_each = length(var.update_config) > 0 ? [var.update_config] : [] + content { + max_unavailable_percentage = try(update_config.value.max_unavailable_percentage, null) + max_unavailable = try(update_config.value.max_unavailable, null) + } + } + + timeouts { + create = lookup(var.timeouts, "create", null) + update = lookup(var.timeouts, "update", null) + delete = lookup(var.timeouts, "delete", null) + } + + lifecycle { + create_before_destroy = true + ignore_changes = [ + scaling_config[0].desired_size, + ] + } + + tags = module.labels.tags +} diff --git a/node_group/aws_managed/outputs.tf b/node_group/aws_managed/outputs.tf new file mode 100644 index 0000000..76d0a5e --- /dev/null +++ b/node_group/aws_managed/outputs.tf @@ -0,0 +1,42 @@ +################################################################################ +# Launch template +################################################################################ + +output "launch_template_id" { + description = "The ID of the launch template" + value = try(aws_launch_template.this[0].id, "") +} + +output "launch_template_arn" { + description = "The ARN of the launch template" + value = try(aws_launch_template.this[0].arn, "") +} + +output "launch_template_latest_version" { + description = "The latest version of the launch template" + value = try(aws_launch_template.this[0].latest_version, "") +} + 
+################################################################################ +# Node Group +################################################################################ + +output "node_group_arn" { +  description = "Amazon Resource Name (ARN) of the EKS Node Group" +  value       = try(aws_eks_node_group.this[0].arn, "") +} + +output "node_group_id" { +  description = "EKS Cluster name and EKS Node Group name separated by a colon (`:`)" +  value       = try(aws_eks_node_group.this[0].id, "") +} + +output "node_group_resources" { +  description = "List of objects containing information about underlying resources" +  value       = try(aws_eks_node_group.this[0].resources, "") +} + +output "node_group_status" { +  description = "Status of the EKS Node Group" +  value       = try(aws_eks_node_group.this[0].status, "") +} diff --git a/node_group/aws_managed/variables.tf b/node_group/aws_managed/variables.tf new file mode 100644 index 0000000..68d0f93 --- /dev/null +++ b/node_group/aws_managed/variables.tf @@ -0,0 +1,324 @@ +#Module      : LABEL +#Description : Terraform label module variables. +variable "name" { +  type        = string +  default     = "" +  description = "Name  (e.g. `app` or `cluster`)." +} + +variable "repository" { +  type        = string +  default     = "https://github.com/clouddrove/terraform-aws-eks" +  description = "Terraform current module repo" +} + +variable "environment" { +  type        = string +  default     = "" +  description = "Environment (e.g. `prod`, `dev`, `staging`)." +} + +variable "label_order" { +  type        = list(any) +  default     = [] +  description = "Label order, e.g. `name`,`application`." +} + +variable "managedby" { +  type        = string +  default     = "hello@clouddrove.com" +  description = "ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'." +} + +variable "attributes" { +  type        = list(any) +  default     = [] +  description = "Additional attributes (e.g. `1`)." +} + +variable "tags" { +  type        = map(any) +  default     = {} +  description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 
+} + + +variable "enabled" { + type = bool + default = true + description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." +} + +variable "cluster_name" { + description = "Name of associated EKS cluster" + type = string + default = null +} + +#-----------------------------------------------------------Launch_Template--------------------------------------------------------- + +variable "launch_template_description" { + description = "Description of the launch template" + type = string + default = null +} + +variable "ebs_optimized" { + description = "If true, the launched EC2 instance(s) will be EBS-optimized" + type = bool + default = null +} + +variable "ami_id" { + description = "The AMI from which to launch the instance. If not supplied, EKS will use its own default image" + type = string + default = "" +} + +variable "key_name" { + description = "The key name that should be used for the instance(s)" + type = string + default = null +} + +variable "vpc_security_group_ids" { + description = "A list of security group IDs to associate" + type = list(string) + default = [] +} + +variable "launch_template_default_version" { + description = "Default version of the launch template" + type = string + default = null +} + +variable "update_launch_template_default_version" { + description = "Whether to update the launch templates default version on each update. 
Conflicts with `launch_template_default_version`" + type = bool + default = true +} + +variable "disable_api_termination" { + description = "If true, enables EC2 instance termination protection" + type = bool + default = null +} + +variable "kernel_id" { + description = "The kernel ID" + type = string + default = null +} + +variable "ram_disk_id" { + description = "The ID of the ram disk" + type = string + default = null +} + +variable "block_device_mappings" { + description = "Specify volumes to attach to the instance besides the volumes specified by the AMI" + type = any + default = {} +} + +variable "capacity_reservation_specification" { + description = "Targeting for EC2 capacity reservations" + type = any + default = null +} + +variable "cpu_options" { + description = "The CPU options for the instance" + type = map(string) + default = null +} + +variable "credit_specification" { + description = "Customize the credit specification of the instance" + type = map(string) + default = null +} + +variable "elastic_gpu_specifications" { + description = "The elastic GPU to attach to the instance" + type = map(string) + default = null +} + +variable "elastic_inference_accelerator" { + description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance" + type = map(string) + default = null +} + +variable "enclave_options" { + description = "Enable Nitro Enclaves on launched instances" + type = map(string) + default = null +} + +variable "instance_market_options" { + description = "The market (purchasing) option for the instance" + type = any + default = null +} + +variable "license_specifications" { + description = "A list of license specifications to associate with" + type = map(string) + default = null +} + +variable "metadata_options" { + description = "Customize the metadata options for the instance" + type = map(string) + default = { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 2 + } +} 
+ +variable "kms_key_id" { + type = string + default = null + description = "The KMS ID of EBS volume" +} + + +variable "enable_monitoring" { + description = "Enables/disables detailed monitoring" + type = bool + default = false +} + +variable "network_interfaces" { + description = "Customize network interfaces to be attached at instance boot time" + type = list(any) + default = [] +} + +variable "placement" { + description = "The placement of the instance" + type = map(string) + default = null +} + +variable "launch_template_tags" { + description = "A map of additional tags to add to the tag_specifications of launch template created" + type = map(string) + default = {} +} + +#EKS_Managed_Node_Group + +variable "subnet_ids" { + description = "Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME`" + type = list(string) + default = null +} + +variable "min_size" { + description = "Minimum number of instances/nodes" + type = number + default = 0 +} + +variable "max_size" { + description = "Maximum number of instances/nodes" + type = number + default = 3 +} + +variable "desired_size" { + description = "Desired number of instances/nodes" + type = number + default = 1 +} + +variable "ami_type" { + description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64`" + type = string + default = null +} + +variable "ami_release_version" { + description = "AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version" + type = string + default = null +} + +variable "capacity_type" { + description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`" + type = string + default = "ON_DEMAND" +} + +variable "disk_size" { + description = "Disk size in GiB for nodes. 
Defaults to `20`" + type = number + default = null +} + +variable "force_update_version" { + description = "Force version update if existing pods are unable to be drained due to a pod disruption budget issue" + type = bool + default = null +} + +variable "iam_role_arn" { + type = string + default = "" + description = "" +} + + +variable "instance_types" { + description = "Set of instance types associated with the EKS Node Group. Defaults to `[\"t3.medium\"]`" + type = list(string) + default = null +} + +variable "labels" { + description = "Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed" + type = map(string) + default = null +} + +variable "cluster_version" { + description = "Kubernetes version. Defaults to EKS Cluster Kubernetes version" + type = string + default = null +} + +variable "remote_access" { + description = "Configuration block with remote access settings" + type = any + default = {} +} + +variable "taints" { + description = "The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group" + type = any + default = {} +} + +variable "update_config" { + description = "Configuration block of settings for max unavailable resources during node group updates" + type = map(string) + default = {} +} + +variable "timeouts" { + description = "Create, update, and delete timeout configurations for the node group" + type = map(string) + default = {} +} + +variable "before_cluster_joining_userdata" { + type = string + default = "" + description = "Additional commands to execute on each worker node before joining the EKS cluster (before executing the `bootstrap.sh` script). 
For more info, see https://kubedex.com/90-days-of-aws-eks-in-test" +} diff --git a/node_group/aws_managed/versions.tf b/node_group/aws_managed/versions.tf new file mode 100644 index 0000000..d07da4c --- /dev/null +++ b/node_group/aws_managed/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 0.14" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.72" + } + cloudinit = { + source = "hashicorp/cloudinit" + version = ">= 2.0" + } + } +} diff --git a/node_group/self_managed/_userdata.tpl b/node_group/self_managed/_userdata.tpl new file mode 100644 index 0000000..effb9a8 --- /dev/null +++ b/node_group/self_managed/_userdata.tpl @@ -0,0 +1,2 @@ +#!/bin/bash +/etc/eks/bootstrap.sh --apiserver-endpoint '${cluster_endpoint}' --b64-cluster-ca '${certificate_authority_data}' ${bootstrap_extra_args} '${cluster_name}' diff --git a/node_group/self_managed/main.tf b/node_group/self_managed/main.tf new file mode 100644 index 0000000..0856b9d --- /dev/null +++ b/node_group/self_managed/main.tf @@ -0,0 +1,425 @@ +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + + +#AMI AMAZON LINUX +data "aws_ami" "eks_default" { + count = var.enabled ? 1 : 0 + + filter { + name = "name" + values = ["amazon-eks-node-${var.kubernetes_version}-v*"] + } + + most_recent = true + owners = ["amazon"] +} + +data "template_file" "userdata" { + count = var.enabled ? 1 : 0 + template = file("${path.module}/_userdata.tpl") + + vars = { + cluster_endpoint = var.cluster_endpoint + certificate_authority_data = var.cluster_auth_base64 + cluster_name = var.cluster_name + bootstrap_extra_args = var.bootstrap_extra_args + + } +} +#Module : label +#Description : Terraform module to create consistent naming for multiple names. 
+module "labels" { + source = "clouddrove/labels/aws" + version = "0.15.0" + + name = var.name + repository = var.repository + environment = var.environment + managedby = var.managedby + extra_tags = var.tags + attributes = compact(concat(var.attributes, ["nodes"])) + label_order = var.label_order +} + + +resource "aws_launch_template" "this" { + count = var.enabled ? 1 : 0 + name = module.labels.id + + ebs_optimized = var.ebs_optimized + image_id = join("", data.aws_ami.eks_default.*.image_id) + instance_type = var.instance_type + key_name = var.key_name + user_data = base64encode(join("", data.template_file.userdata.*.rendered)) + disable_api_termination = var.disable_api_termination + instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior + kernel_id = var.kernel_id + ram_disk_id = var.ram_disk_id + + + #volumes + dynamic "block_device_mappings" { + for_each = var.block_device_mappings + content { + device_name = block_device_mappings.value.device_name + no_device = lookup(block_device_mappings.value, "no_device", null) + virtual_name = lookup(block_device_mappings.value, "virtual_name", null) + + + dynamic "ebs" { + for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) + content { + delete_on_termination = true + encrypted = true + kms_key_id = var.kms_key_id + iops = lookup(ebs.value, "iops", null) + throughput = lookup(ebs.value, "throughput", null) + snapshot_id = lookup(ebs.value, "snapshot_id", null) + volume_size = lookup(ebs.value, "volume_size", null) + volume_type = lookup(ebs.value, "volume_type", null) + } + } + } + } + + # capacity_reservation + dynamic "capacity_reservation_specification" { + for_each = var.capacity_reservation_specification != null ? 
[var.capacity_reservation_specification] : [] + content { + capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) + + dynamic "capacity_reservation_target" { + for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", []) + content { + capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) + } + } + } + } + + #CPU option + dynamic "cpu_options" { + for_each = var.cpu_options != null ? [var.cpu_options] : [] + content { + core_count = cpu_options.value.core_count + threads_per_core = cpu_options.value.threads_per_core + } + } + + #credit_specification + dynamic "credit_specification" { + for_each = var.credit_specification != null ? [var.credit_specification] : [] + content { + cpu_credits = credit_specification.value.cpu_credits + } + } + + dynamic "elastic_gpu_specifications" { + for_each = var.elastic_gpu_specifications != null ? [var.elastic_gpu_specifications] : [] + content { + type = elastic_gpu_specifications.value.type + } + } + + dynamic "elastic_inference_accelerator" { + for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : [] + content { + type = elastic_inference_accelerator.value.type + } + } + + dynamic "enclave_options" { + for_each = var.enclave_options != null ? [var.enclave_options] : [] + content { + enabled = enclave_options.value.enabled + } + } + + dynamic "hibernation_options" { + for_each = var.hibernation_options != null ? [var.hibernation_options] : [] + content { + configured = hibernation_options.value.configured + } + } + + iam_instance_profile { + arn = var.iam_instance_profile_arn + } + + + dynamic "instance_market_options" { + for_each = var.instance_market_options != null ? 
[var.instance_market_options] : [] +    content { +      market_type = instance_market_options.value.market_type + +      dynamic "spot_options" { +        for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : [] +        content { +          block_duration_minutes         = lookup(spot_options.value, "block_duration_minutes", null) +          instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null) +          max_price                      = lookup(spot_options.value, "max_price", null) +          spot_instance_type             = lookup(spot_options.value, "spot_instance_type", null) +          valid_until                    = lookup(spot_options.value, "valid_until", null) +        } +      } +    } +  } + +  dynamic "license_specification" { +    for_each = var.license_specifications != null ? [var.license_specifications] : [] +    content { +      license_configuration_arn = license_specification.value.license_configuration_arn +    } +  } + +  dynamic "metadata_options" { +    for_each = var.metadata_options != null ? [var.metadata_options] : [] +    content { +      http_endpoint               = lookup(metadata_options.value, "http_endpoint", null) +      http_tokens                 = lookup(metadata_options.value, "http_tokens", null) +      http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) +      http_protocol_ipv6          = lookup(metadata_options.value, "http_protocol_ipv6", null) +      instance_metadata_tags      = lookup(metadata_options.value, "instance_metadata_tags", null) +    } +  } + +  dynamic "monitoring" { +    for_each = var.enable_monitoring != null ? [1] : [] +    content { +      enabled = var.enable_monitoring +    } +  } + + +  network_interfaces { +    description                 = module.labels.id +    device_index                = 0 +    associate_public_ip_address = var.associate_public_ip_address +    delete_on_termination       = true +    security_groups             = var.security_group_ids +  } + +  dynamic "placement" { +    for_each = var.placement != null ? 
[var.placement] : [] + content { + affinity = lookup(placement.value, "affinity", null) + availability_zone = lookup(placement.value, "availability_zone", null) + group_name = lookup(placement.value, "group_name", null) + host_id = lookup(placement.value, "host_id", null) + spread_domain = lookup(placement.value, "spread_domain", null) + tenancy = lookup(placement.value, "tenancy", null) + partition_number = lookup(placement.value, "partition_number", null) + } + } + + + dynamic "tag_specifications" { + for_each = toset(["instance", "volume", "network-interface"]) + content { + resource_type = tag_specifications.key + tags = merge( + module.labels.tags, + { Name = module.labels.id }) + } + } + + lifecycle { + create_before_destroy = true + } + + + tags = module.labels.tags + +} + + +resource "aws_autoscaling_group" "this" { + count = var.enabled ? 1 : 0 + + name = module.labels.id + + dynamic "launch_template" { + for_each = var.use_mixed_instances_policy ? [] : [1] + + content { + name = join("", aws_launch_template.this.*.name) + version = join("", aws_launch_template.this.*.latest_version) + } + } + + availability_zones = var.availability_zones + vpc_zone_identifier = var.subnet_ids + + min_size = var.min_size + max_size = var.max_size + desired_capacity = var.desired_size + capacity_rebalance = var.capacity_rebalance + min_elb_capacity = var.min_elb_capacity + wait_for_elb_capacity = var.wait_for_elb_capacity + wait_for_capacity_timeout = var.wait_for_capacity_timeout + default_cooldown = var.default_cooldown + protect_from_scale_in = var.protect_from_scale_in + + target_group_arns = var.target_group_arns + placement_group = var.placement_group + health_check_type = var.health_check_type + health_check_grace_period = var.health_check_grace_period + + force_delete = var.force_delete + termination_policies = var.termination_policies + suspended_processes = var.suspended_processes + max_instance_lifetime = var.max_instance_lifetime + + enabled_metrics = 
var.enabled_metrics + metrics_granularity = var.metrics_granularity + service_linked_role_arn = var.service_linked_role_arn + + dynamic "initial_lifecycle_hook" { + for_each = var.initial_lifecycle_hooks + content { + name = initial_lifecycle_hook.value.name + default_result = lookup(initial_lifecycle_hook.value, "default_result", null) + heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null) + lifecycle_transition = initial_lifecycle_hook.value.lifecycle_transition + notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null) + notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null) + role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null) + } + } + + dynamic "instance_refresh" { + for_each = var.instance_refresh != null ? [var.instance_refresh] : [] + content { + strategy = instance_refresh.value.strategy + triggers = lookup(instance_refresh.value, "triggers", null) + + dynamic "preferences" { + for_each = lookup(instance_refresh.value, "preferences", null) != null ? [instance_refresh.value.preferences] : [] + content { + instance_warmup = lookup(preferences.value, "instance_warmup", null) + min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null) + checkpoint_delay = lookup(preferences.value, "checkpoint_delay", null) + checkpoint_percentages = lookup(preferences.value, "checkpoint_percentages", null) + } + } + } + } + + dynamic "mixed_instances_policy" { + for_each = var.use_mixed_instances_policy ? 
[var.mixed_instances_policy] : [] + content { + dynamic "instances_distribution" { + for_each = try([mixed_instances_policy.value.instances_distribution], []) + content { + on_demand_allocation_strategy = lookup(instances_distribution.value, "on_demand_allocation_strategy", null) + on_demand_base_capacity = lookup(instances_distribution.value, "on_demand_base_capacity", null) + on_demand_percentage_above_base_capacity = lookup(instances_distribution.value, "on_demand_percentage_above_base_capacity", null) + spot_allocation_strategy = lookup(instances_distribution.value, "spot_allocation_strategy", null) + spot_instance_pools = lookup(instances_distribution.value, "spot_instance_pools", null) + spot_max_price = lookup(instances_distribution.value, "spot_max_price", null) + } + } + + launch_template { + launch_template_specification { + launch_template_name = join("", aws_launch_template.this.*.name) + version = join("", aws_launch_template.this.*.latest_version) + } + + dynamic "override" { + for_each = try(mixed_instances_policy.value.override, []) + content { + instance_type = lookup(override.value, "instance_type", null) + weighted_capacity = lookup(override.value, "weighted_capacity", null) + + dynamic "launch_template_specification" { + for_each = lookup(override.value, "launch_template_specification", null) != null ? override.value.launch_template_specification : [] + content { + launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null) + } + } + } + } + } + } + } + + dynamic "warm_pool" { + for_each = var.warm_pool != null ? 
[var.warm_pool] : [] + content { + pool_state = lookup(warm_pool.value, "pool_state", null) + min_size = lookup(warm_pool.value, "min_size", null) + max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null) + } + } + + timeouts { + delete = var.cluster_delete_timeout + } + + + lifecycle { + create_before_destroy = true + ignore_changes = [ + desired_capacity + ] + } + + tags = concat( + [ + { + key = "Name" + value = module.labels.id + propagate_at_launch = true + }, + { + key = "Environment" + value = var.environment + propagate_at_launch = true + }, + { + key = "kubernetes.io/cluster/${var.cluster_name}" + value = "owned" + propagate_at_launch = true + }, + { + key = "k8s.io/cluster/${var.cluster_name}" + value = "owned" + propagate_at_launch = true + }, + ], + var.propagate_tags, + [for k, v in var.tags : + { + key = k + value = v + propagate_at_launch = true + } + ] + ) +} + +#---------------------------------------------------ASG-schedule----------------------------------------------------------- + +resource "aws_autoscaling_schedule" "this" { + for_each = var.enabled && var.create_schedule ? 
var.schedules : {} + + scheduled_action_name = each.key + autoscaling_group_name = join("", aws_autoscaling_group.this.*.name) + + min_size = lookup(each.value, "min_size", null) + max_size = lookup(each.value, "max_size", null) + desired_capacity = lookup(each.value, "desired_size", null) + start_time = lookup(each.value, "start_time", null) + end_time = lookup(each.value, "end_time", null) + time_zone = lookup(each.value, "time_zone", null) + + # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] + # Cron examples: https://crontab.guru/examples.html + recurrence = lookup(each.value, "recurrence", null) +} + + + diff --git a/node_group/self_managed/outputs.tf b/node_group/self_managed/outputs.tf new file mode 100644 index 0000000..a801f01 --- /dev/null +++ b/node_group/self_managed/outputs.tf @@ -0,0 +1,86 @@ +################################################################################ +# Launch template +################################################################################ + +output "launch_template_id" { + description = "The ID of the launch template" + value = try(aws_launch_template.this[0].id, "") +} + +output "launch_template_arn" { + description = "The ARN of the launch template" + value = try(aws_launch_template.this[0].arn, "") +} + +output "launch_template_latest_version" { + description = "The latest version of the launch template" + value = try(aws_launch_template.this[0].latest_version, "") +} + +################################################################################ +# autoscaling group +################################################################################ + +output "autoscaling_group_name" { + description = "The autoscaling group name" + value = try(aws_autoscaling_group.this[0].name, "") +} + +output "autoscaling_group_arn" { + description = "The ARN for this autoscaling group" + value = try(aws_autoscaling_group.this[0].arn, "") +} + +output "autoscaling_group_id" { + description = "The autoscaling 
group id" + value = try(aws_autoscaling_group.this[0].id, "") +} + +output "autoscaling_group_min_size" { + description = "The minimum size of the autoscaling group" + value = try(aws_autoscaling_group.this[0].min_size, "") +} + +output "autoscaling_group_max_size" { + description = "The maximum size of the autoscaling group" + value = try(aws_autoscaling_group.this[0].max_size, "") +} + +output "autoscaling_group_desired_capacity" { + description = "The number of Amazon EC2 instances that should be running in the group" + value = try(aws_autoscaling_group.this[0].desired_capacity, "") +} + +output "autoscaling_group_default_cooldown" { + description = "Time between a scaling activity and the succeeding scaling activity" + value = try(aws_autoscaling_group.this[0].default_cooldown, "") +} + +output "autoscaling_group_health_check_grace_period" { + description = "Time after instance comes into service before checking health" + value = try(aws_autoscaling_group.this[0].health_check_grace_period, "") +} + +output "autoscaling_group_health_check_type" { + description = "EC2 or ELB. 
Controls how health checking is done" + value = try(aws_autoscaling_group.this[0].health_check_type, "") +} + +output "autoscaling_group_availability_zones" { + description = "The availability zones of the autoscaling group" + value = try(aws_autoscaling_group.this[0].availability_zones, "") +} + +output "autoscaling_group_vpc_zone_identifier" { + description = "The VPC zone identifier" + value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "") +} + +################################################################################ +# autoscaling group schedule +################################################################################ + +output "autoscaling_group_schedule_arns" { + description = "ARNs of autoscaling group schedules" + value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } +} diff --git a/node_group/self_managed/variables.tf b/node_group/self_managed/variables.tf new file mode 100644 index 0000000..97cf8ff --- /dev/null +++ b/node_group/self_managed/variables.tf @@ -0,0 +1,464 @@ +#Module : LABEL +#Description : Terraform label module variables. +variable "name" { + type = string + default = "" + description = "Name (e.g. `app` or `cluster`)." +} + +variable "repository" { + type = string + default = "https://github.com/clouddrove/terraform-aws-eks" + description = "Terraform current module repo" +} + +variable "environment" { + type = string + default = "" + description = "Environment (e.g. `prod`, `dev`, `staging`)." +} + +variable "label_order" { + type = list(any) + default = [] + description = "Label order, e.g. `name`,`application`." +} + +variable "managedby" { + type = string + default = "hello@clouddrove.com" + description = "ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'." +} + +variable "attributes" { + type = list(any) + default = [] + description = "Additional attributes (e.g. `1`)." +} + +variable "tags" { + type = map(any) + default = {} + description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 
+} + + +variable "enabled" { + type = bool + default = true + description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." +} + + +#-----------------------------------------------------------EKS--------------------------------------------------------- +variable "kubernetes_version" { + type = string + default = "" + description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used." +} + +variable "cluster_endpoint" { + type = string + default = "" + description = "Endpoint of associated EKS cluster" +} + +variable "cluster_name" { + type = string + default = "" + description = "The name of the EKS cluster." +} + +variable "cluster_auth_base64" { + description = "Base64 encoded CA of associated EKS cluster" + type = string + default = "" +} + +variable "cluster_service_ipv4_cidr" { + type = string + default = null + description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks" +} + +variable "pre_bootstrap_user_data" { + type = string + default = "" + description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" +} + +variable "post_bootstrap_user_data" { + type = string + default = "" + description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" +} + +variable "bootstrap_extra_args" { + type = string + default = "" + description = "Additional arguments passed to the bootstrap script. 
When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" +} + +variable "enable_bootstrap_user_data" { + type = bool + default = true + description = "Determines whether the bootstrap configurations are populated within the user data template" +} + +#-----------------------------------------------------------Launch_Template--------------------------------------------------------- + +variable "ebs_optimized" { + type = bool + default = null + description = "If true, the launched EC2 instance will be EBS-optimized" +} + +variable "instance_type" { + type = string + default = "" + description = "The type of the instance to launch" +} + +variable "key_name" { + description = "The key name that should be used for the instance" + type = string + default = null +} + +variable "associate_public_ip_address" { + type = bool + default = false + description = "Associate a public IP address with an instance in a VPC." +} + +variable "security_group_ids" { + type = list(string) + default = [] + description = "A list of associated security group IDs." +} + +variable "disable_api_termination" { + type = bool + default = null + description = "If true, enables EC2 instance termination protection" +} + +variable "instance_initiated_shutdown_behavior" { + type = string + default = null + description = "Shutdown behavior for the instance. Can be `stop` or `terminate`. 
(Default: `stop`)" +} + +variable "kernel_id" { + type = string + default = null + description = "The kernel ID" +} + +variable "ram_disk_id" { + type = string + default = null + description = "The ID of the ram disk" +} + +variable "block_device_mappings" { + type = any + default = {} + description = "Specify volumes to attach to the instance besides the volumes specified by the AMI" +} + +variable "kms_key_id" { + type = string + default = null + description = "The KMS ID of EBS volume" +} + +variable "capacity_reservation_specification" { + type = any + default = null + description = "Targeting for EC2 capacity reservations" +} + +variable "cpu_options" { + type = map(string) + default = null + description = "The CPU options for the instance" +} + +variable "credit_specification" { + type = map(string) + default = null + description = "Customize the credit specification of the instance" +} + +variable "elastic_gpu_specifications" { + type = map(string) + default = null + description = "The elastic GPU to attach to the instance" +} + +variable "elastic_inference_accelerator" { + type = map(string) + default = null + description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance" +} + +variable "enclave_options" { + type = map(string) + default = null + description = "Enable Nitro Enclaves on launched instances" +} + +variable "hibernation_options" { + type = map(string) + default = null + description = "The hibernation options for the instance" +} + +variable "instance_market_options" { + type = any + default = null + description = "The market (purchasing) option for the instance" +} + +variable "license_specifications" { + type = map(string) + default = null + description = "A list of license specifications to associate with" +} + +variable "metadata_options" { + type = map(string) + default = { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 2 + } + description = "Customize the 
metadata options for the instance" + +} + +variable "enable_monitoring" { + type = bool + default = true + description = "Enables/disables detailed monitoring" +} +variable "iam_instance_profile_arn" { + type = string + default = null + description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group" +} + + +variable "placement" { + type = map(string) + default = null + description = "The placement of the instance" +} + +#------------------------------------------------Auto-Scaling----------------------------------------------------------- +variable "use_mixed_instances_policy" { + type = bool + default = false + description = "Determines whether to use a mixed instances policy in the autoscaling group or not" +} + +variable "availability_zones" { + type = list(string) + default = null + description = "A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids`" +} + +variable "subnet_ids" { + type = list(string) + default = null + description = "A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`" +} + +variable "min_size" { + description = "The minimum size of the autoscaling group" + type = number + default = 0 +} + +variable "max_size" { + description = "The maximum size of the autoscaling group" + type = number + default = 3 +} + +variable "desired_size" { + description = "The number of Amazon EC2 instances that should be running in the autoscaling group" + type = number + default = 1 +} + +variable "capacity_rebalance" { + description = "Indicates whether capacity rebalance is enabled" + type = bool + default = null +} + +variable "min_elb_capacity" { + description = "Setting this causes Terraform to wait for this number of instances to show up healthy in the ELB only on creation. 
Updates will not wait on ELB instance number changes" + type = number + default = null +} + +variable "wait_for_elb_capacity" { + description = "Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior." + type = number + default = null +} + +variable "wait_for_capacity_timeout" { + description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior." + type = string + default = null +} + +variable "default_cooldown" { + description = "The amount of time, in seconds, after a scaling activity completes before another scaling activity can start" + type = number + default = null +} + +variable "protect_from_scale_in" { + description = "Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events." + type = bool + default = false +} + +variable "target_group_arns" { + description = "A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing" + type = list(string) + default = [] +} + +variable "placement_group" { + description = "The name of the placement group into which you'll launch your instances, if any" + type = string + default = null +} + +variable "health_check_type" { + description = "`EC2` or `ELB`. Controls how health checking is done" + type = string + default = null +} + +variable "health_check_grace_period" { + description = "Time (in seconds) after instance comes into service before checking health" + type = number + default = null +} + +variable "force_delete" { + description = "Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. 
You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling" + type = bool + default = null +} + +variable "termination_policies" { + description = "A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`" + type = list(string) + default = null +} + +variable "suspended_processes" { + description = "A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly" + type = list(string) + default = null +} + +variable "max_instance_lifetime" { + description = "The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds" + type = number + default = null +} + +variable "enabled_metrics" { + description = "A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`" + type = list(string) + default = null +} + +variable "metrics_granularity" { + description = "The granularity to associate with the metrics to collect. 
The only valid value is `1Minute`" + type = string + default = null +} + +variable "service_linked_role_arn" { + description = "The ARN of the service-linked role that the ASG will use to call other AWS services" + type = string + default = null +} +variable "initial_lifecycle_hooks" { + description = "One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource" + type = list(map(string)) + default = [] +} + +variable "instance_refresh" { + description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated" + type = any + default = null +} + +variable "mixed_instances_policy" { + description = "Configuration block containing settings to define launch targets for Auto Scaling groups" + type = any + default = null +} + +variable "warm_pool" { + description = "If this block is configured, add a Warm Pool to the specified Auto Scaling group" + type = any + default = null +} + +variable "delete_timeout" { + description = "Delete timeout to wait for destroying autoscaling group" + type = string + default = null +} + +variable "propagate_tags" { + description = "A list of tag blocks. Each element should have keys named `key`, `value`, and `propagate_at_launch`" + type = list(map(string)) + default = [] +} + +#-----------------------------------------------TimeOuts---------------------------------------------------------------- + +variable "cluster_create_timeout" { + description = "Timeout value when creating the EKS cluster." + type = string + default = "30m" +} + +variable "cluster_delete_timeout" { + description = "Timeout value when deleting the EKS cluster." 
+ type = string + default = "15m" +} + +variable "cluster_update_timeout" { + description = "Timeout value when updating the EKS cluster." + type = string + default = "60m" +} + +#---------------------------------------------------ASG-schedule----------------------------------------------------------- +variable "create_schedule" { + description = "Determines whether to create autoscaling group schedule or not" + type = bool + default = true +} + +variable "schedules" { + description = "Map of autoscaling group schedule to create" + type = map(any) + default = {} +} diff --git a/outputs.tf b/outputs.tf new file mode 100644 index 0000000..5f9778c --- /dev/null +++ b/outputs.tf @@ -0,0 +1,92 @@ +output "cluster_arn" { + value = try(aws_eks_cluster.default[0].arn, "") + description = "The Amazon Resource Name (ARN) of the cluster" +} + +output "cluster_certificate_authority_data" { + value = try(aws_eks_cluster.default[0].certificate_authority[0].data, "") + description = "Base64 encoded certificate data required to communicate with the cluster" +} + +output "cluster_endpoint" { + value = try(aws_eks_cluster.default[0].endpoint, "") + description = "Endpoint for your Kubernetes API server" +} + +output "cluster_id" { + value = try(aws_eks_cluster.default[0].id, "") + description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" +} + +output "cluster_oidc_issuer_url" { + value = try(aws_eks_cluster.default[0].identity[0].oidc[0].issuer, "") + description = "The URL on the EKS cluster for the OpenID Connect identity provider" +} + +output "cluster_platform_version" { + value = try(aws_eks_cluster.default[0].platform_version, "") + description = "Platform version for the cluster" +} + +output "cluster_status" { + value = try(aws_eks_cluster.default[0].status, "") + description = "Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" +} + +output "cluster_primary_security_group_id" { + value = try(aws_eks_cluster.default[0].vpc_config[0].cluster_security_group_id, "") + description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use default security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console" +} + +output "node_security_group_arn" { + description = "Amazon Resource Name (ARN) of the node shared security group" + value = try(aws_security_group.node_group[0].arn, "") +} + +output "node_security_group_id" { + value = try(aws_security_group.node_group[0].id, "") + description = "ID of the node shared security group" +} + +output "oidc_provider_arn" { + value = try(aws_iam_openid_connect_provider.default[0].arn, "") + description = "The ARN of the OIDC Provider if `enable_irsa = true`" +} + +output "cluster_iam_role_name" { + value = try(aws_iam_role.default[0].name, "") + description = "IAM role name of the EKS cluster" +} + +output "cluster_iam_role_arn" { + value = try(aws_iam_role.default[0].arn, "") + description = "IAM role ARN of the EKS cluster" +} + +output "cluster_iam_role_unique_id" { + value = try(aws_iam_role.default[0].unique_id, "") + description = "Stable and unique string identifying the IAM role" +} + +output "node_group_iam_role_name" { + value = try(aws_iam_role.node_groups[0].name, "") + description = "IAM role name of the EKS cluster" +} + +output "node_group_iam_role_arn" { + value = try(aws_iam_role.node_groups[0].arn, "") + description = "IAM role ARN of the EKS cluster" +} + +output "node_group_iam_role_unique_id" { + value = try(aws_iam_role.node_groups[0].unique_id, "") + description = "Stable and unique string identifying the IAM role" +} + +output "tags" { + value = module.labels.tags +} + +output "cluster_name" { + value = module.labels.id +} \ No newline at end of file diff --git a/variables.tf 
b/variables.tf new file mode 100644 index 0000000..df8f923 --- /dev/null +++ b/variables.tf @@ -0,0 +1,291 @@ +#Module : LABEL +#Description : Terraform label module variables. +variable "name" { + type = string + default = "" + description = "Name (e.g. `app` or `cluster`)." +} + +variable "repository" { + type = string + default = "https://github.com/clouddrove/terraform-aws-eks" + description = "Terraform current module repo" +} + +variable "environment" { + type = string + default = "" + description = "Environment (e.g. `prod`, `dev`, `staging`)." +} + +variable "label_order" { + type = list(any) + default = [] + description = "Label order, e.g. `name`,`application`." +} + +variable "managedby" { + type = string + default = "hello@clouddrove.com" + description = "ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'." +} + +variable "attributes" { + type = list(any) + default = [] + description = "Additional attributes (e.g. `1`)." +} + +variable "tags" { + type = map(any) + default = {} + description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." +} + + +variable "enabled" { + type = bool + default = true + description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." +} + +#---------------------------------------------------------EKS----------------------------------------------------------- +variable "cluster_encryption_config_resources" { + type = list(any) + default = ["secrets"] + description = "Cluster Encryption Config Resources to encrypt, e.g. ['secrets']" +} + +variable "enabled_cluster_log_types" { + type = list(string) + default = [] + description = "A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]." 
+} + +variable "cluster_log_retention_period" { + type = number + default = 30 + description = "Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html." +} + +variable "kubernetes_version" { + type = string + default = "" + description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used." +} + +variable "oidc_provider_enabled" { + type = bool + default = false + description = "Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using kiam or kube2iam. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html" +} +variable "eks_additional_security_group_ids" { + type = list(string) + default = [] + description = "EKS additional security group id" +} +variable "nodes_additional_security_group_ids" { + type = list(string) + default = [] + description = "EKS additional node group ids" +} +variable "addons" { + type = list(object({ + addon_name = string + addon_version = string + resolve_conflicts = string + service_account_role_arn = string + })) + default = [] + description = "Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources." 
+} + +#-----------------------------------------------------------KMS--------------------------------------------------------- +variable "cluster_encryption_config_enabled" { + type = bool + default = true + description = "Set to `true` to enable Cluster Encryption Configuration" +} + +variable "cluster_encryption_config_kms_key_enable_key_rotation" { + type = bool + default = true + description = "Cluster Encryption Config KMS Key Resource argument - enable kms key rotation" +} + +variable "cluster_encryption_config_kms_key_deletion_window_in_days" { + type = number + default = 10 + description = "Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction" +} + +variable "cluster_encryption_config_kms_key_policy" { + type = string + default = null + description = "Cluster Encryption Config KMS Key Resource argument - key policy" +} + +variable "openid_connect_audiences" { + type = list(string) + default = [] + description = "List of OpenID Connect audience client IDs to add to the IRSA provider" +} + + +#---------------------------------------------------------IAM----------------------------------------------------------- +variable "permissions_boundary" { + type = string + default = null + description = "If provided, all IAM roles will be created with this permissions boundary attached." +} + +#---------------------------------------------------------Security_Group------------------------------------------------ +variable "allowed_security_groups" { + type = list(string) + default = [] + description = "List of Security Group IDs to be allowed to connect to the EKS cluster." +} + +variable "allowed_cidr_blocks" { + type = list(string) + default = [] + description = "List of CIDR blocks to be allowed to connect to the EKS cluster." 
+} + +#------------------------------------------------------------Networking------------------------------------------------- +variable "vpc_id" { + type = string + default = "" + description = "VPC ID for the EKS cluster." +} + +variable "subnet_ids" { + type = list(string) + default = [] + description = "A list of subnet IDs to launch the cluster in." +} + +variable "public_access_cidrs" { + type = list(string) + default = ["0.0.0.0/0"] + description = "Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0." +} + +variable "endpoint_private_access" { + type = bool + default = false + description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is false." +} + +variable "endpoint_public_access" { + type = bool + default = true + description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is true." +} + +variable "vpc_security_group_ids" { + type = list(string) + default = [] + description = "A list of security group IDs to associate" +} +#-----------------------------------------------TimeOuts---------------------------------------------------------------- + +variable "cluster_create_timeout" { + type = string + default = "30m" + description = "Timeout value when creating the EKS cluster." +} + +variable "cluster_delete_timeout" { + type = string + default = "15m" + description = "Timeout value when deleting the EKS cluster." +} + +variable "cluster_update_timeout" { + type = string + default = "60m" + description = "Timeout value when updating the EKS cluster." 
+} + +################################################################################ +# Self Managed Node Group +################################################################################ + +variable "self_node_groups" { + type = any + default = {} + description = "Map of self-managed node group definitions to create" +} + +variable "self_node_group_defaults" { + type = any + default = {} + description = "Map of self-managed node group default configurations" +} + +# AWS auth +variable "apply_config_map_aws_auth" { + type = bool + default = true + description = "Whether to generate local files from `kubeconfig` and `config_map_aws_auth` and perform `kubectl apply` to apply the ConfigMap to allow the worker nodes to join the EKS cluster." +} + +variable "wait_for_cluster_command" { + type = string + default = "curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz" + description = "`local-exec` command to execute to determine if the EKS cluster is healthy. 
Cluster endpoint are available as environment variable `ENDPOINT`" +} + +variable "local_exec_interpreter" { + type = list(string) + default = ["/bin/sh", "-c"] + description = "shell to use for local_exec" +} + +variable "map_additional_iam_roles" { + type = list(object({ + rolearn = string + username = string + groups = list(string) + })) + + default = [] + description = "Additional IAM roles to add to `config-map-aws-auth` ConfigMap" +} + +variable "map_additional_iam_users" { + type = list(object({ + userarn = string + username = string + groups = list(string) + })) + + default = [] + description = "Additional IAM users to add to `config-map-aws-auth` ConfigMap" +} + +variable "map_additional_aws_accounts" { + type = list(string) + default = [] + description = "Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap" +} + +variable "aws_iam_role_arn" { + type = string + default = "" + description = "ARN of EKS iam user" +} + +#Managed +variable "managed_node_group_defaults" { + type = any + default = {} + description = "Map of eks-managed node group definitions to create" +} + +variable "managed_node_group" { + type = any + default = {} + description = "Map of eks-managed node group definitions to create" +} diff --git a/versions.tf b/versions.tf new file mode 100644 index 0000000..5b22b09 --- /dev/null +++ b/versions.tf @@ -0,0 +1,13 @@ +terraform { + required_version = ">= 0.14" + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.1.15" + } + local = { + source = "hashicorp/local" + version = "~> 2.1" + } + } +}