diff --git a/.github/workflows/terraform.yml b/.github/workflows/terraform.yml
index 24722a0..8199cfb 100644
--- a/.github/workflows/terraform.yml
+++ b/.github/workflows/terraform.yml
@@ -1,4 +1,5 @@
-name: "TF GH Action"
+name: Terraform
+
on:
- pull_request
@@ -11,12 +12,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Terraform
- uses: hashicorp/setup-terraform@v1.3.2
+ uses: hashicorp/setup-terraform@v3
with:
- terraform_version: 1.1.x
+ terraform_version: "1.5.5"
- name: Terraform fmt
run: terraform fmt -recursive -write=false -check -diff .
@@ -27,21 +28,21 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- terraform_version: [1.1.x]
+ terraform_version: ["1.5.5"]
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Terraform ${{ matrix.terraform_version }}
- uses: hashicorp/setup-terraform@v1.3.2
+ uses: hashicorp/setup-terraform@v3
with:
terraform_version: ${{ matrix.terraform_version }}
- name: Terraform Validate Root
- run: cd "${GITHUB_WORKSPACE}" && terraform init -backend=false && AWS_REGION=us-east-1 terraform validate -no-color
+ run: cd "${GITHUB_WORKSPACE}" && terraform init -backend=false && AWS_REGION=us-east-1 terraform validate
- name: Terraform Validate Modules
- run: for module in modules/*/; do cd "${GITHUB_WORKSPACE}/${module}" && terraform init -backend=false && AWS_REGION=us-east-1 terraform validate -no-color ; done
+ run: for module in modules/*/; do cd "${GITHUB_WORKSPACE}/${module}" && terraform init -backend=false && AWS_REGION=us-east-1 terraform validate; done
- name: Terraform Validate Examples
- run: for example in examples/*/; do cd "${GITHUB_WORKSPACE}/${example}" && terraform init -backend=false && AWS_REGION=us-east-1 terraform validate -no-color ; done
+ run: for example in examples/*/; do cd "${GITHUB_WORKSPACE}/${example}" && terraform init -backend=false && AWS_REGION=us-east-1 terraform validate; done
diff --git a/README.md b/README.md
index e88e999..1165ac3 100644
--- a/README.md
+++ b/README.md
@@ -56,11 +56,11 @@ A bare minimum configuration to execute the module:
```hcl
data "aws_eks_cluster" "cluster" {
- name = module.eks_cluster.eks_cluster_id
+ name = module.eks_cluster.eks_cluster_name
}
data "aws_eks_cluster_auth" "cluster" {
- name = module.eks_cluster.eks_cluster_id
+ name = module.eks_cluster.eks_cluster_name
}
provider "aws" {
@@ -168,24 +168,25 @@ _Note: Since this module manages all of the Kubernetes addon dependencies requir
| Name | Version |
|------|---------|
-| [terraform](#requirement\_terraform) | >=1.1.0 |
-| [aws](#requirement\_aws) | >=3.61.0 |
-| [helm](#requirement\_helm) | 2.2.0 |
-| [kubernetes](#requirement\_kubernetes) | >=2.6.1 |
+| [terraform](#requirement\_terraform) | >= 1.3.2 |
+| [aws](#requirement\_aws) | >= 5.75 |
+| [helm](#requirement\_helm) | >= 2.16 |
+| [kubernetes](#requirement\_kubernetes) | >= 2.32 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | 5.71.0 |
-| [helm](#provider\_helm) | 2.16.0 |
+| [aws](#provider\_aws) | 5.75.0 |
+| [helm](#provider\_helm) | 2.16.1 |
| [kubernetes](#provider\_kubernetes) | 2.33.0 |
## Modules
| Name | Source | Version |
|------|--------|---------|
-| [eks](#module\_eks) | terraform-aws-modules/eks/aws | 18.30.2 |
+| [eks](#module\_eks) | terraform-aws-modules/eks/aws | 20.29.0 |
+| [eks\_auth](#module\_eks\_auth) | terraform-aws-modules/eks/aws//modules/aws-auth | 20.29.0 |
| [istio](#module\_istio) | github.com/streamnative/terraform-helm-charts//modules/istio-operator | v0.8.6 |
| [vpc\_tags](#module\_vpc\_tags) | ./modules/eks-vpc-tags | n/a |
@@ -225,16 +226,16 @@ _Note: Since this module manages all of the Kubernetes addon dependencies requir
| [aws_s3_bucket.velero](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource |
| [aws_s3_bucket_server_side_encryption_configuration.tiered_storage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource |
| [aws_s3_bucket_server_side_encryption_configuration.velero](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource |
-| [helm_release.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.cert_issuer](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.cert_manager](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.cilium](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.csi](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.external_dns](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.metrics_server](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
-| [helm_release.velero](https://registry.terraform.io/providers/hashicorp/helm/2.2.0/docs/resources/release) | resource |
+| [helm_release.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cert_issuer](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cert_manager](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cilium](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.csi](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.external_dns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.metrics_server](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.velero](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [kubernetes_namespace.sn_system](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource |
| [kubernetes_namespace.velero](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource |
| [kubernetes_storage_class.sn_default](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class) | resource |
@@ -393,10 +394,10 @@ _Note: Since this module manages all of the Kubernetes addon dependencies requir
| [eks\_cluster\_arn](#output\_eks\_cluster\_arn) | The ARN for the EKS cluster created by this module |
| [eks\_cluster\_certificate\_authority\_data](#output\_eks\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [eks\_cluster\_endpoint](#output\_eks\_cluster\_endpoint) | The endpoint for the EKS cluster created by this module |
-| [eks\_cluster\_id](#output\_eks\_cluster\_id) | The id/name of the EKS cluster created by this module |
| [eks\_cluster\_identity\_oidc\_issuer\_arn](#output\_eks\_cluster\_identity\_oidc\_issuer\_arn) | The ARN for the OIDC issuer created by this module |
| [eks\_cluster\_identity\_oidc\_issuer\_string](#output\_eks\_cluster\_identity\_oidc\_issuer\_string) | A formatted string containing the prefix for the OIDC issuer created by this module. Same as "cluster\_oidc\_issuer\_url", but with "https://" stripped from the name. This output is typically used in other StreamNative modules that request the "oidc\_issuer" input. |
| [eks\_cluster\_identity\_oidc\_issuer\_url](#output\_eks\_cluster\_identity\_oidc\_issuer\_url) | The URL for the OIDC issuer created by this module |
+| [eks\_cluster\_name](#output\_eks\_cluster\_name) | The name of the EKS cluster created by this module |
| [eks\_cluster\_platform\_version](#output\_eks\_cluster\_platform\_version) | The platform version for the EKS cluster created by this module |
| [eks\_cluster\_primary\_security\_group\_id](#output\_eks\_cluster\_primary\_security\_group\_id) | The id of the primary security group created by the EKS service itself, not by this module. This is labeled "Cluster Security Group" in the EKS console. |
| [eks\_cluster\_secondary\_security\_group\_id](#output\_eks\_cluster\_secondary\_security\_group\_id) | The id of the secondary security group created by this module. This is labeled "Additional Security Groups" in the EKS console. |
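Because the `eks_cluster_id` output is removed in favor of `eks_cluster_name`, downstream configurations that still reference the old output will fail at plan time. A minimal sketch of the consumer-side rename, assuming a root module named `sn_cluster` and a `region` variable (both illustrative):

```hcl
# Before: module.sn_cluster.eks_cluster_id  (output no longer exists)
# After:  module.sn_cluster.eks_cluster_name
output "connect_to_cluster" {
  description = "Command for fetching a kubeconfig for the new cluster"
  value       = format("aws eks update-kubeconfig --name %s --region %s", module.sn_cluster.eks_cluster_name, var.region)
}
```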
diff --git a/aws_load_balancer_controller.tf b/aws_load_balancer_controller.tf
index 704314e..5fd7d52 100644
--- a/aws_load_balancer_controller.tf
+++ b/aws_load_balancer_controller.tf
@@ -13,6 +13,8 @@
# limitations under the License.
data "aws_iam_policy_document" "aws_load_balancer_controller" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
actions = [
"iam:CreateServiceLinkedRole",
@@ -237,6 +239,8 @@ data "aws_iam_policy_document" "aws_load_balancer_controller" {
}
data "aws_iam_policy_document" "aws_load_balancer_controller_sts" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
actions = [
"sts:AssumeRoleWithWebIdentity"
@@ -258,7 +262,7 @@ resource "aws_iam_role" "aws_load_balancer_controller" {
count = var.enable_resource_creation ? 1 : 0
name = format("%s-lbc-role", module.eks.cluster_id)
description = format("Role used by IRSA and the KSA aws-load-balancer-controller on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
- assume_role_policy = data.aws_iam_policy_document.aws_load_balancer_controller_sts.json
+ assume_role_policy = data.aws_iam_policy_document.aws_load_balancer_controller_sts[0].json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = local.tags
@@ -275,7 +279,7 @@ resource "aws_iam_policy" "aws_load_balancer_controller" {
name = format("%s-AWSLoadBalancerControllerPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the AWS Load Balancer Controller addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
- policy = data.aws_iam_policy_document.aws_load_balancer_controller.json
+ policy = data.aws_iam_policy_document.aws_load_balancer_controller[0].json
tags = local.tags
}
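The addon IAM policy documents are now created conditionally, so every reference must index the single `count`-gated instance; the same pattern repeats in `cert_manager.tf`, `cluster_autoscaler.tf`, `csi.tf`, and `external_dns.tf` below. A condensed sketch with placeholder names (`example_sts` and `example` are not real resources in this module):

```hcl
data "aws_iam_policy_document" "example_sts" {
  count = var.enable_resource_creation ? 1 : 0

  statement {
    actions = ["sts:AssumeRoleWithWebIdentity"]
  }
}

resource "aws_iam_role" "example" {
  count              = var.enable_resource_creation ? 1 : 0
  name               = format("%s-example-role", var.cluster_name)
  assume_role_policy = data.aws_iam_policy_document.example_sts[0].json
}
```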
diff --git a/cert_manager.tf b/cert_manager.tf
index 5342301..0a9c711 100644
--- a/cert_manager.tf
+++ b/cert_manager.tf
@@ -13,6 +13,8 @@
# limitations under the License.
data "aws_iam_policy_document" "cert_manager" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
sid = "Changes"
actions = [
@@ -49,6 +51,8 @@ data "aws_iam_policy_document" "cert_manager" {
}
data "aws_iam_policy_document" "cert_manager_sts" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
actions = [
"sts:AssumeRoleWithWebIdentity"
@@ -70,7 +74,7 @@ resource "aws_iam_role" "cert_manager" {
count = var.enable_resource_creation ? 1 : 0
name = format("%s-cm-role", module.eks.cluster_id)
description = format("Role assumed by IRSA and the KSA cert-manager on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
- assume_role_policy = data.aws_iam_policy_document.cert_manager_sts.json
+ assume_role_policy = data.aws_iam_policy_document.cert_manager_sts[0].json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = local.tags
@@ -87,7 +91,7 @@ resource "aws_iam_policy" "cert_manager" {
name = format("%s-CertManagerPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the Cert-Manager addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
- policy = data.aws_iam_policy_document.cert_manager.json
+ policy = data.aws_iam_policy_document.cert_manager[0].json
tags = local.tags
}
diff --git a/cluster_autoscaler.tf b/cluster_autoscaler.tf
index 429a0e5..0170883 100644
--- a/cluster_autoscaler.tf
+++ b/cluster_autoscaler.tf
@@ -13,6 +13,8 @@
# limitations under the License.
data "aws_iam_policy_document" "cluster_autoscaler" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
effect = "Allow"
@@ -51,6 +53,8 @@ data "aws_iam_policy_document" "cluster_autoscaler" {
}
data "aws_iam_policy_document" "cluster_autoscaler_sts" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
actions = [
"sts:AssumeRoleWithWebIdentity"
@@ -77,7 +81,7 @@ resource "aws_iam_role" "cluster_autoscaler" {
count = var.enable_resource_creation ? 1 : 0
name = format("%s-ca-role", module.eks.cluster_id)
description = format("Role used by IRSA and the KSA cluster-autoscaler on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
- assume_role_policy = data.aws_iam_policy_document.cluster_autoscaler_sts.json
+ assume_role_policy = data.aws_iam_policy_document.cluster_autoscaler_sts[0].json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = local.tags
@@ -94,7 +98,7 @@ resource "aws_iam_policy" "cluster_autoscaler" {
name = format("%s-ClusterAutoscalerPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the Cluster Autoscaler addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
- policy = data.aws_iam_policy_document.cluster_autoscaler.json
+ policy = data.aws_iam_policy_document.cluster_autoscaler[0].json
tags = local.tags
}
diff --git a/csi.tf b/csi.tf
index d546674..477ebb3 100644
--- a/csi.tf
+++ b/csi.tf
@@ -13,6 +13,8 @@
# limitations under the License.
data "aws_iam_policy_document" "csi" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
actions = [
"ec2:CreateSnapshot",
@@ -142,6 +144,8 @@ data "aws_iam_policy_document" "csi" {
}
data "aws_iam_policy_document" "csi_sts" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
actions = [
"sts:AssumeRoleWithWebIdentity"
@@ -168,7 +172,7 @@ resource "aws_iam_role" "csi" {
count = var.enable_resource_creation ? 1 : 0
name = format("%s-csi-role", module.eks.cluster_id)
description = format("Role used by IRSA and the KSA ebs-csi-controller-sa on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
- assume_role_policy = data.aws_iam_policy_document.csi_sts.json
+ assume_role_policy = data.aws_iam_policy_document.csi_sts[0].json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = local.tags
@@ -185,7 +189,7 @@ resource "aws_iam_policy" "csi" {
name = format("%s-CsiPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the EBS Container Storage Interface CSI addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
- policy = data.aws_iam_policy_document.csi.json
+ policy = data.aws_iam_policy_document.csi[0].json
tags = local.tags
}
diff --git a/examples/example-with-vpc/main.tf b/examples/example-with-vpc/main.tf
deleted file mode 100644
index e6480c7..0000000
--- a/examples/example-with-vpc/main.tf
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2023 StreamNative, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-terraform {
- required_version = ">=1.0.0"
-
- required_providers {
- aws = {
- version = ">= 3.45.0"
- source = "hashicorp/aws"
- }
- helm = {
- source = "hashicorp/helm"
- version = "2.2.0"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = "2.2.0"
- }
- }
-}
-
-#######
-### These data sources are required by the Kubernetes and Helm providers in order to connect to the newly provisioned cluster
-#######
-data "aws_eks_cluster" "cluster" {
- name = module.sn_cluster.eks_cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.sn_cluster.eks_cluster_id
-}
-
-data "aws_caller_identity" "current" {}
-
-#######
-### The "random_pet" resource and locals block assist in building out the Cluster Name, as well the variables defined
-#######
-
-variable "environment" {
- default = "test"
-}
-
-variable "region" {
- default = "us-west-2"
-}
-resource "random_pet" "cluster_name" {
- length = 1
-}
-
-locals {
- account_id = data.aws_caller_identity.current.account_id
- cluster_name = format("sn-%s-%s-%s", random_pet.cluster_name.id, var.environment, var.region)
-}
-
-#######
-### The providers can be configured to dynamically retrieve the cluster connection configuration after it's been created
-#######
-provider "aws" {
- region = var.region
-}
-
-provider "helm" {
- kubernetes {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
- token = data.aws_eks_cluster_auth.cluster.token
- }
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
- token = data.aws_eks_cluster_auth.cluster.token
- insecure = false
- config_path = pathexpand("~/.kube/${local.cluster_name}-config")
-}
-
-module "vpc" {
- source = "streamnative/cloud/aws//modules/vpc"
-
- num_azs = 3 # The number of availabiltiy zones to create.
- vpc_cidr = "10.80.0.0/16" # The module will automatically create subnets based on this cidr and assign them to their respective AZs.
- vpc_name = local.cluster_name
- region = var.region
-}
-
-module "sn_cluster" {
- source = "streamnative/cloud/aws"
-
- add_vpc_tags = true # This will add the necessary tags to the VPC resources for Ingress controller auto-discovery
- cluster_name = local.cluster_name
- cluster_version = "1.20"
- hosted_zone_id = "Z04554535IN8Z31SKDVQ2" # Change this to your hosted zone ID
- kubeconfig_output_path = pathexpand("~/.kube/${local.cluster_name}-config")
- node_pool_instance_types = ["c6i.xlarge"]
- node_pool_desired_size = 3
- node_pool_min_size = 1
- node_pool_max_size = 3
-
- map_additional_iam_roles = [ # Map your IAM admin role for access within the Cluster
- {
- rolearn = "arn:aws:iam::123456789012:role/my-aws-admin-role"
- username = "management-admin"
- groups = ["system:masters"]
- }
- ]
-
- private_subnet_ids = module.vpc.private_subnet_ids # Use the list of private subnets created by the VPC module
- public_subnet_ids = module.vpc.public_subnet_ids # Use the list of public subnets created by the VPC module
- region = var.region
- vpc_id = module.vpc.vpc_id # Use the VPC ID created by the VPC module
-
- depends_on = [
- module.vpc # Adding a dependency on the VPC module allows for a cleaner destroy
- ]
-}
\ No newline at end of file
diff --git a/examples/root-example/main.tf b/examples/root-example/main.tf
index 5cfd1f4..fad3ce8 100644
--- a/examples/root-example/main.tf
+++ b/examples/root-example/main.tf
@@ -25,11 +25,11 @@ variable "region" {
### These data sources are required by the Kubernetes and Helm providers in order to connect to the newly provisioned cluster
#######
data "aws_eks_cluster" "cluster" {
- name = module.sn_cluster.eks_cluster_id
+ name = module.sn_cluster.eks_cluster_name
}
data "aws_eks_cluster_auth" "cluster" {
- name = module.sn_cluster.eks_cluster_id
+ name = module.sn_cluster.eks_cluster_name
}
provider "aws" {
@@ -55,7 +55,7 @@ provider "kubernetes" {
### Create the StreamNative Platform Cluster
#######
module "sn_cluster" {
- source = "streamnative/cloud/aws"
+ source = "../.."
add_vpc_tags = true # This will add the necessary tags to the VPC resources for Ingress controller auto-discovery
cluster_name = local.cluster_name
diff --git a/examples/streamnative-platform/main.tf b/examples/streamnative-platform/main.tf
deleted file mode 100644
index e8614d9..0000000
--- a/examples/streamnative-platform/main.tf
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright 2023 StreamNative, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#######
-### This section contains configurable inputs to satisfy your cluster specifications
-#######
-locals {
- availability_zones = 3 # Number of AZs to use. EKS requires a minimum of 2.
- desired_num_nodes = 3 # The desired node count for the node groups. This module creates a node group for each availability zone.
- environment = "dev" # This is used for naming of resources created by this module.
- hosted_zone_id = "*" # Specify the hosted zone ID where you want DNS records to be created and managed. This scopes access to the External DNS service.
- instance_type = ["c6i.xlarge"]
- max_num_nodes = 12 # The maximum number of nodes to create across all node groups. This module creates a node group for each availability zone.
- pulsar_namespace = "pulsar" # The module doesn't create a namespace for Pulsar, but it uses it for scoping access to the Tiered Storage Bucket
- region = "us-west-2" # Specify the region where the cluster is located
- vpc_cidr = "10.80.0.0/16" # If creating a VPC, specify the CIDR range to use
-}
-
-provider "aws" {
- region = local.region
-}
-
-provider "helm" {
- kubernetes {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
- token = data.aws_eks_cluster_auth.cluster.token
- }
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
- token = data.aws_eks_cluster_auth.cluster.token
- insecure = false
-}
-
-data "aws_eks_cluster" "cluster" {
- name = module.sn_cluster.eks_cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.sn_cluster.eks_cluster_id
-}
-
-data "aws_caller_identity" "current" {}
-
-#######
-### Randomly generate a pet name for the cluster. This is useful for development environments, but is not required. Update local.cluster_name if you want to use a more specific name.
-#######
-resource "random_pet" "cluster_name" {
- length = 1
-}
-
-#######
-### Creates a VPC for the StreamNative Platform EKS cluster
-###
-### NOTE! NOTE! NOTE!
-###
-### If you are applying this for the first time, you will need to target the VPC module PRIOR to applying the entire module.
-### This is because the subnet IDs passed to the `sn_cluster` module are computed, which a downstream module cannot handle.
-###
-### Example:
-###
-### terraform apply -target=module.vpc
-###
-### After you apply the targeted VPC module, you can then proceed with `terraform apply` on the entire module.
-#######
-module "vpc" {
- source = "github.com/streamnative/terraform-aws-cloud//modules/vpc?ref=v2.2.4-alpha"
-
- num_azs = local.availability_zones
- vpc_cidr = local.vpc_cidr
- vpc_name = local.cluster_name
- region = local.region
-}
-
-########
-### Creates an EKS cluster for StreamNative Platform
-########
-module "sn_cluster" {
- source = "github.com/streamnative/terraform-aws-cloud?ref=v2.2.4-alpha"
-
- cluster_name = local.cluster_name
- cluster_version = "1.20"
- hosted_zone_id = local.hosted_zone_id
- map_additional_iam_roles = local.cluster_role_mapping
- node_pool_instance_types = local.instance_type
- node_pool_desired_size = floor(local.desired_num_nodes / length(module.vpc.private_subnet_ids)) # Floor here to keep the desired count lower, autoscaling will take care of the rest
- node_pool_min_size = 1
- node_pool_max_size = ceil(local.max_num_nodes / length(module.vpc.private_subnet_ids)) # Ceiling here to keep the upper limits on the high end
- public_subnet_ids = module.vpc.public_subnet_ids
- private_subnet_ids = module.vpc.private_subnet_ids
- region = local.region
- vpc_id = module.vpc.vpc_id
-
- depends_on = [
- module.vpc,
- ]
-}
-
-########
-### Installs the required operators on the EKS cluster for StreamNative Platform
-########
-module "sn_bootstrap" {
- source = "github.com/streamnative/terraform-helm-charts?ref=v0.8.1"
-
- enable_function_mesh_operator = true
- enable_pulsar_operator = true
- enable_vault_operator = true
-
- depends_on = [
- module.sn_cluster
- ]
-}
-
-#######
-### Creates resources used for tiered storage offloading in Pulsar
-#######
-module "sn_tiered_storage_resources" {
- source = "github.com/streamnative/terraform-aws-cloud//modules/tiered-storage-resources?ref=v2.2.4-alpha"
-
- cluster_name = module.sn_cluster.eks_cluster_id
- oidc_issuer = module.sn_cluster.eks_cluster_identity_oidc_issuer_string
- pulsar_namespace = local.pulsar_namespace
-
- tags = {
- Project = "StreamNative Platform"
- Environment = local.environment
- }
-
- depends_on = [
- module.sn_cluster
- ]
-}
-
-#######
-### Creates resources used by Vault for storing and retrieving secrets related to the Pulsar cluster
-#######
-module "sn_tiered_storage_vault_resources" {
- source = "github.com/streamnative/terraform-aws-cloud//modules/vault-resources?ref=v2.2.4-alpha"
-
- cluster_name = module.sn_cluster.eks_cluster_id
- oidc_issuer = module.sn_cluster.eks_cluster_identity_oidc_issuer_string
- pulsar_namespace = local.pulsar_namespace
-
- tags = {
- Project = "StreamNative Platform"
- Environment = local.environment
- }
-
- depends_on = [
- module.sn_cluster
- ]
-}
-
-### Helpers
-locals {
- cluster_name = format("sn-%s-%s-%s", random_pet.cluster_name.id, local.environment, local.region)
- cluster_role_mapping = [
- {
- rolearn = module.sn_cluster.worker_iam_role_arn # The module creates IAM resources with the path "/StreamNative/". However the parent module is configured to remove the path from the worker nodes in the role mapping, which causes an erroneous node group health error in the EKS console.
- username = "system:node:{{EC2PrivateDNSName}}"
- groups = ["system:bootstrappers", "system:nodes"]
- }
- ]
-}
-
-output "cleanup_for_destroying_cluster" {
- description = "If you need to DESTROY the cluster, this command to clean up k8s resources from the tfstate, allowing you to cleanly proceed with a `terraform destroy`"
- value = "for i in $(tf state list | grep -E 'kubernetes|helm'); do tf state rm $i; done"
-}
-
-output "connect_to_cluster" {
- value = format("aws eks update-kubeconfig --name %s --kubeconfig ~/.kube/config --region %s", module.sn_cluster.eks_cluster_id, local.region)
-}
-
-output "eks_cluster_id" {
- value = module.sn_cluster.eks_cluster_id
-}
-
-output "vpc_id" {
- value = module.vpc.vpc_id
-}
-
-output "public_subnet_ids" {
- value = module.vpc.public_subnet_ids
-}
-
-output "private_subnet_ids" {
- value = module.vpc.public_subnet_ids
-}
-
-output "worker_iam_role_arn" {
- value = module.sn_cluster.worker_iam_role_arn
-}
diff --git a/external_dns.tf b/external_dns.tf
index 507006d..b7bcb8b 100644
--- a/external_dns.tf
+++ b/external_dns.tf
@@ -13,6 +13,8 @@
# limitations under the License.
data "aws_iam_policy_document" "external_dns" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
sid = "ChangeResourceRecordSets"
actions = [
@@ -39,6 +41,8 @@ data "aws_iam_policy_document" "external_dns" {
}
data "aws_iam_policy_document" "external_dns_sts" {
+ count = var.enable_resource_creation ? 1 : 0
+
statement {
actions = [
"sts:AssumeRoleWithWebIdentity"
@@ -60,7 +64,7 @@ resource "aws_iam_role" "external_dns" {
count = var.enable_resource_creation ? 1 : 0
name = format("%s-extdns-role", module.eks.cluster_id)
description = format("Role used by IRSA and the KSA external-dns on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
- assume_role_policy = data.aws_iam_policy_document.external_dns_sts.json
+ assume_role_policy = data.aws_iam_policy_document.external_dns_sts[0].json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = local.tags
@@ -77,7 +81,7 @@ resource "aws_iam_policy" "external_dns" {
name = format("%s-ExternalDnsPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the ExternalDNS addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
- policy = data.aws_iam_policy_document.external_dns.json
+ policy = data.aws_iam_policy_document.external_dns[0].json
tags = local.tags
}
diff --git a/main.tf b/main.tf
index ea64211..6e05730 100644
--- a/main.tf
+++ b/main.tf
@@ -81,7 +81,8 @@ locals {
} : {}
)
- node_pool_taints = merge(var.node_pool_taints, local.computed_node_taints)
+ node_pool_taints = merge(var.node_pool_taints, local.computed_node_taints)
+ node_group_iam_role_arn = replace(aws_iam_role.ng.arn, replace(var.iam_path, "/^//", ""), "") # Workaround for https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/153
node_group_defaults = {
create_security_group = false
@@ -99,12 +100,15 @@ locals {
}
}
}
+ update_config = {
+ max_unavailable = 1
+ }
create_iam_role = false # We create the IAM role ourselves to reduce complexity in managing the aws-auth configmap
+ iam_role_arn = local.node_group_iam_role_arn
create_launch_template = true
desired_size = var.node_pool_desired_size
ebs_optimized = var.node_pool_ebs_optimized
enable_monitoring = var.enable_node_pool_monitoring
- iam_role_arn = replace(aws_iam_role.ng.arn, replace(var.iam_path, "/^//", ""), "") # Work around for https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/153
min_size = var.node_pool_min_size
max_size = var.node_pool_max_size
pre_bootstrap_user_data = var.node_pool_pre_userdata
@@ -117,22 +121,23 @@ locals {
}
## Create the node groups, one for each instance type AND each availability zone/subnet
- v2_node_groups = tomap({
+ v2_node_groups = {
for node_group in flatten([
for instance_type in var.node_pool_instance_types : [
for i, j in data.aws_subnet.private_subnets : {
- subnet_ids = [data.aws_subnet.private_subnets[i].id]
- instance_types = [instance_type]
- name = "snc-${split(".", instance_type)[1]}-${data.aws_subnet.private_subnets[i].availability_zone}"
- taints = {}
- desired_size = var.node_pool_desired_size
- min_size = var.node_pool_min_size
- max_size = var.node_pool_max_size
- labels = tomap(merge(var.node_pool_labels, { "cloud.streamnative.io/instance-type" = lookup(local.compute_units, split(".", instance_type)[1], "null") }))
+ subnet_ids = [data.aws_subnet.private_subnets[i].id]
+ instance_types = [instance_type]
+ name = "snc-${split(".", instance_type)[1]}-${data.aws_subnet.private_subnets[i].availability_zone}"
+ use_name_prefix = true
+ taints = {}
+ desired_size = var.node_pool_desired_size
+ min_size = var.node_pool_min_size
+ max_size = var.node_pool_max_size
+ labels = tomap(merge(var.node_pool_labels, { "cloud.streamnative.io/instance-type" = lookup(local.compute_units, split(".", instance_type)[1], "null") }))
}
]
]) : "${node_group.name}" => node_group
- })
+ }
v3_node_taints = var.enable_v3_node_taints ? {
"core" = {
@@ -142,23 +147,31 @@ locals {
}
} : {}
- v3_node_groups = tomap({
+ v3_node_groups = {
"snc-core" = {
- subnet_ids = local.node_group_subnet_ids
- instance_types = [var.v3_node_group_core_instance_type]
- name = "snc-core"
- taints = local.v3_node_taints
- desired_size = var.node_pool_desired_size
- min_size = var.node_pool_min_size
- max_size = var.node_pool_max_size
+ subnet_ids = local.node_group_subnet_ids
+ instance_types = [var.v3_node_group_core_instance_type]
+ name = "snc-core"
+ use_name_prefix = true
+ taints = local.v3_node_taints
+ desired_size = var.node_pool_desired_size
+ min_size = var.node_pool_min_size
+ max_size = var.node_pool_max_size
labels = tomap(merge(var.node_pool_labels, {
"cloud.streamnative.io/instance-type" = "Small"
"cloud.streamnative.io/instance-group" = "Core"
}))
}
- })
+ }
node_groups = var.enable_v3_node_migration ? merge(local.v3_node_groups, local.v2_node_groups) : var.enable_v3_node_groups ? local.v3_node_groups : local.v2_node_groups
+ defaulted_node_groups = var.node_groups != null ? {
+ for k, v in var.node_groups : k => merge(
+ v,
+ contains(keys(v), "subnet_ids") ? {} : { "subnet_ids" = local.node_group_subnet_ids },
+ )
+ } : {}
+ eks_managed_node_groups = [local.defaulted_node_groups, local.node_groups][var.node_groups != null ? 0 : 1]
## Node Security Group Configuration
default_sg_rules = {
@@ -191,6 +204,11 @@ locals {
### IAM role bindings
sncloud_control_plane_access = [
+ {
+ rolearn = format("arn:${local.aws_partition}:iam::%s:role/StreamNativeCloudBootstrapRole", local.account_id)
+ username = "sn-manager:{{AccountID}}:{{SessionName}}"
+ groups = ["system:masters"]
+ },
{
rolearn = format("arn:${local.aws_partition}:iam::%s:role/StreamNativeCloudManagementRole", local.account_id)
username = "sn-manager:{{AccountID}}:{{SessionName}}"
@@ -200,6 +218,11 @@ locals {
# Add the worker node role back in with the path so the EKS console reports healthy node status
worker_node_role = [
+ {
+ rolearn = local.node_group_iam_role_arn
+ username = "system:node:{{EC2PrivateDNSName}}"
+ groups = ["system:bootstrappers", "system:nodes"]
+ },
{
rolearn = aws_iam_role.ng.arn
username = "system:node:{{EC2PrivateDNSName}}"
@@ -209,54 +232,66 @@ locals {
# Switches for different role binding scenarios
role_bindings = var.enable_sncloud_control_plane_access && var.iam_path != "" ? concat(local.sncloud_control_plane_access, local.worker_node_role, var.map_additional_iam_roles) : var.enable_sncloud_control_plane_access && var.iam_path == "" ? concat(local.sncloud_control_plane_access, var.map_additional_iam_roles) : var.enable_sncloud_control_plane_access == false && var.iam_path != "" ? concat(var.map_additional_iam_roles, local.worker_node_role) : var.map_additional_iam_roles
-
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
- version = "18.30.2" #"19.6.0"
-
- ######################################################################################################
- ### This section takes into account the breaking changes made in v18.X of the community EKS module ###
- ### They are only applicable if migration_mode is set to true, for upgrading existing clusters ###
- ######################################################################################################
- prefix_separator = var.migration_mode ? "" : "-"
- iam_role_name = var.migration_mode ? var.cluster_name : null
- cluster_security_group_name = var.migration_mode ? var.cluster_name : null
- cluster_security_group_description = var.migration_mode ? "EKS cluster security group." : "EKS cluster security group"
- node_security_group_description = var.migration_mode ? "Security group for all nodes in the cluster." : "EKS node shared security group"
- node_security_group_use_name_prefix = var.migration_mode ? false : true
- node_security_group_name = var.migration_mode ? var.migration_mode_node_sg_name : null
- ######################################################################################################
-
- aws_auth_roles = local.role_bindings
- cluster_name = var.cluster_name
- cluster_version = var.cluster_version
- cluster_endpoint_private_access = true # Always set to true here, which enables private networking for the node groups
- cluster_endpoint_public_access = var.disable_public_eks_endpoint ? false : true
- cluster_endpoint_public_access_cidrs = var.allowed_public_cidrs
- cluster_enabled_log_types = var.cluster_enabled_log_types
- cluster_security_group_additional_rules = var.cluster_security_group_additional_rules
- cluster_security_group_id = var.cluster_security_group_id
+ version = "20.29.0"
+
+ cluster_name = var.cluster_name
+ cluster_version = var.cluster_version
+ cluster_endpoint_private_access = true # Always set to true here, which enables private networking for the node groups
+ cluster_endpoint_public_access = var.disable_public_eks_endpoint ? false : true
+ cluster_endpoint_public_access_cidrs = var.allowed_public_cidrs
+ enable_irsa = true
+ openid_connect_audiences = ["sts.amazonaws.com"]
+ bootstrap_self_managed_addons = var.bootstrap_self_managed_addons
+ enable_cluster_creator_admin_permissions = true
+ cluster_encryption_config = var.cluster_encryption_config
+ cluster_encryption_policy_path = var.iam_path
+
+ iam_role_arn = try(var.cluster_iam.iam_role_arn, aws_iam_role.cluster[0].arn, null)
+ create_iam_role = try(var.cluster_iam.create_iam_role, true)
+ iam_role_use_name_prefix = try(var.cluster_iam.iam_role_use_name_prefix, true)
+ iam_role_name = try(var.cluster_iam.iam_role_name, null)
+ iam_role_path = try(var.cluster_iam.iam_role_path, var.iam_path, "/StreamNative/")
+ iam_role_permissions_boundary = try(var.cluster_iam.iam_role_permissions_boundary, var.permissions_boundary_arn, null)
+
+ vpc_id = var.vpc_id
control_plane_subnet_ids = local.cluster_subnet_ids
- create_cloudwatch_log_group = false
+ cluster_service_ipv4_cidr = try(var.cluster_networking.cluster_service_ipv4_cidr, var.cluster_service_ipv4_cidr, null)
+ cluster_security_group_id = try(var.cluster_networking.cluster_security_group_id, var.cluster_security_group_id, "")
+ cluster_additional_security_group_ids = try(var.cluster_networking.cluster_additional_security_group_ids, [])
+ create_cluster_security_group = try(var.cluster_networking.create_cluster_security_group, var.create_cluster_security_group, true)
+ cluster_security_group_name = try(var.cluster_networking.cluster_security_group_name, null)
+ cluster_security_group_additional_rules = try(var.cluster_networking.cluster_security_group_additional_rules, var.cluster_security_group_additional_rules, {})
create_cluster_primary_security_group_tags = false # Cleaner if we handle the tag in aws_ec2_tag.cluster_security_group
- create_cluster_security_group = var.create_cluster_security_group
- create_node_security_group = var.create_node_security_group
- create_iam_role = var.use_runtime_policy ? false : true
- eks_managed_node_groups = local.node_groups
- eks_managed_node_group_defaults = local.node_group_defaults
- enable_irsa = true
- iam_role_arn = var.use_runtime_policy ? aws_iam_role.cluster[0].arn : null
- iam_role_path = var.iam_path
- iam_role_permissions_boundary = var.permissions_boundary_arn
- manage_aws_auth_configmap = var.manage_aws_auth_configmap
- node_security_group_id = var.node_security_group_id
- node_security_group_additional_rules = merge(var.node_security_group_additional_rules, local.default_sg_rules)
- openid_connect_audiences = ["sts.amazonaws.com"]
- tags = local.tags
- vpc_id = var.vpc_id
- cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr
+
+ eks_managed_node_groups = local.eks_managed_node_groups
+ eks_managed_node_group_defaults = local.node_group_defaults
+
+ node_security_group_id = var.node_security_group_id
+ create_node_security_group = var.create_node_security_group
+ node_security_group_additional_rules = merge(var.node_security_group_additional_rules, local.default_sg_rules)
+
+ cluster_enabled_log_types = var.cluster_enabled_log_types
+ create_cloudwatch_log_group = false
+ tags = local.tags
+}
+
+module "eks_auth" {
+ source = "terraform-aws-modules/eks/aws//modules/aws-auth"
+ version = "20.29.0"
+
+ manage_aws_auth_configmap = var.manage_aws_auth_configmap
+ aws_auth_roles = local.role_bindings
+
+ depends_on = [ module.eks ]
+}
+
+moved {
+ from = module.eks.kubernetes_config_map_v1_data.aws_auth[0]
+ to = module.eks_auth.kubernetes_config_map_v1_data.aws_auth[0]
}
### Additional Tags
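With terraform-aws-modules/eks v20 the `aws-auth` ConfigMap is no longer handled by the main module, so its management moves into the dedicated `aws-auth` sub-module and the `moved` block keeps the existing ConfigMap resource attached in state. Role bindings supplied through `map_additional_iam_roles` continue to flow into `aws_auth_roles` unchanged; for example (values illustrative, matching the former example configuration):

```hcl
map_additional_iam_roles = [
  {
    rolearn  = "arn:aws:iam::123456789012:role/my-aws-admin-role"
    username = "management-admin"
    groups   = ["system:masters"]
  }
]
```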
@@ -340,6 +375,7 @@ moved {
### Cluster IAM Role
data "aws_iam_policy_document" "cluster_assume_role_policy" {
+ count = var.use_runtime_policy ? 1 : 0
statement {
actions = [
"sts:AssumeRole"
@@ -356,9 +392,9 @@ resource "aws_iam_role" "cluster" {
count = var.use_runtime_policy ? 1 : 0
name = format("%s-cluster-role", var.cluster_name)
description = format("The IAM Role used by the %s EKS cluster", var.cluster_name)
- assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json
+ assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy[0].json
tags = local.tags
- path = "/StreamNative/"
+ path = var.iam_path
permissions_boundary = var.permissions_boundary_arn
}
@@ -400,7 +436,7 @@ resource "aws_iam_role" "ng" {
description = format("The IAM Role used by the %s EKS cluster's worker nodes", var.cluster_name)
assume_role_policy = data.aws_iam_policy_document.ng_assume_role_policy.json
tags = local.tags
- path = "/StreamNative/"
+ path = var.iam_path
permissions_boundary = var.permissions_boundary_arn
}
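The `node_group_iam_role_arn` local strips the IAM path out of the node-group role ARN before it is passed to the EKS module (the linked aws-iam-authenticator issue is the reason), and both the stripped and the path-qualified ARNs are then mapped in the `worker_node_role` bindings so the EKS console keeps reporting healthy node groups. A self-contained sketch of what the nested `replace()` evaluates to, using made-up values:

```hcl
locals {
  iam_path    = "/StreamNative/"
  ng_role_arn = "arn:aws:iam::123456789012:role/StreamNative/example-ng-role"

  # replace(local.iam_path, "/^//", "") drops the leading slash -> "StreamNative/"
  # which is then removed from the role ARN:
  node_group_iam_role_arn = replace(local.ng_role_arn, replace(local.iam_path, "/^//", ""), "")
  # -> "arn:aws:iam::123456789012:role/example-ng-role"
}
```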
diff --git a/outputs.tf b/outputs.tf
index 1dbb45a..164ee16 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -22,9 +22,9 @@ output "eks_cluster_endpoint" {
description = "The endpoint for the EKS cluster created by this module"
}
-output "eks_cluster_id" {
- value = module.eks.cluster_id
- description = "The id/name of the EKS cluster created by this module"
+output "eks_cluster_name" {
+ value = module.eks.cluster_name
+ description = "The name of the EKS cluster created by this module"
}
output "eks_cluster_identity_oidc_issuer_url" {
diff --git a/variables.tf b/variables.tf
index d78bc2f..937dbe9 100644
--- a/variables.tf
+++ b/variables.tf
@@ -126,6 +126,12 @@ variable "cluster_enabled_log_types" {
type = list(string)
}
+variable "cluster_encryption_config" {
+ description = "Configuration block with encryption configuration for the cluster. To disable secret encryption, set this value to `{}`"
+ type = any
+ default = {}
+}
+
variable "cluster_name" {
default = ""
description = "The name of your EKS cluster and associated resources. Must be 16 characters or less."
@@ -660,32 +666,84 @@ variable "cluster_service_ipv4_cidr" {
default = null
}
-variable "cluster_encryption_config" {
- description = "Configuration block with encryption configuration for the cluster. To disable secret encryption, set this value to `{}`"
- type = any
- default = {}
-}
-
variable "bootstrap_self_managed_addons" {
description = "Indicates whether or not to bootstrap self-managed addons after the cluster has been created"
type = bool
default = null
}
+/** Example
+cluster_iam = {
+ create_iam_role = true
+ iam_role_use_name_prefix = false
+ iam_role_name = ""
+ iam_role_arn = ""
+}
+**/
variable "cluster_iam" {
description = "Cluster IAM settings"
type = any
default = null
}
+/** Example
+cluster_networking = {
+ cluster_service_ipv4_cidr = null
+
+ cluster_security_group_id = ""
+ cluster_additional_security_group_ids = []
+ create_cluster_security_group = true
+ cluster_security_group_name = null
+ cluster_security_group_additional_rules = {}
+ cluster_security_group_description = ""
+ create_cluster_primary_security_group_tags = false
+}
+**/
variable "cluster_networking" {
description = "Cluster Networking settings"
type = any
default = null
}
+/** Example
+node_groups = {
+ snc_core = {
+ name = "snc-core"
+ use_name_prefix = true
+
+ create_iam_role = false
+ iam_role_arn = null
+ iam_role_name = null
+ iam_role_use_name_prefix = true
+ iam_role_path = null
+ iam_role_description = ""
+ iam_role_permissions_boundary = null
+ iam_role_tags = {}
+ iam_role_attach_cni_policy = true
+ iam_role_additional_policies = {}
+ create_iam_role_policy = true
+ iam_role_policy_statements = []
+
+ create_launch_template = true
+ use_custom_launch_template = true
+ launch_template_id = ""
+ launch_template_name = "snc-core"
+ launch_template_use_name_prefix = true
+ launch_template_version = null
+ launch_template_default_version = null
+ update_launch_template_default_version = true
+ launch_template_description = ""
+ vpc_security_group_ids = []
+
+ instance_types = ["m6i.large"]
+ min_size = 2
+ max_size = 5
+ desired_size = 2
+ }
+}
+**/
variable "node_groups" {
description = "Map of EKS managed node group definitions to create"
type = any
default = null
-}
\ No newline at end of file
+}
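Taken together, the new object-style inputs (`cluster_iam`, `cluster_networking`, `node_groups`) let a caller shape IAM, networking, and node groups without the older fine-grained variables. A minimal sketch of a module call using two of them; the source and every value shown are illustrative, not prescribed by this change:

```hcl
module "sn_cluster" {
  source = "streamnative/cloud/aws"

  cluster_name       = "example-cluster"
  cluster_version    = "1.30"
  region             = "us-west-2"
  vpc_id             = "vpc-0123456789abcdef0"
  private_subnet_ids = ["subnet-aaaa1111", "subnet-bbbb2222"]
  public_subnet_ids  = ["subnet-cccc3333", "subnet-dddd4444"]
  hosted_zone_id     = "Z04554535IN8Z31SKDVQ2" # replace with your hosted zone ID

  cluster_iam = {
    create_iam_role          = true
    iam_role_use_name_prefix = false
  }

  node_groups = {
    snc_core = {
      instance_types = ["m6i.large"]
      desired_size   = 2
      min_size       = 2
      max_size       = 5
      # subnet_ids may be omitted; the module defaults it to its private subnets
    }
  }
}
```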
diff --git a/versions.tf b/versions.tf
index 75013ed..6871401 100644
--- a/versions.tf
+++ b/versions.tf
@@ -13,20 +13,20 @@
# limitations under the License.
terraform {
- required_version = ">=1.1.0"
+ required_version = ">= 1.3.2"
required_providers {
aws = {
- version = ">=3.61.0"
source = "hashicorp/aws"
- }
- helm = {
- source = "hashicorp/helm"
- version = "2.2.0"
+ version = ">= 5.75"
}
kubernetes = {
source = "hashicorp/kubernetes"
- version = ">=2.6.1"
+ version = ">= 2.32"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 2.16"
}
}
}