Skip to content

Commit

Permalink
Support AWS Provider V5 (#101)
Browse files Browse the repository at this point in the history
  • Loading branch information
max-lobur authored Feb 15, 2024
1 parent 52248ad commit 6c613ca
Show file tree
Hide file tree
Showing 12 changed files with 849 additions and 352 deletions.
1 change: 1 addition & 0 deletions .github/workflows/release-branch.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ on:
- 'docs/**'
- 'examples/**'
- 'test/**'
- 'README.*'

permissions:
contents: write
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release-published.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,4 @@ permissions:

jobs:
terraform-module:
uses: cloudposse/github-actions-workflows-terraform-module/.github/workflows/release.yml@main
uses: cloudposse/github-actions-workflows-terraform-module/.github/workflows/release-published.yml@main
319 changes: 115 additions & 204 deletions README.md

Large diffs are not rendered by default.

32 changes: 19 additions & 13 deletions README.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ related:
    url: "https://github.com/cloudposse/terraform-aws-ec2-instance-group"
# Short description of this project
description: |-
Terraform module to provision AWS resources to run EC2 worker nodes for [Elastic Container Service for Kubernetes](https://aws.amazon.com/eks/).
Terraform module to provision AWS resources to run EC2 worker nodes for [Elastic Kubernetes Service](https://aws.amazon.com/eks/).
Instantiate it multiple times to create many EKS worker node pools with specific settings such as GPUs, EC2 instance types, or autoscale parameters.
introduction: |-
Expand Down Expand Up @@ -79,23 +79,29 @@ usage: |2-
}
module "vpc" {
    source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.8.0"
namespace = var.namespace
stage = var.stage
name = var.name
cidr_block = "172.16.0.0/16"
tags = local.tags
source = "cloudposse/vpc/aws"
version = "2.1.1"
namespace = var.namespace
stage = var.stage
name = var.name
ipv4_primary_cidr_block = "172.16.0.0/16"
tags = local.tags
}
module "subnets" {
    source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.16.0"
source = "cloudposse/dynamic-subnets/aws"
version = "2.4.1"
namespace = var.namespace
stage = var.stage
name = var.name
availability_zones = var.availability_zones
namespace = var.namespace
stage = var.stage
name = var.name
vpc_id = module.vpc.vpc_id
igw_id = module.vpc.igw_id
cidr_block = module.vpc.vpc_cidr_block
igw_id = [module.vpc.igw_id]
ipv4_cidr_block = [module.vpc.vpc_cidr_block]
nat_gateway_enabled = false
nat_instance_enabled = false
tags = local.tags
Expand Down
5 changes: 5 additions & 0 deletions docs/targets.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,10 @@
```text
Available targets:
help Help screen
help/all Display help for all targets
help/short This help short screen
lint Lint terraform code
```
<!-- markdownlint-restore -->
2 changes: 1 addition & 1 deletion docs/terraform.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

| Name | Source | Version |
|------|--------|---------|
| <a name="module_autoscale_group"></a> [autoscale\_group](#module\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.30.1 |
| <a name="module_autoscale_group"></a> [autoscale\_group](#module\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.37.1 |
| <a name="module_label"></a> [label](#module\_label) | cloudposse/label/null | 0.25.0 |
| <a name="module_this"></a> [this](#module\_this) | cloudposse/label/null | 0.25.0 |

Expand Down
9 changes: 4 additions & 5 deletions examples/complete/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -11,25 +11,24 @@ locals {

module "vpc" {
source = "cloudposse/vpc/aws"
version = "1.1.0"
version = "2.1.1"

cidr_block = "172.16.0.0/16"
tags = local.tags
ipv4_primary_cidr_block = "172.16.0.0/16"
tags = local.tags

context = module.this.context
}

module "subnets" {
source = "cloudposse/dynamic-subnets/aws"
version = "2.0.2"
version = "2.4.1"

availability_zones = var.availability_zones
vpc_id = module.vpc.vpc_id
igw_id = [module.vpc.igw_id]
ipv4_cidr_block = [module.vpc.vpc_cidr_block]
nat_gateway_enabled = false
nat_instance_enabled = false
tags = local.tags

context = module.this.context
}
Expand Down
36 changes: 18 additions & 18 deletions main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@ locals {
"kubernetes.io/cluster/${var.cluster_name}" = "owned"
}

workers_role_arn = var.use_existing_aws_iam_instance_profile ? join("", data.aws_iam_instance_profile.default.*.role_arn) : join("", aws_iam_role.default.*.arn)
workers_role_name = var.use_existing_aws_iam_instance_profile ? join("", data.aws_iam_instance_profile.default.*.role_name) : join("", aws_iam_role.default.*.name)
workers_role_arn = var.use_existing_aws_iam_instance_profile ? join("", data.aws_iam_instance_profile.default[*].role_arn) : join("", aws_iam_role.default[*].arn)
workers_role_name = var.use_existing_aws_iam_instance_profile ? join("", data.aws_iam_instance_profile.default[*].role_name) : join("", aws_iam_role.default[*].name)

userdata = templatefile("${path.module}/userdata.tpl", {
cluster_endpoint = var.cluster_endpoint
Expand Down Expand Up @@ -47,38 +47,38 @@ data "aws_iam_policy_document" "assume_role" {
resource "aws_iam_role" "default" {
count = local.enabled && var.use_existing_aws_iam_instance_profile == false ? 1 : 0
name = module.label.id
assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
assume_role_policy = join("", data.aws_iam_policy_document.assume_role[*].json)
tags = module.label.tags
}

resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" {
count = local.enabled && var.use_existing_aws_iam_instance_profile == false ? 1 : 0
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = join("", aws_iam_role.default.*.name)
role = join("", aws_iam_role.default[*].name)
}

resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" {
count = local.enabled && var.use_existing_aws_iam_instance_profile == false ? 1 : 0
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKS_CNI_Policy"
role = join("", aws_iam_role.default.*.name)
role = join("", aws_iam_role.default[*].name)
}

resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" {
count = local.enabled && var.use_existing_aws_iam_instance_profile == false ? 1 : 0
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = join("", aws_iam_role.default.*.name)
role = join("", aws_iam_role.default[*].name)
}

resource "aws_iam_role_policy_attachment" "existing_policies_attach_to_eks_workers_role" {
count = local.enabled && var.use_existing_aws_iam_instance_profile == false ? var.workers_role_policy_arns_count : 0
policy_arn = var.workers_role_policy_arns[count.index]
role = join("", aws_iam_role.default.*.name)
role = join("", aws_iam_role.default[*].name)
}

resource "aws_iam_instance_profile" "default" {
count = local.enabled && var.use_existing_aws_iam_instance_profile == false ? 1 : 0
name = module.label.id
role = join("", aws_iam_role.default.*.name)
role = join("", aws_iam_role.default[*].name)
}

resource "aws_security_group" "default" {
Expand All @@ -96,7 +96,7 @@ resource "aws_security_group_rule" "egress" {
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = join("", aws_security_group.default.*.id)
security_group_id = join("", aws_security_group.default[*].id)
type = "egress"
}

Expand All @@ -106,8 +106,8 @@ resource "aws_security_group_rule" "ingress_self" {
from_port = 0
to_port = 65535
protocol = "-1"
security_group_id = join("", aws_security_group.default.*.id)
source_security_group_id = join("", aws_security_group.default.*.id)
security_group_id = join("", aws_security_group.default[*].id)
source_security_group_id = join("", aws_security_group.default[*].id)
type = "ingress"
}

Expand All @@ -117,7 +117,7 @@ resource "aws_security_group_rule" "ingress_cluster" {
from_port = 0
to_port = 65535
protocol = "-1"
security_group_id = join("", aws_security_group.default.*.id)
security_group_id = join("", aws_security_group.default[*].id)
source_security_group_id = var.cluster_security_group_id
type = "ingress"
}
Expand All @@ -129,7 +129,7 @@ resource "aws_security_group_rule" "ingress_security_groups" {
to_port = 65535
protocol = "-1"
source_security_group_id = var.allowed_security_groups[count.index]
security_group_id = join("", aws_security_group.default.*.id)
security_group_id = join("", aws_security_group.default[*].id)
type = "ingress"
}

Expand All @@ -140,7 +140,7 @@ resource "aws_security_group_rule" "ingress_cidr_blocks" {
to_port = 0
protocol = "-1"
cidr_blocks = var.allowed_cidr_blocks
security_group_id = join("", aws_security_group.default.*.id)
security_group_id = join("", aws_security_group.default[*].id)
type = "ingress"
}

Expand All @@ -165,18 +165,18 @@ data "aws_iam_instance_profile" "default" {

module "autoscale_group" {
source = "cloudposse/ec2-autoscale-group/aws"
version = "0.30.1"
version = "0.37.1"

enabled = local.enabled
tags = merge(local.tags, var.autoscaling_group_tags)

image_id = var.use_custom_image_id ? var.image_id : join("", data.aws_ami.eks_worker.*.id)
iam_instance_profile_name = var.use_existing_aws_iam_instance_profile == false ? join("", aws_iam_instance_profile.default.*.name) : var.aws_iam_instance_profile_name
image_id = var.use_custom_image_id ? var.image_id : join("", data.aws_ami.eks_worker[*].id)
iam_instance_profile_name = var.use_existing_aws_iam_instance_profile == false ? join("", aws_iam_instance_profile.default[*].name) : var.aws_iam_instance_profile_name

security_group_ids = compact(
concat(
[
var.use_existing_security_group == false ? join("", aws_security_group.default.*.id) : var.workers_security_group_id
var.use_existing_security_group == false ? join("", aws_security_group.default[*].id) : var.workers_security_group_id
],
var.additional_security_group_ids
)
Expand Down
6 changes: 3 additions & 3 deletions outputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -60,17 +60,17 @@ output "autoscaling_group_health_check_type" {

output "security_group_id" {
description = "ID of the worker nodes Security Group"
value = join("", aws_security_group.default.*.id)
value = join("", aws_security_group.default[*].id)
}

output "security_group_arn" {
description = "ARN of the worker nodes Security Group"
value = join("", aws_security_group.default.*.arn)
value = join("", aws_security_group.default[*].arn)
}

output "security_group_name" {
description = "Name of the worker nodes Security Group"
value = join("", aws_security_group.default.*.name)
value = join("", aws_security_group.default[*].name)
}

output "workers_role_arn" {
Expand Down
Loading

0 comments on commit 6c613ca

Please sign in to comment.