diff --git a/docs/node-groups.md b/docs/node-groups.md
index 181dc7df21..d19f307f95 100644
--- a/docs/node-groups.md
+++ b/docs/node-groups.md
@@ -87,7 +87,7 @@ The below example demonstrates advanced configuration options for a self-managed
node_group_name = "self-managed-ondemand"
instance_type = "m5.large"
custom_ami_id = "ami-0dfaa019a300f219c" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc.
- capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot"
+ capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot"
launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows
pre_userdata = <<-EOT
yum install -y amazon-ssm-agent \
@@ -96,14 +96,33 @@ The below example demonstrates advanced configuration options for a self-managed
post_userdata = ""
kubelet_extra_args = ""
bootstrap_extra_args = ""
- disk_size = 20
- disk_type = "gp2"
+ block_device_mappings = [
+ {
+ device_name = "/dev/xvda" # mount point to /
+ volume_type = "gp2"
+ volume_size = 20
+ },
+ {
+ device_name = "/dev/xvdf" # mount point to /local1 (could be /local2, depending on the order in which the disks are attached during boot)
+ volume_type = "gp3"
+ volume_size = 50
+ iops = 3000
+ throughput = 125
+ },
+ {
+ device_name = "/dev/xvdg" # mount point to /local2 (could be /local1, depending on the order in which the disks are attached during boot)
+ volume_type = "gp3"
+ volume_size = 100
+ iops = 3000
+ throughput = 125
+ }
+ ]
enable_monitoring = false
public_ip = false # Enable only for public subnets
# AUTOSCALING
max_size = "3"
min_size = "1"
- subnet_ids = [] # Mandatory Public or Private Subnet IDs
+ subnet_ids = [] # Mandatory Public or Private Subnet IDs
additional_tags = {
ExtraTag = "m5x-on-demand"
Name = "m5x-on-demand"
@@ -115,6 +134,13 @@ The below example demonstrates advanced configuration options for a self-managed
}
```
+With the previously described `block_device_mappings` example, if you choose an instance type with local NVMe storage, you will get the three specified EBS volumes plus all the local NVMe disks that the instance provides. For example, with an `m5d.large` you will end up with the following mount points: `/` for device `/dev/xvda`, `/local1` for device `/dev/xvdf`, `/local2` for device `/dev/xvdg`, and `/local3` for the instance store (in this case a disk of roughly 70 GB).
+
+For more details, see the following references:
+* [Amazon EBS and NVMe on Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html).
+* [AWS NVMe drivers for Windows instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/aws-nvme-drivers.html)
+* [EC2 Instance Update – M5 Instances with Local NVMe Storage (M5d)](https://aws.amazon.com/blogs/aws/ec2-instance-update-m5-instances-with-local-nvme-storage-m5d/)
+
### Fargate Profile
The example below demonstrates how you can customize a Fargate profile for your cluster.
diff --git a/examples/advanced/live/preprod/eu-west-1/application_acct/dev/main.tf b/examples/advanced/live/preprod/eu-west-1/application_acct/dev/main.tf
index 374e7403b7..f3af91f32c 100644
--- a/examples/advanced/live/preprod/eu-west-1/application_acct/dev/main.tf
+++ b/examples/advanced/live/preprod/eu-west-1/application_acct/dev/main.tf
@@ -273,7 +273,13 @@ module "aws-eks-accelerator-for-terraform" {
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
- disk_size = 20
+ block_device_mapping = [
+ {
+ device_name = "/dev/xvda"
+ volume_type = "gp2"
+ volume_size = 20
+ }
+ ]
instance_type = "m5.large"
desired_size = 2
@@ -310,7 +316,13 @@ module "aws-eks-accelerator-for-terraform" {
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
- disk_size = 20
+ block_device_mapping = [
+ {
+ device_name = "/dev/xvda"
+ volume_type = "gp2"
+ volume_size = 20
+ }
+ ]
instance_type = "m5.large"
desired_size = 2
@@ -351,7 +363,13 @@ module "aws-eks-accelerator-for-terraform" {
max_unavailable = 1
instance_types = "m5.large"
- disk_size = 50
+ block_device_mapping = [
+ {
+ device_name = "/dev/xvda"
+ volume_type = "gp2"
+ volume_size = 50
+ }
+ ]
subnet_ids = [] # Define your private/public subnets list with comma seprated subnet_ids = ['subnet1','subnet2','subnet3']
diff --git a/examples/eks-cluster-with-self-managed-node-groups/main.tf b/examples/eks-cluster-with-self-managed-node-groups/main.tf
index b3914dfc09..a7d108339f 100644
--- a/examples/eks-cluster-with-self-managed-node-groups/main.tf
+++ b/examples/eks-cluster-with-self-managed-node-groups/main.tf
@@ -113,15 +113,36 @@ module "aws-eks-accelerator-for-terraform" {
node_group_name = "self-managed-ondemand" # Name is used to create a dedicated IAM role for each node group and adds to AWS-AUTH config map
subnet_ids = module.aws_vpc.private_subnets
create_launch_template = true
- launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows
- custom_ami_id = "ami-0dfaa019a300f219c" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc.
- public_ip = false # Enable only for public subnets
+ launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows
+ custom_ami_id = "" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc.
+ public_ip = false # Enable only for public subnets
pre_userdata = <<-EOT
yum install -y amazon-ssm-agent \
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
- disk_size = 20
+ block_device_mappings = [
+ {
+ device_name = "/dev/xvda" # mount point to /
+ volume_type = "gp2"
+ volume_size = 50
+ },
+ {
+ device_name = "/dev/xvdf" # mount point to /local1 (could be /local2, depending on the order in which the disks are attached during boot)
+ volume_type = "gp3"
+ volume_size = 80
+ iops = 3000
+ throughput = 125
+ },
+ {
+ device_name = "/dev/xvdg" # mount point to /local2 (could be /local1, depending on the order in which the disks are attached during boot)
+ volume_type = "gp3"
+ volume_size = 100
+ iops = 3000
+ throughput = 125
+ }
+ ]
+
instance_type = "m5.large"
desired_size = 2
max_size = 10
diff --git a/modules/aws-eks-self-managed-node-groups/README.md b/modules/aws-eks-self-managed-node-groups/README.md
index c9e69d3133..3652d4a0b0 100644
--- a/modules/aws-eks-self-managed-node-groups/README.md
+++ b/modules/aws-eks-self-managed-node-groups/README.md
@@ -37,7 +37,14 @@ This module allows you to create on-demand or spot self managed Linux or Windows
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
- disk_size = 20
+ block_device_mappings = [
+ {
+ device_name = "/dev/xvda"
+ volume_type = "gp2"
+ volume_size = 20
+ }
+ ]
+
instance_type = "m5.large"
max_size = 10
min_size = 2
@@ -63,7 +70,14 @@ This module allows you to create on-demand or spot self managed Linux or Windows
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
- disk_size = 20
+ block_device_mappings = [
+ {
+ device_name = "/dev/xvda"
+ volume_type = "gp2"
+ volume_size = 20
+ }
+ ]
+
instance_type = "m5.large"
max_size = 10
@@ -90,7 +104,17 @@ This module allows you to create on-demand or spot self managed Linux or Windows
max_size = 3
min_size = 3
instance_types = "m5.large"
- disk_size = 50
+
+ block_device_mappings = [
+ {
+ device_name = "/dev/xvda"
+ volume_type = "gp3"
+ volume_size = 50
+ iops = 3000
+ throughput = 125
+ }
+ ]
+
subnet_ids = [] # Define your private/public subnets list with comma seprated subnet_ids = ['subnet1','subnet2','subnet3']
additional_tags = {
ExtraTag = "bottlerocket"
@@ -108,7 +132,16 @@ This module allows you to create on-demand or spot self managed Linux or Windows
# custom_ami_id = "ami-xxxxxxxxxxxxxxxx" # Bring your own custom AMI. Default Windows AMI is the latest EKS Optimized Windows Server 2019 English Core AMI.
public_ip = false # Enable only for public subnets
- disk_size = 50
+ block_device_mappings = [
+ {
+ device_name = "/dev/sda1"
+ volume_type = "gp3"
+ volume_size = 50
+ iops = 3000
+ throughput = 125
+ }
+ ]
+
instance_type = "m5.large"
max_size = 4
min_size = 2
@@ -138,7 +171,9 @@ No requirements.
## Modules
-No modules.
+| Name | Source | Version |
+|------|--------|---------|
+| [launch\_template\_self\_managed\_ng](#module\_launch\_template\_self\_managed\_ng) | ../launch-templates | n/a |
## Resources
@@ -150,7 +185,6 @@ No modules.
| [aws_iam_role.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.eks_windows_cni](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_launch_template.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_primary_sg_ingress_worker_sgr](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.control_plane_egress_to_worker_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
@@ -192,9 +226,9 @@ No modules.
| Name | Description |
|------|-------------|
-| [launch\_template\_arn](#output\_launch\_template\_arn) | launch templated id for EKS Self Managed Node Group |
-| [launch\_template\_ids](#output\_launch\_template\_ids) | launch templated id for EKS Self Managed Node Group |
-| [launch\_template\_latest\_versions](#output\_launch\_template\_latest\_versions) | launch templated version for EKS Self Managed Node Group |
+| [launch\_template\_arn](#output\_launch\_template\_arn) | Launch Template ARNs for EKS Self Managed Node Group |
+| [launch\_template\_ids](#output\_launch\_template\_ids) | Launch Template IDs for EKS Self Managed Node Group |
+| [launch\_template\_latest\_versions](#output\_launch\_template\_latest\_versions) | Launch Template latest versions for EKS Self Managed Node Group |
| [self\_managed\_asg\_names](#output\_self\_managed\_asg\_names) | Self managed group ASG names |
| [self\_managed\_iam\_role\_name](#output\_self\_managed\_iam\_role\_name) | Self managed groups IAM role names |
| [self\_managed\_nodegroup\_iam\_instance\_profile\_arn](#output\_self\_managed\_nodegroup\_iam\_instance\_profile\_arn) | IAM Instance Profile arnd for EKS Self Managed Node Group |
diff --git a/modules/aws-eks-self-managed-node-groups/locals.tf b/modules/aws-eks-self-managed-node-groups/locals.tf
index ee5d86bd68..3c51719d9e 100644
--- a/modules/aws-eks-self-managed-node-groups/locals.tf
+++ b/modules/aws-eks-self-managed-node-groups/locals.tf
@@ -1,4 +1,6 @@
locals {
+ lt_self_managed_group_map_key = "self-managed-node-group"
+
default_self_managed_ng = {
node_group_name = "m4_on_demand"
instance_type = "m4.large"
@@ -11,11 +13,17 @@ locals {
post_userdata = ""
kubelet_extra_args = ""
bootstrap_extra_args = ""
- disk_size = 50
- disk_type = "gp2"
enable_monitoring = false
public_ip = false
+ block_device_mappings = [
+ {
+ device_name = "/dev/xvda"
+ volume_type = "gp2"
+ volume_size = 50
+ }
+ ]
+
# AUTOSCALING
max_size = "3"
min_size = "1"
@@ -42,28 +50,8 @@ locals {
default_custom_ami_id = contains(local.predefined_ami_types, local.self_managed_node_group["launch_template_os"]) ? data.aws_ami.predefined[local.self_managed_node_group["launch_template_os"]].id : ""
custom_ami_id = local.self_managed_node_group["custom_ami_id"] == "" ? local.default_custom_ami_id : local.self_managed_node_group["custom_ami_id"]
- userdata_params = {
- eks_cluster_id = var.eks_cluster_id
- cluster_ca_base64 = var.cluster_ca_base64
- cluster_endpoint = var.cluster_endpoint
- bootstrap_extra_args = local.self_managed_node_group["bootstrap_extra_args"]
- pre_userdata = local.self_managed_node_group["pre_userdata"]
- post_userdata = local.self_managed_node_group["post_userdata"]
- kubelet_extra_args = local.self_managed_node_group["kubelet_extra_args"]
- }
-
- userdata_base64 = {
- for launch_template_os in local.predefined_ami_types : launch_template_os => base64encode(
- templatefile(
- "${path.module}/templates/userdata-${launch_template_os}.tpl",
- local.userdata_params
- )
- )
- }
-
- custom_userdata_base64 = contains(local.predefined_ami_types, local.self_managed_node_group["launch_template_os"]) ? local.userdata_base64[local.self_managed_node_group["launch_template_os"]] : null
- policy_arn_prefix = "arn:aws:iam::aws:policy"
- ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
+ policy_arn_prefix = "arn:aws:iam::aws:policy"
+ ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
# EKS Worker Managed Policies
eks_worker_policies = toset(concat([
@@ -78,7 +66,7 @@ locals {
var.tags,
local.self_managed_node_group["additional_tags"],
{
- Name = "${var.eks_cluster_id}-${local.self_managed_node_group["node_group_name"]}"
+ Name = "${local.self_managed_node_group["node_group_name"]}-${var.eks_cluster_id}"
"k8s.io/cluster-autoscaler/${var.eks_cluster_id}" = "owned"
"k8s.io/cluster-autoscaler/enabled" = "TRUE"
"kubernetes.io/cluster/${var.eks_cluster_id}" = "owned"
diff --git a/modules/aws-eks-self-managed-node-groups/main.tf b/modules/aws-eks-self-managed-node-groups/main.tf
index 836e35f8c8..4d81b75347 100644
--- a/modules/aws-eks-self-managed-node-groups/main.tf
+++ b/modules/aws-eks-self-managed-node-groups/main.tf
@@ -6,8 +6,8 @@ resource "aws_autoscaling_group" "self_managed_ng" {
vpc_zone_identifier = local.self_managed_node_group["subnet_ids"]
launch_template {
- id = aws_launch_template.self_managed_ng.id
- version = aws_launch_template.self_managed_ng.latest_version
+ id = module.launch_template_self_managed_ng.launch_template_id[local.lt_self_managed_group_map_key]
+ version = module.launch_template_self_managed_ng.launch_template_latest_version[local.lt_self_managed_group_map_key]
}
lifecycle {
@@ -29,5 +29,4 @@ resource "aws_autoscaling_group" "self_managed_ng" {
aws_iam_instance_profile.self_managed_ng,
aws_iam_role_policy_attachment.self_managed_ng
]
-
}
diff --git a/modules/aws-eks-self-managed-node-groups/outputs.tf b/modules/aws-eks-self-managed-node-groups/outputs.tf
index 4f96e91427..10e3f074f3 100644
--- a/modules/aws-eks-self-managed-node-groups/outputs.tf
+++ b/modules/aws-eks-self-managed-node-groups/outputs.tf
@@ -24,18 +24,18 @@ output "self_managed_asg_names" {
}
output "launch_template_latest_versions" {
- description = "launch templated version for EKS Self Managed Node Group"
- value = aws_launch_template.self_managed_ng[*].latest_version
+ description = "Launch Template latest versions for EKS Self Managed Node Group"
+ value = module.launch_template_self_managed_ng.launch_template_latest_version
}
output "launch_template_ids" {
- description = "launch templated id for EKS Self Managed Node Group"
- value = aws_launch_template.self_managed_ng[*].id
+ description = "Launch Template IDs for EKS Self Managed Node Group"
+ value = module.launch_template_self_managed_ng.launch_template_id
}
output "launch_template_arn" {
- description = "launch templated id for EKS Self Managed Node Group"
- value = aws_launch_template.self_managed_ng[*].arn
+ description = "Launch Template ARNs for EKS Self Managed Node Group"
+ value = module.launch_template_self_managed_ng.launch_template_arn
}
output "self_managed_nodegroup_iam_instance_profile_id" {
diff --git a/modules/aws-eks-self-managed-node-groups/self-managed-launch-templates.tf b/modules/aws-eks-self-managed-node-groups/self-managed-launch-templates.tf
index 4788ef3599..9c34974ea8 100644
--- a/modules/aws-eks-self-managed-node-groups/self-managed-launch-templates.tf
+++ b/modules/aws-eks-self-managed-node-groups/self-managed-launch-templates.tf
@@ -1,66 +1,31 @@
-resource "aws_launch_template" "self_managed_ng" {
- name = "${var.eks_cluster_id}-${local.self_managed_node_group["node_group_name"]}"
- description = "Launch Template for EKS Self Managed Node Groups"
-
- instance_type = local.self_managed_node_group["instance_type"]
- image_id = local.custom_ami_id
-
- update_default_version = true
- user_data = local.custom_userdata_base64
-
- dynamic "instance_market_options" {
- for_each = local.self_managed_node_group["capacity_type"] == "spot" ? [1] : []
- content {
- market_type = local.self_managed_node_group["capacity_type"]
+module "launch_template_self_managed_ng" {
+ source = "../launch-templates"
+
+ eks_cluster_id = var.eks_cluster_id
+ launch_template_config = {
+ "${local.lt_self_managed_group_map_key}" = {
+ ami = local.custom_ami_id
+ launch_template_os = local.self_managed_node_group["launch_template_os"]
+ launch_template_prefix = local.self_managed_node_group["node_group_name"]
+ instance_type = local.self_managed_node_group["instance_type"]
+ capacity_type = local.self_managed_node_group["capacity_type"]
+ iam_instance_profile = aws_iam_instance_profile.self_managed_ng.name
+
+ block_device_mappings = local.self_managed_node_group["block_device_mappings"]
+
+ network_interfaces = [
+ {
+ public_ip = local.self_managed_node_group["public_ip"]
+ security_groups = (
+ local.self_managed_node_group["create_worker_security_group"] == true ? compact(
+ flatten([[aws_security_group.self_managed_ng[0].id],
+ local.self_managed_node_group["worker_additional_security_group_ids"]])) : compact(
+ flatten([[var.worker_security_group_id],
+ var.worker_additional_security_group_ids])))
+ }
+ ]
}
}
- iam_instance_profile {
- name = aws_iam_instance_profile.self_managed_ng.name
- }
-
- ebs_optimized = true
-
- block_device_mappings {
- device_name = "/dev/xvda"
-
- ebs {
- volume_type = local.self_managed_node_group["disk_type"]
- volume_size = local.self_managed_node_group["disk_size"]
- encrypted = true
- # kms_key_id = ""
- delete_on_termination = true
- }
- }
-
- metadata_options {
- http_endpoint = var.http_endpoint
- http_tokens = var.http_tokens
- http_put_response_hop_limit = var.http_put_response_hop_limit
- }
-
- monitoring {
- enabled = local.self_managed_node_group["enable_monitoring"]
- }
-
- lifecycle {
- create_before_destroy = true
- }
-
- network_interfaces {
- associate_public_ip_address = local.self_managed_node_group["public_ip"]
- security_groups = local.self_managed_node_group["create_worker_security_group"] == true ? compact(flatten([[aws_security_group.self_managed_ng[0].id], local.self_managed_node_group["worker_additional_security_group_ids"]])) : compact(flatten([[var.worker_security_group_id], var.worker_additional_security_group_ids]))
- }
-
- tag_specifications {
- resource_type = "volume"
- tags = local.common_tags
- }
-
- depends_on = [
- aws_iam_role.self_managed_ng,
- aws_iam_instance_profile.self_managed_ng,
- aws_iam_role_policy_attachment.self_managed_ng
- ]
-
+ tags = local.common_tags
}
diff --git a/modules/aws-eks-self-managed-node-groups/templates/userdata-amazonlinux2eks.tpl b/modules/aws-eks-self-managed-node-groups/templates/userdata-amazonlinux2eks.tpl
deleted file mode 100644
index 47b1f0d2ec..0000000000
--- a/modules/aws-eks-self-managed-node-groups/templates/userdata-amazonlinux2eks.tpl
+++ /dev/null
@@ -1,18 +0,0 @@
-MIME-Version: 1.0
-Content-Type: multipart/mixed; boundary="//"
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-#!/bin/bash
-set -ex
-
-# User-supplied pre userdata code
-${pre_userdata}
-
-# Bootstrap and join the cluster
-/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_ca_base64}' --apiserver-endpoint '${cluster_endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${eks_cluster_id}'
-
-# User-supplied post userdata code
-${post_userdata}
-
---//--
diff --git a/modules/aws-eks-self-managed-node-groups/templates/userdata-bottlerocket.tpl b/modules/aws-eks-self-managed-node-groups/templates/userdata-bottlerocket.tpl
deleted file mode 100644
index 4db54603c9..0000000000
--- a/modules/aws-eks-self-managed-node-groups/templates/userdata-bottlerocket.tpl
+++ /dev/null
@@ -1,6 +0,0 @@
-${pre_userdata}
-[settings.kubernetes]
-api-server = "${cluster_endpoint}"
-cluster-certificate = "${cluster_ca_base64}"
-cluster-name = "${eks_cluster_id}"
-${post_userdata}
diff --git a/modules/aws-eks-self-managed-node-groups/templates/userdata-windows.tpl b/modules/aws-eks-self-managed-node-groups/templates/userdata-windows.tpl
deleted file mode 100644
index 6207e26217..0000000000
--- a/modules/aws-eks-self-managed-node-groups/templates/userdata-windows.tpl
+++ /dev/null
@@ -1,11 +0,0 @@
-
map(object({| n/a | yes | +| [launch\_template\_config](#input\_launch\_template\_config) | Launch template configuration |
ami = string
launch_template_os = optional(string)
launch_template_prefix = string
iam_instance_profile = optional(string)
vpc_security_group_ids = optional(list(string)) # conflicts with network_interfaces
network_interfaces = optional(list(object({
public_ip = optional(bool)
security_groups = optional(list(string))
})))
block_device_mappings = list(object({
device_name = string
volume_type = string
volume_size = string
delete_on_termination = optional(bool)
encrypted = optional(bool)
kms_key_id = optional(string)
iops = optional(string)
throughput = optional(string)
}))
pre_userdata = optional(string)
bootstrap_extra_args = optional(string)
post_userdata = optional(string)
kubelet_extra_args = optional(string)
http_endpoint = optional(string)
http_tokens = optional(string)
http_put_response_hop_limit = optional(number)
}))
map(object({| n/a | yes | | [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit`,`XYZ`) | `map(string)` | `{}` | no | ## Outputs @@ -127,8 +127,10 @@ No modules. | Name | Description | |------|-------------| | [launch\_template\_arn](#output\_launch\_template\_arn) | Launch Template ARNs | +| [launch\_template\_default\_version](#output\_launch\_template\_default\_version) | Launch Template Default Versions | | [launch\_template\_id](#output\_launch\_template\_id) | Launch Template IDs | | [launch\_template\_image\_id](#output\_launch\_template\_image\_id) | Launch Template Image IDs | +| [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | Launch Template Latest Versions | | [launch\_template\_name](#output\_launch\_template\_name) | Launch Template Names | diff --git a/modules/launch-templates/locals.tf b/modules/launch-templates/locals.tf index e0b22bb609..34fb427bfb 100644 --- a/modules/launch-templates/locals.tf +++ b/modules/launch-templates/locals.tf @@ -9,8 +9,10 @@ locals { ami = "" launch_template_os = "amazonlinux2eks" #bottlerocket launch_template_prefix = "" - vpc_security_group_ids = "" + instance_type = "" + capacity_type = "" iam_instance_profile = "" + vpc_security_group_ids = "" network_interfaces = { public_ip = false @@ -36,5 +38,7 @@ locals { http_endpoint = "enabled" http_tokens = "optional" http_put_response_hop_limit = 1 + + monitoring = true }) } diff --git a/modules/launch-templates/main.tf b/modules/launch-templates/main.tf index 7229224000..7758ca7b86 100644 --- a/modules/launch-templates/main.tf +++ b/modules/launch-templates/main.tf @@ -7,6 +7,8 @@ resource "aws_launch_template" "this" { image_id = each.value.ami update_default_version = true + instance_type = try(length(each.value.instance_type), 0) == 0 ? 
null : each.value.instance_type + user_data = base64encode(templatefile("${path.module}/templates/userdata-${each.value.launch_template_os}.tpl", { pre_userdata = each.value.pre_userdata @@ -18,8 +20,20 @@ resource "aws_launch_template" "this" { cluster_endpoint = data.aws_eks_cluster.eks.endpoint })) - iam_instance_profile { - name = each.value.iam_instance_profile + dynamic "iam_instance_profile" { + for_each = try(length(each.value.iam_instance_profile), 0) == 0 ? {} : { iam_instance_profile : each.value.iam_instance_profile } + iterator = iam + content { + name = iam.value + } + } + + dynamic "instance_market_options" { + for_each = trimspace(lower(each.value.capacity_type)) == "spot" ? { enabled = true } : {} + + content { + market_type = each.value.capacity_type + } } ebs_optimized = true @@ -42,13 +56,20 @@ resource "aws_launch_template" "this" { } } - vpc_security_group_ids = length(each.value.vpc_security_group_ids) == 0 ? null : each.value.vpc_security_group_ids + vpc_security_group_ids = try(length(each.value.vpc_security_group_ids), 0) == 0 ? null : each.value.vpc_security_group_ids dynamic "network_interfaces" { for_each = each.value.network_interfaces content { associate_public_ip_address = try(network_interfaces.value.public_ip, false) - security_groups = length(each.value.network_interfaces.security_groups) == 0 ? null : network_interfaces.value.security_groups + security_groups = try(length(each.value.network_interfaces.security_groups), 0) == 0 ? null : network_interfaces.value.security_groups + } + } + + dynamic "monitoring" { + for_each = each.value.monitoring ? { enabled = true } : {} + content { + enabled = true } } @@ -66,4 +87,9 @@ resource "aws_launch_template" "this" { resource_type = "instance" tags = length(var.tags) > 0 ? var.tags : { Name = "eks" } } + + tag_specifications { + resource_type = "volume" + tags = length(var.tags) > 0 ? 
var.tags : { Name = "eks-volume" } + } } diff --git a/modules/launch-templates/outputs.tf b/modules/launch-templates/outputs.tf index 4f56eba459..6c39c16790 100644 --- a/modules/launch-templates/outputs.tf +++ b/modules/launch-templates/outputs.tf @@ -17,3 +17,13 @@ output "launch_template_name" { description = "Launch Template Names" value = { for template in sort(keys(var.launch_template_config)) : template => aws_launch_template.this[template].name } } + +output "launch_template_default_version" { + description = "Launch Template Default Versions" + value = { for template in sort(keys(var.launch_template_config)) : template => aws_launch_template.this[template].default_version } +} + +output "launch_template_latest_version" { + description = "Launch Template Latest Versions" + value = { for template in sort(keys(var.launch_template_config)) : template => aws_launch_template.this[template].latest_version } +} diff --git a/modules/launch-templates/templates/userdata-amazonlinux2eks.tpl b/modules/launch-templates/templates/userdata-amazonlinux2eks.tpl index 47b1f0d2ec..28532eb465 100644 --- a/modules/launch-templates/templates/userdata-amazonlinux2eks.tpl +++ b/modules/launch-templates/templates/userdata-amazonlinux2eks.tpl @@ -9,6 +9,20 @@ set -ex # User-supplied pre userdata code ${pre_userdata} +# Deal with extra new disks +IDX=1 +DEVICES=$(lsblk -o NAME,TYPE -dsn | awk '/disk/ {print $1}') +for DEV in $DEVICES +do + mkfs.xfs /dev/$${DEV} + mkdir -p /local$${IDX} + + echo /dev/$${DEV} /local$${IDX} xfs defaults,noatime 1 2 >> /etc/fstab + + IDX=$(($${IDX} + 1)) +done +mount -a + # Bootstrap and join the cluster /etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_ca_base64}' --apiserver-endpoint '${cluster_endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${eks_cluster_id}' diff --git a/modules/launch-templates/templates/userdata-windows.tpl b/modules/launch-templates/templates/userdata-windows.tpl new file mode 100644 index 
0000000000..a8fe329854 --- /dev/null +++ b/modules/launch-templates/templates/userdata-windows.tpl @@ -0,0 +1,28 @@ +
ami = string
launch_template_os = optional(string)
launch_template_prefix = string
instance_type = optional(string)
capacity_type = optional(string)
iam_instance_profile = optional(string)
vpc_security_group_ids = optional(list(string)) # conflicts with network_interfaces
network_interfaces = optional(list(object({
public_ip = optional(bool)
security_groups = optional(list(string))
})))
block_device_mappings = list(object({
device_name = string
volume_type = string
volume_size = string
delete_on_termination = optional(bool)
encrypted = optional(bool)
kms_key_id = optional(string)
iops = optional(string)
throughput = optional(string)
}))
pre_userdata = optional(string)
bootstrap_extra_args = optional(string)
post_userdata = optional(string)
kubelet_extra_args = optional(string)
http_endpoint = optional(string)
http_tokens = optional(string)
http_put_response_hop_limit = optional(number)
monitoring = optional(bool)
}))