Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix/208 windows resize volume #231

Merged
merged 8 commits into from
Feb 11, 2022
9 changes: 7 additions & 2 deletions docs/node-groups.md
Original file line number Diff line number Diff line change
Expand Up @@ -96,8 +96,13 @@ The below example demonstrates advanced configuration options for a self-managed
post_userdata = ""
kubelet_extra_args = ""
bootstrap_extra_args = ""
disk_size = 20
disk_type = "gp2"
block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 20
}
]
enable_monitoring = false
public_ip = false # Enable only for public subnets
# AUTOSCALING
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,13 @@ module "aws-eks-accelerator-for-terraform" {
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT

disk_size = 20
block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 20
}
]
instance_type = "m5.large"

desired_size = 2
Expand Down Expand Up @@ -310,7 +316,13 @@ module "aws-eks-accelerator-for-terraform" {
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT

disk_size = 20
block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 20
}
]
instance_type = "m5.large"

desired_size = 2
Expand Down Expand Up @@ -351,7 +363,13 @@ module "aws-eks-accelerator-for-terraform" {
max_unavailable = 1

instance_types = "m5.large"
disk_size = 50
block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 50
}
]


subnet_ids = [] # Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3']
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,14 @@ module "aws-eks-accelerator-for-terraform" {
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT

disk_size = 20
block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 20
}
]

instance_type = "m5.large"
desired_size = 2
max_size = 10
Expand Down
48 changes: 39 additions & 9 deletions modules/aws-eks-self-managed-node-groups/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,14 @@ This module allows you to create on-demand or spot self managed Linux or Windows
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT

disk_size = 20
block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 20
}
]

instance_type = "m5.large"
max_size = 10
min_size = 2
Expand All @@ -63,7 +70,14 @@ This module allows you to create on-demand or spot self managed Linux or Windows
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT

disk_size = 20
block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 20
}
]

instance_type = "m5.large"

max_size = 10
Expand All @@ -90,7 +104,15 @@ This module allows you to create on-demand or spot self managed Linux or Windows
max_size = 3
min_size = 3
instance_types = "m5.large"
disk_size = 50

block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp3"
volume_size = 50
}
]

subnet_ids = [] # Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3']
additional_tags = {
ExtraTag = "bottlerocket"
Expand All @@ -108,7 +130,14 @@ This module allows you to create on-demand or spot self managed Linux or Windows
# custom_ami_id = "ami-xxxxxxxxxxxxxxxx" # Bring your own custom AMI. Default Windows AMI is the latest EKS Optimized Windows Server 2019 English Core AMI.
public_ip = false # Enable only for public subnets

disk_size = 50
block_device_mappings = [
{
device_name = "/dev/sda1"
volume_type = "gp3"
volume_size = 50
}
]

instance_type = "m5.large"
max_size = 4
min_size = 2
Expand Down Expand Up @@ -138,7 +167,9 @@ No requirements.

## Modules

No modules.
| Name | Source | Version |
|------|--------|---------|
| <a name="module_launch_template_self_managed_ng"></a> [launch\_template\_self\_managed\_ng](#module\_launch\_template\_self\_managed\_ng) | ../launch-templates | n/a |

## Resources

Expand All @@ -150,7 +181,6 @@ No modules.
| [aws_iam_role.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.eks_windows_cni](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.self_managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_primary_sg_ingress_worker_sgr](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.control_plane_egress_to_worker_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
Expand Down Expand Up @@ -192,9 +222,9 @@ No modules.

| Name | Description |
|------|-------------|
| <a name="output_launch_template_arn"></a> [launch\_template\_arn](#output\_launch\_template\_arn) | launch templated id for EKS Self Managed Node Group |
| <a name="output_launch_template_ids"></a> [launch\_template\_ids](#output\_launch\_template\_ids) | launch templated id for EKS Self Managed Node Group |
| <a name="output_launch_template_latest_versions"></a> [launch\_template\_latest\_versions](#output\_launch\_template\_latest\_versions) | launch templated version for EKS Self Managed Node Group |
| <a name="output_launch_template_arn"></a> [launch\_template\_arn](#output\_launch\_template\_arn) | Launch Template ARNs for EKS Self Managed Node Group |
| <a name="output_launch_template_ids"></a> [launch\_template\_ids](#output\_launch\_template\_ids) | Launch Template IDs for EKS Self Managed Node Group |
| <a name="output_launch_template_latest_versions"></a> [launch\_template\_latest\_versions](#output\_launch\_template\_latest\_versions) | Launch Template latest versions for EKS Self Managed Node Group |
| <a name="output_self_managed_asg_names"></a> [self\_managed\_asg\_names](#output\_self\_managed\_asg\_names) | Self managed group ASG names |
| <a name="output_self_managed_iam_role_name"></a> [self\_managed\_iam\_role\_name](#output\_self\_managed\_iam\_role\_name) | Self managed groups IAM role names |
| <a name="output_self_managed_nodegroup_iam_instance_profile_arn"></a> [self\_managed\_nodegroup\_iam\_instance\_profile\_arn](#output\_self\_managed\_nodegroup\_iam\_instance\_profile\_arn) | IAM Instance Profile ARNs for EKS Self Managed Node Group |
Expand Down
38 changes: 13 additions & 25 deletions modules/aws-eks-self-managed-node-groups/locals.tf
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
locals {
lt_self_managed_group_map_key = "self-managed-node-group"

default_self_managed_ng = {
node_group_name = "m4_on_demand"
instance_type = "m4.large"
Expand All @@ -11,11 +13,17 @@ locals {
post_userdata = ""
kubelet_extra_args = ""
bootstrap_extra_args = ""
disk_size = 50
disk_type = "gp2"
enable_monitoring = false
public_ip = false

block_device_mappings = [
{
device_name = "/dev/xvda"
volume_type = "gp2"
volume_size = 50
}
]

# AUTOSCALING
max_size = "3"
min_size = "1"
Expand All @@ -42,28 +50,8 @@ locals {
default_custom_ami_id = contains(local.predefined_ami_types, local.self_managed_node_group["launch_template_os"]) ? data.aws_ami.predefined[local.self_managed_node_group["launch_template_os"]].id : ""
custom_ami_id = local.self_managed_node_group["custom_ami_id"] == "" ? local.default_custom_ami_id : local.self_managed_node_group["custom_ami_id"]

userdata_params = {
eks_cluster_id = var.eks_cluster_id
cluster_ca_base64 = var.cluster_ca_base64
cluster_endpoint = var.cluster_endpoint
bootstrap_extra_args = local.self_managed_node_group["bootstrap_extra_args"]
pre_userdata = local.self_managed_node_group["pre_userdata"]
post_userdata = local.self_managed_node_group["post_userdata"]
kubelet_extra_args = local.self_managed_node_group["kubelet_extra_args"]
}

userdata_base64 = {
for launch_template_os in local.predefined_ami_types : launch_template_os => base64encode(
templatefile(
"${path.module}/templates/userdata-${launch_template_os}.tpl",
local.userdata_params
)
)
}

custom_userdata_base64 = contains(local.predefined_ami_types, local.self_managed_node_group["launch_template_os"]) ? local.userdata_base64[local.self_managed_node_group["launch_template_os"]] : null
policy_arn_prefix = "arn:aws:iam::aws:policy"
ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
policy_arn_prefix = "arn:aws:iam::aws:policy"
ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"

# EKS Worker Managed Policies
eks_worker_policies = toset(concat([
Expand All @@ -78,7 +66,7 @@ locals {
var.tags,
local.self_managed_node_group["additional_tags"],
{
Name = "${var.eks_cluster_id}-${local.self_managed_node_group["node_group_name"]}"
Name = "${local.self_managed_node_group["node_group_name"]}-${var.eks_cluster_id}"
"k8s.io/cluster-autoscaler/${var.eks_cluster_id}" = "owned"
"k8s.io/cluster-autoscaler/enabled" = "TRUE"
"kubernetes.io/cluster/${var.eks_cluster_id}" = "owned"
Expand Down
5 changes: 2 additions & 3 deletions modules/aws-eks-self-managed-node-groups/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@ resource "aws_autoscaling_group" "self_managed_ng" {
vpc_zone_identifier = local.self_managed_node_group["subnet_ids"]

launch_template {
id = aws_launch_template.self_managed_ng.id
version = aws_launch_template.self_managed_ng.latest_version
id = module.launch_template_self_managed_ng.launch_template_id[local.lt_self_managed_group_map_key]
version = module.launch_template_self_managed_ng.launch_template_latest_version[local.lt_self_managed_group_map_key]
}

lifecycle {
Expand All @@ -29,5 +29,4 @@ resource "aws_autoscaling_group" "self_managed_ng" {
aws_iam_instance_profile.self_managed_ng,
aws_iam_role_policy_attachment.self_managed_ng
]

}
12 changes: 6 additions & 6 deletions modules/aws-eks-self-managed-node-groups/outputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -24,18 +24,18 @@ output "self_managed_asg_names" {
}

output "launch_template_latest_versions" {
description = "launch templated version for EKS Self Managed Node Group"
value = aws_launch_template.self_managed_ng[*].latest_version
description = "Launch Template latest versions for EKS Self Managed Node Group"
value = module.launch_template_self_managed_ng.launch_template_latest_version
}

output "launch_template_ids" {
description = "launch templated id for EKS Self Managed Node Group"
value = aws_launch_template.self_managed_ng[*].id
description = "Launch Template IDs for EKS Self Managed Node Group"
value = module.launch_template_self_managed_ng.launch_template_id
}

output "launch_template_arn" {
description = "launch templated id for EKS Self Managed Node Group"
value = aws_launch_template.self_managed_ng[*].arn
description = "Launch Template ARNs for EKS Self Managed Node Group"
value = module.launch_template_self_managed_ng.launch_template_arn
}

output "self_managed_nodegroup_iam_instance_profile_id" {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,66 +1,31 @@
resource "aws_launch_template" "self_managed_ng" {
name = "${var.eks_cluster_id}-${local.self_managed_node_group["node_group_name"]}"
description = "Launch Template for EKS Self Managed Node Groups"

instance_type = local.self_managed_node_group["instance_type"]
image_id = local.custom_ami_id

update_default_version = true
user_data = local.custom_userdata_base64

dynamic "instance_market_options" {
for_each = local.self_managed_node_group["capacity_type"] == "spot" ? [1] : []
content {
market_type = local.self_managed_node_group["capacity_type"]
module "launch_template_self_managed_ng" {
source = "../launch-templates"

eks_cluster_id = var.eks_cluster_id
launch_template_config = {
"${local.lt_self_managed_group_map_key}" = {
ami = local.custom_ami_id
launch_template_os = local.self_managed_node_group["launch_template_os"]
launch_template_prefix = local.self_managed_node_group["node_group_name"]
instance_type = local.self_managed_node_group["instance_type"]
capacity_type = local.self_managed_node_group["capacity_type"]
iam_instance_profile = aws_iam_instance_profile.self_managed_ng.name

block_device_mappings = local.self_managed_node_group["block_device_mappings"]

network_interfaces = [
{
public_ip = local.self_managed_node_group["public_ip"]
security_groups = (
local.self_managed_node_group["create_worker_security_group"] == true ? compact(
flatten([[aws_security_group.self_managed_ng[0].id],
local.self_managed_node_group["worker_additional_security_group_ids"]])) : compact(
flatten([[var.worker_security_group_id],
var.worker_additional_security_group_ids])))
}
]
}
}

iam_instance_profile {
name = aws_iam_instance_profile.self_managed_ng.name
}

ebs_optimized = true

block_device_mappings {
device_name = "/dev/xvda"

ebs {
volume_type = local.self_managed_node_group["disk_type"]
volume_size = local.self_managed_node_group["disk_size"]
encrypted = true
# kms_key_id = ""
delete_on_termination = true
}
}

metadata_options {
http_endpoint = var.http_endpoint
http_tokens = var.http_tokens
http_put_response_hop_limit = var.http_put_response_hop_limit
}

monitoring {
enabled = local.self_managed_node_group["enable_monitoring"]
}

lifecycle {
create_before_destroy = true
}

network_interfaces {
associate_public_ip_address = local.self_managed_node_group["public_ip"]
security_groups = local.self_managed_node_group["create_worker_security_group"] == true ? compact(flatten([[aws_security_group.self_managed_ng[0].id], local.self_managed_node_group["worker_additional_security_group_ids"]])) : compact(flatten([[var.worker_security_group_id], var.worker_additional_security_group_ids]))
}

tag_specifications {
resource_type = "volume"
tags = local.common_tags
}

depends_on = [
aws_iam_role.self_managed_ng,
aws_iam_instance_profile.self_managed_ng,
aws_iam_role_policy_attachment.self_managed_ng
]

tags = local.common_tags
}

This file was deleted.

Loading