From b82033c8efac737b4d6f65f4cce9f32648292a4d Mon Sep 17 00:00:00 2001
From: Marko Bevc
Date: Mon, 10 Jan 2022 20:15:32 +0000
Subject: [PATCH] Docs: Update Getting started and clean-up

---
 .../getting-started-with-terraform/_index.md  | 83 +++++++++++++++----
 .../content/en/docs/getting-started/_index.md |  4 -
 website/content/en/docs/provisioner.md        | 12 +--
 website/content/main.py                       |  1 -
 4 files changed, 75 insertions(+), 25 deletions(-)

diff --git a/website/content/en/docs/getting-started-with-terraform/_index.md b/website/content/en/docs/getting-started-with-terraform/_index.md
index e5ee9841ef40..0b74a7ccc6d3 100644
--- a/website/content/en/docs/getting-started-with-terraform/_index.md
+++ b/website/content/en/docs/getting-started-with-terraform/_index.md
@@ -49,8 +49,8 @@ export CLUSTER_NAME=$USER-karpenter-demo
export AWS_DEFAULT_REGION=us-west-2
```

-The first thing we need to do is create our `main.tf` file and place the 
-following in it. This will let us pass in a cluster name that will be used 
+The first thing we need to do is create our `main.tf` file and place the
+following in it. This will let us pass in a cluster name that will be used
throughout the remainder of our config.

```hcl
@@ -65,9 +65,9 @@ variable "cluster_name" {

We're going to use two different Terraform modules to create our cluster - one
to create the VPC and another for the cluster itself. The key part of this is
-that we need to tag the VPC subnets that we want to use for the worker nodes. 
+that we need to tag the VPC subnets that we want to use for the worker nodes.

-Place the following Terraform config into your `main.tf` file.
+Place the following Terraform config into your `vpc.tf` file.

```hcl
module "vpc" {
@@ -88,7 +88,14 @@ module "vpc" {
    "kubernetes.io/cluster/${var.cluster_name}" = "owned"
  }
}
+```
+
+Depending on what you want to use for backend compute, choose one of the
+following options to provision the EKS control plane and put it into `eks.tf`:

+#### Using EC2 nodes
+
+```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "<18"
@@ -109,6 +116,48 @@ module "eks" {
}
```

+#### Using Fargate
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "<18"
+
+  cluster_version = "1.21"
+  cluster_name    = var.cluster_name
+  vpc_id          = module.vpc.vpc_id
+  subnets         = module.vpc.private_subnets
+  enable_irsa     = true
+
+  # Ensure provisioned nodes have permissions to join the cluster control plane
+  map_roles = [{
+    rolearn  = module.eks.worker_iam_role_arn
+    username = "system:node:{{EC2PrivateDNSName}}"
+    groups   = ["system:bootstrappers", "system:nodes"]
+    }
+  ]
+
+  # Create a Fargate profile to run the kube-system and karpenter namespaces
+  fargate_profiles = {
+    karpenter = {
+      name = "karpenter"
+      selectors = [
+        {
+          namespace = "kube-system"
+        },
+        {
+          namespace = "karpenter"
+        }
+      ]
+
+      tags = {
+        owner = "karpenter"
+      }
+    },
+  }
+}
+```
+
At this point, go ahead and apply what we've done to create the VPC and
cluster. This may take some time.

```bash
terraform init
terraform apply -var cluster_name=$CLUSTER_NAME
```

@@ -134,11 +183,11 @@ Everything should apply successfully now!

### Configure the KarpenterNode IAM Role

-The EKS module creates an IAM role for worker nodes. We'll use that for 
+The EKS module creates an IAM role for worker nodes. We'll use that for
Karpenter (so we don't have to reconfigure the aws-auth ConfigMap), but we
need to add one more policy and create an instance profile.

-Place the following into your `main.tf` to add the policy and create an 
+Place the following into your `main.tf` to add the policy and create an
instance profile.

```hcl
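# The two resources in this block are untouched by the patch, so the hunks
# skip over them. As a reference sketch only: they attach one extra policy
# and create the instance profile, using the EKS module outputs from the
# config above. The SSM policy choice and the resource names here are
# assumptions, not patch content.
resource "aws_iam_role_policy_attachment" "karpenter_ssm_policy" {
  role       = module.eks.worker_iam_role_name
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}

resource "aws_iam_instance_profile" "karpenter" {
  # Karpenter passes this profile to the EC2 instances it launches.
  name = "KarpenterNodeInstanceProfile-${var.cluster_name}"
  role = module.eks.worker_iam_role_name
}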
@@ -163,14 +212,14 @@ Go ahead and apply the changes.
terraform apply -var cluster_name=$CLUSTER_NAME
```

-Now, Karpenter can use this instance profile to launch new EC2 instances and 
+Now, Karpenter can use this instance profile to launch new EC2 instances and
those instances will be able to connect to your cluster.

### Create the KarpenterController IAM Role

Karpenter requires permissions like launching instances, which means it needs
-an IAM role that grants it access. The config below will create an AWS IAM 
-Role, attach a policy, and authorize the Service Account to assume the role 
+an IAM role that grants it access. The config below will create an AWS IAM
+Role, attach a policy, and authorize the Service Account to assume the role
using [IRSA](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html). We will create the
ServiceAccount and connect it to this role during the Helm chart install.

```hcl
@@ -217,7 +266,7 @@ resource "aws_iam_role_policy" "karpenter_controller" {
}
```

-Since we've added a new module, you'll need to run `terraform init` again. 
+Since we've added a new module, you'll need to run `terraform init` again.
Then, apply the changes.

```bash
terraform init
terraform apply -var cluster_name=$CLUSTER_NAME
```

### Install Karpenter Helm Chart

-Use helm to deploy Karpenter to the cluster. We are going to use the 
+Use helm to deploy Karpenter to the cluster. We are going to use the
`helm_release` Terraform resource to do the deploy and pass in the cluster
details and IAM role Karpenter needs to assume.

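The `helm_release` block itself is unchanged by the patch, so the hunks above
and below skip over it. For orientation, a minimal sketch of what that elided
resource roughly contains follows; the chart values and the
`iam_assumable_role_karpenter` output reference are assumptions based on the
surrounding config, not patch content.

```hcl
# Reference sketch of the elided, unchanged helm_release resource.
resource "helm_release" "karpenter" {
  depends_on       = [module.eks] # wait for the cluster before installing
  namespace        = "karpenter"
  create_namespace = true

  name       = "karpenter"
  repository = "https://charts.karpenter.sh"
  chart      = "karpenter"

  # Bind the chart's ServiceAccount to the controller role via IRSA.
  set {
    name  = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
    value = module.iam_assumable_role_karpenter.iam_role_arn
  }

  set {
    name  = "controller.clusterName"
    value = var.cluster_name
  }

  set {
    name  = "controller.clusterEndpoint"
    value = module.eks.cluster_endpoint
  }
}
```

The exact `set` keys depend on the chart version pinned at install time, so
treat them as placeholders rather than authoritative values.
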
@@ -372,13 +421,19 @@ kubectl delete node $NODE_NAME
```

## Cleanup

-To avoid additional charges, remove the demo infrastructure from your AWS 
+To avoid additional charges, remove the demo infrastructure from your AWS
account. Since Karpenter is managing nodes outside of Terraform's view, we need
-to remove the pods and node first (if you haven't already). Once the node is 
-removed, you can remove the rest of the infrastructure.
+to remove the pods and node first (if you haven't already). Once the node is
+removed, you can remove the rest of the infrastructure and clean up the
+launch templates that Karpenter created.

```bash
kubectl delete deployment inflate
kubectl delete node -l karpenter.sh/provisioner-name=default
+helm uninstall karpenter --namespace karpenter
terraform destroy -var cluster_name=$CLUSTER_NAME
+aws ec2 describe-launch-templates \
+    | jq -r ".LaunchTemplates[].LaunchTemplateName" \
+    | grep -i Karpenter-${CLUSTER_NAME} \
+    | xargs -I{} aws ec2 delete-launch-template --launch-template-name {}
```

diff --git a/website/content/en/docs/getting-started/_index.md b/website/content/en/docs/getting-started/_index.md
index 54c83fea6e0a..c0b1f29a0c5e 100644
--- a/website/content/en/docs/getting-started/_index.md
+++ b/website/content/en/docs/getting-started/_index.md
@@ -314,7 +314,3 @@ aws ec2 describe-launch-templates \
    | xargs -I{} aws ec2 delete-launch-template --launch-template-name {}
eksctl delete cluster --name ${CLUSTER_NAME}
```
-
----
-
-## Next Steps:

diff --git a/website/content/en/docs/provisioner.md b/website/content/en/docs/provisioner.md
index d88bb50455a2..5449fee3bc33 100644
--- a/website/content/en/docs/provisioner.md
+++ b/website/content/en/docs/provisioner.md
@@ -35,13 +35,13 @@ spec:
  # These requirements are combined with pod.spec.affinity.nodeAffinity rules.
  # Operators { In, NotIn } are supported to enable including or excluding values
  requirements:
-    - key: "node.kubernetes.io/instance-type" 
+    - key: "node.kubernetes.io/instance-type"
      operator: In
      values: ["m5.large", "m5.2xlarge"]
-    - key: "topology.kubernetes.io/zone" 
+    - key: "topology.kubernetes.io/zone"
      operator: In
      values: ["us-west-2a", "us-west-2b"]
-    - key: "kubernetes.io/arch" 
+    - key: "kubernetes.io/arch"
      operator: In
      values: ["arm64", "amd64"]
    - key: "karpenter.sh/capacity-type" # If not included, the webhook for the AWS cloud provider will default to on-demand
@@ -59,7 +59,7 @@ These well known labels may be specified at the provisioner level, or in a workl

For example, an instance type may be specified using a nodeSelector in a pod
spec. If the instance type requested is not included in the provisioner list
and the provisioner has instance type requirements, Karpenter will not create
a node or schedule the pod.

-📝 None of these values are required. 
+📝 None of these values are required.

### Instance Types
@@ -132,14 +132,14 @@ Karpenter supports `amd64` nodes, and `arm64` nodes.

- values
  - `spot` (default)
-  - `on-demand` 
+  - `on-demand`

Karpenter supports specifying capacity type, which is analogous to [EC2 purchase options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-purchasing-options.html).

## spec.kubeletConfiguration

-Karpenter provides the ability to specify a few additional Kubelet args. These are all optional and provide support for 
+Karpenter provides the ability to specify a few additional Kubelet args. These are all optional and provide support for
additional customization and use cases. Adjust these only if you know you need to do so.

```yaml

diff --git a/website/content/main.py b/website/content/main.py
index 368f0a151897..8305f29ed5e0 100644
--- a/website/content/main.py
+++ b/website/content/main.py
@@ -46,7 +46,6 @@ def searchdirectory(dirname):
# problem: csv file ends up empty?
            writer.writerow(row)
            print(row)
-
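One last reference sketch: the "new module" that the KarpenterController
section tells you to run `terraform init` for is likewise elided by the hunks,
since the patch does not modify it. Assuming the terraform-aws-modules IRSA
submodule and the EKS module outputs used elsewhere in this guide, it looks
roughly like this:

```hcl
# Reference sketch of the elided IRSA role module (name and inputs assumed).
module "iam_assumable_role_karpenter" {
  source      = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
  create_role = true
  role_name   = "karpenter-controller-${var.cluster_name}"

  # Trust the cluster's OIDC provider, restricted to the karpenter ServiceAccount.
  provider_url                  = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
  oidc_fully_qualified_subjects = ["system:serviceaccount:karpenter:karpenter"]
}
```

In the guide, the `aws_iam_role_policy` whose closing brace appears in the
hunks above presumably attaches the controller's EC2 permissions to this role.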