diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml
index 768a8e081b..b4dc12dd44 100644
--- a/.github/workflows/publish-docs.yml
+++ b/.github/workflows/publish-docs.yml
@@ -28,7 +28,7 @@ jobs:
           fetch-depth: 0
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
          python-version: ${{ env.PYTHON_VERSION }}
 
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 2ad03ab5a6..def0f323e4 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -71,6 +71,6 @@ jobs:
 
       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+        uses: github/codeql-action/upload-sarif@c0d1daa7f7e14667747d73a7dbbe8c074bc8bfe2 # v2.22.9
         with:
           sarif_file: results.sarif
diff --git a/patterns/istio/main.tf b/patterns/istio/main.tf
index 254cc26c4f..bf0edfc6d8 100644
--- a/patterns/istio/main.tf
+++ b/patterns/istio/main.tf
@@ -127,19 +127,19 @@ module "eks_blueprints_addons" {
 
   helm_releases = {
     istio-base = {
-      chart      = "base"
-      version    = local.istio_chart_version
-      repository = local.istio_chart_url
-      name       = "istio-base"
-      namespace  = kubernetes_namespace_v1.istio_system.metadata[0].name
+      chart         = "base"
+      chart_version = local.istio_chart_version
+      repository    = local.istio_chart_url
+      name          = "istio-base"
+      namespace     = kubernetes_namespace_v1.istio_system.metadata[0].name
     }
 
     istiod = {
-      chart      = "istiod"
-      version    = local.istio_chart_version
-      repository = local.istio_chart_url
-      name       = "istiod"
-      namespace  = kubernetes_namespace_v1.istio_system.metadata[0].name
+      chart         = "istiod"
+      chart_version = local.istio_chart_version
+      repository    = local.istio_chart_url
+      name          = "istiod"
+      namespace     = kubernetes_namespace_v1.istio_system.metadata[0].name
 
       set = [
         {
@@ -151,7 +151,7 @@ module "eks_blueprints_addons" {
 
     istio-ingress = {
       chart            = "gateway"
-      version          = local.istio_chart_version
+      chart_version    = local.istio_chart_version
       repository       = local.istio_chart_url
       name             = "istio-ingress"
       namespace        = "istio-ingress" # per https://github.com/istio/istio/blob/master/manifests/charts/gateways/istio-ingress/values.yaml#L2
diff --git a/patterns/karpenter/README.md b/patterns/karpenter/README.md
index 9efc642074..7b03643de0 100644
--- a/patterns/karpenter/README.md
+++ b/patterns/karpenter/README.md
@@ -8,15 +8,55 @@ See [here](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started
 
 ## Validate
 
-!!! danger "TODO"
-    Add in validation steps
+1. Test by listing the nodes in the cluster. You should see four Fargate nodes:
+
+    ```sh
+    kubectl get nodes
+
+    NAME                                                STATUS   ROLES    AGE     VERSION
+    fargate-ip-10-0-11-195.us-west-2.compute.internal   Ready    <none>   5m20s   v1.28.2-eks-f8587cb
+    fargate-ip-10-0-27-183.us-west-2.compute.internal   Ready    <none>   5m2s    v1.28.2-eks-f8587cb
+    fargate-ip-10-0-4-169.us-west-2.compute.internal    Ready    <none>   5m3s    v1.28.2-eks-f8587cb
+    fargate-ip-10-0-44-106.us-west-2.compute.internal   Ready    <none>   5m12s   v1.28.2-eks-f8587cb
+    ```
+
+2. Provision the Karpenter `EC2NodeClass` and `NodePool` resources, which give Karpenter the configuration it needs to provision EC2 resources:
+
+    ```sh
+    kubectl apply -f karpenter.yaml
+    ```
+
+3. Once the Karpenter resources are in place, Karpenter will provision the EC2 resources needed to satisfy any pending pods in the scheduler's queue. You can demonstrate this with the provided example deployment. First deploy the example deployment, which has its initial number of replicas set to 0:
+
+    ```sh
+    kubectl apply -f example.yaml
+    ```
+
+4. When you scale the example deployment, you should see Karpenter respond by quickly provisioning EC2 resources to satisfy those pending pod requests:
+
+    ```sh
+    kubectl scale deployment inflate --replicas=3
+    ```
+
+5. Listing the nodes should now show some EC2 compute that Karpenter has created for the example deployment:
+
+    ```sh
+    kubectl get nodes
+
+    NAME                                                STATUS   ROLES    AGE   VERSION
+    fargate-ip-10-0-11-195.us-west-2.compute.internal   Ready    <none>   13m   v1.28.2-eks-f8587cb
+    fargate-ip-10-0-27-183.us-west-2.compute.internal   Ready    <none>   12m   v1.28.2-eks-f8587cb
+    fargate-ip-10-0-4-169.us-west-2.compute.internal    Ready    <none>   12m   v1.28.2-eks-f8587cb
+    fargate-ip-10-0-44-106.us-west-2.compute.internal   Ready    <none>   13m   v1.28.2-eks-f8587cb
+    ip-10-0-32-199.us-west-2.compute.internal           Ready    <none>   29s   v1.28.2-eks-a5df82a # <== EC2 created by Karpenter
+    ```
 
 ## Destroy
 
 Scale down the deployment to de-provision Karpenter created resources first:
 
 ```sh
-kubectl delete deployment inflate
+kubectl delete -f example.yaml
 ```
 
 {%
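The validation steps above assume `kubectl` is already pointing at the cluster this pattern creates. One way to set that up, using the region and cluster name defined in `main.tf` further down (`us-west-2`, `ex-karpenter`), is the following sketch; adjust the AWS profile or credentials to your environment:

```sh
# Point kubectl at the cluster created by this pattern
# (region and cluster name come from patterns/karpenter/main.tf)
aws eks update-kubeconfig --region us-west-2 --name ex-karpenter

# Sanity check: the four Fargate nodes from step 1 should appear
kubectl get nodes
```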
diff --git a/patterns/karpenter/example.yaml b/patterns/karpenter/example.yaml
new file mode 100644
index 0000000000..25ee0fef3c
--- /dev/null
+++ b/patterns/karpenter/example.yaml
@@ -0,0 +1,22 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inflate
+spec:
+  replicas: 0
+  selector:
+    matchLabels:
+      app: inflate
+  template:
+    metadata:
+      labels:
+        app: inflate
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: inflate
+          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
+          resources:
+            requests:
+              cpu: 1
diff --git a/patterns/karpenter/karpenter.yaml b/patterns/karpenter/karpenter.yaml
new file mode 100644
index 0000000000..c07b61db57
--- /dev/null
+++ b/patterns/karpenter/karpenter.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: default
+spec:
+  amiFamily: AL2
+  role: karpenter-ex-karpenter
+  subnetSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ex-karpenter
+  securityGroupSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ex-karpenter
+  tags:
+    karpenter.sh/discovery: ex-karpenter
+---
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    spec:
+      nodeClassRef:
+        name: default
+      requirements:
+        - key: "karpenter.k8s.aws/instance-category"
+          operator: In
+          values: ["c", "m", "r"]
+        - key: "karpenter.k8s.aws/instance-cpu"
+          operator: In
+          values: ["4", "8", "16", "32"]
+        - key: "karpenter.k8s.aws/instance-hypervisor"
+          operator: In
+          values: ["nitro"]
+        - key: "karpenter.k8s.aws/instance-generation"
+          operator: Gt
+          values: ["2"]
+  limits:
+    cpu: 1000
+  disruption:
+    consolidationPolicy: WhenEmpty
+    consolidateAfter: 30s
diff --git a/patterns/karpenter/main.tf b/patterns/karpenter/main.tf
index 5b9d732940..7055526364 100644
--- a/patterns/karpenter/main.tf
+++ b/patterns/karpenter/main.tf
@@ -34,20 +34,6 @@ provider "helm" {
   }
 }
 
-provider "kubectl" {
-  apply_retry_count      = 5
-  host                   = module.eks.cluster_endpoint
-  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
-  load_config_file       = false
-
-  exec {
-    api_version = "client.authentication.k8s.io/v1beta1"
-    command     = "aws"
-    # This requires the awscli to be installed locally where Terraform is executed
-    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
-  }
-}
-
 data "aws_ecrpublic_authorization_token" "token" {
   provider = aws.virginia
 }
@@ -55,7 +41,7 @@ data "aws_ecrpublic_authorization_token" "token" {
 data "aws_availability_zones" "available" {}
 
 locals {
-  name   = basename(path.cwd)
+  name   = "ex-${basename(path.cwd)}"
   region = "us-west-2"
 
   vpc_cidr = "10.0.0.0/16"
@@ -73,10 +59,10 @@ locals {
 
 module "eks" {
   source  = "terraform-aws-modules/eks/aws"
-  version = "~> 19.16"
+  version = "~> 19.18"
 
   cluster_name                   = local.name
-  cluster_version                = "1.27"
+  cluster_version                = "1.28"
   cluster_endpoint_public_access = true
 
   vpc_id = module.vpc.vpc_id
@@ -127,7 +113,7 @@ module "eks" {
 
 module "eks_blueprints_addons" {
   source  = "aws-ia/eks-blueprints-addons/aws"
-  version = "~> 1.0"
+  version = "~> 1.11"
 
   cluster_name      = module.eks.cluster_name
   cluster_endpoint  = module.eks.cluster_endpoint
@@ -172,107 +158,14 @@ module "eks_blueprints_addons" {
     repository_username = data.aws_ecrpublic_authorization_token.token.user_name
     repository_password = data.aws_ecrpublic_authorization_token.token.password
   }
+  karpenter_node = {
+    # Use static name so that it matches what is defined in `karpenter.yaml` example manifest
+    iam_role_use_name_prefix = false
+  }
 
   tags = local.tags
 }
 
-################################################################################
-# Karpenter
-################################################################################
-
-resource "kubectl_manifest" "karpenter_provisioner" {
-  yaml_body = <<-YAML
-    apiVersion: karpenter.sh/v1alpha5
-    kind: Provisioner
-    metadata:
-      name: default
-    spec:
-      requirements:
-        - key: "karpenter.k8s.aws/instance-category"
-          operator: In
-          values: ["c", "m"]
-        - key: "karpenter.k8s.aws/instance-cpu"
-          operator: In
-          values: ["8", "16", "32"]
-        - key: "karpenter.k8s.aws/instance-hypervisor"
-          operator: In
-          values: ["nitro"]
-        - key: "topology.kubernetes.io/zone"
-          operator: In
-          values: ${jsonencode(local.azs)}
-        - key: "kubernetes.io/arch"
-          operator: In
-          values: ["arm64", "amd64"]
-        - key: "karpenter.sh/capacity-type" # If not included, the webhook for the AWS cloud provider will default to on-demand
-          operator: In
-          values: ["spot", "on-demand"]
-      kubeletConfiguration:
-        containerRuntime: containerd
-        maxPods: 110
-      limits:
-        resources:
-          cpu: 1000
-      consolidation:
-        enabled: true
-      providerRef:
-        name: default
-      ttlSecondsUntilExpired: 604800 # 7 Days = 7 * 24 * 60 * 60 Seconds
-  YAML
-
-  depends_on = [
-    module.eks_blueprints_addons
-  ]
-}
-
-resource "kubectl_manifest" "karpenter_node_template" {
-  yaml_body = <<-YAML
-    apiVersion: karpenter.k8s.aws/v1alpha1
-    kind: AWSNodeTemplate
-    metadata:
-      name: default
-    spec:
-      subnetSelector:
-        karpenter.sh/discovery: ${module.eks.cluster_name}
-      securityGroupSelector:
-        karpenter.sh/discovery: ${module.eks.cluster_name}
-      instanceProfile: ${module.eks_blueprints_addons.karpenter.node_instance_profile_name}
-      tags:
-        karpenter.sh/discovery: ${module.eks.cluster_name}
-  YAML
-}
-
-# Example deployment using the [pause image](https://www.ianlewis.org/en/almighty-pause-container)
-# and starts with zero replicas
-resource "kubectl_manifest" "karpenter_example_deployment" {
-  yaml_body = <<-YAML
-    apiVersion: apps/v1
-    kind: Deployment
-    metadata:
-      name: inflate
-    spec:
-      replicas: 0
-      selector:
-        matchLabels:
-          app: inflate
-      template:
-        metadata:
-          labels:
-            app: inflate
-        spec:
-          terminationGracePeriodSeconds: 0
-          containers:
-            - name: inflate
-              image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
-              resources:
-                requests:
-                  cpu: 1
-  YAML
-
-  depends_on = [
-    kubectl_manifest.karpenter_node_template
-  ]
-}
-
 ################################################################################
 # Supporting Resources
 ################################################################################
diff --git a/patterns/karpenter/versions.tf b/patterns/karpenter/versions.tf
index 24d5350878..2c63637eba 100644
--- a/patterns/karpenter/versions.tf
+++ b/patterns/karpenter/versions.tf
@@ -14,10 +14,6 @@ terraform {
       source  = "hashicorp/kubernetes"
       version = ">= 2.20"
     }
-    kubectl = {
-      source  = "gavinbunney/kubectl"
-      version = ">= 1.14"
-    }
   }
 
   # ## Used for end-to-end testing on project; update to suit your needs
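With the `gavinbunney/kubectl` provider and the inline `kubectl_manifest` resources removed, the Karpenter configuration lives entirely in the standalone `karpenter.yaml` manifest applied via `kubectl`. A minimal way to confirm the v1beta1 objects are in place and to see which capacity Karpenter actually launched for the scaled `inflate` deployment, assuming the well-known v1beta1 node labels (`karpenter.sh/nodepool`, `karpenter.sh/capacity-type`), is:

```sh
# Confirm the EC2NodeClass and NodePool were applied, and list any NodeClaims
kubectl get ec2nodeclasses,nodepools,nodeclaims

# Show only Karpenter-launched nodes from the "default" NodePool,
# with instance type and capacity type as extra columns
kubectl get nodes -l karpenter.sh/nodepool=default \
  -L node.kubernetes.io/instance-type \
  -L karpenter.sh/capacity-type
```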