Skip to content

Commit

Permalink
Merge branch 'main' into blueprint-vpc-lattice
Browse files Browse the repository at this point in the history
  • Loading branch information
neelaruban authored Dec 10, 2023
2 parents c921ec0 + aa8222b commit da4da14
Show file tree
Hide file tree
Showing 8 changed files with 130 additions and 135 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/publish-docs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ jobs:
fetch-depth: 0

- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}

Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/scorecards.yml
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,6 @@ jobs:

# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
uses: github/codeql-action/upload-sarif@c0d1daa7f7e14667747d73a7dbbe8c074bc8bfe2 # v2.22.9
with:
sarif_file: results.sarif
22 changes: 11 additions & 11 deletions patterns/istio/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -127,19 +127,19 @@ module "eks_blueprints_addons" {

helm_releases = {
istio-base = {
chart = "base"
version = local.istio_chart_version
repository = local.istio_chart_url
name = "istio-base"
namespace = kubernetes_namespace_v1.istio_system.metadata[0].name
chart = "base"
chart_version = local.istio_chart_version
repository = local.istio_chart_url
name = "istio-base"
namespace = kubernetes_namespace_v1.istio_system.metadata[0].name
}

istiod = {
chart = "istiod"
version = local.istio_chart_version
repository = local.istio_chart_url
name = "istiod"
namespace = kubernetes_namespace_v1.istio_system.metadata[0].name
chart = "istiod"
chart_version = local.istio_chart_version
repository = local.istio_chart_url
name = "istiod"
namespace = kubernetes_namespace_v1.istio_system.metadata[0].name

set = [
{
Expand All @@ -151,7 +151,7 @@ module "eks_blueprints_addons" {

istio-ingress = {
chart = "gateway"
version = local.istio_chart_version
chart_version = local.istio_chart_version
repository = local.istio_chart_url
name = "istio-ingress"
namespace = "istio-ingress" # per https://github.com/istio/istio/blob/master/manifests/charts/gateways/istio-ingress/values.yaml#L2
Expand Down
46 changes: 43 additions & 3 deletions patterns/karpenter/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,55 @@ See [here](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started

## Validate

!!! danger "TODO"
Add in validation steps
1. Test by listing the nodes in the cluster. You should see four Fargate nodes in the cluster:

```sh
kubectl get nodes

NAME STATUS ROLES AGE VERSION
fargate-ip-10-0-11-195.us-west-2.compute.internal Ready <none> 5m20s v1.28.2-eks-f8587cb
fargate-ip-10-0-27-183.us-west-2.compute.internal Ready <none> 5m2s v1.28.2-eks-f8587cb
fargate-ip-10-0-4-169.us-west-2.compute.internal Ready <none> 5m3s v1.28.2-eks-f8587cb
fargate-ip-10-0-44-106.us-west-2.compute.internal Ready <none> 5m12s v1.28.2-eks-f8587cb
```

2. Provision the Karpenter `EC2NodeClass` and `NodePool` resources which provide Karpenter the necessary configurations to provision EC2 resources:

```sh
kubectl apply -f karpenter.yaml
```

3. Once the Karpenter resources are in place, Karpenter will provision the necessary EC2 resources to satisfy any pending pods in the scheduler's queue. You can demonstrate this with the example deployment provided. First deploy the example deployment which has the initial number replicas set to 0:
```sh
kubectl apply -f example.yaml
```
4. When you scale the example deployment, you should see Karpenter respond by quickly provisioning EC2 resources to satisfy those pending pod requests:
```sh
kubectl scale deployment inflate --replicas=3
```
5. Listing the nodes should now show some EC2 compute that Karpenter has created for the example deployment:
```sh
kubectl get nodes
NAME STATUS ROLES AGE VERSION
fargate-ip-10-0-11-195.us-west-2.compute.internal Ready <none> 13m v1.28.2-eks-f8587cb
fargate-ip-10-0-27-183.us-west-2.compute.internal Ready <none> 12m v1.28.2-eks-f8587cb
fargate-ip-10-0-4-169.us-west-2.compute.internal Ready <none> 12m v1.28.2-eks-f8587cb
fargate-ip-10-0-44-106.us-west-2.compute.internal Ready <none> 13m v1.28.2-eks-f8587cb
ip-10-0-32-199.us-west-2.compute.internal Ready <none> 29s v1.28.2-eks-a5df82a # <== EC2 created by Karpenter
```
## Destroy
Delete the example deployment and its resources so that Karpenter de-provisions the nodes it created first:
```sh
kubectl delete deployment inflate
kubectl delete -f example.yaml
```
{%
Expand Down
22 changes: 22 additions & 0 deletions patterns/karpenter/example.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
---
# Example workload used to demonstrate Karpenter scaling.
# Uses the pause image (does nothing) and starts with zero replicas;
# scale it up (`kubectl scale deployment inflate --replicas=N`) to
# create pending pods that Karpenter must provision capacity for.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: inflate
spec:
  replicas: 0
  selector:
    matchLabels:
      app: inflate
  template:
    metadata:
      labels:
        app: inflate
    spec:
      # Pause pods hold no state; terminate immediately on scale-down
      # so Karpenter can consolidate/remove nodes quickly.
      terminationGracePeriodSeconds: 0
      containers:
        - name: inflate
          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
          resources:
            requests:
              # 1 full vCPU per replica forces new EC2 capacity as
              # replicas grow, making Karpenter's provisioning visible.
              cpu: 1
44 changes: 44 additions & 0 deletions patterns/karpenter/karpenter.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
---
# EC2NodeClass: AWS-specific launch configuration that Karpenter uses
# when creating instances for NodePools that reference it.
apiVersion: karpenter.k8s.aws/v1beta1
kind: EC2NodeClass
metadata:
  name: default
spec:
  amiFamily: AL2
  # IAM role attached to provisioned nodes; must match the role created
  # by the Terraform pattern (static name, no prefix) — TODO confirm it
  # matches the `karpenter_node` config in main.tf.
  role: karpenter-ex-karpenter
  # Discover subnets/security groups by the discovery tag applied by
  # the supporting Terraform resources.
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: ex-karpenter
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: ex-karpenter
  # Tags applied to every instance Karpenter launches.
  tags:
    karpenter.sh/discovery: ex-karpenter
---
# NodePool: constraints and lifecycle policy for Karpenter-managed nodes.
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
  name: default
spec:
  template:
    spec:
      nodeClassRef:
        name: default
      # Restrict instance selection; values must be strings per the
      # Karpenter v1beta1 API, even for numeric comparisons.
      requirements:
        - key: "karpenter.k8s.aws/instance-category"
          operator: In
          values: ["c", "m", "r"]
        - key: "karpenter.k8s.aws/instance-cpu"
          operator: In
          values: ["4", "8", "16", "32"]
        - key: "karpenter.k8s.aws/instance-hypervisor"
          operator: In
          values: ["nitro"]
        - key: "karpenter.k8s.aws/instance-generation"
          operator: Gt
          values: ["2"]
  # Cap total provisioned capacity across the pool.
  limits:
    cpu: 1000
  # Remove nodes 30s after they become empty.
  disruption:
    consolidationPolicy: WhenEmpty
    consolidateAfter: 30s
123 changes: 8 additions & 115 deletions patterns/karpenter/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -34,28 +34,14 @@ provider "helm" {
}
}

provider "kubectl" {
apply_retry_count = 5
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false

exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
}
}

data "aws_ecrpublic_authorization_token" "token" {
provider = aws.virginia
}

data "aws_availability_zones" "available" {}

locals {
name = basename(path.cwd)
name = "ex-${basename(path.cwd)}"
region = "us-west-2"

vpc_cidr = "10.0.0.0/16"
Expand All @@ -73,10 +59,10 @@ locals {

module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 19.16"
version = "~> 19.18"

cluster_name = local.name
cluster_version = "1.27"
cluster_version = "1.28"
cluster_endpoint_public_access = true

vpc_id = module.vpc.vpc_id
Expand Down Expand Up @@ -127,7 +113,7 @@ module "eks" {

module "eks_blueprints_addons" {
source = "aws-ia/eks-blueprints-addons/aws"
version = "~> 1.0"
version = "~> 1.11"

cluster_name = module.eks.cluster_name
cluster_endpoint = module.eks.cluster_endpoint
Expand Down Expand Up @@ -172,107 +158,14 @@ module "eks_blueprints_addons" {
repository_username = data.aws_ecrpublic_authorization_token.token.user_name
repository_password = data.aws_ecrpublic_authorization_token.token.password
}
karpenter_node = {
# Use static name so that it matches what is defined in `karpenter.yaml` example manifest
iam_role_use_name_prefix = false
}

tags = local.tags
}

################################################################################
# Karpenter
################################################################################

resource "kubectl_manifest" "karpenter_provisioner" {
yaml_body = <<-YAML
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
name: default
spec:
requirements:
- key: "karpenter.k8s.aws/instance-category"
operator: In
values: ["c", "m"]
- key: "karpenter.k8s.aws/instance-cpu"
operator: In
values: ["8", "16", "32"]
- key: "karpenter.k8s.aws/instance-hypervisor"
operator: In
values: ["nitro"]
- key: "topology.kubernetes.io/zone"
operator: In
values: ${jsonencode(local.azs)}
- key: "kubernetes.io/arch"
operator: In
values: ["arm64", "amd64"]
- key: "karpenter.sh/capacity-type" # If not included, the webhook for the AWS cloud provider will default to on-demand
operator: In
values: ["spot", "on-demand"]
kubeletConfiguration:
containerRuntime: containerd
maxPods: 110
limits:
resources:
cpu: 1000
consolidation:
enabled: true
providerRef:
name: default
ttlSecondsUntilExpired: 604800 # 7 Days = 7 * 24 * 60 * 60 Seconds
YAML

depends_on = [
module.eks_blueprints_addons
]
}

resource "kubectl_manifest" "karpenter_node_template" {
yaml_body = <<-YAML
apiVersion: karpenter.k8s.aws/v1alpha1
kind: AWSNodeTemplate
metadata:
name: default
spec:
subnetSelector:
karpenter.sh/discovery: ${module.eks.cluster_name}
securityGroupSelector:
karpenter.sh/discovery: ${module.eks.cluster_name}
instanceProfile: ${module.eks_blueprints_addons.karpenter.node_instance_profile_name}
tags:
karpenter.sh/discovery: ${module.eks.cluster_name}
YAML
}

# Example deployment using the [pause image](https://www.ianlewis.org/en/almighty-pause-container)
# and starts with zero replicas
resource "kubectl_manifest" "karpenter_example_deployment" {
yaml_body = <<-YAML
apiVersion: apps/v1
kind: Deployment
metadata:
name: inflate
spec:
replicas: 0
selector:
matchLabels:
app: inflate
template:
metadata:
labels:
app: inflate
spec:
terminationGracePeriodSeconds: 0
containers:
- name: inflate
image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
resources:
requests:
cpu: 1
YAML

depends_on = [
kubectl_manifest.karpenter_node_template
]
}

################################################################################
# Supporting Resources
################################################################################
Expand Down
4 changes: 0 additions & 4 deletions patterns/karpenter/versions.tf
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,6 @@ terraform {
source = "hashicorp/kubernetes"
version = ">= 2.20"
}
kubectl = {
source = "gavinbunney/kubectl"
version = ">= 1.14"
}
}

# ## Used for end-to-end testing on project; update to suit your needs
Expand Down

0 comments on commit da4da14

Please sign in to comment.