Skip to content

Commit

Permalink
Add example manifests and provisioners (#1592)
Browse files Browse the repository at this point in the history
* Add example manifests and provisioners

* Update provisioner examples
  • Loading branch information
rothgar authored Mar 31, 2022
1 parent 5158b6f commit eca610f
Show file tree
Hide file tree
Showing 14 changed files with 393 additions and 0 deletions.
17 changes: 17 additions & 0 deletions examples/provisioner/100-cpu-limit.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# This example provisioner limits the amount of compute
# provisioned by Karpenter to 100 CPU cores

---
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  provider:
    # replace with your worker node instance profile
    instanceProfile: "KarpenterNodeRole"
    securityGroupSelector:
      # replace with your cluster name
      karpenter.sh/discovery: "CLUSTER_NAME"
  # cluster-wide cap: Karpenter stops provisioning once total
  # provisioned CPU reaches this limit
  limits:
    resources:
      cpu: 100
14 changes: 14 additions & 0 deletions examples/provisioner/bottlerocket.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# This example provisioner will provision instances
# running Bottlerocket OS

---
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  provider:
    amiFamily: Bottlerocket
    # replace with your worker node instance profile
    # (this key was missing but its comment was present — restored
    # for consistency with the sibling examples)
    instanceProfile: "KarpenterNodeRole"
    securityGroupSelector:
      # replace with your cluster name
      karpenter.sh/discovery: "CLUSTER_NAME"
86 changes: 86 additions & 0 deletions examples/provisioner/large-instances.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
# This example provisioner will avoid small instance types in the cluster

---
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  requirements:
    - key: "node.kubernetes.io/instance-type"
      operator: NotIn
      values:
        # exclude instances with < 4 cores and < 8gb memory
        # got list from
        # ec2-instance-selector --vcpus-max 4 --memory-max 8 --service eks --max-results 100
        - a1.large
        - a1.medium
        - a1.xlarge
        - c3.large
        - c3.xlarge
        - c4.large
        - c4.xlarge
        - c5.large
        - c5.xlarge
        - c5a.large
        - c5a.xlarge
        - c5ad.large
        - c5ad.xlarge
        - c5d.large
        - c5d.xlarge
        - c5n.large
        - c6g.large
        - c6g.medium
        - c6g.xlarge
        - c6gd.large
        - c6gd.medium
        - c6gd.xlarge
        - c6gn.large
        - c6gn.medium
        - c6gn.xlarge
        - inf1.xlarge
        - m3.large
        - m3.medium
        - m4.large
        - m5.large
        - m5a.large
        - m5ad.large
        - m5d.large
        - m5dn.large
        - m5n.large
        - m5zn.large
        - m6g.large
        - m6g.medium
        - m6gd.large
        - m6gd.medium
        - m6i.large
        - r6g.medium
        - r6gd.medium
        - t2.large
        - t2.medium
        - t2.micro
        - t2.nano
        - t2.small
        - t3.large
        - t3.medium
        - t3.micro
        - t3.nano
        - t3.small
        - t3a.large
        - t3a.medium
        - t3a.micro
        - t3a.nano
        - t3a.small
        - t4g.large
        - t4g.medium
        - t4g.micro
        - t4g.nano
        - t4g.small
  provider:
    # replace with your worker node instance profile
    instanceProfile: "KarpenterNodeRole"
    subnetSelector:
      # replace with your cluster name
      karpenter.sh/discovery: "CLUSTER_NAME"
    securityGroupSelector:
      # replace with your cluster name
      karpenter.sh/discovery: "CLUSTER_NAME"
17 changes: 17 additions & 0 deletions examples/provisioner/node-ttls.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# This example provisioner will provision instances
# that are replaced every 7 days and drain after 1 minute
# with no workloads

---
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  # NOTE(review): in the v1alpha5 Provisioner schema the TTL fields
  # live directly under spec, not under provider — placed here
  # accordingly; verify against the CRD.
  # expire nodes after 7 days
  # 7 days (in seconds) = 7 * 60 * 60 * 24
  ttlSecondsUntilExpired: 604800
  # deprovision nodes that have been empty of workloads for 1 minute
  ttlSecondsAfterEmpty: 60
  provider:
    securityGroupSelector:
      # replace with your cluster name
      karpenter.sh/discovery: "CLUSTER_NAME"
21 changes: 21 additions & 0 deletions examples/provisioner/spot.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# This example will use spot instance type for all
# provisioned instances

---
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  requirements:
    # restrict capacity type so every node launches as spot
    - key: karpenter.sh/capacity-type
      operator: In
      values: ["spot"]
  provider:
    # replace with your worker node instance profile
    instanceProfile: "KarpenterNodeRole"
    subnetSelector:
      # replace with your cluster name
      karpenter.sh/discovery: "CLUSTER_NAME"
    securityGroupSelector:
      # replace with your cluster name
      karpenter.sh/discovery: "CLUSTER_NAME"
23 changes: 23 additions & 0 deletions examples/workloads/arm64.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Example workload pinned to arm64 nodes via nodeSelector.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: arm64
spec:
  # starts at zero replicas — scale up to exercise provisioning
  replicas: 0
  selector:
    matchLabels:
      app: arm64
  template:
    metadata:
      labels:
        app: arm64
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: arm64
          resources:
            requests:
              cpu: "1"
              memory: 256M
      nodeSelector:
        kubernetes.io/arch: arm64
33 changes: 33 additions & 0 deletions examples/workloads/disruption-budget.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# PodDisruptionBudget keeping at least 80% of the matching pods
# available, paired with the Deployment it protects.
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: pdb
spec:
  minAvailable: "80%"
  selector:
    matchLabels:
      app: pdb
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pdb
spec:
  replicas: 10
  selector:
    matchLabels:
      app: pdb
  template:
    metadata:
      labels:
        app: pdb
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: pdb
          resources:
            requests:
              cpu: "1"
              memory: 256M
      nodeSelector:
        kubernetes.io/arch: amd64
23 changes: 23 additions & 0 deletions examples/workloads/gpu-amd.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Example workload requesting an AMD GPU via extended resource limits.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gpu-amd
spec:
  # starts at zero replicas — scale up to exercise provisioning
  replicas: 0
  selector:
    matchLabels:
      app: gpu-amd
  template:
    metadata:
      labels:
        app: gpu-amd
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: gpu-amd
          resources:
            limits:
              amd.com/gpu: "1"
            requests:
              cpu: "1"
              memory: 256M
23 changes: 23 additions & 0 deletions examples/workloads/gpu-nvidia.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Example workload requesting an NVIDIA GPU via extended resource limits.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gpu-nvidia
spec:
  # starts at zero replicas — scale up to exercise provisioning
  replicas: 0
  selector:
    matchLabels:
      app: gpu-nvidia
  template:
    metadata:
      labels:
        app: gpu-nvidia
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: gpu-nvidia
          resources:
            limits:
              nvidia.com/gpu: "1"
            requests:
              cpu: "1"
              memory: 256M
23 changes: 23 additions & 0 deletions examples/workloads/neuron.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Example workload requesting an AWS Neuron accelerator
# via extended resource limits.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: neuron
spec:
  # starts at zero replicas — scale up to exercise provisioning
  replicas: 0
  selector:
    matchLabels:
      app: neuron
  template:
    metadata:
      labels:
        app: neuron
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: neuron
          resources:
            limits:
              aws.amazon.com/neuron: "1"
            requests:
              cpu: "1"
              memory: 256M
23 changes: 23 additions & 0 deletions examples/workloads/spot.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Example workload pinned to spot-capacity nodes via nodeSelector.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: spot
spec:
  # starts at zero replicas — scale up to exercise provisioning
  replicas: 0
  selector:
    matchLabels:
      app: spot
  template:
    metadata:
      labels:
        app: spot
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: spot
          resources:
            requests:
              cpu: "1"
              memory: 256M
      nodeSelector:
        karpenter.sh/capacity-type: spot
34 changes: 34 additions & 0 deletions examples/workloads/spread-hostname-zone.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Example workload spread across both hostnames (maxSkew 2)
# and availability zones (maxSkew 5) with hard constraints.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: host-zone-spread
spec:
  replicas: 50
  selector:
    matchLabels:
      app: host-zone-spread
  template:
    metadata:
      labels:
        app: host-zone-spread
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: host-zone-spread
          resources:
            requests:
              cpu: "1"
              memory: 256M
      topologySpreadConstraints:
        - labelSelector:
            matchLabels:
              app: host-zone-spread
          maxSkew: 2
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: DoNotSchedule
        - labelSelector:
            matchLabels:
              app: host-zone-spread
          maxSkew: 5
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: DoNotSchedule
28 changes: 28 additions & 0 deletions examples/workloads/spread-hostname.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Example workload spread across hostnames (maxSkew 2)
# with a hard constraint.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: host-spread
spec:
  replicas: 10
  selector:
    matchLabels:
      app: host-spread
  template:
    metadata:
      labels:
        app: host-spread
    spec:
      containers:
        - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          name: host-spread
          resources:
            requests:
              cpu: "1"
              memory: 256M
      topologySpreadConstraints:
        - labelSelector:
            matchLabels:
              app: host-spread
          maxSkew: 2
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: DoNotSchedule
Loading

0 comments on commit eca610f

Please sign in to comment.