chore(chart): Updated release logic to use SemVer #5561

Merged 1 commit on Feb 16, 2024
6 changes: 3 additions & 3 deletions .github/actions/e2e/install-karpenter/action.yaml
@@ -57,16 +57,16 @@ runs:
aws eks update-kubeconfig --name "$CLUSTER_NAME"

# Parse minor version to determine whether to enable the webhooks
RELEASE_VERSION_MINOR="${K8S_VERSION#*.}"
K8S_VERSION_MINOR="${K8S_VERSION#*.}"
WEBHOOK_ENABLED=false
if (( RELEASE_VRESION_MINOR < 25 )); then
if (( K8S_VERSION_MINOR < 25 )); then
WEBHOOK_ENABLED=true
fi

# Remove service account annotation when dropping support for 1.23
helm upgrade --install karpenter "oci://$ECR_ACCOUNT_ID.dkr.ecr.$ECR_REGION.amazonaws.com/karpenter/snapshot/karpenter" \
-n kube-system \
--version "v0-$(git rev-parse HEAD)" \
--version "0-$(git rev-parse HEAD)" \
--set logLevel=debug \
--set webhook.enabled=${WEBHOOK_ENABLED} \
--set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::$ACCOUNT_ID:role/karpenter-irsa-$CLUSTER_NAME" \
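
For context (not part of the PR), a minimal sketch of the two changes in this hunk, assuming an example `K8S_VERSION` of `1.24`: the rename fixes the mismatched `RELEASE_VRESION_MINOR` reference, and the snapshot chart version drops its `v` prefix.

```bash
# Illustration only; K8S_VERSION=1.24 is an assumed example value.
K8S_VERSION="1.24"

# "${K8S_VERSION#*.}" drops everything up to and including the first ".",
# leaving the minor version.
K8S_VERSION_MINOR="${K8S_VERSION#*.}"   # -> 24

WEBHOOK_ENABLED=false
if (( K8S_VERSION_MINOR < 25 )); then   # webhooks are only needed before 1.25
  WEBHOOK_ENABLED=true
fi
echo "${WEBHOOK_ENABLED}"               # -> true

# Snapshot chart versions are now plain SemVer (no leading "v"):
echo "0-$(git rev-parse HEAD)"          # e.g. 0-<commit-sha>
```
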
2 changes: 1 addition & 1 deletion Makefile
@@ -23,7 +23,7 @@ HELM_OPTS ?= --set serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn=${K

# CR for local builds of Karpenter
KARPENTER_NAMESPACE ?= kube-system
KARPENTER_VERSION ?= $(shell git tag --sort=committerdate | tail -1)
KARPENTER_VERSION ?= $(shell git tag --sort=committerdate | tail -1 | cut -d"v" -f2)
KO_DOCKER_REPO ?= ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/dev
GETTING_STARTED_SCRIPT_DIR = website/content/en/preview/getting-started/getting-started-with-karpenter/scripts

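
As an aside, the added `cut` strips the leading `v` from the newest tag so that `KARPENTER_VERSION` defaults to a plain SemVer string; a shell parameter expansion would behave the same way (the tag below is an assumed example, not taken from the repository):

```bash
# Assumed example tag; actual tags in the repository may differ.
TAG="v0.34.0"

echo "${TAG}" | cut -d"v" -f2   # -> 0.34.0  (what the Makefile default now does)
echo "${TAG#v}"                 # -> 0.34.0  (equivalent parameter expansion)
```
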
20 changes: 19 additions & 1 deletion charts/karpenter/Chart.yaml
@@ -13,4 +13,22 @@ keywords:
home: https://karpenter.sh/
icon: https://repository-images.githubusercontent.com/278480393/dab059c8-caa1-4b55-aaa7-3d30e47a5616
sources:
- https://github.com/aws/karpenter/
- https://github.com/aws/karpenter-provider-aws/
annotations:
artifacthub.io/alternativeName: karpenter-provider-aws
artifacthub.io/crds: |
- kind: EC2NodeClass
version: v1beta1
name: ec2nodeclasses.karpenter.k8s.aws
displayName: EC2NodeClass
description: EC2NodeClass is the Schema for the EC2NodeClass API.
- kind: NodeClaim
version: v1beta1
name: nodeclaims.karpenter.sh
displayName: NodeClaim
description: NodeClaim is the Schema for the NodeClaims API.
- kind: NodePool
version: v1beta1
name: nodepools.karpenter.sh
displayName: NodePool
description: NodePool is the Schema for the NodePools API.
2 changes: 1 addition & 1 deletion charts/karpenter/README.md.gotmpl
@@ -14,7 +14,7 @@ You can follow the detailed installation instruction in the [documentation](http
```bash
helm upgrade --install --namespace karpenter --create-namespace \
karpenter oci://public.ecr.aws/karpenter/{{ template "chart.name" . }} \
--version v{{ template "chart.version" . }} \
--version {{ template "chart.version" . }} \
--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \
--set settings.clusterName=${CLUSTER_NAME} \
--set settings.interruptionQueue=${CLUSTER_NAME} \
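
For illustration, with the chart at an assumed version of `0.34.0` the template above now renders a command along these lines (remaining flags elided), passing a bare SemVer to `--version`:

```bash
# Illustrative rendering only; 0.34.0 is an assumed chart version.
helm upgrade --install --namespace karpenter --create-namespace \
  karpenter oci://public.ecr.aws/karpenter/karpenter \
  --version 0.34.0 \
  --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \
  --set settings.clusterName=${CLUSTER_NAME} \
  --set settings.interruptionQueue=${CLUSTER_NAME}
```
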
14 changes: 7 additions & 7 deletions hack/release/common.sh
@@ -35,7 +35,7 @@ snapshot() {
echo "Release Type: snapshot
Release Version: ${RELEASE_VERSION}
Commit: $(git rev-parse HEAD)
Helm Chart Version $(helmChartVersion $RELEASE_VERSION)"
Helm Chart Version $(helmChartVersion "${RELEASE_VERSION}")"

authenticatePrivateRepo
buildImages "${SNAPSHOT_REPO_ECR}"
@@ -49,7 +49,7 @@ release() {
echo "Release Type: stable
Release Version: ${RELEASE_VERSION}
Commit: $(git rev-parse HEAD)
Helm Chart Version $(helmChartVersion $RELEASE_VERSION)"
Helm Chart Version $(helmChartVersion "${RELEASE_VERSION}")"

authenticate
buildImages "${RELEASE_REPO_ECR}"
@@ -88,20 +88,20 @@ releaseType(){
RELEASE_VERSION=$1

if [[ "${RELEASE_VERSION}" == v* ]]; then
echo $RELEASE_TYPE_STABLE
echo "${RELEASE_TYPE_STABLE}"
else
echo $RELEASE_TYPE_SNAPSHOT
echo "${RELEASE_TYPE_SNAPSHOT}"
fi
}

helmChartVersion(){
RELEASE_VERSION=$1
if [[ $(releaseType "$RELEASE_VERSION") == "$RELEASE_TYPE_STABLE" ]]; then
echo "$RELEASE_VERSION"
echo "${RELEASE_VERSION#v}"
fi

if [[ $(releaseType "$RELEASE_VERSION") == "$RELEASE_TYPE_SNAPSHOT" ]]; then
echo "v${CURRENT_MAJOR_VERSION}-${RELEASE_VERSION}"
echo "${CURRENT_MAJOR_VERSION}-${RELEASE_VERSION}"
fi
}

@@ -131,7 +131,7 @@ publishHelmChart() {
cd charts
helm dependency update "${CHART_NAME}"
helm lint "${CHART_NAME}"
helm package "${CHART_NAME}" --version "$HELM_CHART_VERSION"
helm package "${CHART_NAME}" --version "${HELM_CHART_VERSION}"
helm push "${HELM_CHART_FILE_NAME}" "oci://${RELEASE_REPO}"
rm "${HELM_CHART_FILE_NAME}"
cd ..
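
A condensed sketch of the updated version mapping in `common.sh`, with assumed example inputs (the real script uses two separate `if` blocks rather than `if/else`):

```bash
# Assumed value for illustration.
CURRENT_MAJOR_VERSION="0"

helmChartVersion() {
  local RELEASE_VERSION=$1
  if [[ "${RELEASE_VERSION}" == v* ]]; then
    # Stable releases: strip the "v" so the chart version is plain SemVer.
    echo "${RELEASE_VERSION#v}"
  else
    # Snapshot releases: prefix with the current major version, no "v".
    echo "${CURRENT_MAJOR_VERSION}-${RELEASE_VERSION}"
  fi
}

helmChartVersion "v0.34.0"    # -> 0.34.0
helmChartVersion "1234abcd"   # -> 0-1234abcd
```
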
2 changes: 1 addition & 1 deletion hack/toolchain.sh
@@ -33,7 +33,7 @@ kubebuilder() {
sudo mkdir -p ${KUBEBUILDER_ASSETS}
sudo chown "${USER}" ${KUBEBUILDER_ASSETS}
arch=$(go env GOARCH)
ln -sf $(setup-envtest use -p path "${K8S_VERSION}" --arch="${arch}" --bin-dir="${KUBEBUILDER_ASSETS}")/* ${KUBEBUILDER_ASSETS}
ln -sf "$(setup-envtest use -p path "${K8S_VERSION}" --arch="${arch}" --bin-dir="${KUBEBUILDER_ASSETS}")"/* ${KUBEBUILDER_ASSETS}
find $KUBEBUILDER_ASSETS
}

@@ -73,7 +73,8 @@ make test # E2E correctness tests

### Change Log Level

By default, `make apply` will set the log level to debug. You can change the log level by setting the log level in your helm values.
By default, `make apply` will set the log level to debug. You can change the log level by setting the log level in your Helm values.

```bash
--set logLevel=debug
```
4 changes: 2 additions & 2 deletions website/content/en/preview/faq.md
@@ -26,7 +26,7 @@ Karpenter has multiple mechanisms for configuring the [operating system]({{< ref
Karpenter is flexible to multi-architecture configurations using [well known labels]({{< ref "./concepts/scheduling/#supported-labels">}}).

### What RBAC access is required?
All the required RBAC rules can be found in the helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/role.yaml) files for details.
All the required RBAC rules can be found in the Helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/role.yaml) files for details.

### Can I run Karpenter outside of a Kubernetes cluster?
Yes, as long as the controller has network and IAM/RBAC access to the Kubernetes API and your provider API.
@@ -202,7 +202,7 @@ Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes

Karpenter requires proper permissions in the `KarpenterNode IAM Role` and the `KarpenterController IAM Role`.
To upgrade Karpenter to version `$VERSION`, make sure that the `KarpenterNode IAM Role` and the `KarpenterController IAM Role` have the right permission described in `https://karpenter.sh/$VERSION/getting-started/getting-started-with-karpenter/cloudformation.yaml`.
Next, locate `KarpenterController IAM Role` ARN (i.e., ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass them to the helm upgrade command.
Next, locate `KarpenterController IAM Role` ARN (i.e., ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass them to the Helm upgrade command.
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}

For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgrading/upgrade-guide/" >}}).
@@ -44,9 +44,9 @@ authenticate properly by running `aws sts get-caller-identity`.
After setting up the tools, set the Karpenter and Kubernetes version:

```bash
export KARPENTER_NAMESPACE=kube-system
export KARPENTER_VERSION=v0.34.0
export K8S_VERSION={{< param "latest_k8s_version" >}}
export KARPENTER_NAMESPACE="kube-system"
export KARPENTER_VERSION="{{< param "latest_release_version" >}}"
export K8S_VERSION="{{< param "latest_k8s_version" >}}"
```

Then set the following environment variable:
@@ -58,7 +58,7 @@ If you open a new shell to run steps in this procedure, you need to set some or
To remind yourself of these values, type:

```bash
echo $KARPENTER_NAMESPACE $KARPENTER_VERSION $K8S_VERSION $CLUSTER_NAME $AWS_DEFAULT_REGION $AWS_ACCOUNT_ID $TEMPOUT
echo "${KARPENTER_NAMESPACE}" "${KARPENTER_VERSION}" "${K8S_VERSION}" "${CLUSTER_NAME}" "${AWS_DEFAULT_REGION}" "${AWS_ACCOUNT_ID}" "${TEMPOUT}"
```

{{% /alert %}}
@@ -75,7 +75,7 @@ The following cluster configuration will:
* Use [AWS EKS managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for the kube-system and karpenter namespaces. Uncomment fargateProfiles settings (and comment out managedNodeGroups settings) to use Fargate for both namespaces instead.
* Set KARPENTER_IAM_ROLE_ARN variables.
* Create a role to allow spot instances.
* Run helm to install karpenter
* Run Helm to install Karpenter

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}

@@ -97,11 +97,11 @@ If you need Karpenter to manage the DNS service pods' capacity, this means that
{{% /alert %}}

{{% alert title="Common Expression Language/Webhooks Notice" color="warning" %}}
Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter helm chart.
Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter Helm chart.
{{% /alert %}}

{{% alert title="Pod Identity Supports Notice" color="warning" %}}
Karpenter now supports using [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) to authenticate AWS SDK to make API requests to AWS services using AWS Identity and Access Management (IAM) permissions. This feature not supported on versions of Kubernetes < 1.24. If you are running an earlier version of Kubernetes, you will need to use the [IAM Roles for Service Accounts(IRSA)](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) for pod authentication instead. You can enable these IRSA with `--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}"` when applying the Karpenter helm chart.
Karpenter now supports using [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) to authenticate the AWS SDK and make API requests to AWS services using AWS Identity and Access Management (IAM) permissions. This feature is not supported on versions of Kubernetes < 1.24. If you are running an earlier version of Kubernetes, you will need to use [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) for pod authentication instead. You can enable IRSA with `--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}"` when applying the Karpenter Helm chart.
{{% /alert %}}

{{% alert title="Warning" color="warning" %}}
@@ -177,7 +177,7 @@ The section below covers advanced installation techniques for installing Karpent

### Private Clusters

You can optionally install Karpenter on a [private cluster](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html#private-cluster-requirements) using the `eksctl` installation by setting `privateCluster.enabled` to true in your [ClusterConfig](https://eksctl.io/usage/eks-private-cluster/#eks-fully-private-cluster) and by setting `--set settings.isolatedVPC=true` when installing the `karpenter` helm chart.
You can optionally install Karpenter on a [private cluster](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html#private-cluster-requirements) using the `eksctl` installation by setting `privateCluster.enabled` to true in your [ClusterConfig](https://eksctl.io/usage/eks-private-cluster/#eks-fully-private-cluster) and by setting `--set settings.isolatedVPC=true` when installing the `karpenter` Helm chart.

```bash
privateCluster:
@@ -2,4 +2,4 @@ export AWS_PARTITION="aws" # if you are not using standard partitions, you may n
export CLUSTER_NAME="${USER}-karpenter-demo"
export AWS_DEFAULT_REGION="us-west-2"
export AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
export TEMPOUT=$(mktemp)
export TEMPOUT="$(mktemp)"
@@ -1,4 +1,4 @@
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
@@ -26,7 +26,7 @@ iam:
- arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}

## Optionally run on fargate or on k8s 1.23
# Pod Identity is not available on fargate
# Pod Identity is not available on fargate
# https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html
# iam:
# withOIDC: true
@@ -67,7 +67,7 @@ addons:
# - namespace: "${KARPENTER_NAMESPACE}"
EOF

export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)"
export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.endpoint" --output text)"
export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter"

echo $CLUSTER_ENDPOINT $KARPENTER_IAM_ROLE_ARN
echo "${CLUSTER_ENDPOINT} ${KARPENTER_IAM_ROLE_ARN}"
@@ -1,6 +1,6 @@
TEMPOUT=$(mktemp)
TEMPOUT="$(mktemp)"

curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
@@ -4,8 +4,8 @@ helm repo update

kubectl create namespace monitoring

curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | tee prometheus-values.yaml
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | tee prometheus-values.yaml
helm install --namespace monitoring prometheus prometheus-community/prometheus --values prometheus-values.yaml

curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml
helm install --namespace monitoring grafana grafana-charts/grafana --values grafana-values.yaml
@@ -1 +1 @@
kubectl delete node $NODE_NAME
kubectl delete node "${NODE_NAME}"
@@ -1,6 +1,6 @@
helm uninstall karpenter --namespace "${KARPENTER_NAMESPACE}"
aws cloudformation delete-stack --stack-name "Karpenter-${CLUSTER_NAME}"
aws ec2 describe-launch-templates --filters Name=tag:karpenter.k8s.aws/cluster,Values=${CLUSTER_NAME} |
aws ec2 describe-launch-templates --filters "Name=tag:karpenter.k8s.aws/cluster,Values=${CLUSTER_NAME}" |
jq -r ".LaunchTemplates[].LaunchTemplateName" |
xargs -I{} aws ec2 delete-launch-template --launch-template-name {}
eksctl delete cluster --name "${CLUSTER_NAME}"
@@ -92,10 +92,10 @@ One for your Karpenter node role and one for your existing node group.
First set the Karpenter release you want to deploy.

```bash
export KARPENTER_VERSION={{< param "latest_release_version" >}}
export KARPENTER_VERSION="{{< param "latest_release_version" >}}"
```

We can now generate a full Karpenter deployment yaml from the helm chart.
We can now generate a full Karpenter deployment yaml from the Helm chart.

{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step08-generate-chart.sh" language="bash" %}}

@@ -1,6 +1,6 @@
AWS_PARTITION="aws" # if you are not using standard partitions, you may need to configure to aws-cn / aws-us-gov
AWS_REGION="$(aws configure list | grep region | tr -s " " | cut -d" " -f3)"
OIDC_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} \
OIDC_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" \
--query "cluster.identity.oidc.issuer" --output text)"
AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' \
--output text)
@@ -1,11 +1,11 @@
aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \
--policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKSWorkerNodePolicy
--policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKSWorkerNodePolicy"

aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \
--policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKS_CNI_Policy
--policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKS_CNI_Policy"

aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \
--policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
--policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"

aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \
--policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonSSMManagedInstanceCore
--policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonSSMManagedInstanceCore"