From 6df0fa4a2a147c4cf32bb45fed61c94bd7416dbb Mon Sep 17 00:00:00 2001
From: Jon Edvald
Date: Thu, 4 Feb 2021 18:41:07 +0100
Subject: [PATCH] fix(k8s): fix potential GCR auth issue + simpler GKE+GCR
 instructions

Turns out automatically including the `credHelpers` entries for GCR could
cause conflicts if users already had token auth set up. I've also gone with a
different suggested approach for GCR auth with Kaniko, which is both simpler
to implement and also works seamlessly for `cluster-docker` and
`cluster-buildkit`.
---
 core/src/plugins/kubernetes/init.ts | 13 +-----
 examples/gke/README.md              | 62 ++++++++++++++---------------
 examples/gke/garden.yml             | 24 ++++++-----
 3 files changed, 46 insertions(+), 53 deletions(-)

diff --git a/core/src/plugins/kubernetes/init.ts b/core/src/plugins/kubernetes/init.ts
index ad01530e9f..c2cf933c89 100644
--- a/core/src/plugins/kubernetes/init.ts
+++ b/core/src/plugins/kubernetes/init.ts
@@ -49,17 +49,6 @@ See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-
 a registry auth secret.
 `
 
-// Used to automatically support GCR auth on GKE.
-// Users can override by setting other values for any of these keys in any of their imagePullSecrets.
-const defaultCredHelpers = {
-  "asia.gcr.io": "gcr",
-  "eu.gcr.io": "gcr",
-  "gcr.io": "gcr",
-  "marketplace.gcr.io": "gcr",
-  "staging-k8s.gcr.io": "gcr",
-  "us.gcr.io": "gcr",
-}
-
 interface KubernetesProviderOutputs extends PrimitiveMap {
   "app-namespace": string
   "metadata-namespace": string
@@ -502,7 +491,7 @@ export async function buildDockerAuthConfig(
         credHelpers: { ...accumulator.credHelpers, ...decoded.credHelpers },
       }
     },
-    { experimental: "enabled", auths: {}, credHelpers: defaultCredHelpers }
+    { experimental: "enabled", auths: {}, credHelpers: {} }
   )
 }
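For context on the conflict described in the commit message: Docker-compatible clients (including the in-cluster builders) resolve registry credentials from `credHelpers` before falling back to `auths`, so the auto-injected `gcr` helper entries could silently shadow a user's existing token auth for the same hosts. Below is a minimal sketch of such a merged config (illustrative only, not part of the patch; the file path and token value are placeholders):

```sh
# Illustrative only: a merged Docker config in which an auto-injected
# credHelpers entry shadows the user's existing token for gcr.io.
cat > /tmp/docker-config.json <<'EOF'
{
  "auths": {
    "gcr.io": { "auth": "<base64-encoded user token>" }
  },
  "credHelpers": {
    "gcr.io": "gcr"
  }
}
EOF
# Credential lookups for gcr.io go to the "gcr" helper first, so the token
# under "auths" is never consulted. Seeding the reduce with an empty
# credHelpers object (as in the change above) avoids this, and users can
# still supply helpers explicitly via their imagePullSecrets.
```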
diff --git a/examples/gke/README.md b/examples/gke/README.md
index 80e1b23c42..087a0c495c 100644
--- a/examples/gke/README.md
+++ b/examples/gke/README.md
@@ -28,41 +28,20 @@ gcloud alpha billing projects link $PROJECT_ID --billing-account=
 # Enable the required APIs (this can sometimes take a while).
 gcloud services enable compute.googleapis.com container.googleapis.com servicemanagement.googleapis.com --project $PROJECT_ID
 ```
-
 ### Step 2 - Create a GKE cluster (if you don't already have one)
 
-If you don't already have a GKE cluster to work with, you can create one like this:
-
-```sh
-# Replace the cluster name as you see fit, of course.
-# The --workload-pool flag makes sure Workload Identity is enabled for the cluster.
-gcloud container clusters create garden-gke-example --workload-pool=${PROJECT_ID}.svc.id.goog
-```
-
-You can of course also use the GKE console to do this or add many configuration parameters with the command line, **just make sure _Workload Identity_ is enabled when you create the cluster** (note the `--workload-pool` flag in the above example). See the general GKE instructions [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster).
-
-### Step 3 - Configure Workload Identity
-
-For Kaniko to be able to seamlessly authenticate with your GCR registry, you need to use _Workload Identity_ and give the service account in the `garden-system` namespace access to the GCR registry through that.
-
-To quote the [Kaniko docs](https://github.com/GoogleContainerTools/kaniko#pushing-to-gcr-using-workload-identity) on the subject:
-
-> To authenticate using workload identity you need to run the kaniko pod using a Kubernetes Service Account (KSA) bound to Google Service Account (GSA) which has Storage.Admin permissions to push images to Google Container registry.
-
-In our case, we will use the existing `default` service account in the `garden-system` namespace.
-
-Follow these steps to set all this up:
-
-#### Make sure Workload Identity is enabled for your cluster
+See the general GKE instructions [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster).
 
-If you're using an existing cluster, please see the GKE docs for how to [enable Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_cluster).
-You'll need the cluster to have Workload Identity enabled, and for your node pools to have it enabled as well.
+### Step 3 - Create and configure a Google Service Account (GSA) and role
 
-#### Create and configure a Google Service Account (GSA)
+First, create a Google Service Account:
 
-Please follow the detailed steps [here](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#authenticating_to) **(noting that you should skip creating a new Kubernetes service account and instead attach to the `default` service account in the `garden-system` namespace)** to create a Google Service Account and an IAM policy binding between the GSA and the Kubernetes Service account to allow it to act as the Google service account.
+```sh
+# You can replace the gcr-access name, of course, but make sure you also replace it in the commands below.
+gcloud iam service-accounts create gcr-access --project ${PROJECT_ID}
+```
 
-Then, to grant the Google Service account the right permission to push to GCR, run the following GCR commands (replacing `[google-service-account-name]` with your new GSA name):
+Then, to grant the Google Service Account the right permissions to push to GCR, run the following gcloud commands:
 
 ```sh
 # Create a role with the required permissions
@@ -72,13 +51,32 @@ gcloud iam roles create gcrAccess \
 
 # Attach the role to the newly created Google Service Account
 gcloud projects add-iam-policy-binding ${PROJECT_ID} \
-  --member=serviceAccount:[google-service-account-name]@${PROJECT_ID}.iam.gserviceaccount.com \
+  --member=serviceAccount:gcr-access@${PROJECT_ID}.iam.gserviceaccount.com \
   --role=projects/${PROJECT_ID}/roles/gcrAccess
 ```
 
-### Step 4 - Set the variables in the project config
+### Step 4 - Get a JSON key and create an imagePullSecret
+
+You'll need to prepare the authentication for the builders to use when pulling from and pushing to GCR.
+
+First, create a JSON key file for the GSA:
+
+```sh
+gcloud iam service-accounts keys create keyfile.json --iam-account gcr-access@${PROJECT_ID}.iam.gserviceaccount.com
+```
+
+Then prepare the _imagePullSecret_ in your Kubernetes cluster. Run the following command, replacing `gcr.io` with the correct registry hostname if appropriate (e.g. `eu.gcr.io` or `asia.gcr.io`):
+
+```sh
+kubectl --namespace default create secret docker-registry gcr-config \
+  --docker-server=gcr.io \
+  --docker-username=_json_key \
+  --docker-password="$(cat keyfile.json)"
+```
+
+### Step 5 - Set the variables in the project config
 
-Simply replace the values under the `variables` keys in the `garden.yml` file, as instructed in the comments in the file.
-You can optionally set up an ingress controller in the cluster and point a DNS hostname to it, and set that under `variables.default-hostname`.
+You'll need to replace the values under the `variables` keys in the `garden.yml` file, as instructed in the comments in the file.
+You can optionally set up an ingress controller in the cluster and point a DNS hostname to it, and set that under `variables.defaultHostname`.
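The secret created in Step 4 above can be sanity-checked before Garden uses it, along these lines (illustrative only, not part of the patch; assumes the `gcr-config` secret in the `default` namespace from the README changes):

```sh
# Illustrative only: decode the imagePullSecret and confirm it contains a
# Docker config entry for the registry hostname you chose (e.g. gcr.io).
kubectl --namespace default get secret gcr-config \
  -o jsonpath='{.data.\.dockerconfigjson}' | base64 --decode
```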
diff --git a/examples/gke/garden.yml b/examples/gke/garden.yml
index bb24293c3c..25ee66b22f 100644
--- a/examples/gke/garden.yml
+++ b/examples/gke/garden.yml
@@ -3,26 +3,32 @@ name: gke
 environments:
   - name: gke-kaniko
     variables:
-      build-mode: kaniko
+      buildMode: kaniko
+      imagePullSecrets: []
   - name: gke-kaniko-gcr
     variables:
-      build-mode: kaniko
-      deployment-registry:
+      buildMode: kaniko
+      deploymentRegistry:
         # Replace these values as appropriate
         hostname: eu.gcr.io # <- set this according to the region your cluster runs in
         namespace: garden-dev-200012 # <- set this to the project ID of the target cluster
+      imagePullSecrets:
+        # Make sure this matches the name and namespace of the secret you created
+        - name: gcr-config
+          namespace: default
 providers:
   - name: kubernetes
-    context: ${var.gke-context}
+    context: ${var.gkeContext}
     namespace: ${var.namespace}
-    defaultHostname: ${var.default-hostname}
-    buildMode: ${var.build-mode}
-    deploymentRegistry: ${var.deployment-registry}? # <- note the ? suffix, which allows this to be undefined
+    defaultHostname: ${var.defaultHostname}
+    buildMode: ${var.buildMode}
+    deploymentRegistry: ${var.deploymentRegistry}? # <- note the ? suffix, which allows this to be undefined
+    imagePullSecrets: ${var.imagePullSecrets}
 variables:
   # Replace these values as appropriate
   # > the kube context of the cluster
-  gke-context: gke_garden-dev-200012_europe-west1-b_garden-dev-1
+  gkeContext: gke_garden-dev-200012_europe-west1-b_garden-dev-1
   # > any hostname that points to your cluster's ingress controller
-  default-hostname: ${local.env.CIRCLE_BUILD_NUM || local.username}-gke.dev-1.sys.garden
+  defaultHostname: ${local.env.CIRCLE_BUILD_NUM || local.username}-gke.dev-1.sys.garden
   # > the namespace to deploy to in the cluster
   namespace: gke-testing-${local.env.CIRCLE_BUILD_NUM || local.username}
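With the variables set, the example can then be exercised end to end. Illustrative usage, not part of the patch (the environment name comes from `examples/gke/garden.yml` above):

```sh
# Illustrative only: build with Kaniko, push to GCR using the configured
# imagePullSecret, and deploy, all via the gke-kaniko-gcr environment.
garden deploy --env gke-kaniko-gcr
```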