From dde2ee78cdfaca1153adee3776ce4604141b702b Mon Sep 17 00:00:00 2001 From: Jon Edvald Date: Mon, 3 May 2021 22:52:32 +0000 Subject: [PATCH] improvement(k8s): get rid of NFS when using kaniko build mode Closes #1798 --- .circleci/config.yml | 2 + core/src/plugins/container/config.ts | 2 +- core/src/plugins/kubernetes/api.ts | 48 +- .../kubernetes/commands/cluster-init.ts | 2 +- .../plugins/kubernetes/commands/pull-image.ts | 17 +- .../commands/uninstall-garden-services.ts | 23 +- core/src/plugins/kubernetes/config.ts | 35 +- core/src/plugins/kubernetes/constants.ts | 2 +- .../kubernetes/container/build/build.ts | 155 +------ .../kubernetes/container/build/buildkit.ts | 325 +++++-------- .../container/build/cluster-docker.ts | 115 ++++- .../kubernetes/container/build/common.ts | 143 +++++- .../kubernetes/container/build/kaniko.ts | 432 ++++++++++++++++-- core/src/plugins/kubernetes/init.ts | 4 +- core/src/plugins/kubernetes/kubernetes.ts | 20 +- core/src/plugins/kubernetes/util.ts | 17 +- .../data/test-projects/container/garden.yml | 6 + .../kubernetes/container/build/build.ts | 33 +- .../kubernetes/container/build/buildkit.ts | 21 +- docs/advanced/terraform.md | 2 +- docs/guides/cloud-provider-setup.md | 8 +- docs/guides/in-cluster-building.md | 113 +++-- docs/guides/using-garden-in-ci.md | 2 +- docs/reference/providers/kubernetes.md | 79 +++- docs/reference/providers/local-kubernetes.md | 71 ++- examples/build-dependencies/garden.yml | 2 +- examples/demo-project/garden.yml | 2 +- examples/deployment-strategies/garden.yml | 2 +- examples/disabled-configs/garden.yml | 2 +- examples/hadolint/garden.yml | 2 +- examples/hot-reload-k8s/garden.yml | 2 +- .../hot-reload-post-sync-command/garden.yml | 2 +- examples/kaniko/garden.yml | 4 +- examples/openfaas/garden.yml | 2 +- examples/remote-k8s/garden.yml | 2 +- examples/remote-sources/garden.yml | 2 +- examples/tasks/garden.yml | 2 +- .../project.garden.yml | 2 +- examples/terraform-gke/garden.yml | 2 +- examples/vote/garden.yml | 2 +- 40 files changed, 1100 insertions(+), 609 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 37aa48f055..fde69d3a90 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -192,10 +192,12 @@ jobs: - plugins/ build-dist: <<: *node-config + resource_class: large steps: - build_dist build-dist-edge: <<: *node-config + resource_class: large steps: - build_dist: version: edge diff --git a/core/src/plugins/container/config.ts b/core/src/plugins/container/config.ts index 78a0bbfb6b..6bfef92448 100644 --- a/core/src/plugins/container/config.ts +++ b/core/src/plugins/container/config.ts @@ -486,7 +486,7 @@ export const containerRegistryConfigSchema = () => }).description(dedent` The registry where built containers should be pushed to, and then pulled to the cluster when deploying services. - Important: If you specify this in combination with \`buildMode: cluster-docker\` or \`buildMode: kaniko\`, you must make sure \`imagePullSecrets\` includes authentication with the specified deployment registry, that has the appropriate write privileges (usually full write access to the configured \`deploymentRegistry.namespace\`). + Important: If you specify this in combination with in-cluster building, you must make sure \`imagePullSecrets\` includes authentication with the specified deployment registry, that has the appropriate write privileges (usually full write access to the configured \`deploymentRegistry.namespace\`). 
`) export interface ContainerService extends GardenService {} diff --git a/core/src/plugins/kubernetes/api.ts b/core/src/plugins/kubernetes/api.ts index 6bed8c81fa..207d5380a9 100644 --- a/core/src/plugins/kubernetes/api.ts +++ b/core/src/plugins/kubernetes/api.ts @@ -30,6 +30,7 @@ import { Exec, Attach, V1Deployment, + V1Service, } from "@kubernetes/client-node" import AsyncLock = require("async-lock") import request = require("request-promise") @@ -103,6 +104,15 @@ const apiTypes: { [key: string]: K8sApiConstructor } = { } const crudMap = { + Deployment: { + cls: new V1Deployment(), + group: "apps", + read: "readNamespacedDeployment", + create: "createNamespacedDeployment", + replace: "replaceNamespacedDeployment", + delete: "deleteNamespacedDeployment", + patch: "patchNamespacedDeployment", + }, Secret: { cls: new V1Secret(), group: "core", @@ -110,14 +120,16 @@ const crudMap = { create: "createNamespacedSecret", replace: "replaceNamespacedSecret", delete: "deleteNamespacedSecret", + patch: "patchNamespacedSecret", }, - Deployment: { - cls: new V1Deployment(), - group: "apps", - read: "readNamespacedDeployment", - create: "createNamespacedDeployment", - replace: "replaceNamespacedDeployment", - delete: "deleteNamespacedDeployment", + Service: { + cls: new V1Service(), + group: "core", + read: "readNamespacedService", + create: "createNamespacedService", + replace: null, + delete: "deleteNamespacedService", + patch: "patchNamespacedService", }, } @@ -547,19 +559,27 @@ export class KubeApi { log.debug(`Upserting ${kind} ${namespace}/${name}`) - try { + const replace = async () => { await api[crudMap[kind].read](name, namespace) - await api[crudMap[kind].replace](name, namespace, obj) - log.debug(`Replaced ${kind} ${namespace}/${name}`) + if (api[crudMap[kind].replace]) { + await api[crudMap[kind].replace](name, namespace, obj) + log.debug(`Replaced ${kind} ${namespace}/${name}`) + } else { + await api[crudMap[kind].patch](name, namespace, obj) + log.debug(`Patched ${kind} ${namespace}/${name}`) + } + } + + try { + await replace() } catch (err) { if (err.statusCode === 404) { try { await api[crudMap[kind].create](namespace, obj) log.debug(`Created ${kind} ${namespace}/${name}`) } catch (err) { - if (err.statusCode === 409) { - log.debug(`Patched ${kind} ${namespace}/${name}`) - await api[crudMap[kind].replace](name, namespace, obj) + if (err.statusCode === 409 || err.statusCode === 422) { + await replace() } else { throw err } @@ -592,7 +612,7 @@ export class KubeApi { if (name.startsWith("patch")) { // patch the patch bug... 
(https://github.com/kubernetes-client/javascript/issues/19) - target["defaultHeaders"] = { ...defaultHeaders, "content-type": "application/strategic-merge-patch+json" } + target["defaultHeaders"] = { ...defaultHeaders, "content-type": "application/merge-patch+json" } } const output = target[name](...args) diff --git a/core/src/plugins/kubernetes/commands/cluster-init.ts b/core/src/plugins/kubernetes/commands/cluster-init.ts index 5b9d3bb92d..5a4c3fcf48 100644 --- a/core/src/plugins/kubernetes/commands/cluster-init.ts +++ b/core/src/plugins/kubernetes/commands/cluster-init.ts @@ -48,7 +48,7 @@ export const clusterInit: PluginCommand = { ctx: k8sCtx, log, namespace: systemNamespace, - args: ["delete", "--purge", "garden-nfs-provisioner"], + args: ["uninstall", "garden-nfs-provisioner"], }) } catch (_) {} diff --git a/core/src/plugins/kubernetes/commands/pull-image.ts b/core/src/plugins/kubernetes/commands/pull-image.ts index 591ba90721..11aeb8d3ae 100644 --- a/core/src/plugins/kubernetes/commands/pull-image.ts +++ b/core/src/plugins/kubernetes/commands/pull-image.ts @@ -20,12 +20,17 @@ import { LogEntry } from "../../../logger/log-entry" import { containerHelpers } from "../../container/helpers" import { RuntimeError } from "../../../exceptions" import { PodRunner } from "../run" -import { dockerAuthSecretKey, dockerAuthSecretName, inClusterRegistryHostname, k8sUtilImageName } from "../constants" +import { + dockerAuthSecretKey, + systemDockerAuthSecretName, + inClusterRegistryHostname, + k8sUtilImageName, +} from "../constants" import { getAppNamespace, getSystemNamespace } from "../namespace" import { getRegistryPortForward } from "../container/util" import { randomString } from "../../../util/string" -import { buildkitAuthSecretName, ensureBuilderSecret } from "../container/build/buildkit" import { PluginContext } from "../../../plugin-context" +import { ensureBuilderSecret } from "../container/build/common" const tmpTarPath = "/tmp/image.tar" @@ -149,18 +154,18 @@ async function pullFromExternalRegistry( if (buildMode === "cluster-buildkit") { namespace = await getAppNamespace(ctx, log, ctx.provider) - authSecretName = buildkitAuthSecretName - await ensureBuilderSecret({ + const { authSecret } = await ensureBuilderSecret({ provider: ctx.provider, log, api, namespace, - waitForUpdate: false, }) + + authSecretName = authSecret.metadata.name } else { namespace = await getSystemNamespace(ctx, ctx.provider, log) - authSecretName = dockerAuthSecretName + authSecretName = systemDockerAuthSecretName } const imageId = containerHelpers.getDeploymentImageId(module, module.version, ctx.provider.config.deploymentRegistry) diff --git a/core/src/plugins/kubernetes/commands/uninstall-garden-services.ts b/core/src/plugins/kubernetes/commands/uninstall-garden-services.ts index 5865c4cd70..b517e63b4d 100644 --- a/core/src/plugins/kubernetes/commands/uninstall-garden-services.ts +++ b/core/src/plugins/kubernetes/commands/uninstall-garden-services.ts @@ -11,6 +11,8 @@ import { PluginCommand } from "../../../types/plugin/command" import { getKubernetesSystemVariables } from "../init" import { KubernetesPluginContext } from "../config" import { getSystemGarden } from "../system" +import { getSystemNamespace } from "../namespace" +import { helm } from "../helm/helm-cli" export const uninstallGardenServices: PluginCommand = { name: "uninstall-garden-services", @@ -36,10 +38,23 @@ export const uninstallGardenServices: PluginCommand = { const serviceNames = services.map((s) => s.name).filter((name) => name 
!== "nfs-provisioner") const serviceStatuses = await actions.deleteServices(log, serviceNames) - if (k8sCtx.provider.config._systemServices.includes("nfs-provisioner")) { - const service = graph.getService("nfs-provisioner") - await actions.deleteService({ service, log }) - } + const systemNamespace = await getSystemNamespace(ctx, k8sCtx.provider, log) + try { + await helm({ + ctx: k8sCtx, + log, + namespace: systemNamespace, + args: ["uninstall", "garden-nfs-provisioner"], + }) + } catch (_) {} + try { + await helm({ + ctx: k8sCtx, + log, + namespace: systemNamespace, + args: ["uninstall", "garden-nfs-provisioner-v2"], + }) + } catch (_) {} log.info("") diff --git a/core/src/plugins/kubernetes/config.ts b/core/src/plugins/kubernetes/config.ts index 0a154e3500..be854f23c5 100644 --- a/core/src/plugins/kubernetes/config.ts +++ b/core/src/plugins/kubernetes/config.ts @@ -38,7 +38,7 @@ import { ArtifactSpec } from "../../config/validation" import { V1Toleration } from "@kubernetes/client-node" import { runPodSpecWhitelist } from "./run" -export const DEFAULT_KANIKO_IMAGE = "gcr.io/kaniko-project/executor:debug-v1.2.0" +export const DEFAULT_KANIKO_IMAGE = "gcr.io/kaniko-project/executor:v1.6.0-debug" export interface ProviderSecretRef { name: string namespace: string @@ -116,6 +116,8 @@ export interface KubernetesConfig extends GenericProviderConfig { kaniko?: { image?: string extraFlags?: string[] + namespace?: string | null + nodeSelector?: StringMap } context: string defaultHostname?: string @@ -150,7 +152,7 @@ export const defaultResources: KubernetesResources = { memory: 8192, }, requests: { - cpu: 200, + cpu: 100, memory: 512, }, }, @@ -362,17 +364,34 @@ export const kubernetesConfigBase = () => kaniko: joi .object() .keys({ + extraFlags: joi + .array() + .items(joi.string()) + .description( + `Specify extra flags to use when building the container image with kaniko. Flags set on \`container\` modules take precedence over these.` + ), image: joi .string() .default(DEFAULT_KANIKO_IMAGE) + .description(`Change the kaniko image (repository/image:tag) to use when building in kaniko mode.`), + namespace: joi + .string() + .allow(null) + .default(defaultSystemNamespace) .description( - deline` - Change the kaniko image (repository/image:tag) to use when building in kaniko mode. - ` + dedent` + Choose the namespace where the Kaniko pods will be run. Set to \`null\` to use the project namespace. + + **IMPORTANT: The default namespace will change to the project namespace instead of the garden-system namespace in an upcoming release!** + ` ), - extraFlags: joi.array().items(joi.string()).description(deline` - Specify extra flags to use when building the container image with kaniko. - Flags set on container module take precedence over these.`), + nodeSelector: joiStringMap(joi.string()).description( + dedent` + Exposes the \`nodeSelector\` field on the PodSpec of the Kaniko pods. This allows you to constrain the Kaniko pods to only run on particular nodes. + + [See here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) for the official Kubernetes guide to assigning Pods to nodes. 
+ ` + ), }) .default(() => {}) .description("Configuration options for the `kaniko` build mode."), diff --git a/core/src/plugins/kubernetes/constants.ts b/core/src/plugins/kubernetes/constants.ts index 09988eea43..2dde8999d8 100644 --- a/core/src/plugins/kubernetes/constants.ts +++ b/core/src/plugins/kubernetes/constants.ts @@ -17,7 +17,7 @@ export const MAX_CONFIGMAP_DATA_SIZE = 1024 * 1024 // max ConfigMap data size is // the outputs field, so we cap at 250kB. export const MAX_RUN_RESULT_LOG_LENGTH = 250 * 1024 -export const dockerAuthSecretName = "builder-docker-config" +export const systemDockerAuthSecretName = "builder-docker-config" export const dockerAuthSecretKey = ".dockerconfigjson" export const inClusterRegistryHostname = "127.0.0.1:5000" diff --git a/core/src/plugins/kubernetes/container/build/build.ts b/core/src/plugins/kubernetes/container/build/build.ts index cd71e74b27..073cd2e0b2 100644 --- a/core/src/plugins/kubernetes/container/build/build.ts +++ b/core/src/plugins/kubernetes/container/build/build.ts @@ -6,25 +6,15 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -import split2 = require("split2") import { ContainerModule } from "../../../container/config" import { containerHelpers } from "../../../container/helpers" -import { getDockerBuildFlags } from "../../../container/build" import { GetBuildStatusParams, BuildStatus } from "../../../../types/plugin/module/getBuildStatus" import { BuildModuleParams, BuildResult } from "../../../../types/plugin/module/build" -import { inClusterRegistryHostname, dockerDaemonContainerName, rsyncPort } from "../../constants" -import { posix } from "path" -import { KubeApi } from "../../api" import { KubernetesProvider, ContainerBuildMode } from "../../config" -import { BuildError, ConfigurationError } from "../../../../exceptions" -import { LogLevel } from "../../../../logger/log-node" -import { renderOutputStream } from "../../../../util/util" -import { getSystemNamespace } from "../../namespace" -import chalk = require("chalk") -import { getKanikoBuildStatus, runKaniko, kanikoBuildFailed, getKanikoFlags } from "./kaniko" -import { getClusterDockerBuildStatus, getDockerDaemonPodRunner } from "./cluster-docker" +import { getKanikoBuildStatus, kanikoBuild } from "./kaniko" +import { clusterDockerBuild, getClusterDockerBuildStatus } from "./cluster-docker" import { getLocalBuildStatus, localBuild } from "./local" -import { BuildStatusHandler, BuildHandler, syncToBuildSync, sharedBuildSyncDeploymentName } from "./common" +import { BuildStatusHandler, BuildHandler } from "./common" import { buildkitBuildHandler, getBuildkitBuildStatus } from "./buildkit" export async function k8sGetContainerBuildStatus(params: GetBuildStatusParams): Promise { @@ -62,144 +52,9 @@ const buildStatusHandlers: { [mode in ContainerBuildMode]: BuildStatusHandler } "kaniko": getKanikoBuildStatus, } -const remoteBuild: BuildHandler = async (params) => { - const { ctx, module, log } = params - const provider = ctx.provider - const systemNamespace = await getSystemNamespace(ctx, provider, log) - const api = await KubeApi.factory(log, ctx, provider) - - const localId = containerHelpers.getLocalImageId(module, module.version) - const deploymentImageId = containerHelpers.getDeploymentImageId( - module, - module.version, - provider.config.deploymentRegistry - ) - const dockerfile = module.spec.dockerfile || "Dockerfile" - - const { contextPath } = await syncToBuildSync({ - ...params, - api, - namespace: systemNamespace, - deploymentName: 
sharedBuildSyncDeploymentName, - rsyncPort, - }) - - log.setState(`Building image ${localId}...`) - - let buildLog = "" - - // Stream debug log to a status line - const stdout = split2() - const statusLine = log.placeholder({ level: LogLevel.verbose }) - - stdout.on("error", () => {}) - stdout.on("data", (line: Buffer) => { - statusLine.setState(renderOutputStream(line.toString())) - }) - - if (provider.config.buildMode === "cluster-docker") { - // Prepare the build command - const dockerfilePath = posix.join(contextPath, dockerfile) - - let args = [ - "docker", - "build", - "-t", - deploymentImageId, - "-f", - dockerfilePath, - contextPath, - ...getDockerBuildFlags(module), - ] - - // Execute the build - const containerName = dockerDaemonContainerName - const buildTimeout = module.spec.build.timeout - - if (provider.config.clusterDocker && provider.config.clusterDocker.enableBuildKit) { - args = ["/bin/sh", "-c", "DOCKER_BUILDKIT=1 " + args.join(" ")] - } - - const runner = await getDockerDaemonPodRunner({ api, ctx, provider, systemNamespace }) - - const buildRes = await runner.exec({ - log, - command: args, - timeoutSec: buildTimeout, - containerName, - stdout, - buffer: true, - }) - - buildLog = buildRes.log - - // Push the image to the registry - log.setState({ msg: `Pushing image ${localId} to registry...` }) - - const dockerCmd = ["docker", "push", deploymentImageId] - const pushArgs = ["/bin/sh", "-c", dockerCmd.join(" ")] - - const pushRes = await runner.exec({ - log, - command: pushArgs, - timeoutSec: 300, - containerName, - stdout, - buffer: true, - }) - - buildLog += pushRes.log - } else if (provider.config.buildMode === "kaniko") { - // build with Kaniko - const args = [ - "--context", - "dir://" + contextPath, - "--dockerfile", - dockerfile, - "--destination", - deploymentImageId, - ...getKanikoFlags(module.spec.extraFlags, provider.config.kaniko?.extraFlags), - ] - - if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { - // The in-cluster registry is not exposed, so we don't configure TLS on it. 
- args.push("--insecure") - } - - args.push(...getDockerBuildFlags(module)) - - // Execute the build - const buildRes = await runKaniko({ - ctx, - provider, - log, - namespace: systemNamespace, - module, - args, - outputStream: stdout, - }) - buildLog = buildRes.log - - if (kanikoBuildFailed(buildRes)) { - throw new BuildError(`Failed building module ${chalk.bold(module.name)}:\n\n${buildLog}`, { buildLog }) - } - } else { - throw new ConfigurationError("Uknown build mode", { buildMode: provider.config.buildMode }) - } - - log.silly(buildLog) - - return { - buildLog, - fetched: false, - fresh: true, - version: module.version.versionString, - } -} - const buildHandlers: { [mode in ContainerBuildMode]: BuildHandler } = { "local-docker": localBuild, "cluster-buildkit": buildkitBuildHandler, - "cluster-docker": remoteBuild, - "kaniko": remoteBuild, + "cluster-docker": clusterDockerBuild, + "kaniko": kanikoBuild, } diff --git a/core/src/plugins/kubernetes/container/build/buildkit.ts b/core/src/plugins/kubernetes/container/build/buildkit.ts index 0742d418fd..5c3fde801f 100644 --- a/core/src/plugins/kubernetes/container/build/buildkit.ts +++ b/core/src/plugins/kubernetes/container/build/buildkit.ts @@ -9,22 +9,26 @@ import AsyncLock from "async-lock" import chalk from "chalk" import split2 = require("split2") -import { cloneDeep, isEmpty } from "lodash" -import { - buildSyncVolumeName, - dockerAuthSecretKey, - inClusterRegistryHostname, - k8sUtilImageName, - rsyncPortName, -} from "../../constants" +import { isEmpty } from "lodash" +import { buildSyncVolumeName, dockerAuthSecretKey, inClusterRegistryHostname } from "../../constants" import { KubeApi } from "../../api" import { KubernetesDeployment } from "../../types" import { LogEntry } from "../../../../logger/log-entry" import { waitForResources, compareDeployedResources } from "../../status/status" import { KubernetesProvider, KubernetesPluginContext } from "../../config" import { PluginContext } from "../../../../plugin-context" -import { prepareDockerAuth, getRegistryHostname } from "../../init" -import { BuildStatusHandler, skopeoBuildStatus, BuildHandler, syncToBuildSync, getSocatContainer } from "./common" +import { getRegistryHostname } from "../../init" +import { + BuildStatusHandler, + skopeoBuildStatus, + BuildHandler, + syncToBuildSync, + getSocatContainer, + utilRsyncPort, + ensureBuilderSecret, + builderToleration, + getUtilContainer, +} from "./common" import { getNamespaceStatus } from "../../namespace" import { containerHelpers } from "../../../container/helpers" import { LogLevel } from "../../../../logger/log-node" @@ -33,13 +37,10 @@ import { ContainerModule } from "../../../container/config" import { getDockerBuildArgs } from "../../../container/build" import { getDeploymentPod, millicpuToString, megabytesToString } from "../../util" import { PodRunner } from "../../run" -import { V1Container } from "@kubernetes/client-node" export const buildkitImageName = "gardendev/buildkit:v0.8.1-4" export const buildkitDeploymentName = "garden-buildkit" -export const buildkitAuthSecretName = "garden-docker-auth" const buildkitContainerName = "buildkitd" -const utilRsyncPort = 8730 const deployLock = new AsyncLock() @@ -51,7 +52,7 @@ export const getBuildkitBuildStatus: BuildStatusHandler = async (params) => { const api = await KubeApi.factory(log, ctx, provider) const namespace = (await getNamespaceStatus({ log, ctx, provider })).namespaceName - await ensureBuildkit({ + const { authSecret } = await ensureBuildkit({ ctx, provider, log, 
@@ -62,7 +63,7 @@ export const getBuildkitBuildStatus: BuildStatusHandler = async (params) => { return skopeoBuildStatus({ namespace, deploymentName: buildkitDeploymentName, - containerName: utilContainer.name, + containerName: getUtilContainer(authSecret.metadata.name).name, log, api, ctx, @@ -190,23 +191,27 @@ export async function ensureBuildkit({ api: KubeApi namespace: string }) { - return deployLock.acquire("deploy", async () => { + return deployLock.acquire(namespace, async () => { const deployLog = log.placeholder() - // Check status of the buildkit deployment - const manifest = getBuildkitDeployment(provider) - const status = await compareDeployedResources(ctx as KubernetesPluginContext, api, namespace, [manifest], deployLog) - - await ensureBuilderSecret({ + // Make sure auth secret is in place + const { authSecret, updated: secretUpdated } = await ensureBuilderSecret({ provider, log, api, namespace, - waitForUpdate: status.state === "ready", }) + // Check status of the buildkit deployment + const manifest = getBuildkitDeployment(provider, authSecret.metadata.name) + const status = await compareDeployedResources(ctx as KubernetesPluginContext, api, namespace, [manifest], deployLog) + if (status.state === "ready") { - return false + // Need to wait a little to ensure the secret is updated in the deployment + if (secretUpdated) { + await sleep(5) + } + return { authSecret, updated: false } } // Deploy the buildkit daemon @@ -227,36 +232,9 @@ export async function ensureBuildkit({ }) deployLog.setState({ append: true, msg: "Done!" }) - return true - }) -} -export async function ensureBuilderSecret({ - provider, - log, - api, - namespace, - waitForUpdate, -}: { - provider: KubernetesProvider - log: LogEntry - api: KubeApi - namespace: string - waitForUpdate: boolean -}) { - // Ensure docker auth secret is available and up-to-date in the namespace - const authSecret = await prepareDockerAuth(api, provider, namespace) - authSecret.metadata.name = buildkitAuthSecretName - const existingSecret = await api.readOrNull({ log, namespace, manifest: authSecret }) - - if (!existingSecret || authSecret.data?.[dockerAuthSecretKey] !== existingSecret.data?.[dockerAuthSecretKey]) { - log.setState(chalk.gray(`-> Updating Docker auth secret in namespace ${namespace}`)) - await api.upsert({ kind: "Secret", namespace, log, obj: authSecret }) - // Need to wait a little to ensure the secret is updated in the buildkit deployment - if (waitForUpdate) { - await sleep(5) - } - } + return { authSecret, updated: true } + }) } export function getBuildkitFlags(module: ContainerModule) { @@ -275,8 +253,97 @@ export function getBuildkitFlags(module: ContainerModule) { return args } -export function getBuildkitDeployment(provider: KubernetesProvider) { - const deployment = cloneDeep(baseBuildkitDeployment) +export function getBuildkitDeployment(provider: KubernetesProvider, authSecretName: string) { + const deployment: KubernetesDeployment = { + apiVersion: "apps/v1", + kind: "Deployment", + metadata: { + labels: { + app: buildkitDeploymentName, + }, + name: buildkitDeploymentName, + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: buildkitDeploymentName, + }, + }, + template: { + metadata: { + labels: { + app: buildkitDeploymentName, + }, + }, + spec: { + containers: [ + { + name: buildkitContainerName, + image: buildkitImageName, + args: ["--addr", "unix:///run/buildkit/buildkitd.sock"], + readinessProbe: { + exec: { + command: ["buildctl", "debug", "workers"], + }, + initialDelaySeconds: 3, + 
periodSeconds: 5, + }, + livenessProbe: { + exec: { + command: ["buildctl", "debug", "workers"], + }, + initialDelaySeconds: 5, + periodSeconds: 30, + }, + securityContext: { + privileged: true, + }, + volumeMounts: [ + { + name: authSecretName, + mountPath: "/.docker", + readOnly: true, + }, + { + name: buildSyncVolumeName, + mountPath: "/garden-build", + }, + ], + env: [ + { + name: "DOCKER_CONFIG", + value: "/.docker", + }, + ], + }, + // Attach a util container for the rsync server and to use skopeo + getUtilContainer(authSecretName), + ], + volumes: [ + { + name: authSecretName, + secret: { + secretName: authSecretName, + items: [ + { + key: dockerAuthSecretKey, + path: "config.json", + }, + ], + }, + }, + { + name: buildSyncVolumeName, + emptyDir: {}, + }, + ], + tolerations: [builderToleration], + }, + }, + }, + } + const buildkitContainer = deployment.spec!.template.spec!.containers[0] // Optionally run buildkit in rootless mode @@ -319,153 +386,3 @@ export function getBuildkitDeployment(provider: KubernetesProvider) { return deployment } - -const utilContainer: V1Container = { - name: "util", - image: k8sUtilImageName, - imagePullPolicy: "IfNotPresent", - command: ["/rsync-server.sh"], - env: [ - // This makes sure the server is accessible on any IP address, because CIDRs can be different across clusters. - // K8s can be trusted to secure the port. - JE - { name: "ALLOW", value: "0.0.0.0/0" }, - { - name: "RSYNC_PORT", - value: "" + utilRsyncPort, - }, - ], - volumeMounts: [ - { - name: buildkitAuthSecretName, - mountPath: "/home/user/.docker", - readOnly: true, - }, - { - name: buildSyncVolumeName, - mountPath: "/data", - }, - ], - ports: [ - { - name: rsyncPortName, - protocol: "TCP", - containerPort: utilRsyncPort, - }, - ], - readinessProbe: { - initialDelaySeconds: 1, - periodSeconds: 1, - timeoutSeconds: 3, - successThreshold: 2, - failureThreshold: 5, - tcpSocket: { port: (rsyncPortName) }, - }, - resources: { - // This should be ample - limits: { - cpu: "256m", - memory: "512Mi", - }, - }, - securityContext: { - runAsUser: 1000, - runAsGroup: 1000, - }, -} - -const baseBuildkitDeployment: KubernetesDeployment = { - apiVersion: "apps/v1", - kind: "Deployment", - metadata: { - labels: { - app: buildkitDeploymentName, - }, - name: buildkitDeploymentName, - }, - spec: { - replicas: 1, - selector: { - matchLabels: { - app: buildkitDeploymentName, - }, - }, - template: { - metadata: { - labels: { - app: buildkitDeploymentName, - }, - }, - spec: { - containers: [ - { - name: buildkitContainerName, - image: buildkitImageName, - args: ["--addr", "unix:///run/buildkit/buildkitd.sock"], - readinessProbe: { - exec: { - command: ["buildctl", "debug", "workers"], - }, - initialDelaySeconds: 3, - periodSeconds: 5, - }, - livenessProbe: { - exec: { - command: ["buildctl", "debug", "workers"], - }, - initialDelaySeconds: 5, - periodSeconds: 30, - }, - securityContext: { - privileged: true, - }, - volumeMounts: [ - { - name: buildkitAuthSecretName, - mountPath: "/.docker", - readOnly: true, - }, - { - name: buildSyncVolumeName, - mountPath: "/garden-build", - }, - ], - env: [ - { - name: "DOCKER_CONFIG", - value: "/.docker", - }, - ], - }, - // Attach a util container for the rsync server and to use skopeo - utilContainer, - ], - volumes: [ - { - name: buildkitAuthSecretName, - secret: { - secretName: buildkitAuthSecretName, - items: [ - { - key: dockerAuthSecretKey, - path: "config.json", - }, - ], - }, - }, - { - name: buildSyncVolumeName, - emptyDir: {}, - }, - ], - tolerations: [ - { - 
key: "garden-build", - operator: "Equal", - value: "true", - effect: "NoSchedule", - }, - ], - }, - }, - }, -} diff --git a/core/src/plugins/kubernetes/container/build/cluster-docker.ts b/core/src/plugins/kubernetes/container/build/cluster-docker.ts index 460b515431..2b05c77716 100644 --- a/core/src/plugins/kubernetes/container/build/cluster-docker.ts +++ b/core/src/plugins/kubernetes/container/build/cluster-docker.ts @@ -7,15 +7,27 @@ */ import { getDeploymentPod } from "../../util" -import { dockerDaemonDeploymentName, dockerDaemonContainerName } from "../../constants" +import { dockerDaemonDeploymentName, dockerDaemonContainerName, rsyncPort } from "../../constants" import { KubeApi } from "../../api" import { KubernetesProvider, KubernetesPluginContext } from "../../config" import { InternalError } from "../../../../exceptions" import { PodRunner } from "../../run" import { getSystemNamespace } from "../../namespace" -import chalk = require("chalk") +import chalk from "chalk" import { PluginContext } from "../../../../plugin-context" -import { BuildStatusHandler, getManifestInspectArgs } from "./common" +import { + BuildHandler, + BuildStatusHandler, + getManifestInspectArgs, + sharedBuildSyncDeploymentName, + syncToBuildSync, +} from "./common" +import { posix } from "path" +import split2 = require("split2") +import { LogLevel } from "../../../../logger/log-node" +import { renderOutputStream } from "../../../../util/util" +import { getDockerBuildFlags } from "../../../container/build" +import { containerHelpers } from "../../../container/helpers" export const getClusterDockerBuildStatus: BuildStatusHandler = async (params) => { const { ctx, module, log } = params @@ -57,6 +69,103 @@ export const getClusterDockerBuildStatus: BuildStatusHandler = async (params) => } } +export const clusterDockerBuild: BuildHandler = async (params) => { + const { ctx, module, log } = params + const provider = ctx.provider + const systemNamespace = await getSystemNamespace(ctx, provider, log) + const api = await KubeApi.factory(log, ctx, provider) + + const localId = containerHelpers.getLocalImageId(module, module.version) + const deploymentImageId = containerHelpers.getDeploymentImageId( + module, + module.version, + provider.config.deploymentRegistry + ) + const dockerfile = module.spec.dockerfile || "Dockerfile" + + const { contextPath } = await syncToBuildSync({ + ...params, + api, + namespace: systemNamespace, + deploymentName: sharedBuildSyncDeploymentName, + rsyncPort, + }) + + log.setState(`Building image ${localId}...`) + + let buildLog = "" + + // Stream debug log to a status line + const stdout = split2() + const statusLine = log.placeholder({ level: LogLevel.verbose }) + + stdout.on("error", () => {}) + stdout.on("data", (line: Buffer) => { + statusLine.setState(renderOutputStream(line.toString())) + }) + + // Prepare the build command + const dockerfilePath = posix.join(contextPath, dockerfile) + + let args = [ + "docker", + "build", + "-t", + deploymentImageId, + "-f", + dockerfilePath, + contextPath, + ...getDockerBuildFlags(module), + ] + + // Execute the build + const containerName = dockerDaemonContainerName + const buildTimeout = module.spec.build.timeout + + if (provider.config.clusterDocker && provider.config.clusterDocker.enableBuildKit) { + args = ["/bin/sh", "-c", "DOCKER_BUILDKIT=1 " + args.join(" ")] + } + + const runner = await getDockerDaemonPodRunner({ api, ctx, provider, systemNamespace }) + + const buildRes = await runner.exec({ + log, + command: args, + timeoutSec: 
buildTimeout, + containerName, + stdout, + buffer: true, + }) + + buildLog = buildRes.log + + // Push the image to the registry + log.setState({ msg: `Pushing image ${localId} to registry...` }) + + const dockerCmd = ["docker", "push", deploymentImageId] + const pushArgs = ["/bin/sh", "-c", dockerCmd.join(" ")] + + const pushRes = await runner.exec({ + log, + command: pushArgs, + timeoutSec: 300, + containerName, + stdout, + buffer: true, + }) + + buildLog += pushRes.log + + log.silly(buildLog) + + return { + buildLog, + fetched: false, + fresh: true, + version: module.version.versionString, + } +} + export async function getDockerDaemonPodRunner({ api, systemNamespace, diff --git a/core/src/plugins/kubernetes/container/build/common.ts b/core/src/plugins/kubernetes/container/build/common.ts index 1ac6480f81..9be77ac0dd 100644 --- a/core/src/plugins/kubernetes/container/build/common.ts +++ b/core/src/plugins/kubernetes/container/build/common.ts @@ -12,7 +12,14 @@ import { containerHelpers } from "../../../container/helpers" import { GetBuildStatusParams, BuildStatus } from "../../../../types/plugin/module/getBuildStatus" import { BuildModuleParams, BuildResult } from "../../../../types/plugin/module/build" import { getDeploymentPod } from "../../util" -import { gardenUtilDaemonDeploymentName, inClusterRegistryHostname } from "../../constants" +import { + buildSyncVolumeName, + dockerAuthSecretKey, + gardenUtilDaemonDeploymentName, + inClusterRegistryHostname, + k8sUtilImageName, + rsyncPortName, +} from "../../constants" import { KubeApi } from "../../api" import { KubernetesProvider } from "../../config" import { PodRunner } from "../../run" @@ -20,13 +27,35 @@ import { PluginContext } from "../../../../plugin-context" import { resolve } from "path" import { getPortForward } from "../../port-forward" import { normalizeLocalRsyncPath } from "../../../../util/fs" -import { exec } from "../../../../util/util" +import { exec, hashString } from "../../../../util/util" import { InternalError, RuntimeError } from "../../../../exceptions" import { LogEntry } from "../../../../logger/log-entry" +import { prepareDockerAuth } from "../../init" +import chalk from "chalk" +import { V1Container } from "@kubernetes/client-node" const inClusterRegistryPort = 5000 export const sharedBuildSyncDeploymentName = "garden-build-sync" +export const utilRsyncPort = 8730 + +export const commonSyncArgs = [ + "--recursive", + // Copy symlinks (Note: These are sanitized while syncing to the build staging dir) + "--links", + // Preserve permissions + "--perms", + // Preserve modification times + "--times", + "--compress", +] + +export const builderToleration = { + key: "garden-build", + operator: "Equal", + value: "true", + effect: "NoSchedule", +} export type BuildStatusHandler = (params: GetBuildStatusParams) => Promise export type BuildHandler = (params: BuildModuleParams) => Promise @@ -63,22 +92,7 @@ export async function syncToBuildSync(params: SyncToSharedBuildSyncParams) { // https://stackoverflow.com/questions/1636889/rsync-how-can-i-configure-it-to-create-target-directory-on-server let src = normalizeLocalRsyncPath(`${buildRoot}`) + `/./${module.name}/` const destination = `rsync://localhost:${syncFwd.localPort}/volume/${ctx.workingCopyId}/` - const syncArgs = [ - "--recursive", - "--relative", - // Copy symlinks (Note: These are sanitized while syncing to the build staging dir) - "--links", - // Preserve permissions - "--perms", - // Preserve modification times - "--times", - "--compress", - "--delete", - 
"--temp-dir", - "/tmp", - src, - destination, - ] + const syncArgs = [...commonSyncArgs, "--relative", "--delete", "--temp-dir", "/tmp", src, destination] log.debug(`Syncing from ${src} to ${destination}`) // We retry a couple of times, because we may get intermittent connection issues or concurrency issues @@ -227,6 +241,99 @@ export async function getManifestInspectArgs(module: ContainerModule, deployment return dockerArgs } +/** + * Creates and saves a Kubernetes Docker authentication Secret in the specified namespace, suitable for mounting in + * builders and as an imagePullSecret. + * + * Returns the created Secret manifest. + */ +export async function ensureBuilderSecret({ + provider, + log, + api, + namespace, +}: { + provider: KubernetesProvider + log: LogEntry + api: KubeApi + namespace: string +}) { + // Ensure docker auth secret is available and up-to-date in the namespace + const authSecret = await prepareDockerAuth(api, provider, namespace) + let updated = false + + // Create a unique name based on the contents of the auth (otherwise different Garden runs can step over each other + // in shared namespaces). + const hash = hashString(authSecret.data![dockerAuthSecretKey], 6) + const secretName = `garden-docker-auth-${hash}` + authSecret.metadata.name = secretName + + const existingSecret = await api.readOrNull({ log, namespace, manifest: authSecret }) + + if (!existingSecret || authSecret.data?.[dockerAuthSecretKey] !== existingSecret.data?.[dockerAuthSecretKey]) { + log.setState(chalk.gray(`-> Updating Docker auth secret in namespace ${namespace}`)) + await api.upsert({ kind: "Secret", namespace, log, obj: authSecret }) + updated = true + } + + return { authSecret, updated } +} + function isLocalHostname(hostname: string) { return hostname === "localhost" || hostname.startsWith("127.") } + +export function getUtilContainer(authSecretName: string): V1Container { + return { + name: "util", + image: k8sUtilImageName, + imagePullPolicy: "IfNotPresent", + command: ["/rsync-server.sh"], + env: [ + // This makes sure the server is accessible on any IP address, because CIDRs can be different across clusters. + // K8s can be trusted to secure the port. - JE + { name: "ALLOW", value: "0.0.0.0/0" }, + { + name: "RSYNC_PORT", + value: "" + utilRsyncPort, + }, + ], + volumeMounts: [ + { + name: authSecretName, + mountPath: "/home/user/.docker", + readOnly: true, + }, + { + name: buildSyncVolumeName, + mountPath: "/data", + }, + ], + ports: [ + { + name: rsyncPortName, + protocol: "TCP", + containerPort: utilRsyncPort, + }, + ], + readinessProbe: { + initialDelaySeconds: 1, + periodSeconds: 1, + timeoutSeconds: 3, + successThreshold: 2, + failureThreshold: 5, + tcpSocket: { port: (rsyncPortName) }, + }, + resources: { + // This should be ample + limits: { + cpu: "256m", + memory: "512Mi", + }, + }, + securityContext: { + runAsUser: 1000, + runAsGroup: 1000, + }, + } +} diff --git a/core/src/plugins/kubernetes/container/build/kaniko.ts b/core/src/plugins/kubernetes/container/build/kaniko.ts index e853fa1fbe..7584b6fe05 100644 --- a/core/src/plugins/kubernetes/container/build/kaniko.ts +++ b/core/src/plugins/kubernetes/container/build/kaniko.ts @@ -6,30 +6,58 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ -import { V1PodSpec } from "@kubernetes/client-node" +import AsyncLock from "async-lock" +import { V1PodSpec, V1Service } from "@kubernetes/client-node" import { ContainerModule } from "../../../container/config" import { millicpuToString, megabytesToString, makePodName } from "../../util" import { - dockerAuthSecretName, inClusterRegistryHostname, skopeoDaemonContainerName, - gardenUtilDaemonDeploymentName, + buildSyncVolumeName, + dockerAuthSecretKey, + k8sUtilImageName, } from "../../constants" import { KubeApi } from "../../api" import { LogEntry } from "../../../../logger/log-entry" -import { getDockerAuthVolume } from "../../util" import { KubernetesProvider, KubernetesPluginContext, DEFAULT_KANIKO_IMAGE } from "../../config" -import { ConfigurationError } from "../../../../exceptions" +import { BuildError, ConfigurationError } from "../../../../exceptions" import { PodRunner } from "../../run" -import { getRegistryHostname, getKubernetesSystemVariables } from "../../init" +import { getRegistryHostname } from "../../init" import { Writable } from "stream" -import { getSystemNamespace } from "../../namespace" +import { getNamespaceStatus, getSystemNamespace } from "../../namespace" import { dedent } from "../../../../util/string" import { RunResult } from "../../../../types/plugin/base" import { PluginContext } from "../../../../plugin-context" -import { KubernetesPod } from "../../types" -import { BuildStatusHandler, skopeoBuildStatus, getSocatContainer } from "./common" -import { differenceBy } from "lodash" +import { KubernetesDeployment, KubernetesPod, KubernetesResource } from "../../types" +import { + BuildStatusHandler, + skopeoBuildStatus, + getSocatContainer, + BuildHandler, + utilRsyncPort, + syncToBuildSync, + ensureBuilderSecret, + commonSyncArgs, + builderToleration, + getUtilContainer, +} from "./common" +import { cloneDeep, differenceBy, isEmpty } from "lodash" +import chalk from "chalk" +import split2 from "split2" +import { LogLevel } from "../../../../logger/log-node" +import { renderOutputStream, sleep } from "../../../../util/util" +import { getDockerBuildFlags } from "../../../container/build" +import { containerHelpers } from "../../../container/helpers" +import { compareDeployedResources, waitForResources } from "../../status/status" + +export const DEFAULT_KANIKO_FLAGS = ["--cache=true"] + +const utilDeploymentName = "garden-util" +const sharedVolumeName = "comms" +const sharedMountPath = "/.garden" +const contextPath = sharedMountPath + "/context" + +const deployLock = new AsyncLock() export const getKanikoBuildStatus: BuildStatusHandler = async (params) => { const { ctx, module, log } = params @@ -37,11 +65,19 @@ export const getKanikoBuildStatus: BuildStatusHandler = async (params) => { const provider = k8sCtx.provider const api = await KubeApi.factory(log, ctx, provider) - const systemNamespace = await getSystemNamespace(ctx, provider, log) + const namespace = (await getNamespaceStatus({ log, ctx, provider })).namespaceName + + await ensureUtilDeployment({ + ctx, + provider, + log, + api, + namespace, + }) return skopeoBuildStatus({ - namespace: systemNamespace, - deploymentName: gardenUtilDaemonDeploymentName, + namespace, + deploymentName: utilDeploymentName, containerName: skopeoDaemonContainerName, log, api, @@ -51,7 +87,185 @@ export const getKanikoBuildStatus: BuildStatusHandler = async (params) => { }) } -export const DEFAULT_KANIKO_FLAGS = ["--cache=true"] +export const kanikoBuild: BuildHandler = async (params) => { + const { ctx, module, log } 
= params + const provider = ctx.provider + const api = await KubeApi.factory(log, ctx, provider) + + const projectNamespace = (await getNamespaceStatus({ log, ctx, provider })).namespaceName + + const localId = containerHelpers.getLocalImageId(module, module.version) + const deploymentImageId = containerHelpers.getDeploymentImageId( + module, + module.version, + provider.config.deploymentRegistry + ) + const dockerfile = module.spec.dockerfile || "Dockerfile" + + let { authSecret } = await ensureUtilDeployment({ + ctx, + provider, + log, + api, + namespace: projectNamespace, + }) + + await syncToBuildSync({ + ...params, + api, + namespace: projectNamespace, + deploymentName: utilDeploymentName, + rsyncPort: utilRsyncPort, + }) + + log.setState(`Building image ${localId}...`) + + let buildLog = "" + + // Stream debug log to a status line + const stdout = split2() + const statusLine = log.placeholder({ level: LogLevel.verbose }) + + stdout.on("error", () => {}) + stdout.on("data", (line: Buffer) => { + statusLine.setState(renderOutputStream(line.toString())) + }) + + // Use the project namespace if set to null in config + // TODO: change in 0.13 to default to project namespace + let kanikoNamespace = + provider.config.kaniko?.namespace === null ? projectNamespace : provider.config.kaniko?.namespace + + if (!kanikoNamespace) { + kanikoNamespace = await getSystemNamespace(ctx, provider, log) + } + + if (kanikoNamespace !== projectNamespace) { + // Make sure the Kaniko Pod namespace has the auth secret ready + const secretRes = await ensureBuilderSecret({ + provider, + log: log.placeholder(), + api, + namespace: kanikoNamespace, + }) + + authSecret = secretRes.authSecret + } + + // Execute the build + const args = [ + "--context", + "dir://" + contextPath, + "--dockerfile", + dockerfile, + "--destination", + deploymentImageId, + ...getKanikoFlags(module.spec.extraFlags, provider.config.kaniko?.extraFlags), + ] + + if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { + // The in-cluster registry is not exposed, so we don't configure TLS on it. + args.push("--insecure") + } + + args.push(...getDockerBuildFlags(module)) + + const buildRes = await runKaniko({ + ctx, + provider, + log, + kanikoNamespace, + utilNamespace: projectNamespace, + authSecretName: authSecret.metadata.name, + module, + args, + outputStream: stdout, + }) + + buildLog = buildRes.log + + if (kanikoBuildFailed(buildRes)) { + throw new BuildError(`Failed building module ${chalk.bold(module.name)}:\n\n${buildLog}`, { buildLog }) + } + + log.silly(buildLog) + + return { + buildLog, + fetched: false, + fresh: true, + version: module.version.versionString, + } +} + +/** + * Ensures that a garden-util deployment exists in the specified namespace. + * Returns the docker auth secret that's generated and mounted in the deployment. 
+ */ +export async function ensureUtilDeployment({ + ctx, + provider, + log, + api, + namespace, +}: { + ctx: PluginContext + provider: KubernetesProvider + log: LogEntry + api: KubeApi + namespace: string +}) { + return deployLock.acquire(namespace, async () => { + const deployLog = log.placeholder() + + const { authSecret, updated: secretUpdated } = await ensureBuilderSecret({ + provider, + log, + api, + namespace, + }) + + // Check status of the util deployment + const { deployment, service } = getUtilManifests(provider, authSecret.metadata.name) + const status = await compareDeployedResources( + ctx as KubernetesPluginContext, + api, + namespace, + [deployment, service], + deployLog + ) + + if (status.state === "ready") { + // Need to wait a little to ensure the secret is updated in the deployment + if (secretUpdated) { + await sleep(5) + } + return { authSecret, updated: false } + } + + // Deploy the service + deployLog.setState( + chalk.gray(`-> Deploying ${utilDeploymentName} service in ${namespace} namespace (was ${status.state})`) + ) + + await api.upsert({ kind: "Deployment", namespace, log: deployLog, obj: deployment }) + await api.upsert({ kind: "Service", namespace, log: deployLog, obj: service }) + + await waitForResources({ + namespace, + ctx, + provider, + serviceName: "garden-util", + resources: [deployment, service], + log: deployLog, + timeoutSec: 600, + }) + + deployLog.setState({ append: true, msg: "Done!" }) + + return { authSecret, updated: true } + }) +} export const getKanikoFlags = (flags?: string[], topLevelFlags?: string[]): string[] => { if (!flags && !topLevelFlags) { @@ -82,17 +296,21 @@ export function kanikoBuildFailed(buildRes: RunResult) { interface RunKanikoParams { ctx: PluginContext provider: KubernetesProvider - namespace: string + kanikoNamespace: string + utilNamespace: string + authSecretName: string log: LogEntry module: ContainerModule args: string[] outputStream: Writable } -export async function runKaniko({ +async function runKaniko({ ctx, provider, - namespace, + kanikoNamespace, + utilNamespace, + authSecretName, log, module, args, @@ -100,29 +318,26 @@ export async function runKaniko({ }: RunKanikoParams): Promise { const api = await KubeApi.factory(log, ctx, provider) - const podName = makePodName("kaniko", namespace, module.name) + const podName = makePodName("kaniko", module.name) const registryHostname = getRegistryHostname(provider.config) - const k8sSystemVars = getKubernetesSystemVariables(provider.config) - const syncDataVolumeName = k8sSystemVars["sync-volume-name"] - const commsVolumeName = "comms" - const commsMountPath = "/.garden/comms" // Escape the args so that we can safely interpolate them into the kaniko command const argsStr = args.map((arg) => JSON.stringify(arg)).join(" ") let commandStr = dedent` - /kaniko/executor ${argsStr}; - export exitcode=$?; - touch ${commsMountPath}/done; - exit $exitcode; - ` + /kaniko/executor ${argsStr}; + export exitcode=$?; + touch ${sharedMountPath}/done; + exit $exitcode; + ` + if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { // This may seem kind of insane but we have to wait until the socat proxy is up (because Kaniko immediately tries to // reach the registry we plan on pushing to). See the support container in the Pod spec below for more on this // hackery. 
commandStr = dedent` while true; do - if ls ${commsMountPath}/socatStarted 2> /dev/null; then + if ls ${sharedMountPath}/socatStarted 2> /dev/null; then ${commandStr} else sleep 0.3; @@ -132,23 +347,58 @@ export async function runKaniko({ } const kanikoImage = provider.config.kaniko?.image || DEFAULT_KANIKO_IMAGE + const utilHostname = `${utilDeploymentName}.${utilNamespace}.svc.cluster.local` + const sourceUrl = `rsync://${utilHostname}:${utilRsyncPort}/volume/${ctx.workingCopyId}/${module.name}/` + + const syncArgs = [...commonSyncArgs, sourceUrl, contextPath] const spec: V1PodSpec = { shareProcessNamespace: true, volumes: [ - // Mount the build sync volume, to get the build context from. + // Mount the docker auth secret, so Kaniko can pull from private registries. { - name: syncDataVolumeName, - persistentVolumeClaim: { claimName: syncDataVolumeName }, + name: authSecretName, + secret: { + secretName: authSecretName, + items: [{ key: dockerAuthSecretKey, path: "config.json" }], + }, }, - // Mount the docker auth secret, so Kaniko can pull from private registries. - getDockerAuthVolume(), // Mount a volume to communicate between the containers in the Pod. { - name: commsVolumeName, + name: sharedVolumeName, emptyDir: {}, }, ], + // Start by rsyncing the build context from the util deployment + initContainers: [ + { + name: "init", + image: k8sUtilImageName, + command: [ + "/bin/sh", + "-c", + dedent` + echo "Copying from ${sourceUrl} to ${contextPath}" + mkdir -p ${contextPath} + n=0 + until [ "$n" -ge 30 ] + do + rsync ${syncArgs.join(" ")} && break + n=$((n+1)) + sleep 1 + done + echo "Done!" + `, + ], + imagePullPolicy: "IfNotPresent", + volumeMounts: [ + { + name: sharedVolumeName, + mountPath: sharedMountPath, + }, + ], + }, + ], containers: [ { name: "kaniko", @@ -156,17 +406,13 @@ export async function runKaniko({ command: ["sh", "-c", commandStr], volumeMounts: [ { - name: syncDataVolumeName, - mountPath: "/garden-build", - }, - { - name: dockerAuthSecretName, + name: authSecretName, mountPath: "/kaniko/.docker", readOnly: true, }, { - name: commsVolumeName, - mountPath: commsMountPath, + name: sharedVolumeName, + mountPath: sharedMountPath, }, ], resources: { @@ -198,14 +444,14 @@ export async function runKaniko({ dedent` while true; do if pidof socat 2> /dev/null; then - touch ${commsMountPath}/socatStarted; + touch ${sharedMountPath}/socatStarted; break; else sleep 0.3; fi done while true; do - if ls ${commsMountPath}/done 2> /dev/null; then + if ls ${sharedMountPath}/done 2> /dev/null; then killall socat; exit 0; else sleep 0.3; @@ -215,8 +461,8 @@ export async function runKaniko({ ], volumeMounts: [ { - name: commsVolumeName, - mountPath: commsMountPath, + name: sharedVolumeName, + mountPath: sharedMountPath, }, ], }, @@ -228,22 +474,27 @@ export async function runKaniko({ kind: "Pod", metadata: { name: podName, - namespace, + namespace: kanikoNamespace, }, spec, } + // Set the configured nodeSelector, if any + if (!isEmpty(provider.config.kaniko?.nodeSelector)) { + pod.spec.nodeSelector = provider.config.kaniko?.nodeSelector + } + const runner = new PodRunner({ ctx, api, pod, provider, - namespace, + namespace: kanikoNamespace, }) const result = await runner.runAndWait({ log, - remove: true, + remove: false, // TODO timeoutSec: module.spec.build.timeout, stdout: outputStream, tty: false, @@ -255,3 +506,88 @@ export async function runKaniko({ version: module.version.versionString, } } + +export function getUtilManifests(provider: KubernetesProvider, authSecretName: string) 
{ + const deployment: KubernetesDeployment = { + apiVersion: "apps/v1", + kind: "Deployment", + metadata: { + labels: { + app: utilDeploymentName, + }, + name: utilDeploymentName, + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: utilDeploymentName, + }, + }, + template: { + metadata: { + labels: { + app: utilDeploymentName, + }, + }, + spec: { + containers: [getUtilContainer(authSecretName)], + volumes: [ + { + name: authSecretName, + secret: { + secretName: authSecretName, + items: [ + { + key: dockerAuthSecretKey, + path: "config.json", + }, + ], + }, + }, + { + name: buildSyncVolumeName, + emptyDir: {}, + }, + ], + tolerations: [builderToleration], + }, + }, + }, + } + + const service = cloneDeep(baseUtilService) + + // We need a proxy sidecar to be able to reach the in-cluster registry from the Pod + const registryHostname = getRegistryHostname(provider.config) + deployment.spec!.template.spec!.containers.push(getSocatContainer(registryHostname)) + + // Set the configured nodeSelector, if any + if (!isEmpty(provider.config.kaniko?.nodeSelector)) { + deployment.spec!.template.spec!.nodeSelector = provider.config.kaniko?.nodeSelector + } + + return { deployment, service } +} + +const baseUtilService: KubernetesResource = { + apiVersion: "v1", + kind: "Service", + metadata: { + name: utilDeploymentName, + }, + spec: { + ports: [ + { + name: "rsync", + protocol: "TCP", + port: utilRsyncPort, + targetPort: utilRsyncPort, + }, + ], + selector: { + app: utilDeploymentName, + }, + type: "ClusterIP", + }, +} diff --git a/core/src/plugins/kubernetes/init.ts b/core/src/plugins/kubernetes/init.ts index 53e8c06104..42c1b1feda 100644 --- a/core/src/plugins/kubernetes/init.ts +++ b/core/src/plugins/kubernetes/init.ts @@ -32,7 +32,7 @@ import { import { ConfigurationError } from "../../exceptions" import Bluebird from "bluebird" import { readSecret } from "./secrets" -import { dockerAuthSecretName, dockerAuthSecretKey } from "./constants" +import { systemDockerAuthSecretName, dockerAuthSecretKey } from "./constants" import { V1Secret, V1Toleration } from "@kubernetes/client-node" import { KubernetesResource } from "./types" import { compareDeployedResources } from "./status/status" @@ -504,7 +504,7 @@ export async function prepareDockerAuth( apiVersion: "v1", kind: "Secret", metadata: { - name: dockerAuthSecretName, + name: systemDockerAuthSecretName, namespace, }, data: { diff --git a/core/src/plugins/kubernetes/kubernetes.ts b/core/src/plugins/kubernetes/kubernetes.ts index afaaf8a0ae..69303fb5e5 100644 --- a/core/src/plugins/kubernetes/kubernetes.ts +++ b/core/src/plugins/kubernetes/kubernetes.ts @@ -72,10 +72,6 @@ export async function configureProvider({ // TODO: clean this up, this is getting confusing here if (buildMode !== "local-docker") { - if (buildMode !== "cluster-buildkit") { - config._systemServices.push("build-sync", "util") - } - const usingInClusterRegistry = !config.deploymentRegistry || config.deploymentRegistry.hostname === inClusterRegistryHostname @@ -92,20 +88,16 @@ export async function configureProvider({ config._systemServices.push("docker-registry", "registry-proxy") } - if (buildMode !== "cluster-buildkit" && (!usingInClusterRegistry || buildMode === "kaniko")) { - // If using an external registry and kaniko or cluster-docker, we need the util service - // Also the kaniko buildMode needs the util service even if using an in-cluster registry - config._systemServices.push("util") - } - if (buildMode === "cluster-docker") { - 
config._systemServices.push("docker-daemon") + config._systemServices.push("build-sync", "util", "docker-daemon") + + // Set up an NFS provisioner if the user doesn't explicitly set a storage class for the shared sync volume + if (!config.storage.sync.storageClass) { + config._systemServices.push("nfs-provisioner") + } } - // Set up an NFS provisioner if not using cluster-buildkit, and the user doesn't explicitly set a storage class for - // the shared sync volume if (buildMode !== "cluster-buildkit" && !config.storage.sync.storageClass) { - config._systemServices.push("nfs-provisioner") } } else if (config.name !== "local-kubernetes" && !config.deploymentRegistry) { throw new ConfigurationError(`kubernetes: must specify deploymentRegistry in config if using local build mode`, { diff --git a/core/src/plugins/kubernetes/util.ts b/core/src/plugins/kubernetes/util.ts index 67cf3ecb29..4009693b3b 100644 --- a/core/src/plugins/kubernetes/util.ts +++ b/core/src/plugins/kubernetes/util.ts @@ -17,7 +17,7 @@ import { KubernetesResource, KubernetesWorkload, KubernetesPod, KubernetesServer import { splitLast, serializeValues, findByName } from "../../util/util" import { KubeApi, KubernetesError } from "./api" import { gardenAnnotationKey, base64, deline, stableStringify } from "../../util/string" -import { MAX_CONFIGMAP_DATA_SIZE, dockerAuthSecretName, dockerAuthSecretKey } from "./constants" +import { MAX_CONFIGMAP_DATA_SIZE, systemDockerAuthSecretName } from "./constants" import { ContainerEnvVars } from "../container/config" import { ConfigurationError, PluginError } from "../../exceptions" import { ServiceResourceSpec, KubernetesProvider } from "./config" @@ -599,19 +599,6 @@ export function makePodName(type: string, ...parts: string[]) { return id.slice(0, maxPodNamePrefixLength) + "-" + hash.slice(0, podNameHashLength) } -/** - * Gets the Docker auth volume details to be mounted into a container. - */ -export function getDockerAuthVolume() { - return { - name: dockerAuthSecretName, - secret: { - secretName: dockerAuthSecretName, - items: [{ key: dockerAuthSecretKey, path: "config.json" }], - }, - } -} - /** * Creates a skopeo container configuration to be execued by a PodRunner. 
* @@ -624,7 +611,7 @@ export function getSkopeoContainer(command: string) { command: ["sh", "-c", command], volumeMounts: [ { - name: dockerAuthSecretName, + name: systemDockerAuthSecretName, mountPath: "/root/.docker", readOnly: true, }, diff --git a/core/test/data/test-projects/container/garden.yml b/core/test/data/test-projects/container/garden.yml index cf7e5e8e4a..04b2797e16 100644 --- a/core/test/data/test-projects/container/garden.yml +++ b/core/test/data/test-projects/container/garden.yml @@ -8,6 +8,7 @@ environments: - name: cluster-docker-auth - name: cluster-docker-remote-registry - name: kaniko + - name: kaniko-project-namespace - name: kaniko-image-override - name: kaniko-remote-registry - name: cluster-buildkit @@ -41,6 +42,11 @@ providers: - <<: *clusterDocker environments: [kaniko] buildMode: kaniko + - <<: *clusterDocker + environments: [kaniko-project-namespace] + buildMode: kaniko + kaniko: + namespace: null - <<: *clusterDocker environments: [kaniko-remote-registry] buildMode: kaniko diff --git a/core/test/integ/src/plugins/kubernetes/container/build/build.ts b/core/test/integ/src/plugins/kubernetes/container/build/build.ts index 388244d1c7..4fac18dbf6 100644 --- a/core/test/integ/src/plugins/kubernetes/container/build/build.ts +++ b/core/test/integ/src/plugins/kubernetes/container/build/build.ts @@ -24,6 +24,7 @@ import { getSystemNamespace } from "../../../../../../../src/plugins/kubernetes/ import { getDockerDaemonPodRunner } from "../../../../../../../src/plugins/kubernetes/container/build/cluster-docker" import { k8sPublishContainerModule } from "../../../../../../../src/plugins/kubernetes/container/publish" import { LogEntry } from "../../../../../../../src/logger/log-entry" +import { cloneDeep } from "lodash" describe("kubernetes build flow", () => { let garden: Garden @@ -53,7 +54,7 @@ describe("kubernetes build flow", () => { } async function buildImage(moduleName: string) { - const module = graph.getModule(moduleName) + const module = cloneDeep(graph.getModule(moduleName)) const key = `${currentEnv}.${module.name}.${module.version.versionString}` if (builtImages[key]) { @@ -112,7 +113,7 @@ describe("kubernetes build flow", () => { log, ctx, }) - buildImage[`${currentEnv}.${module.name}.${module.version.versionString}`] = false + builtImages[`${currentEnv}.${module.name}.${module.version.versionString}`] = false const status = await k8sGetContainerBuildStatus({ ctx, @@ -261,7 +262,7 @@ describe("kubernetes build flow", () => { }) it("should return ready=false status when image doesn't exist in registry", async () => { - const module = graph.getModule("remote-registry-test") + const module = cloneDeep(graph.getModule("remote-registry-test")) await garden.buildStaging.syncFromSrc(module, garden.log) module.version.versionString = "v-0000000000" @@ -403,6 +404,28 @@ describe("kubernetes build flow", () => { }) }) + grouped("kaniko", "remote-only").context("kaniko-project-namespace mode", () => { + before(async () => { + await init("kaniko-project-namespace") + }) + + it("should build a simple container", async () => { + await buildImage("simple-service") + }) + + it("should get the build status from the registry", async () => { + const module = await buildImage("simple-service") + + const status = await k8sGetContainerBuildStatus({ + ctx, + log, + module, + }) + + expect(status.ready).to.be.true + }) + }) + grouped("kaniko", "remote-only").context("kaniko-remote-registry mode", () => { before(async () => { await init("kaniko-remote-registry") @@ -425,7 +448,7 @@ 
describe("kubernetes build flow", () => { }) it("should return ready=false status when image doesn't exist in registry", async () => { - const module = graph.getModule("remote-registry-test") + const module = cloneDeep(graph.getModule("remote-registry-test")) await garden.buildStaging.syncFromSrc(module, garden.log) module.version.versionString = "v-0000000000" @@ -615,7 +638,7 @@ describe("kubernetes build flow", () => { }) it("should return ready=false status when image doesn't exist in registry", async () => { - const module = graph.getModule("remote-registry-test") + const module = cloneDeep(graph.getModule("remote-registry-test")) await garden.buildStaging.syncFromSrc(module, garden.log) module.version.versionString = "v-0000000000" diff --git a/core/test/integ/src/plugins/kubernetes/container/build/buildkit.ts b/core/test/integ/src/plugins/kubernetes/container/build/buildkit.ts index eb10958c48..e2059732a6 100644 --- a/core/test/integ/src/plugins/kubernetes/container/build/buildkit.ts +++ b/core/test/integ/src/plugins/kubernetes/container/build/buildkit.ts @@ -13,7 +13,6 @@ import { PluginContext } from "../../../../../../../src/plugin-context" import { ensureBuildkit, buildkitDeploymentName, - buildkitAuthSecretName, } from "../../../../../../../src/plugins/kubernetes/container/build/buildkit" import { KubeApi } from "../../../../../../../src/plugins/kubernetes/api" import { getNamespaceStatus } from "../../../../../../../src/plugins/kubernetes/namespace" @@ -53,7 +52,7 @@ describe("ensureBuildkit", () => { await api.apps.deleteNamespacedDeployment(buildkitDeploymentName, namespace) } catch {} - const deployed = await ensureBuildkit({ + const { updated } = await ensureBuildkit({ ctx, provider, log: garden.log, @@ -64,7 +63,7 @@ describe("ensureBuildkit", () => { // Make sure deployment is there const deployment = await api.apps.readNamespacedDeployment(buildkitDeploymentName, namespace) - expect(deployed).to.be.true + expect(updated).to.be.true expect(deployment.spec.template.spec?.tolerations).to.eql([ { key: "garden-build", @@ -99,21 +98,21 @@ describe("ensureBuildkit", () => { }) it("creates a docker auth secret from configured imagePullSecrets", async () => { - await ensureBuildkit({ + const { authSecret } = await ensureBuildkit({ ctx, provider, log: garden.log, api, namespace, }) - await api.core.readNamespacedSecret(buildkitAuthSecretName, namespace) + await api.core.readNamespacedSecret(authSecret.metadata.name, namespace) }) it("creates an empty docker auth secret if there are no imagePullSecrets", async () => { const _provider = cloneDeep(provider) _provider.config.imagePullSecrets = [] - await ensureBuildkit({ + const { authSecret } = await ensureBuildkit({ ctx, provider: _provider, log: garden.log, @@ -121,7 +120,7 @@ describe("ensureBuildkit", () => { namespace, }) - const secret = await api.core.readNamespacedSecret(buildkitAuthSecretName, namespace) + const secret = await api.core.readNamespacedSecret(authSecret.metadata.name, namespace) const expectedConfig = await buildDockerAuthConfig([], api) const decoded = JSON.parse(Buffer.from(secret.data![dockerAuthSecretKey], "base64").toString()) @@ -136,14 +135,14 @@ describe("ensureBuildkit", () => { api, namespace, }) - const deployed = await ensureBuildkit({ + const { updated } = await ensureBuildkit({ ctx, provider, log: garden.log, api, namespace, }) - expect(deployed).to.be.false + expect(updated).to.be.false }) }) @@ -179,14 +178,14 @@ describe("ensureBuildkit", () => { provider.config.clusterBuildkit = { rootless: 
true } - const deployed = await ensureBuildkit({ + const { updated } = await ensureBuildkit({ ctx, provider, log: garden.log, api, namespace, }) - expect(deployed).to.be.true + expect(updated).to.be.true }) }) }) diff --git a/docs/advanced/terraform.md b/docs/advanced/terraform.md index 3f7292453d..590e5af6fc 100644 --- a/docs/advanced/terraform.md +++ b/docs/advanced/terraform.md @@ -47,7 +47,7 @@ providers: kubeconfig: ${providers.terraform.outputs.kubeconfig_path} context: gke defaultHostname: terraform-gke-${local.username}.dev-2.sys.garden - buildMode: cluster-docker + buildMode: kaniko ``` The `initRoot` parameter tells Garden that there is a Terraform working directory at the specified path. If you don't specify this, Garden doesn't attempt to apply a stack when initializing the provider. diff --git a/docs/guides/cloud-provider-setup.md b/docs/guides/cloud-provider-setup.md index c19d5aaaa4..e31579ca16 100644 --- a/docs/guides/cloud-provider-setup.md +++ b/docs/guides/cloud-provider-setup.md @@ -44,7 +44,7 @@ name: your-project - name: kubernetes context: defaultHostname: your-project.yourdomain.com # <- replace this with your intended ingress hostname - buildMode: cluster-docker # <- (optional) enable in-cluster building + buildMode: kaniko # <- (optional) enable in-cluster building setupIngressController: nginx # <- skip this if you want to install your own ingress controller ``` @@ -98,7 +98,7 @@ environments: - name: kubernetes context: defaultHostname: your-project.yourdomain.com # <- replace this with your intended ingress hostname - buildMode: cluster-docker # <- (optional) enable in-cluster building + buildMode: kaniko # <- (optional) enable in-cluster building setupIngressController: nginx # <- skip this if you want to install your own ingress controller - name: some-other-environment ... @@ -133,7 +133,7 @@ environments: - name: kubernetes context: defaultHostname: your-project.yourdomain.com # <- replace this with your intended ingress hostname - buildMode: cluster-docker # <- (optional) enable in-cluster building + buildMode: kaniko # <- (optional) enable in-cluster building setupIngressController: nginx # <- skip this if you want to install your own ingress controller - name: some-other-environment ... @@ -172,7 +172,7 @@ environments: - name: kubernetes context: defaultHostname: your-project.yourdomain.com # <- replace this with your intended ingress hostname - buildMode: cluster-docker # <- (optional) enable in-cluster building + buildMode: kaniko # <- (optional) enable in-cluster building setupIngressController: nginx # <- skip this if you want to install your own ingress controller ... ``` diff --git a/docs/guides/in-cluster-building.md b/docs/guides/in-cluster-building.md index f678c42084..59dd3a31e6 100644 --- a/docs/guides/in-cluster-building.md +++ b/docs/guides/in-cluster-building.md @@ -9,6 +9,37 @@ to set it up. This guide assumes you've already read through the [Remote Kubernetes](./remote-kubernetes.md) guide. +## tl;dr + +If in doubt, use the following setup for builds: + +- [**`kaniko`**](#kaniko) build mode, which works well for most scenarios. +- Use the project namespace for build pods. +- [Connect a remote deployment registry](#Configuring-a-deployment-registry) to use for built images. _Note: You can also skip this and use the included in-cluster registry while testing, but be aware that you may hit scaling issues as you go._ + +Here's a basic configuration example: + +```yaml +kind: Project +name: my-project +... 
+providers: + - name: kubernetes + # Use the kaniko build mode + buildMode: kaniko + kaniko: + namespace: null # <--- use the project namespace for builds + # Recommended: Configure a remote registry + deploymentRegistry: + hostname: my-private-registry.com # <--- the hostname of your registry + namespace: my-project # <--- the namespace to use within your registry + imagePullSecrets: + - name: my-deployment-registry-secret # <--- the name and namespace of a valid Kubernetes imagePullSecret + namespace: default +``` + +The only tricky bit would be connecting the remote registry, so we suggest reading more about that [below](#Configuring-a-deployment-registry). + ## Security considerations First off, you should only use in-cluster building in development and testing clusters! Production clusters should not run the builder services for multiple reasons, both to do with resource and security concerns. @@ -23,7 +54,7 @@ The specific requirements vary by the [_build mode_](#build-modes) used, and whe In all cases you'll need at least 2GB of RAM _on top of your own service requirements_. More RAM is strongly recommended if you have many concurrent developers or CI builds. -For the [`cluster-docker`](#cluster-docker) and [`kaniko`](#kaniko) modes, and the (optional) in-cluster image registry, support for `PersistentVolumeClaim`s is required, with enough disk space for layer caches and built images. The in-cluster registry also requires support for `hostPort`, and for reaching `hostPort`s from the node/Kubelet. This should work out-of-the-box in most standard setups, but clusters using Cilium for networking may need to configure this specifically, for example. +For the [`cluster-docker`](#cluster-docker) mode, and the (optional) in-cluster image registry, support for `PersistentVolumeClaim`s is required, with enough disk space for layer caches and built images. The in-cluster registry also requires support for `hostPort`, and for reaching `hostPort`s from the node/Kubelet. This should work out-of-the-box in most standard setups, but clusters using Cilium for networking may need to configure this specifically, for example. You can—_and should_—adjust the allocated resources and storage in the provider configuration, under [resources](../reference/providers/kubernetes.md#providersresources) and @@ -36,8 +67,8 @@ We also strongly recommend a separate image registry to use for built images. Ga Garden supports multiple methods for building images and making them available to the cluster: -1. [**`kaniko`**](#kaniko) — Individual [Kaniko](https://github.com/GoogleContainerTools/kaniko) pods created for each build in the `garden-system` namespace. -2. [**`cluster-buildkit`**](#cluster-buildkit) _(experimental)_— A [BuildKit](https://github.com/moby/buildkit) deployment created for each project namespace. +1. [**`kaniko`**](#kaniko) — Individual [Kaniko](https://github.com/GoogleContainerTools/kaniko) pods created for each build. +2. [**`cluster-buildkit`**](#cluster-buildkit) — A [BuildKit](https://github.com/moby/buildkit) deployment created for each project namespace. 3. [**`cluster-docker`**](#cluster-docker) — A single Docker daemon installed in the `garden-system` namespace and shared between users/deployments. 4. `local-docker` — Build using the local Docker daemon on the developer/CI machine before pushing to the cluster/registry. 
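+
+If you want to try more than one of these, note that you can select a different `buildMode` per environment by scoping multiple `kubernetes` provider entries, roughly like this sketch (the environment names and context value are placeholders):
+
+```yaml
+kind: Project
+name: my-project
+environments:
+  - name: dev
+  - name: ci
+providers:
+  - name: kubernetes
+    environments: [dev]
+    context: my-dev-cluster # <--- replace with your kubectl context
+    buildMode: cluster-buildkit
+  - name: kubernetes
+    environments: [ci]
+    context: my-dev-cluster # <--- replace with your kubectl context
+    buildMode: kaniko
+```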
@@ -47,9 +78,11 @@ The other modes—which are why you're reading this guide—all build your image
 
 The remote building options each have some pros and cons. You'll find more details below but **here are our general recommendations** at the moment:
 
-- [**`kaniko`**](#kaniko) is a solid choice for most cases and is _currently our first recommendation_. It is battle-tested among Garden's most demanding users (including the Garden team itself). It also scales horizontally, since individual Pods are created for each build.
-- [**`cluster-buildkit`**](#cluster-buildkit) is a new addition and is for now considered experimental, **but** we are hoping to make that the default in the future. Unlike the other options, which deploy cluster-wide services in the `garden-system` namespace, a [BuildKit](https://github.com/moby/buildkit) Deployment is dynamically created in each project namespace and requires no other cluster-wide services. This mode also offers a _rootless_ option, which runs without any elevated privileges, in clusters that support it.
-- [**`cluster-docker`**](#cluster-docker) was the first implementation included with Garden. It's pretty quick and efficient for small team setups, but relies on a single Docker daemon for all users of a cluster, and also requires supporting services in `garden-system` and some operations to keep it from filling its data volume. It is *no longer recommended* and we may deprecate it in future releases.
+- [**`kaniko`**](#kaniko) is a solid choice for most cases and is _currently our first recommendation_. It is battle-tested among Garden's most demanding users (including the Garden team itself). It also scales horizontally and elastically, since individual Pods are created for each build. It doesn't require privileged containers to run and requires no shared cluster-wide services.
+- [**`cluster-buildkit`**](#cluster-buildkit) is a new addition and is meant to replace the `cluster-docker` mode. Unlike the `cluster-docker` mode, which deploys cluster-wide services in the `garden-system` namespace, a [BuildKit](https://github.com/moby/buildkit) Deployment is dynamically created in each project namespace and, much like Kaniko, requires no other cluster-wide services. This mode also offers a _rootless_ option, which runs without any elevated privileges, in clusters that support it.
+- [**`cluster-docker`**](#cluster-docker) was the first implementation included with Garden. It's pretty quick and efficient for small team setups, but relies on a single Docker daemon for all users of a cluster, and also requires supporting services in `garden-system` and some operations to keep it from filling its data volume. It is **no longer recommended** and we may remove it in future releases.
+
+Generally we recommend picking either `kaniko` or `cluster-buildkit`, based on your usage patterns and scalability requirements. For ephemeral namespaces, `kaniko` is generally the better option, since the persistent BuildKit deployment won't have a warm cache anyway. For long-lived namespaces, like the ones a developer uses while working, `cluster-buildkit` may be a more performant option.
 
 Let's look at how each mode works in more detail, and how you configure them:
 
@@ -57,25 +90,23 @@
 
 This mode uses an individual [Kaniko](https://github.com/GoogleContainerTools/kaniko) Pod for each image build.
-The Kaniko project provides a compelling alternative to the standard Docker daemon because it can run without special privileges on the cluster, and is thus more secure. It may also scale better because it doesn't rely on a single daemon shared across users, so builds are executed in individual Pods and don't share the same resources of a single Pod. This also removes the need to provision another persistent volume, which the Docker daemon needs for its layer cache. +The Kaniko project provides a compelling alternative to a Docker daemon because it can run without special privileges on the cluster, and is thus more secure. It also scales better because it doesn't rely on a single daemon shared across multiple users and/or builds; builds are executed in individual Pods and thus scale horizontally and elastically. In this mode, builds are executed as follows: -1. Your code (build context) is synchronized to a sync service in the cluster, making it available to Kaniko pods. -2. A Kaniko pod is created for the build in the `garden-system` namespace. +1. Your code (build context) is synchronized to a sync service in the cluster, which holds a cache of the build context, so that each change can be uploaded quickly. +2. A Kaniko pod is created, which pulls the build context from the sync service, and performs the build. 3. Kaniko pulls caches from the [deployment registry](#configuring-a-deployment-registry), builds the image, and then pushes the built image back to the registry, which makes it available to the cluster. -#### Comparison - -The trade-off compared to the [`cluster-docker`](#cluster-docker) is generally in performance, partly because it relies only on the Docker registry to cache layers, and has no local cache. There are also some occasional issues and incompatibilities, so your mileage may vary. - -Compared to [`cluster-buildkit`](#cluster-buildkit), Kaniko may be a bit slower because it has no local cache. It also requires cluster-wide services to be installed and operated, and for each user to have access to those services in the `garden-system` namespace, which can be a problem in some environments. It is however currently considered more "battle-tested", since the [`cluster-buildkit`](#cluster-buildkit) mode is a recent addition. - #### Configuration and requirements -Enable this by setting `buildMode: kaniko` in your `kubernetes` provider configuration, and running `garden plugins kubernetes cluster-init --env=` to install required cluster-wide service. +{% hint style="info" %} +As of Garden v0.12.22, the `kaniko` build mode no longer requires shared system services or an NFS provisioner, nor running `cluster-init` ahead of usage. +{% endhint %} -By default, Garden will install an NFS volume provisioner into `garden-system` in order to be able to efficiently synchronize build sources to the cluster and then attaching those to the Kaniko pods. You can also [specify a storageClass](../reference/providers/kubernetes.md#providersstoragesyncstorageclass) to provide another _ReadWriteMany_ capable storage class to use instead of NFS. This may be advisable if your cloud provider provides a good alternative, or if you already have such a provisioner installed. +Enable this by setting `buildMode: kaniko` in your `kubernetes` provider configuration. 
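+
+For reference, a minimal provider configuration for this mode might look something like the following sketch (the context value is a placeholder for your own cluster):
+
+```yaml
+providers:
+  - name: kubernetes
+    context: my-dev-cluster # <--- replace with your kubectl context
+    buildMode: kaniko
+```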
+
+_As of Garden v0.12.22, we also recommend setting `kaniko.namespace: null` in the `kubernetes` provider configuration, so that builder pods are started in the project namespace instead of the `garden-system` namespace, which is the current default. This will become the default in Garden v0.13._
 
 Note the difference in how resources for the builder are allocated between Kaniko and the other modes. For this mode, the resource configuration applies to _each Kaniko pod_. See the [builder resources](../reference/providers/kubernetes.md#providersresourcesbuilder) reference for details.
 
@@ -91,40 +122,38 @@ This does not appear to be an issue for GCR on GCP. We haven't tested this on ot
 You can provide extra arguments to Kaniko via the [`extraFlags`](../reference/providers/kubernetes.md#providerskanikoextraFlags) field. Users whose projects contain a large number of files should take a look at the `--snapshotMode=redo` and `--use-new-run` options, as these can provide [significant performance improvements](https://github.com/GoogleContainerTools/kaniko/releases/tag/v1.0.0). Please refer to the [official docs](https://github.com/GoogleContainerTools/kaniko#additional-flags) for the full list of available flags.
 
+The Kaniko pods will always have the following toleration set:
+
+```yaml
+key: "garden-build",
+operator: "Equal",
+value: "true",
+effect: "NoSchedule"
+```
+
+This allows you to set corresponding [Taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) on cluster nodes to control which nodes the builder pods are deployed to. You can also configure a [`nodeSelector`](../reference/providers/kubernetes.md#providerskanikonodeSelector) to serve the same purpose.
+
 ### cluster-buildkit
 
 With this mode, a [BuildKit](https://github.com/moby/buildkit) Deployment is dynamically created in each project namespace to perform in-cluster builds.
 
+Much like [`kaniko`](#kaniko) (and unlike [`cluster-docker`](#cluster-docker)), this mode requires no cluster-wide services or permissions to be managed, and thus no permissions outside of a single namespace for each user/project.
+
 In this mode, builds are executed as follows:
 
 1. BuildKit is automatically deployed to the project namespace, if it hasn't already been deployed there.
 2. Your code (build context) is synchronized directly to the BuildKit deployment.
 3. BuildKit imports caches from the [deployment registry](#configuring-a-deployment-registry), builds the image, and then pushes the built image and caches back to the registry.
 
-#### Comparison
-
-_This mode is a recent addition and is still considered experimental_. **However**, the general plan is for this to become the recommended approach, because it has several benefits compared to the alternatives.
-
-- It requires **no cluster-wide services or permissions** to be managed, and thus no permissions outside of a single namespace for each user/project.
-- By extension, operators/users **don't need to run a cluster initialization command** ahead of building and deploying projects. The BuildKit deployment is automatically installed and updated ahead of builds, as needed.
-- It **does not rely on persistent volumes**. Other modes need to either install an NFS provisioner, or for a ReadWriteMany storage class to be provided and configured by the user.
-- BuildKit offers a [rootless](https://github.com/moby/buildkit/blob/master/docs/rootless.md) mode (see below for how to enable it and some caveats).
If it's supported on your cluster, this coupled with the per-namespace isolation, makes `cluster-buildkit` by far the most secure option.
-- BuildKit is a very efficient builder, and uses a combination of local and registry-based caching, so it **should perform better than [`kaniko`](#kaniko)** in most cases, and for long-running namespaces as good as [`cluster-docker`](#cluster-docker).
-
-Beyond being less tested in the wild (for the moment), there are a couple of drawbacks to consider:
-
-- It doesn't scale quite as horizontally as Kaniko, since there is a single deployment per each project namespace, instead of a pod for every single build.
-- The local cache is ephemeral, and local to each project namespace. This means users only share a cache at the registry level, much like with Kaniko. The [`cluster-docker`](#cluster-docker) daemon has a persistent local cache that is shared across a cluster (but in turn needs to be maintained and [cleaned up](#cleaning-up-cached-images)). The effect of this is most pronounced for short-lived namespaces, e.g. ones created in CI runs, where the local cache won't exist ahead of the builds.
-
 #### Configuration and requirements
 
-Enable this mode by setting `buildMode: cluster-buildkit` in your `kubernetes` provider configuration. Unlike other remote building modes, no further cluster-wide installation or initialization is required.
+Enable this mode by setting `buildMode: cluster-buildkit` in your `kubernetes` provider configuration.
 
 In order to enable [rootless](https://github.com/moby/buildkit/blob/master/docs/rootless.md) mode, add the following to your `kubernetes` provider configuration:
 
 ```yaml
 clusterBuildkit:
-  rootless: false
+  rootless: true
 ```
 
 *Note that not all clusters can currently support rootless operation, and that you may need to configure your cluster with this in mind. Please see the [BuildKit docs](https://github.com/moby/buildkit/blob/master/docs/rootless.md) for details.*
@@ -140,28 +169,22 @@
 value: "true",
 effect: "NoSchedule"
 ```
 
-This allows you to set corresponding [Taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) on cluster nodes to control which nodes builder deployments are deployed to.
+This allows you to set corresponding [Taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) on cluster nodes to control which nodes builder deployments are deployed to. You can also configure a [`nodeSelector`](../reference/providers/kubernetes.md#providersclusterbuildkitnodeselector) to serve the same purpose.
 
 ### cluster-docker
 
-The `cluster-docker` mode installs a standalone Docker daemon into your cluster, that is then used for builds across all users of the clusters, along with a handful of other supporting services.
-
 {% hint style="warning" %}
-The `cluster-docker` build mode may be deprecated in an upcoming release.
+The `cluster-docker` build mode may be deprecated and removed in an upcoming release. Please consider `kaniko` or `cluster-buildkit` instead.
 {% endhint %}
 
+The `cluster-docker` mode installs a standalone Docker daemon into your cluster, which is then used for builds across all users of the cluster, along with a handful of other supporting services.
+
 In this mode, builds are executed as follows:
 
 1. Your code (build context) is synchronized to a sync service in the cluster, making it available to the Docker daemon.
 2. A build is triggered in the Docker daemon.
 3.
The built image is pushed to the [deployment registry](#configuring-a-deployment-registry), which makes it available to the cluster. -#### Comparison - -The Docker daemon is of course tried and tested, and is an efficient builder. However, it's not designed with multi-tenancy and is a slightly awkward fit for the context of building images in a shared cluster. It also requires a fair bit of operation and several supporting services deployed along-side it in the `garden-system`  namespace. - -*As of now, we only recommend this option for certain scenarios, e.g. clusters serving individuals, small teams or other low-load setups.* - #### Configuration and requirements Enable this mode by setting `buildMode: cluster-docker` in your `kubernetes` provider configuration. @@ -174,7 +197,7 @@ Optionally, you can also enable [BuildKit](https://github.com/moby/buildkit) to ```yaml clusterDocker: - enableBuildKit: false + enableBuildKit: true ``` Make sure your cluster has enough resources and storage to support the required services, and keep in mind that these diff --git a/docs/guides/using-garden-in-ci.md b/docs/guides/using-garden-in-ci.md index 6d32f99da7..1dab64e9fa 100644 --- a/docs/guides/using-garden-in-ci.md +++ b/docs/guides/using-garden-in-ci.md @@ -50,7 +50,7 @@ providers: environments: [preview] context: my-preview-cluster defaultHostname: ${environment.namespace}.preview.my-domain - buildMode: cluster-docker + buildMode: kaniko ``` Notice that we're using the `CIRCLE_BRANCH` environment variable to label the project namespace. This ensures that each pull request gets deployed into its own namespace. diff --git a/docs/reference/providers/kubernetes.md b/docs/reference/providers/kubernetes.md index e1ca748ca3..4cfdf61a78 100644 --- a/docs/reference/providers/kubernetes.md +++ b/docs/reference/providers/kubernetes.md @@ -65,12 +65,25 @@ providers: # Configuration options for the `kaniko` build mode. kaniko: + # Specify extra flags to use when building the container image with kaniko. Flags set on `container` modules + # take precedence over these. + extraFlags: + # Change the kaniko image (repository/image:tag) to use when building in kaniko mode. - image: 'gcr.io/kaniko-project/executor:debug-v1.2.0' + image: 'gcr.io/kaniko-project/executor:v1.6.0-debug' - # Specify extra flags to use when building the container image with kaniko. Flags set on container module take - # precedence over these. - extraFlags: + # Choose the namespace where the Kaniko pods will be run. Set to `null` to use the project namespace. + # + # **IMPORTANT: The default namespace will change to the project namespace instead of the garden-system namespace + # in an upcoming release!** + namespace: garden-system + + # Exposes the `nodeSelector` field on the PodSpec of the Kaniko pods. This allows you to constrain the Kaniko + # pods to only run on particular nodes. + # + # [See here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) for the official Kubernetes + # guide to assigning Pods to nodes. + nodeSelector: # A default hostname to use when no hostname is explicitly configured for a service. defaultHostname: @@ -120,7 +133,7 @@ providers: requests: # CPU request in millicpu. - cpu: 200 + cpu: 100 # Memory request in megabytes. memory: 512 @@ -303,9 +316,9 @@ providers: # The registry where built containers should be pushed to, and then pulled to the cluster when deploying services. 
# - # Important: If you specify this in combination with `buildMode: cluster-docker` or `buildMode: kaniko`, you must - # make sure `imagePullSecrets` includes authentication with the specified deployment registry, that has the - # appropriate write privileges (usually full write access to the configured `deploymentRegistry.namespace`). + # Important: If you specify this in combination with in-cluster building, you must make sure `imagePullSecrets` + # includes authentication with the specified deployment registry, that has the appropriate write privileges + # (usually full write access to the configured `deploymentRegistry.namespace`). deploymentRegistry: # The hostname (and optionally port, if not the default port) of the registry. hostname: @@ -482,6 +495,16 @@ Configuration options for the `kaniko` build mode. | -------- | -------- | | `object` | No | +### `providers[].kaniko.extraFlags[]` + +[providers](#providers) > [kaniko](#providerskaniko) > extraFlags + +Specify extra flags to use when building the container image with kaniko. Flags set on `container` modules take precedence over these. + +| Type | Required | +| --------------- | -------- | +| `array[string]` | No | + ### `providers[].kaniko.image` [providers](#providers) > [kaniko](#providerskaniko) > image @@ -490,17 +513,31 @@ Change the kaniko image (repository/image:tag) to use when building in kaniko mo | Type | Default | Required | | -------- | ----------------------------------------------- | -------- | -| `string` | `"gcr.io/kaniko-project/executor:debug-v1.2.0"` | No | +| `string` | `"gcr.io/kaniko-project/executor:v1.6.0-debug"` | No | -### `providers[].kaniko.extraFlags[]` +### `providers[].kaniko.namespace` -[providers](#providers) > [kaniko](#providerskaniko) > extraFlags +[providers](#providers) > [kaniko](#providerskaniko) > namespace -Specify extra flags to use when building the container image with kaniko. Flags set on container module take precedence over these. +Choose the namespace where the Kaniko pods will be run. Set to `null` to use the project namespace. -| Type | Required | -| --------------- | -------- | -| `array[string]` | No | +**IMPORTANT: The default namespace will change to the project namespace instead of the garden-system namespace in an upcoming release!** + +| Type | Default | Required | +| -------- | ----------------- | -------- | +| `string` | `"garden-system"` | No | + +### `providers[].kaniko.nodeSelector` + +[providers](#providers) > [kaniko](#providerskaniko) > nodeSelector + +Exposes the `nodeSelector` field on the PodSpec of the Kaniko pods. This allows you to constrain the Kaniko pods to only run on particular nodes. + +[See here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) for the official Kubernetes guide to assigning Pods to nodes. 
+ +| Type | Required | +| -------- | -------- | +| `object` | No | ### `providers[].defaultHostname` @@ -590,7 +627,7 @@ Resource requests and limits for the in-cluster builder, container registry and | Type | Default | Required | | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -| `object` | `{"builder":{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":200,"memory":512}},"registry":{"limits":{"cpu":2000,"memory":4096},"requests":{"cpu":200,"memory":512}},"sync":{"limits":{"cpu":500,"memory":512},"requests":{"cpu":100,"memory":90}}}` | No | +| `object` | `{"builder":{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":100,"memory":512}},"registry":{"limits":{"cpu":2000,"memory":4096},"requests":{"cpu":200,"memory":512}},"sync":{"limits":{"cpu":500,"memory":512},"requests":{"cpu":100,"memory":90}}}` | No | ### `providers[].resources.builder` @@ -606,7 +643,7 @@ When `buildMode` is `cluster-docker`, this applies to the single Docker Daemon t | Type | Default | Required | | -------- | --------------------------------------------------------------------------- | -------- | -| `object` | `{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":200,"memory":512}}` | No | +| `object` | `{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":100,"memory":512}}` | No | ### `providers[].resources.builder.limits` @@ -668,7 +705,7 @@ providers: | Type | Default | Required | | -------- | -------------------------- | -------- | -| `object` | `{"cpu":200,"memory":512}` | No | +| `object` | `{"cpu":100,"memory":512}` | No | ### `providers[].resources.builder.requests.cpu` @@ -678,7 +715,7 @@ CPU request in millicpu. | Type | Default | Required | | -------- | ------- | -------- | -| `number` | `200` | No | +| `number` | `100` | No | Example: @@ -690,7 +727,7 @@ providers: ... requests: ... - cpu: 200 + cpu: 100 ``` ### `providers[].resources.builder.requests.memory` @@ -1446,7 +1483,7 @@ providers: The registry where built containers should be pushed to, and then pulled to the cluster when deploying services. -Important: If you specify this in combination with `buildMode: cluster-docker` or `buildMode: kaniko`, you must make sure `imagePullSecrets` includes authentication with the specified deployment registry, that has the appropriate write privileges (usually full write access to the configured `deploymentRegistry.namespace`). +Important: If you specify this in combination with in-cluster building, you must make sure `imagePullSecrets` includes authentication with the specified deployment registry, that has the appropriate write privileges (usually full write access to the configured `deploymentRegistry.namespace`). | Type | Required | | -------- | -------- | diff --git a/docs/reference/providers/local-kubernetes.md b/docs/reference/providers/local-kubernetes.md index efa7958656..b3d68c443f 100644 --- a/docs/reference/providers/local-kubernetes.md +++ b/docs/reference/providers/local-kubernetes.md @@ -61,12 +61,25 @@ providers: # Configuration options for the `kaniko` build mode. kaniko: + # Specify extra flags to use when building the container image with kaniko. Flags set on `container` modules + # take precedence over these. + extraFlags: + # Change the kaniko image (repository/image:tag) to use when building in kaniko mode. 
- image: 'gcr.io/kaniko-project/executor:debug-v1.2.0' + image: 'gcr.io/kaniko-project/executor:v1.6.0-debug' - # Specify extra flags to use when building the container image with kaniko. Flags set on container module take - # precedence over these. - extraFlags: + # Choose the namespace where the Kaniko pods will be run. Set to `null` to use the project namespace. + # + # **IMPORTANT: The default namespace will change to the project namespace instead of the garden-system namespace + # in an upcoming release!** + namespace: garden-system + + # Exposes the `nodeSelector` field on the PodSpec of the Kaniko pods. This allows you to constrain the Kaniko + # pods to only run on particular nodes. + # + # [See here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) for the official Kubernetes + # guide to assigning Pods to nodes. + nodeSelector: # A default hostname to use when no hostname is explicitly configured for a service. defaultHostname: @@ -116,7 +129,7 @@ providers: requests: # CPU request in millicpu. - cpu: 200 + cpu: 100 # Memory request in megabytes. memory: 512 @@ -444,6 +457,16 @@ Configuration options for the `kaniko` build mode. | -------- | -------- | | `object` | No | +### `providers[].kaniko.extraFlags[]` + +[providers](#providers) > [kaniko](#providerskaniko) > extraFlags + +Specify extra flags to use when building the container image with kaniko. Flags set on `container` modules take precedence over these. + +| Type | Required | +| --------------- | -------- | +| `array[string]` | No | + ### `providers[].kaniko.image` [providers](#providers) > [kaniko](#providerskaniko) > image @@ -452,17 +475,31 @@ Change the kaniko image (repository/image:tag) to use when building in kaniko mo | Type | Default | Required | | -------- | ----------------------------------------------- | -------- | -| `string` | `"gcr.io/kaniko-project/executor:debug-v1.2.0"` | No | +| `string` | `"gcr.io/kaniko-project/executor:v1.6.0-debug"` | No | -### `providers[].kaniko.extraFlags[]` +### `providers[].kaniko.namespace` -[providers](#providers) > [kaniko](#providerskaniko) > extraFlags +[providers](#providers) > [kaniko](#providerskaniko) > namespace -Specify extra flags to use when building the container image with kaniko. Flags set on container module take precedence over these. +Choose the namespace where the Kaniko pods will be run. Set to `null` to use the project namespace. -| Type | Required | -| --------------- | -------- | -| `array[string]` | No | +**IMPORTANT: The default namespace will change to the project namespace instead of the garden-system namespace in an upcoming release!** + +| Type | Default | Required | +| -------- | ----------------- | -------- | +| `string` | `"garden-system"` | No | + +### `providers[].kaniko.nodeSelector` + +[providers](#providers) > [kaniko](#providerskaniko) > nodeSelector + +Exposes the `nodeSelector` field on the PodSpec of the Kaniko pods. This allows you to constrain the Kaniko pods to only run on particular nodes. + +[See here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) for the official Kubernetes guide to assigning Pods to nodes. 
+ +| Type | Required | +| -------- | -------- | +| `object` | No | ### `providers[].defaultHostname` @@ -552,7 +589,7 @@ Resource requests and limits for the in-cluster builder, container registry and | Type | Default | Required | | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -| `object` | `{"builder":{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":200,"memory":512}},"registry":{"limits":{"cpu":2000,"memory":4096},"requests":{"cpu":200,"memory":512}},"sync":{"limits":{"cpu":500,"memory":512},"requests":{"cpu":100,"memory":90}}}` | No | +| `object` | `{"builder":{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":100,"memory":512}},"registry":{"limits":{"cpu":2000,"memory":4096},"requests":{"cpu":200,"memory":512}},"sync":{"limits":{"cpu":500,"memory":512},"requests":{"cpu":100,"memory":90}}}` | No | ### `providers[].resources.builder` @@ -568,7 +605,7 @@ When `buildMode` is `cluster-docker`, this applies to the single Docker Daemon t | Type | Default | Required | | -------- | --------------------------------------------------------------------------- | -------- | -| `object` | `{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":200,"memory":512}}` | No | +| `object` | `{"limits":{"cpu":4000,"memory":8192},"requests":{"cpu":100,"memory":512}}` | No | ### `providers[].resources.builder.limits` @@ -630,7 +667,7 @@ providers: | Type | Default | Required | | -------- | -------------------------- | -------- | -| `object` | `{"cpu":200,"memory":512}` | No | +| `object` | `{"cpu":100,"memory":512}` | No | ### `providers[].resources.builder.requests.cpu` @@ -640,7 +677,7 @@ CPU request in millicpu. | Type | Default | Required | | -------- | ------- | -------- | -| `number` | `200` | No | +| `number` | `100` | No | Example: @@ -652,7 +689,7 @@ providers: ... requests: ... 
- cpu: 200 + cpu: 100 ``` ### `providers[].resources.builder.requests.memory` diff --git a/examples/build-dependencies/garden.yml b/examples/build-dependencies/garden.yml index 1f823e419b..979d59ebc9 100644 --- a/examples/build-dependencies/garden.yml +++ b/examples/build-dependencies/garden.yml @@ -13,4 +13,4 @@ providers: # Replace these values as appropriate context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${environment.namespace}.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko diff --git a/examples/demo-project/garden.yml b/examples/demo-project/garden.yml index 7077489800..31eb6174da 100644 --- a/examples/demo-project/garden.yml +++ b/examples/demo-project/garden.yml @@ -13,4 +13,4 @@ providers: context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 namespace: demo-project-testing-${local.env.USER || local.username} defaultHostname: ${local.env.USER || local.username}-demo-project.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko \ No newline at end of file diff --git a/examples/deployment-strategies/garden.yml b/examples/deployment-strategies/garden.yml index 48f37b2eb8..8f1568c55c 100644 --- a/examples/deployment-strategies/garden.yml +++ b/examples/deployment-strategies/garden.yml @@ -23,5 +23,5 @@ providers: environments: [testing] context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: deployment-strategies-${environment.namespace}.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko deploymentStrategy: blue-green diff --git a/examples/disabled-configs/garden.yml b/examples/disabled-configs/garden.yml index 1469179441..fc06946c60 100644 --- a/examples/disabled-configs/garden.yml +++ b/examples/disabled-configs/garden.yml @@ -13,4 +13,4 @@ providers: # Replace these values as appropriate context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${local.username}-disabled-configs.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko diff --git a/examples/hadolint/garden.yml b/examples/hadolint/garden.yml index cea414aa23..abae53c863 100644 --- a/examples/hadolint/garden.yml +++ b/examples/hadolint/garden.yml @@ -12,4 +12,4 @@ providers: environments: [testing] context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${environment.namespace}.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko diff --git a/examples/hot-reload-k8s/garden.yml b/examples/hot-reload-k8s/garden.yml index b8162924bd..250ea9a600 100644 --- a/examples/hot-reload-k8s/garden.yml +++ b/examples/hot-reload-k8s/garden.yml @@ -18,4 +18,4 @@ providers: context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 namespace: hot-reload-k8s-testing-${local.username} defaultHostname: ${var.default-hostname} - buildMode: cluster-docker + buildMode: kaniko diff --git a/examples/hot-reload-post-sync-command/garden.yml b/examples/hot-reload-post-sync-command/garden.yml index 5891509863..5ff4127d1b 100644 --- a/examples/hot-reload-post-sync-command/garden.yml +++ b/examples/hot-reload-post-sync-command/garden.yml @@ -11,4 +11,4 @@ providers: environments: [testing] context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${environment.namespace}.dev-1.sys.garden - buildMode: cluster-docker \ No newline at end of file + buildMode: kaniko \ No newline at end of file diff --git a/examples/kaniko/garden.yml b/examples/kaniko/garden.yml index 5e8e289075..53910acd95 100644 --- a/examples/kaniko/garden.yml +++ b/examples/kaniko/garden.yml @@ -12,4 +12,6 @@ 
providers: environments: [remote] context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${environment.namespace}.dev-1.sys.garden - buildMode: kaniko \ No newline at end of file + buildMode: kaniko + kaniko: + namespace: null \ No newline at end of file diff --git a/examples/openfaas/garden.yml b/examples/openfaas/garden.yml index f2987ebc7c..2d2f706180 100644 --- a/examples/openfaas/garden.yml +++ b/examples/openfaas/garden.yml @@ -11,5 +11,5 @@ providers: environments: [testing] context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${environment.namespace}.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko - name: openfaas diff --git a/examples/remote-k8s/garden.yml b/examples/remote-k8s/garden.yml index 58a0c1f146..5b64de6efa 100644 --- a/examples/remote-k8s/garden.yml +++ b/examples/remote-k8s/garden.yml @@ -19,4 +19,4 @@ providers: secretRef: name: garden-example-tls namespace: default - buildMode: cluster-docker \ No newline at end of file + buildMode: kaniko \ No newline at end of file diff --git a/examples/remote-sources/garden.yml b/examples/remote-sources/garden.yml index 7f00890cfd..33db83a539 100644 --- a/examples/remote-sources/garden.yml +++ b/examples/remote-sources/garden.yml @@ -16,7 +16,7 @@ providers: environments: [testing] context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${environment.namespace}.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko variables: postgres-database: postgres # Only use for testing! diff --git a/examples/tasks/garden.yml b/examples/tasks/garden.yml index cc00ef6fc9..1dda3aa192 100644 --- a/examples/tasks/garden.yml +++ b/examples/tasks/garden.yml @@ -11,4 +11,4 @@ providers: environments: [testing] context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 defaultHostname: ${environment.namespace}.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko diff --git a/examples/templated-k8s-container/project.garden.yml b/examples/templated-k8s-container/project.garden.yml index 7c66592b63..61e6ba71a5 100644 --- a/examples/templated-k8s-container/project.garden.yml +++ b/examples/templated-k8s-container/project.garden.yml @@ -12,4 +12,4 @@ providers: context: gke_garden-dev-200012_europe-west1-b_garden-dev-1 namespace: templated-k8s-container-testing-${local.username} defaultHostname: ${local.username}-templated-k8s-container.dev-1.sys.garden - buildMode: cluster-docker + buildMode: kaniko diff --git a/examples/terraform-gke/garden.yml b/examples/terraform-gke/garden.yml index 86f77b1dbb..0b7141ba42 100644 --- a/examples/terraform-gke/garden.yml +++ b/examples/terraform-gke/garden.yml @@ -13,4 +13,4 @@ providers: kubeconfig: ${providers.terraform.outputs.kubeconfig_path} context: gke defaultHostname: terraform-gke-${local.username}.dev-2.sys.garden - buildMode: cluster-docker + buildMode: kaniko diff --git a/examples/vote/garden.yml b/examples/vote/garden.yml index 5beb82459c..60f4b1d396 100644 --- a/examples/vote/garden.yml +++ b/examples/vote/garden.yml @@ -19,7 +19,7 @@ providers: environments: [remote] # Replace these values as appropriate context: ${var.remoteContext} - buildMode: cluster-docker + buildMode: kaniko - name: kubernetes environments: [testing] context: ${var.remoteContext}