From c9d0a5838df47324e3ecb1e177beab78c21377c3 Mon Sep 17 00:00:00 2001 From: Jon Edvald Date: Tue, 19 Jan 2021 03:41:34 +0100 Subject: [PATCH] refactor(k8s): split container build module up --- .../commands/cleanup-cluster-registry.ts | 3 +- .../plugins/kubernetes/container/.gitignore | 1 + .../src/plugins/kubernetes/container/build.ts | 706 ------------------ .../kubernetes/container/build/build.ts | 196 +++++ .../container/build/cluster-docker.ts | 79 ++ .../kubernetes/container/build/common.ts | 129 ++++ .../kubernetes/container/build/kaniko.ts | 303 ++++++++ .../kubernetes/container/build/local.ts | 81 ++ .../plugins/kubernetes/container/handlers.ts | 2 +- core/src/plugins/openfaas/build.ts | 2 +- .../plugins/kubernetes/commands/pull-image.ts | 2 +- .../src/plugins/kubernetes/container/build.ts | 4 +- .../src/plugins/kubernetes/container/build.ts | 2 +- 13 files changed, 797 insertions(+), 713 deletions(-) create mode 100644 core/src/plugins/kubernetes/container/.gitignore delete mode 100644 core/src/plugins/kubernetes/container/build.ts create mode 100644 core/src/plugins/kubernetes/container/build/build.ts create mode 100644 core/src/plugins/kubernetes/container/build/cluster-docker.ts create mode 100644 core/src/plugins/kubernetes/container/build/common.ts create mode 100644 core/src/plugins/kubernetes/container/build/kaniko.ts create mode 100644 core/src/plugins/kubernetes/container/build/local.ts diff --git a/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts b/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts index 8cbcbcd0da..302b3bc584 100644 --- a/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts +++ b/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts @@ -25,11 +25,12 @@ import { apply } from "../kubectl" import { waitForResources } from "../status/status" import { execInWorkload } from "../container/exec" import { dedent, deline } from "../../../util/string" -import { buildSyncDeploymentName, getDockerDaemonPodRunner } from "../container/build" +import { buildSyncDeploymentName } from "../container/build/common" import { getDeploymentPod } from "../util" import { getSystemNamespace } from "../namespace" import { PluginContext } from "../../../plugin-context" import { PodRunner } from "../run" +import { getDockerDaemonPodRunner } from "../container/build/cluster-docker" const workspaceSyncDirTtl = 0.5 * 86400 // 2 days diff --git a/core/src/plugins/kubernetes/container/.gitignore b/core/src/plugins/kubernetes/container/.gitignore new file mode 100644 index 0000000000..684ae5d80c --- /dev/null +++ b/core/src/plugins/kubernetes/container/.gitignore @@ -0,0 +1 @@ +!build \ No newline at end of file diff --git a/core/src/plugins/kubernetes/container/build.ts b/core/src/plugins/kubernetes/container/build.ts deleted file mode 100644 index 8fd5cddf2e..0000000000 --- a/core/src/plugins/kubernetes/container/build.ts +++ /dev/null @@ -1,706 +0,0 @@ -/* - * Copyright (C) 2018-2020 Garden Technologies, Inc. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- */ - -import pRetry from "p-retry" -import split2 = require("split2") -import { differenceBy } from "lodash" -import { V1PodSpec } from "@kubernetes/client-node" -import { ContainerModule, ContainerRegistryConfig } from "../../container/config" -import { containerHelpers } from "../../container/helpers" -import { buildContainerModule, getContainerBuildStatus, getDockerBuildFlags } from "../../container/build" -import { GetBuildStatusParams, BuildStatus } from "../../../types/plugin/module/getBuildStatus" -import { BuildModuleParams, BuildResult } from "../../../types/plugin/module/build" -import { millicpuToString, megabytesToString, getDeploymentPod, makePodName } from "../util" -import { - RSYNC_PORT, - dockerAuthSecretName, - inClusterRegistryHostname, - dockerDaemonDeploymentName, - gardenUtilDaemonDeploymentName, - dockerDaemonContainerName, - skopeoDaemonContainerName, -} from "../constants" -import { posix, resolve } from "path" -import { KubeApi } from "../api" -import { LogEntry } from "../../../logger/log-entry" -import { getDockerAuthVolume } from "../util" -import { KubernetesProvider, ContainerBuildMode, KubernetesPluginContext, DEFAULT_KANIKO_IMAGE } from "../config" -import { InternalError, RuntimeError, BuildError, ConfigurationError } from "../../../exceptions" -import { PodRunner } from "../run" -import { getRegistryHostname, getKubernetesSystemVariables } from "../init" -import { normalizeLocalRsyncPath } from "../../../util/fs" -import { getPortForward } from "../port-forward" -import { Writable } from "stream" -import { LogLevel } from "../../../logger/log-node" -import { exec, renderOutputStream } from "../../../util/util" -import { loadImageToKind, getKindImageStatus } from "../local/kind" -import { getSystemNamespace } from "../namespace" -import { dedent } from "../../../util/string" -import chalk = require("chalk") -import { loadImageToMicrok8s, getMicrok8sImageStatus } from "../local/microk8s" -import { RunResult } from "../../../types/plugin/base" -import { ContainerProvider } from "../../container/container" -import { PluginContext } from "../../../plugin-context" -import { KubernetesPod } from "../types" - -const registryPort = 5000 - -export const buildSyncDeploymentName = "garden-build-sync" - -export async function k8sGetContainerBuildStatus(params: GetBuildStatusParams): Promise { - const { ctx, module } = params - const provider = ctx.provider - - const hasDockerfile = containerHelpers.hasDockerfile(module, module.version) - - if (!hasDockerfile) { - // Nothing to build - return { ready: true } - } - - const handler = buildStatusHandlers[provider.config.buildMode] - return handler(params) -} - -export async function k8sBuildContainer(params: BuildModuleParams): Promise { - const { ctx } = params - const provider = ctx.provider - const handler = buildHandlers[provider.config.buildMode] - return handler(params) -} - -type BuildStatusHandler = (params: GetBuildStatusParams) => Promise - -const buildStatusHandlers: { [mode in ContainerBuildMode]: BuildStatusHandler } = { - "local-docker": async (params) => { - const { ctx, module, log } = params - const k8sCtx = ctx as KubernetesPluginContext - const config = k8sCtx.provider.config - const deploymentRegistry = config.deploymentRegistry - - if (deploymentRegistry) { - const args = await getManifestInspectArgs(module, deploymentRegistry) - const res = await containerHelpers.dockerCli({ - cwd: module.buildPath, - args, - log, - ctx, - ignoreError: true, - }) - - // Non-zero exit code can both mean the manifest 
is not found, and any other unexpected error - if (res.code !== 0 && !res.all.includes("no such manifest")) { - const detail = res.all || `docker manifest inspect exited with code ${res.code}` - log.warn(chalk.yellow(`Unable to query registry for image status: ${detail}`)) - } - - return { ready: res.code === 0 } - } else if (config.clusterType === "kind") { - const localId = containerHelpers.getLocalImageId(module, module.version) - return getKindImageStatus(config, localId, log) - } else if (config.clusterType === "microk8s") { - const localId = containerHelpers.getLocalImageId(module, module.version) - return getMicrok8sImageStatus(localId) - } else { - return getContainerBuildStatus({ ...params, ctx: { ...ctx, provider: ctx.provider.dependencies.container } }) - } - }, - - // TODO: make these handlers faster by running a simple in-cluster service - // that wraps https://github.com/containers/image - "cluster-docker": async (params) => { - const { ctx, module, log } = params - const k8sCtx = ctx as KubernetesPluginContext - const provider = k8sCtx.provider - const deploymentRegistry = provider.config.deploymentRegistry - const api = await KubeApi.factory(log, ctx, provider) - - if (!deploymentRegistry) { - // This is validated in the provider configure handler, so this is an internal error if it happens - throw new InternalError(`Expected configured deploymentRegistry for remote build`, { config: provider.config }) - } - - const args = await getManifestInspectArgs(module, deploymentRegistry) - const pushArgs = ["/bin/sh", "-c", "DOCKER_CLI_EXPERIMENTAL=enabled docker " + args.join(" ")] - - const systemNamespace = await getSystemNamespace(ctx, provider, log) - const runner = await getDockerDaemonPodRunner({ api, systemNamespace, ctx, provider }) - - try { - await runner.exec({ - log, - command: pushArgs, - timeoutSec: 300, - containerName: dockerDaemonContainerName, - }) - return { ready: true } - } catch (err) { - const res = err.detail?.result - - // Non-zero exit code can both mean the manifest is not found, and any other unexpected error - if (res.exitCode !== 0 && !res.stderr.includes("no such manifest")) { - const detail = res.all || `docker manifest inspect exited with code ${res.exitCode}` - log.warn(chalk.yellow(`Unable to query registry for image status: ${detail}`)) - } - - return { ready: false } - } - }, - - "kaniko": async (params) => { - const { ctx, module, log } = params - const k8sCtx = ctx as KubernetesPluginContext - const provider = k8sCtx.provider - const deploymentRegistry = provider.config.deploymentRegistry - - if (!deploymentRegistry) { - // This is validated in the provider configure handler, so this is an internal error if it happens - throw new InternalError(`Expected configured deploymentRegistry for remote build`, { config: provider.config }) - } - - const remoteId = containerHelpers.getDeploymentImageId(module, module.version, deploymentRegistry) - const inClusterRegistry = deploymentRegistry?.hostname === inClusterRegistryHostname - const skopeoCommand = ["skopeo", "--command-timeout=30s", "inspect", "--raw"] - if (inClusterRegistry) { - // The in-cluster registry is not exposed, so we don't configure TLS on it. 
- skopeoCommand.push("--tls-verify=false") - } - - skopeoCommand.push(`docker://${remoteId}`) - - const podCommand = ["sh", "-c", skopeoCommand.join(" ")] - const api = await KubeApi.factory(log, ctx, provider) - const systemNamespace = await getSystemNamespace(ctx, provider, log) - const runner = await getUtilDaemonPodRunner({ api, systemNamespace, ctx, provider }) - - try { - await runner.exec({ - log, - command: podCommand, - timeoutSec: 300, - containerName: skopeoDaemonContainerName, - }) - return { ready: true } - } catch (err) { - const res = err.detail?.result || {} - - // Non-zero exit code can both mean the manifest is not found, and any other unexpected error - if (res.exitCode !== 0 && !res.stderr.includes("manifest unknown")) { - const output = res.allLogs || err.message - - throw new RuntimeError(`Unable to query registry for image status: ${output}`, { - command: skopeoCommand, - output, - }) - } - return { ready: false } - } - }, -} - -type BuildHandler = (params: BuildModuleParams) => Promise - -const localBuild: BuildHandler = async (params) => { - const { ctx, module, log } = params - const provider = ctx.provider as KubernetesProvider - const containerProvider = provider.dependencies.container as ContainerProvider - const buildResult = await buildContainerModule({ ...params, ctx: { ...ctx, provider: containerProvider } }) - - if (!provider.config.deploymentRegistry) { - if (provider.config.clusterType === "kind") { - await loadImageToKind(buildResult, provider.config, log) - } else if (provider.config.clusterType === "microk8s") { - const imageId = containerHelpers.getLocalImageId(module, module.version) - await loadImageToMicrok8s({ module, imageId, log, ctx }) - } - return buildResult - } - - if (!containerHelpers.hasDockerfile(module, module.version)) { - return buildResult - } - - const localId = containerHelpers.getLocalImageId(module, module.version) - const remoteId = containerHelpers.getDeploymentImageId(module, module.version, ctx.provider.config.deploymentRegistry) - - log.setState({ msg: `Pushing image ${remoteId} to cluster...` }) - - await containerHelpers.dockerCli({ cwd: module.buildPath, args: ["tag", localId, remoteId], log, ctx }) - await containerHelpers.dockerCli({ cwd: module.buildPath, args: ["push", remoteId], log, ctx }) - - return buildResult -} - -const remoteBuild: BuildHandler = async (params) => { - const { ctx, module, log } = params - const provider = ctx.provider - const systemNamespace = await getSystemNamespace(ctx, provider, log) - const api = await KubeApi.factory(log, ctx, provider) - - if (!containerHelpers.hasDockerfile(module, module.version)) { - return {} - } - - const buildSyncPod = await getDeploymentPod({ - api, - deploymentName: buildSyncDeploymentName, - namespace: systemNamespace, - }) - // Sync the build context to the remote sync service - // -> Get a tunnel to the service - log.setState("Syncing sources to cluster...") - const syncFwd = await getPortForward({ - ctx, - log, - namespace: systemNamespace, - targetResource: `Pod/${buildSyncPod.metadata.name}`, - port: RSYNC_PORT, - }) - - // -> Run rsync - const buildRoot = resolve(module.buildPath, "..") - // The '/./' trick is used to automatically create the correct target directory with rsync: - // https://stackoverflow.com/questions/1636889/rsync-how-can-i-configure-it-to-create-target-directory-on-server - let src = normalizeLocalRsyncPath(`${buildRoot}`) + `/./${module.name}/` - const destination = `rsync://localhost:${syncFwd.localPort}/volume/${ctx.workingCopyId}/` 
- const syncArgs = [ - "--recursive", - "--relative", - // Copy symlinks (Note: These are sanitized while syncing to the build staging dir) - "--links", - // Preserve permissions - "--perms", - // Preserve modification times - "--times", - "--compress", - "--delete", - "--temp-dir", - "/tmp", - src, - destination, - ] - - log.debug(`Syncing from ${src} to ${destination}`) - // We retry a couple of times, because we may get intermittent connection issues or concurrency issues - await pRetry(() => exec("rsync", syncArgs), { - retries: 3, - minTimeout: 500, - }) - - const localId = containerHelpers.getLocalImageId(module, module.version) - const deploymentImageId = containerHelpers.getDeploymentImageId( - module, - module.version, - provider.config.deploymentRegistry - ) - const dockerfile = module.spec.dockerfile || "Dockerfile" - - // Because we're syncing to a shared volume, we need to scope by a unique ID - const contextPath = `/garden-build/${ctx.workingCopyId}/${module.name}/` - - log.setState(`Building image ${localId}...`) - - let buildLog = "" - - // Stream debug log to a status line - const stdout = split2() - const statusLine = log.placeholder({ level: LogLevel.verbose }) - - stdout.on("error", () => {}) - stdout.on("data", (line: Buffer) => { - statusLine.setState(renderOutputStream(line.toString())) - }) - if (provider.config.buildMode === "cluster-docker") { - // Prepare the build command - const dockerfilePath = posix.join(contextPath, dockerfile) - - let args = [ - "docker", - "build", - "-t", - deploymentImageId, - "-f", - dockerfilePath, - contextPath, - ...getDockerBuildFlags(module), - ] - - // Execute the build - const containerName = dockerDaemonContainerName - const buildTimeout = module.spec.build.timeout - - if (provider.config.clusterDocker && provider.config.clusterDocker.enableBuildKit) { - args = ["/bin/sh", "-c", "DOCKER_BUILDKIT=1 " + args.join(" ")] - } - - const runner = await getDockerDaemonPodRunner({ api, ctx, provider, systemNamespace }) - - const buildRes = await runner.exec({ - log, - command: args, - timeoutSec: buildTimeout, - containerName, - stdout, - }) - - buildLog = buildRes.log - - // Push the image to the registry - log.setState({ msg: `Pushing image ${localId} to registry...` }) - - const dockerCmd = ["docker", "push", deploymentImageId] - const pushArgs = ["/bin/sh", "-c", dockerCmd.join(" ")] - - const pushRes = await runner.exec({ - log, - command: pushArgs, - timeoutSec: 300, - containerName, - stdout, - }) - - buildLog += pushRes.log - } else if (provider.config.buildMode === "kaniko") { - // build with Kaniko - const args = [ - "--context", - "dir://" + contextPath, - "--dockerfile", - dockerfile, - "--destination", - deploymentImageId, - ...getKanikoFlags(module.spec.extraFlags, provider.config.kaniko?.extraFlags), - ] - - if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { - // The in-cluster registry is not exposed, so we don't configure TLS on it. 
- args.push("--insecure") - } - - args.push(...getDockerBuildFlags(module)) - - // Execute the build - const buildRes = await runKaniko({ - ctx, - provider, - log, - namespace: systemNamespace, - module, - args, - outputStream: stdout, - }) - buildLog = buildRes.log - - if (kanikoBuildFailed(buildRes)) { - throw new BuildError(`Failed building module ${chalk.bold(module.name)}:\n\n${buildLog}`, { buildLog }) - } - } else { - throw new ConfigurationError("Uknown build mode", { buildMode: provider.config.buildMode }) - } - - log.silly(buildLog) - - return { - buildLog, - fetched: false, - fresh: true, - version: module.version.versionString, - } -} - -export async function getDockerDaemonPodRunner({ - api, - systemNamespace, - ctx, - provider, -}: { - api: KubeApi - systemNamespace: string - ctx: PluginContext - provider: KubernetesProvider -}) { - const pod = await getDeploymentPod({ api, deploymentName: dockerDaemonDeploymentName, namespace: systemNamespace }) - - return new PodRunner({ - api, - ctx, - provider, - namespace: systemNamespace, - pod, - }) -} - -export async function getUtilDaemonPodRunner({ - api, - systemNamespace, - ctx, - provider, -}: { - api: KubeApi - systemNamespace: string - ctx: PluginContext - provider: KubernetesProvider -}) { - const pod = await getDeploymentPod({ - api, - deploymentName: gardenUtilDaemonDeploymentName, - namespace: systemNamespace, - }) - - return new PodRunner({ - api, - ctx, - provider, - namespace: systemNamespace, - pod, - }) -} - -export const DEFAULT_KANIKO_FLAGS = ["--cache=true"] - -export const getKanikoFlags = (flags?: string[], topLevelFlags?: string[]): string[] => { - if (!flags && !topLevelFlags) { - return DEFAULT_KANIKO_FLAGS - } - const flagToKey = (flag: string) => { - const found = flag.match(/--([a-zA-Z]*)/) - if (found === null) { - throw new ConfigurationError(`Invalid format for a kaniko flag`, { flag }) - } - return found[0] - } - const defaultsToKeep = differenceBy(DEFAULT_KANIKO_FLAGS, flags || topLevelFlags || [], flagToKey) - const topLevelToKeep = differenceBy(topLevelFlags || [], flags || [], flagToKey) - return [...(flags || []), ...topLevelToKeep, ...defaultsToKeep] -} - -export function kanikoBuildFailed(buildRes: RunResult) { - return ( - !buildRes.success && - !( - buildRes.log.includes("error pushing image: ") && - buildRes.log.includes("cannot be overwritten because the repository is immutable.") - ) - ) -} - -const buildHandlers: { [mode in ContainerBuildMode]: BuildHandler } = { - "local-docker": localBuild, - "cluster-docker": remoteBuild, - "kaniko": remoteBuild, -} - -interface RunKanikoParams { - ctx: PluginContext - provider: KubernetesProvider - namespace: string - log: LogEntry - module: ContainerModule - args: string[] - outputStream: Writable -} - -async function runKaniko({ - ctx, - provider, - namespace, - log, - module, - args, - outputStream, -}: RunKanikoParams): Promise { - const api = await KubeApi.factory(log, ctx, provider) - - const podName = makePodName("kaniko", namespace, module.name) - const registryHostname = getRegistryHostname(provider.config) - const k8sSystemVars = getKubernetesSystemVariables(provider.config) - const syncDataVolumeName = k8sSystemVars["sync-volume-name"] - const commsVolumeName = "comms" - const commsMountPath = "/.garden/comms" - - // Escape the args so that we can safely interpolate them into the kaniko command - const argsStr = args.map((arg) => JSON.stringify(arg)).join(" ") - - let commandStr = dedent` - /kaniko/executor ${argsStr}; - export exitcode=$?; - 
touch ${commsMountPath}/done; - exit $exitcode; - ` - if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { - // This may seem kind of insane but we have to wait until the socat proxy is up (because Kaniko immediately tries to - // reach the registry we plan on pushing to). See the support container in the Pod spec below for more on this - // hackery. - commandStr = dedent` - while true; do - if ls ${commsMountPath}/socatStarted 2> /dev/null; then - ${commandStr} - else - sleep 0.3; - fi - done - ` - } - - const kanikoImage = provider.config.kaniko?.image || DEFAULT_KANIKO_IMAGE - - const spec: V1PodSpec = { - shareProcessNamespace: true, - volumes: [ - // Mount the build sync volume, to get the build context from. - { - name: syncDataVolumeName, - persistentVolumeClaim: { claimName: syncDataVolumeName }, - }, - // Mount the docker auth secret, so Kaniko can pull from private registries. - getDockerAuthVolume(), - // Mount a volume to communicate between the containers in the Pod. - { - name: commsVolumeName, - emptyDir: {}, - }, - ], - containers: [ - { - name: "kaniko", - image: kanikoImage, - command: ["sh", "-c", commandStr], - volumeMounts: [ - { - name: syncDataVolumeName, - mountPath: "/garden-build", - }, - { - name: dockerAuthSecretName, - mountPath: "/kaniko/.docker", - readOnly: true, - }, - { - name: commsVolumeName, - mountPath: commsMountPath, - }, - ], - resources: { - limits: { - cpu: millicpuToString(provider.config.resources.builder.limits.cpu), - memory: megabytesToString(provider.config.resources.builder.limits.memory), - }, - requests: { - cpu: millicpuToString(provider.config.resources.builder.requests.cpu), - memory: megabytesToString(provider.config.resources.builder.requests.memory), - }, - }, - }, - ], - } - - if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { - spec.containers = spec.containers.concat([ - getSocatContainer(registryHostname), - // This is a workaround so that the kaniko executor can wait until socat starts, and so that the socat proxy - // doesn't just keep running after the build finishes. 
Doing this in the kaniko Pod is currently not possible - // because of https://github.com/GoogleContainerTools/distroless/issues/225 - { - name: "support", - image: "busybox:1.31.1", - command: [ - "sh", - "-c", - dedent` - while true; do - if pidof socat 2> /dev/null; then - touch ${commsMountPath}/socatStarted; - break; - else - sleep 0.3; - fi - done - while true; do - if ls ${commsMountPath}/done 2> /dev/null; then - killall socat; exit 0; - else - sleep 0.3; - fi - done - `, - ], - volumeMounts: [ - { - name: commsVolumeName, - mountPath: commsMountPath, - }, - ], - }, - ]) - } - - const pod: KubernetesPod = { - apiVersion: "v1", - kind: "Pod", - metadata: { - name: podName, - namespace, - }, - spec, - } - - const runner = new PodRunner({ - ctx, - api, - pod, - provider, - namespace, - }) - - const result = await runner.runAndWait({ - log, - remove: true, - timeoutSec: module.spec.build.timeout, - stdout: outputStream, - tty: false, - }) - - return { - ...result, - moduleName: module.name, - version: module.version.versionString, - } -} - -async function getManifestInspectArgs(module: ContainerModule, deploymentRegistry: ContainerRegistryConfig) { - const remoteId = containerHelpers.getDeploymentImageId(module, module.version, deploymentRegistry) - - const dockerArgs = ["manifest", "inspect", remoteId] - if (isLocalHostname(deploymentRegistry.hostname)) { - dockerArgs.push("--insecure") - } - - return dockerArgs -} - -function isLocalHostname(hostname: string) { - return hostname === "localhost" || hostname.startsWith("127.") -} - -function getSocatContainer(registryHostname: string) { - return { - name: "proxy", - image: "gardendev/socat:0.1.0", - command: ["/bin/sh", "-c", `socat TCP-LISTEN:5000,fork TCP:${registryHostname}:5000 || exit 0`], - ports: [ - { - name: "proxy", - containerPort: registryPort, - protocol: "TCP", - }, - ], - readinessProbe: { - tcpSocket: { port: registryPort }, - }, - } -} diff --git a/core/src/plugins/kubernetes/container/build/build.ts b/core/src/plugins/kubernetes/container/build/build.ts new file mode 100644 index 0000000000..963ee852d6 --- /dev/null +++ b/core/src/plugins/kubernetes/container/build/build.ts @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2018-2020 Garden Technologies, Inc. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ */ + +import split2 = require("split2") +import { ContainerModule } from "../../../container/config" +import { containerHelpers } from "../../../container/helpers" +import { getDockerBuildFlags } from "../../../container/build" +import { GetBuildStatusParams, BuildStatus } from "../../../../types/plugin/module/getBuildStatus" +import { BuildModuleParams, BuildResult } from "../../../../types/plugin/module/build" +import { inClusterRegistryHostname, dockerDaemonContainerName } from "../../constants" +import { posix } from "path" +import { KubeApi } from "../../api" +import { KubernetesProvider, ContainerBuildMode } from "../../config" +import { BuildError, ConfigurationError } from "../../../../exceptions" +import { LogLevel } from "../../../../logger/log-node" +import { renderOutputStream } from "../../../../util/util" +import { getSystemNamespace } from "../../namespace" +import chalk = require("chalk") +import { getKanikoBuildStatus, runKaniko, kanikoBuildFailed, getKanikoFlags } from "./kaniko" +import { getClusterDockerBuildStatus, getDockerDaemonPodRunner } from "./cluster-docker" +import { getLocalBuildStatus, localBuild } from "./local" +import { BuildStatusHandler, BuildHandler, syncToSharedBuildSync } from "./common" + +export async function k8sGetContainerBuildStatus(params: GetBuildStatusParams): Promise { + const { ctx, module } = params + const provider = ctx.provider + + const hasDockerfile = containerHelpers.hasDockerfile(module, module.version) + + if (!hasDockerfile) { + // Nothing to build + return { ready: true } + } + + const handler = buildStatusHandlers[provider.config.buildMode] + return handler(params) +} + +export async function k8sBuildContainer(params: BuildModuleParams): Promise { + const { ctx, module } = params + + if (!containerHelpers.hasDockerfile(module, module.version)) { + return {} + } + + const provider = ctx.provider + const handler = buildHandlers[provider.config.buildMode] + + return handler(params) +} + +const buildStatusHandlers: { [mode in ContainerBuildMode]: BuildStatusHandler } = { + "local-docker": getLocalBuildStatus, + // TODO: make these handlers faster by running a simple in-cluster service + // that wraps https://github.com/containers/image + "cluster-docker": getClusterDockerBuildStatus, + "kaniko": getKanikoBuildStatus, +} + +const remoteBuild: BuildHandler = async (params) => { + const { ctx, module, log } = params + const provider = ctx.provider + const systemNamespace = await getSystemNamespace(ctx, provider, log) + const api = await KubeApi.factory(log, ctx, provider) + + const localId = containerHelpers.getLocalImageId(module, module.version) + const deploymentImageId = containerHelpers.getDeploymentImageId( + module, + module.version, + provider.config.deploymentRegistry + ) + const dockerfile = module.spec.dockerfile || "Dockerfile" + + const { contextPath } = await syncToSharedBuildSync({ ...params, api, systemNamespace }) + + log.setState(`Building image ${localId}...`) + + let buildLog = "" + + // Stream debug log to a status line + const stdout = split2() + const statusLine = log.placeholder({ level: LogLevel.verbose }) + + stdout.on("error", () => {}) + stdout.on("data", (line: Buffer) => { + statusLine.setState(renderOutputStream(line.toString())) + }) + + if (provider.config.buildMode === "cluster-docker") { + // Prepare the build command + const dockerfilePath = posix.join(contextPath, dockerfile) + + let args = [ + "docker", + "build", + "-t", + deploymentImageId, + "-f", + dockerfilePath, + contextPath, + 
...getDockerBuildFlags(module), + ] + + // Execute the build + const containerName = dockerDaemonContainerName + const buildTimeout = module.spec.build.timeout + + if (provider.config.clusterDocker && provider.config.clusterDocker.enableBuildKit) { + args = ["/bin/sh", "-c", "DOCKER_BUILDKIT=1 " + args.join(" ")] + } + + const runner = await getDockerDaemonPodRunner({ api, ctx, provider, systemNamespace }) + + const buildRes = await runner.exec({ + log, + command: args, + timeoutSec: buildTimeout, + containerName, + stdout, + }) + + buildLog = buildRes.log + + // Push the image to the registry + log.setState({ msg: `Pushing image ${localId} to registry...` }) + + const dockerCmd = ["docker", "push", deploymentImageId] + const pushArgs = ["/bin/sh", "-c", dockerCmd.join(" ")] + + const pushRes = await runner.exec({ + log, + command: pushArgs, + timeoutSec: 300, + containerName, + stdout, + }) + + buildLog += pushRes.log + } else if (provider.config.buildMode === "kaniko") { + // build with Kaniko + const args = [ + "--context", + "dir://" + contextPath, + "--dockerfile", + dockerfile, + "--destination", + deploymentImageId, + ...getKanikoFlags(module.spec.extraFlags, provider.config.kaniko?.extraFlags), + ] + + if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { + // The in-cluster registry is not exposed, so we don't configure TLS on it. + args.push("--insecure") + } + + args.push(...getDockerBuildFlags(module)) + + // Execute the build + const buildRes = await runKaniko({ + ctx, + provider, + log, + namespace: systemNamespace, + module, + args, + outputStream: stdout, + }) + buildLog = buildRes.log + + if (kanikoBuildFailed(buildRes)) { + throw new BuildError(`Failed building module ${chalk.bold(module.name)}:\n\n${buildLog}`, { buildLog }) + } + } else { + throw new ConfigurationError("Uknown build mode", { buildMode: provider.config.buildMode }) + } + + log.silly(buildLog) + + return { + buildLog, + fetched: false, + fresh: true, + version: module.version.versionString, + } +} + +const buildHandlers: { [mode in ContainerBuildMode]: BuildHandler } = { + "local-docker": localBuild, + "cluster-docker": remoteBuild, + "kaniko": remoteBuild, +} diff --git a/core/src/plugins/kubernetes/container/build/cluster-docker.ts b/core/src/plugins/kubernetes/container/build/cluster-docker.ts new file mode 100644 index 0000000000..77749ed781 --- /dev/null +++ b/core/src/plugins/kubernetes/container/build/cluster-docker.ts @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2018-2020 Garden Technologies, Inc. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ */ + +import { getDeploymentPod } from "../../util" +import { dockerDaemonDeploymentName, dockerDaemonContainerName } from "../../constants" +import { KubeApi } from "../../api" +import { KubernetesProvider, KubernetesPluginContext } from "../../config" +import { InternalError } from "../../../../exceptions" +import { PodRunner } from "../../run" +import { getSystemNamespace } from "../../namespace" +import chalk = require("chalk") +import { PluginContext } from "../../../../plugin-context" +import { BuildStatusHandler, getManifestInspectArgs } from "./common" + +export const getClusterDockerBuildStatus: BuildStatusHandler = async (params) => { + const { ctx, module, log } = params + const k8sCtx = ctx as KubernetesPluginContext + const provider = k8sCtx.provider + const deploymentRegistry = provider.config.deploymentRegistry + const api = await KubeApi.factory(log, ctx, provider) + + if (!deploymentRegistry) { + // This is validated in the provider configure handler, so this is an internal error if it happens + throw new InternalError(`Expected configured deploymentRegistry for remote build`, { config: provider.config }) + } + + const args = await getManifestInspectArgs(module, deploymentRegistry) + const pushArgs = ["/bin/sh", "-c", "DOCKER_CLI_EXPERIMENTAL=enabled docker " + args.join(" ")] + + const systemNamespace = await getSystemNamespace(ctx, provider, log) + const runner = await getDockerDaemonPodRunner({ api, systemNamespace, ctx, provider }) + + try { + await runner.exec({ + log, + command: pushArgs, + timeoutSec: 300, + containerName: dockerDaemonContainerName, + }) + return { ready: true } + } catch (err) { + const res = err.detail?.result + + // Non-zero exit code can both mean the manifest is not found, and any other unexpected error + if (res.exitCode !== 0 && !res.stderr.includes("no such manifest")) { + const detail = res.all || `docker manifest inspect exited with code ${res.exitCode}` + log.warn(chalk.yellow(`Unable to query registry for image status: ${detail}`)) + } + + return { ready: false } + } +} + +export async function getDockerDaemonPodRunner({ + api, + systemNamespace, + ctx, + provider, +}: { + api: KubeApi + systemNamespace: string + ctx: PluginContext + provider: KubernetesProvider +}) { + const pod = await getDeploymentPod({ api, deploymentName: dockerDaemonDeploymentName, namespace: systemNamespace }) + + return new PodRunner({ + api, + ctx, + provider, + namespace: systemNamespace, + pod, + }) +} diff --git a/core/src/plugins/kubernetes/container/build/common.ts b/core/src/plugins/kubernetes/container/build/common.ts new file mode 100644 index 0000000000..22022b7941 --- /dev/null +++ b/core/src/plugins/kubernetes/container/build/common.ts @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2018-2020 Garden Technologies, Inc. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ */ + +import pRetry from "p-retry" +import { ContainerModule, ContainerRegistryConfig } from "../../../container/config" +import { containerHelpers } from "../../../container/helpers" +import { GetBuildStatusParams, BuildStatus } from "../../../../types/plugin/module/getBuildStatus" +import { BuildModuleParams, BuildResult } from "../../../../types/plugin/module/build" +import { getDeploymentPod } from "../../util" +import { gardenUtilDaemonDeploymentName, RSYNC_PORT } from "../../constants" +import { KubeApi } from "../../api" +import { KubernetesProvider } from "../../config" +import { PodRunner } from "../../run" +import { PluginContext } from "../../../../plugin-context" +import { resolve } from "path" +import { getPortForward } from "../../port-forward" +import { normalizeLocalRsyncPath } from "../../../../util/fs" +import { exec } from "../../../../util/util" + +export const buildSyncDeploymentName = "garden-build-sync" + +export type BuildStatusHandler = (params: GetBuildStatusParams) => Promise +export type BuildHandler = (params: BuildModuleParams) => Promise + +interface SyncToSharedBuildSyncParams extends BuildModuleParams { + api: KubeApi + systemNamespace: string +} + +export async function syncToSharedBuildSync(params: SyncToSharedBuildSyncParams) { + const { ctx, module, log, api, systemNamespace } = params + + const buildSyncPod = await getDeploymentPod({ + api, + deploymentName: buildSyncDeploymentName, + namespace: systemNamespace, + }) + // Sync the build context to the remote sync service + // -> Get a tunnel to the service + log.setState("Syncing sources to cluster...") + const syncFwd = await getPortForward({ + ctx, + log, + namespace: systemNamespace, + targetResource: `Pod/${buildSyncPod.metadata.name}`, + port: RSYNC_PORT, + }) + + // -> Run rsync + const buildRoot = resolve(module.buildPath, "..") + // The '/./' trick is used to automatically create the correct target directory with rsync: + // https://stackoverflow.com/questions/1636889/rsync-how-can-i-configure-it-to-create-target-directory-on-server + let src = normalizeLocalRsyncPath(`${buildRoot}`) + `/./${module.name}/` + const destination = `rsync://localhost:${syncFwd.localPort}/volume/${ctx.workingCopyId}/` + const syncArgs = [ + "--recursive", + "--relative", + // Copy symlinks (Note: These are sanitized while syncing to the build staging dir) + "--links", + // Preserve permissions + "--perms", + // Preserve modification times + "--times", + "--compress", + "--delete", + "--temp-dir", + "/tmp", + src, + destination, + ] + + log.debug(`Syncing from ${src} to ${destination}`) + // We retry a couple of times, because we may get intermittent connection issues or concurrency issues + await pRetry(() => exec("rsync", syncArgs), { + retries: 3, + minTimeout: 500, + }) + + // Because we're syncing to a shared volume, we need to scope by a unique ID + const contextPath = `/garden-build/${ctx.workingCopyId}/${module.name}/` + + return { contextPath } +} + +export async function getUtilDaemonPodRunner({ + api, + systemNamespace, + ctx, + provider, +}: { + api: KubeApi + systemNamespace: string + ctx: PluginContext + provider: KubernetesProvider +}) { + const pod = await getDeploymentPod({ + api, + deploymentName: gardenUtilDaemonDeploymentName, + namespace: systemNamespace, + }) + + return new PodRunner({ + api, + ctx, + provider, + namespace: systemNamespace, + pod, + }) +} + +export async function getManifestInspectArgs(module: ContainerModule, deploymentRegistry: ContainerRegistryConfig) { + const remoteId = 
containerHelpers.getDeploymentImageId(module, module.version, deploymentRegistry) + + const dockerArgs = ["manifest", "inspect", remoteId] + if (isLocalHostname(deploymentRegistry.hostname)) { + dockerArgs.push("--insecure") + } + + return dockerArgs +} + +function isLocalHostname(hostname: string) { + return hostname === "localhost" || hostname.startsWith("127.") +} diff --git a/core/src/plugins/kubernetes/container/build/kaniko.ts b/core/src/plugins/kubernetes/container/build/kaniko.ts new file mode 100644 index 0000000000..9d3014f81c --- /dev/null +++ b/core/src/plugins/kubernetes/container/build/kaniko.ts @@ -0,0 +1,303 @@ +/* + * Copyright (C) 2018-2020 Garden Technologies, Inc. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + */ + +import { V1PodSpec } from "@kubernetes/client-node" +import { ContainerModule } from "../../../container/config" +import { containerHelpers } from "../../../container/helpers" +import { millicpuToString, megabytesToString, makePodName } from "../../util" +import { dockerAuthSecretName, inClusterRegistryHostname, skopeoDaemonContainerName } from "../../constants" +import { KubeApi } from "../../api" +import { LogEntry } from "../../../../logger/log-entry" +import { getDockerAuthVolume } from "../../util" +import { KubernetesProvider, KubernetesPluginContext, DEFAULT_KANIKO_IMAGE } from "../../config" +import { InternalError, RuntimeError, ConfigurationError } from "../../../../exceptions" +import { PodRunner } from "../../run" +import { getRegistryHostname, getKubernetesSystemVariables } from "../../init" +import { Writable } from "stream" +import { getSystemNamespace } from "../../namespace" +import { dedent } from "../../../../util/string" +import { RunResult } from "../../../../types/plugin/base" +import { PluginContext } from "../../../../plugin-context" +import { KubernetesPod } from "../../types" +import { getUtilDaemonPodRunner, BuildStatusHandler } from "./common" +import { differenceBy } from "lodash" + +const registryPort = 5000 + +export const getKanikoBuildStatus: BuildStatusHandler = async (params) => { + const { ctx, module, log } = params + const k8sCtx = ctx as KubernetesPluginContext + const provider = k8sCtx.provider + const deploymentRegistry = provider.config.deploymentRegistry + + if (!deploymentRegistry) { + // This is validated in the provider configure handler, so this is an internal error if it happens + throw new InternalError(`Expected configured deploymentRegistry for remote build`, { config: provider.config }) + } + + const remoteId = containerHelpers.getDeploymentImageId(module, module.version, deploymentRegistry) + const inClusterRegistry = deploymentRegistry?.hostname === inClusterRegistryHostname + const skopeoCommand = ["skopeo", "--command-timeout=30s", "inspect", "--raw"] + if (inClusterRegistry) { + // The in-cluster registry is not exposed, so we don't configure TLS on it. 
+ skopeoCommand.push("--tls-verify=false") + } + + skopeoCommand.push(`docker://${remoteId}`) + + const podCommand = ["sh", "-c", skopeoCommand.join(" ")] + const api = await KubeApi.factory(log, ctx, provider) + const systemNamespace = await getSystemNamespace(ctx, provider, log) + const runner = await getUtilDaemonPodRunner({ api, systemNamespace, ctx, provider }) + + try { + await runner.exec({ + log, + command: podCommand, + timeoutSec: 300, + containerName: skopeoDaemonContainerName, + }) + return { ready: true } + } catch (err) { + const res = err.detail?.result || {} + + // Non-zero exit code can both mean the manifest is not found, and any other unexpected error + if (res.exitCode !== 0 && !res.stderr.includes("manifest unknown")) { + const output = res.allLogs || err.message + + throw new RuntimeError(`Unable to query registry for image status: ${output}`, { + command: skopeoCommand, + output, + }) + } + return { ready: false } + } +} + +export const DEFAULT_KANIKO_FLAGS = ["--cache=true"] + +export const getKanikoFlags = (flags?: string[], topLevelFlags?: string[]): string[] => { + if (!flags && !topLevelFlags) { + return DEFAULT_KANIKO_FLAGS + } + const flagToKey = (flag: string) => { + const found = flag.match(/--([a-zA-Z]*)/) + if (found === null) { + throw new ConfigurationError(`Invalid format for a kaniko flag`, { flag }) + } + return found[0] + } + const defaultsToKeep = differenceBy(DEFAULT_KANIKO_FLAGS, flags || topLevelFlags || [], flagToKey) + const topLevelToKeep = differenceBy(topLevelFlags || [], flags || [], flagToKey) + return [...(flags || []), ...topLevelToKeep, ...defaultsToKeep] +} + +export function kanikoBuildFailed(buildRes: RunResult) { + return ( + !buildRes.success && + !( + buildRes.log.includes("error pushing image: ") && + buildRes.log.includes("cannot be overwritten because the repository is immutable.") + ) + ) +} + +interface RunKanikoParams { + ctx: PluginContext + provider: KubernetesProvider + namespace: string + log: LogEntry + module: ContainerModule + args: string[] + outputStream: Writable +} + +export async function runKaniko({ + ctx, + provider, + namespace, + log, + module, + args, + outputStream, +}: RunKanikoParams): Promise { + const api = await KubeApi.factory(log, ctx, provider) + + const podName = makePodName("kaniko", namespace, module.name) + const registryHostname = getRegistryHostname(provider.config) + const k8sSystemVars = getKubernetesSystemVariables(provider.config) + const syncDataVolumeName = k8sSystemVars["sync-volume-name"] + const commsVolumeName = "comms" + const commsMountPath = "/.garden/comms" + + // Escape the args so that we can safely interpolate them into the kaniko command + const argsStr = args.map((arg) => JSON.stringify(arg)).join(" ") + + let commandStr = dedent` + /kaniko/executor ${argsStr}; + export exitcode=$?; + touch ${commsMountPath}/done; + exit $exitcode; + ` + if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { + // This may seem kind of insane but we have to wait until the socat proxy is up (because Kaniko immediately tries to + // reach the registry we plan on pushing to). See the support container in the Pod spec below for more on this + // hackery. 
+ commandStr = dedent` + while true; do + if ls ${commsMountPath}/socatStarted 2> /dev/null; then + ${commandStr} + else + sleep 0.3; + fi + done + ` + } + + const kanikoImage = provider.config.kaniko?.image || DEFAULT_KANIKO_IMAGE + + const spec: V1PodSpec = { + shareProcessNamespace: true, + volumes: [ + // Mount the build sync volume, to get the build context from. + { + name: syncDataVolumeName, + persistentVolumeClaim: { claimName: syncDataVolumeName }, + }, + // Mount the docker auth secret, so Kaniko can pull from private registries. + getDockerAuthVolume(), + // Mount a volume to communicate between the containers in the Pod. + { + name: commsVolumeName, + emptyDir: {}, + }, + ], + containers: [ + { + name: "kaniko", + image: kanikoImage, + command: ["sh", "-c", commandStr], + volumeMounts: [ + { + name: syncDataVolumeName, + mountPath: "/garden-build", + }, + { + name: dockerAuthSecretName, + mountPath: "/kaniko/.docker", + readOnly: true, + }, + { + name: commsVolumeName, + mountPath: commsMountPath, + }, + ], + resources: { + limits: { + cpu: millicpuToString(provider.config.resources.builder.limits.cpu), + memory: megabytesToString(provider.config.resources.builder.limits.memory), + }, + requests: { + cpu: millicpuToString(provider.config.resources.builder.requests.cpu), + memory: megabytesToString(provider.config.resources.builder.requests.memory), + }, + }, + }, + ], + } + + if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) { + spec.containers = spec.containers.concat([ + getSocatContainer(registryHostname), + // This is a workaround so that the kaniko executor can wait until socat starts, and so that the socat proxy + // doesn't just keep running after the build finishes. Doing this in the kaniko Pod is currently not possible + // because of https://github.com/GoogleContainerTools/distroless/issues/225 + { + name: "support", + image: "busybox:1.31.1", + command: [ + "sh", + "-c", + dedent` + while true; do + if pidof socat 2> /dev/null; then + touch ${commsMountPath}/socatStarted; + break; + else + sleep 0.3; + fi + done + while true; do + if ls ${commsMountPath}/done 2> /dev/null; then + killall socat; exit 0; + else + sleep 0.3; + fi + done + `, + ], + volumeMounts: [ + { + name: commsVolumeName, + mountPath: commsMountPath, + }, + ], + }, + ]) + } + + const pod: KubernetesPod = { + apiVersion: "v1", + kind: "Pod", + metadata: { + name: podName, + namespace, + }, + spec, + } + + const runner = new PodRunner({ + ctx, + api, + pod, + provider, + namespace, + }) + + const result = await runner.runAndWait({ + log, + remove: true, + timeoutSec: module.spec.build.timeout, + stdout: outputStream, + tty: false, + }) + + return { + ...result, + moduleName: module.name, + version: module.version.versionString, + } +} + +function getSocatContainer(registryHostname: string) { + return { + name: "proxy", + image: "gardendev/socat:0.1.0", + command: ["/bin/sh", "-c", `socat TCP-LISTEN:5000,fork TCP:${registryHostname}:5000 || exit 0`], + ports: [ + { + name: "proxy", + containerPort: registryPort, + protocol: "TCP", + }, + ], + readinessProbe: { + tcpSocket: { port: registryPort }, + }, + } +} diff --git a/core/src/plugins/kubernetes/container/build/local.ts b/core/src/plugins/kubernetes/container/build/local.ts new file mode 100644 index 0000000000..0fc0dfc984 --- /dev/null +++ b/core/src/plugins/kubernetes/container/build/local.ts @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2018-2020 Garden Technologies, Inc. 
+ * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + */ + +import { containerHelpers } from "../../../container/helpers" +import { buildContainerModule, getContainerBuildStatus } from "../../../container/build" +import { KubernetesProvider, KubernetesPluginContext } from "../../config" +import { loadImageToKind, getKindImageStatus } from "../../local/kind" +import chalk = require("chalk") +import { loadImageToMicrok8s, getMicrok8sImageStatus } from "../../local/microk8s" +import { ContainerProvider } from "../../../container/container" +import { BuildHandler, BuildStatusHandler, getManifestInspectArgs } from "./common" + +export const getLocalBuildStatus: BuildStatusHandler = async (params) => { + const { ctx, module, log } = params + const k8sCtx = ctx as KubernetesPluginContext + const config = k8sCtx.provider.config + const deploymentRegistry = config.deploymentRegistry + + if (deploymentRegistry) { + const args = await getManifestInspectArgs(module, deploymentRegistry) + const res = await containerHelpers.dockerCli({ + cwd: module.buildPath, + args, + log, + ctx, + ignoreError: true, + }) + + // Non-zero exit code can both mean the manifest is not found, and any other unexpected error + if (res.code !== 0 && !res.all.includes("no such manifest")) { + const detail = res.all || `docker manifest inspect exited with code ${res.code}` + log.warn(chalk.yellow(`Unable to query registry for image status: ${detail}`)) + } + + return { ready: res.code === 0 } + } else if (config.clusterType === "kind") { + const localId = containerHelpers.getLocalImageId(module, module.version) + return getKindImageStatus(config, localId, log) + } else if (k8sCtx.provider.config.clusterType === "microk8s") { + const localId = containerHelpers.getLocalImageId(module, module.version) + return getMicrok8sImageStatus(localId) + } else { + return getContainerBuildStatus({ ...params, ctx: { ...ctx, provider: ctx.provider.dependencies.container } }) + } +} + +export const localBuild: BuildHandler = async (params) => { + const { ctx, module, log } = params + const provider = ctx.provider as KubernetesProvider + const containerProvider = provider.dependencies.container as ContainerProvider + const buildResult = await buildContainerModule({ ...params, ctx: { ...ctx, provider: containerProvider } }) + + if (!provider.config.deploymentRegistry) { + if (provider.config.clusterType === "kind") { + await loadImageToKind(buildResult, provider.config, log) + } else if (provider.config.clusterType === "microk8s") { + const imageId = containerHelpers.getLocalImageId(module, module.version) + await loadImageToMicrok8s({ module, imageId, log, ctx }) + } + return buildResult + } + + if (!containerHelpers.hasDockerfile(module, module.version)) { + return buildResult + } + + const localId = containerHelpers.getLocalImageId(module, module.version) + const remoteId = containerHelpers.getDeploymentImageId(module, module.version, ctx.provider.config.deploymentRegistry) + + log.setState({ msg: `Pushing image ${remoteId} to cluster...` }) + + await containerHelpers.dockerCli({ cwd: module.buildPath, args: ["tag", localId, remoteId], log, ctx }) + await containerHelpers.dockerCli({ cwd: module.buildPath, args: ["push", remoteId], log, ctx }) + + return buildResult +} diff --git a/core/src/plugins/kubernetes/container/handlers.ts b/core/src/plugins/kubernetes/container/handlers.ts index 
cfed0bf4f8..b6e74279fd 100644 --- a/core/src/plugins/kubernetes/container/handlers.ts +++ b/core/src/plugins/kubernetes/container/handlers.ts @@ -20,7 +20,7 @@ import { getContainerServiceStatus } from "./status" import { getTestResult } from "../test-results" import { ContainerModule } from "../../container/config" import { getTaskResult } from "../task-results" -import { k8sBuildContainer, k8sGetContainerBuildStatus } from "./build" +import { k8sBuildContainer, k8sGetContainerBuildStatus } from "./build/build" import { k8sPublishContainerModule } from "./publish" import { getPortForwardHandler } from "../port-forward" import { GetModuleOutputsParams } from "../../../types/plugin/module/getModuleOutputs" diff --git a/core/src/plugins/openfaas/build.ts b/core/src/plugins/openfaas/build.ts index a45139bf2a..afc02608e8 100644 --- a/core/src/plugins/openfaas/build.ts +++ b/core/src/plugins/openfaas/build.ts @@ -12,7 +12,7 @@ import { KubernetesProvider } from "../kubernetes/config" import { dumpYaml } from "../../util/util" import { BuildModuleParams } from "../../types/plugin/module/build" import { containerHelpers } from "../container/helpers" -import { k8sBuildContainer, k8sGetContainerBuildStatus } from "../kubernetes/container/build" +import { k8sBuildContainer, k8sGetContainerBuildStatus } from "../kubernetes/container/build/build" import { GetBuildStatusParams } from "../../types/plugin/module/getBuildStatus" import { OpenFaasModule, getContainerModule, OpenFaasProvider } from "./config" import { LogEntry } from "../../logger/log-entry" diff --git a/core/test/integ/src/plugins/kubernetes/commands/pull-image.ts b/core/test/integ/src/plugins/kubernetes/commands/pull-image.ts index 1dae79d1a9..3a15aea64d 100644 --- a/core/test/integ/src/plugins/kubernetes/commands/pull-image.ts +++ b/core/test/integ/src/plugins/kubernetes/commands/pull-image.ts @@ -10,7 +10,7 @@ import { pullModule } from "../../../../../../src/plugins/kubernetes/commands/pu import { Garden } from "../../../../../../src/garden" import { ConfigGraph } from "../../../../../../src/config-graph" import { getContainerTestGarden } from "../container/container" -import { k8sBuildContainer } from "../../../../../../src/plugins/kubernetes/container/build" +import { k8sBuildContainer } from "../../../../../../src/plugins/kubernetes/container/build/build" import { PluginContext } from "../../../../../../src/plugin-context" import { KubernetesProvider, KubernetesPluginContext } from "../../../../../../src/plugins/kubernetes/config" import { GardenModule } from "../../../../../../src/types/module" diff --git a/core/test/integ/src/plugins/kubernetes/container/build.ts b/core/test/integ/src/plugins/kubernetes/container/build.ts index 4ecd3f1739..4a6729836d 100644 --- a/core/test/integ/src/plugins/kubernetes/container/build.ts +++ b/core/test/integ/src/plugins/kubernetes/container/build.ts @@ -12,8 +12,7 @@ import { ConfigGraph } from "../../../../../../src/config-graph" import { k8sBuildContainer, k8sGetContainerBuildStatus, - getDockerDaemonPodRunner, -} from "../../../../../../src/plugins/kubernetes/container/build" +} from "../../../../../../src/plugins/kubernetes/container/build/build" import { PluginContext } from "../../../../../../src/plugin-context" import { KubernetesProvider } from "../../../../../../src/plugins/kubernetes/config" import { expect } from "chai" @@ -22,6 +21,7 @@ import { containerHelpers } from "../../../../../../src/plugins/container/helper import { dockerDaemonContainerName } from 
"../../../../../../src/plugins/kubernetes/constants" import { KubeApi } from "../../../../../../src/plugins/kubernetes/api" import { getSystemNamespace } from "../../../../../../src/plugins/kubernetes/namespace" +import { getDockerDaemonPodRunner } from "../../../../../../src/plugins/kubernetes/container/build/cluster-docker" describe("kubernetes build flow", () => { let garden: Garden diff --git a/core/test/unit/src/plugins/kubernetes/container/build.ts b/core/test/unit/src/plugins/kubernetes/container/build.ts index 4effab8482..9874dbddc8 100644 --- a/core/test/unit/src/plugins/kubernetes/container/build.ts +++ b/core/test/unit/src/plugins/kubernetes/container/build.ts @@ -10,7 +10,7 @@ import { kanikoBuildFailed, getKanikoFlags, DEFAULT_KANIKO_FLAGS, -} from "../../../../../../src/plugins/kubernetes/container/build" +} from "../../../../../../src/plugins/kubernetes/container/build/kaniko" import { expect } from "chai" describe("kaniko build", () => {