improvement(k8s): get rid of NFS when using kaniko build mode

Closes #1798

edvald committed May 4, 2021
1 parent d716c9a commit b6eb151
Showing 39 changed files with 943 additions and 494 deletions.
2 changes: 1 addition & 1 deletion core/src/plugins/container/config.ts
@@ -486,7 +486,7 @@ export const containerRegistryConfigSchema = () =>
}).description(dedent`
The registry where built containers should be pushed to, and then pulled to the cluster when deploying services.
Important: If you specify this in combination with \`buildMode: cluster-docker\` or \`buildMode: kaniko\`, you must make sure \`imagePullSecrets\` includes authentication with the specified deployment registry, that has the appropriate write privileges (usually full write access to the configured \`deploymentRegistry.namespace\`).
Important: If you specify this in combination with in-cluster building, you must make sure \`imagePullSecrets\` includes authentication with the specified deployment registry, that has the appropriate write privileges (usually full write access to the configured \`deploymentRegistry.namespace\`).
`)

export interface ContainerService extends GardenService<ContainerModule> {}
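
To make the requirement above concrete, here is a minimal sketch (not from this commit) of a provider configuration that pairs in-cluster building with an external deployment registry, written as a TypeScript object mirroring the schema. The registry hostname and secret name are hypothetical, and in a real project this is set in the provider section of the YAML config.

```ts
// Hypothetical values throughout; shown as a TS object for illustration only.
const providerConfig = {
  name: "kubernetes",
  buildMode: "kaniko",
  deploymentRegistry: {
    hostname: "registry.example.com", // assumed external registry
    namespace: "my-project", // built images are pushed under this namespace
  },
  // The referenced secret must hold credentials with write access to
  // deploymentRegistry.namespace, per the description above.
  imagePullSecrets: [{ name: "my-registry-secret", namespace: "default" }],
}
```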
23 changes: 16 additions & 7 deletions core/src/plugins/kubernetes/api.ts
@@ -30,6 +30,7 @@ import {
Exec,
Attach,
V1Deployment,
V1Service,
} from "@kubernetes/client-node"
import AsyncLock = require("async-lock")
import request = require("request-promise")
@@ -103,6 +104,14 @@ const apiTypes: { [key: string]: K8sApiConstructor<any> } = {
}

const crudMap = {
Deployment: {
cls: new V1Deployment(),
group: "apps",
read: "readNamespacedDeployment",
create: "createNamespacedDeployment",
replace: "replaceNamespacedDeployment",
delete: "deleteNamespacedDeployment",
},
Secret: {
cls: new V1Secret(),
group: "core",
@@ -111,13 +120,13 @@ const crudMap = {
replace: "replaceNamespacedSecret",
delete: "deleteNamespacedSecret",
},
Deployment: {
cls: new V1Deployment(),
group: "apps",
read: "readNamespacedDeployment",
create: "createNamespacedDeployment",
replace: "replaceNamespacedDeployment",
delete: "deleteNamespacedDeployment",
Service: {
cls: new V1Service(),
group: "core",
read: "readNamespacedService",
create: "createNamespacedService",
replace: "replaceNamespacedService",
delete: "deleteNamespacedService",
},
}

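
An aside on the `crudMap` pattern above: each entry pairs a resource kind with its API group and the names of the typed client methods, which lets generic helpers dispatch on the kind string. A minimal sketch, assuming a KubeApi-style wrapper that exposes one client per API group (e.g. `api.apps.readNamespacedDeployment(name, namespace)`):

```ts
type CrudKind = keyof typeof crudMap // "Deployment" | "Secret" | "Service"

// Reads a namespaced resource of the given kind via the method names in crudMap.
// The `api` shape is assumed for illustration; error handling is omitted.
async function readResource(api: any, kind: CrudKind, name: string, namespace: string) {
  const { group, read } = crudMap[kind]
  return api[group][read](name, namespace)
}
```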
2 changes: 1 addition & 1 deletion core/src/plugins/kubernetes/commands/cluster-init.ts
@@ -48,7 +48,7 @@ export const clusterInit: PluginCommand = {
ctx: k8sCtx,
log,
namespace: systemNamespace,
args: ["delete", "--purge", "garden-nfs-provisioner"],
args: ["uninstall", "garden-nfs-provisioner"],
})
} catch (_) {}

13 changes: 9 additions & 4 deletions core/src/plugins/kubernetes/commands/pull-image.ts
@@ -20,12 +20,17 @@ import { LogEntry } from "../../../logger/log-entry"
import { containerHelpers } from "../../container/helpers"
import { RuntimeError } from "../../../exceptions"
import { PodRunner } from "../run"
import { dockerAuthSecretKey, dockerAuthSecretName, inClusterRegistryHostname, k8sUtilImageName } from "../constants"
import {
dockerAuthSecretKey,
systemDockerAuthSecretName,
inClusterRegistryHostname,
k8sUtilImageName,
} from "../constants"
import { getAppNamespace, getSystemNamespace } from "../namespace"
import { getRegistryPortForward } from "../container/util"
import { randomString } from "../../../util/string"
import { buildkitAuthSecretName, ensureBuilderSecret } from "../container/build/buildkit"
import { PluginContext } from "../../../plugin-context"
import { dockerAuthSecretName, ensureBuilderSecret } from "../container/build/common"

const tmpTarPath = "/tmp/image.tar"

@@ -149,7 +154,7 @@ async function pullFromExternalRegistry(

if (buildMode === "cluster-buildkit") {
namespace = await getAppNamespace(ctx, log, ctx.provider)
authSecretName = buildkitAuthSecretName
authSecretName = dockerAuthSecretName

await ensureBuilderSecret({
provider: ctx.provider,
@@ -160,7 +165,7 @@
})
} else {
namespace = await getSystemNamespace(ctx, ctx.provider, log)
authSecretName = dockerAuthSecretName
authSecretName = systemDockerAuthSecretName
}

const imageId = containerHelpers.getDeploymentImageId(module, module.version, ctx.provider.config.deploymentRegistry)
23 changes: 19 additions & 4 deletions core/src/plugins/kubernetes/commands/uninstall-garden-services.ts
@@ -11,6 +11,8 @@ import { PluginCommand } from "../../../types/plugin/command"
import { getKubernetesSystemVariables } from "../init"
import { KubernetesPluginContext } from "../config"
import { getSystemGarden } from "../system"
import { getSystemNamespace } from "../namespace"
import { helm } from "../helm/helm-cli"

export const uninstallGardenServices: PluginCommand = {
name: "uninstall-garden-services",
@@ -36,10 +38,23 @@ export const uninstallGardenServices: PluginCommand = {
const serviceNames = services.map((s) => s.name).filter((name) => name !== "nfs-provisioner")
const serviceStatuses = await actions.deleteServices(log, serviceNames)

if (k8sCtx.provider.config._systemServices.includes("nfs-provisioner")) {
const service = graph.getService("nfs-provisioner")
await actions.deleteService({ service, log })
}
const systemNamespace = await getSystemNamespace(ctx, k8sCtx.provider, log)
try {
await helm({
ctx: k8sCtx,
log,
namespace: systemNamespace,
args: ["uninstall", "garden-nfs-provisioner"],
})
} catch (_) {}
try {
await helm({
ctx: k8sCtx,
log,
namespace: systemNamespace,
args: ["uninstall", "garden-nfs-provisioner-v2"],
})
} catch (_) {}

log.info("")

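
The two best-effort uninstalls above follow the same pattern, so the equivalent logic can be sketched as a loop over the legacy release names (illustrative only, not how the commit writes it):

```ts
for (const release of ["garden-nfs-provisioner", "garden-nfs-provisioner-v2"]) {
  try {
    await helm({ ctx: k8sCtx, log, namespace: systemNamespace, args: ["uninstall", release] })
  } catch (_) {
    // Best-effort cleanup: the release may not exist on this cluster.
  }
}
```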
35 changes: 27 additions & 8 deletions core/src/plugins/kubernetes/config.ts
@@ -38,7 +38,7 @@ import { ArtifactSpec } from "../../config/validation"
import { V1Toleration } from "@kubernetes/client-node"
import { runPodSpecWhitelist } from "./run"

export const DEFAULT_KANIKO_IMAGE = "gcr.io/kaniko-project/executor:debug-v1.2.0"
export const DEFAULT_KANIKO_IMAGE = "gcr.io/kaniko-project/executor:v1.6.0-debug"
export interface ProviderSecretRef {
name: string
namespace: string
@@ -116,6 +116,8 @@ export interface KubernetesConfig extends GenericProviderConfig {
kaniko?: {
image?: string
extraFlags?: string[]
namespace?: string | null
nodeSelector?: StringMap
}
context: string
defaultHostname?: string
@@ -150,7 +152,7 @@ export const defaultResources: KubernetesResources = {
memory: 8192,
},
requests: {
cpu: 200,
cpu: 100,
memory: 512,
},
},
@@ -362,17 +364,34 @@ export const kubernetesConfigBase = () =>
kaniko: joi
.object()
.keys({
extraFlags: joi
.array()
.items(joi.string())
.description(
`Specify extra flags to use when building the container image with kaniko. Flags set on \`container\` modules take precedence over these.`
),
image: joi
.string()
.default(DEFAULT_KANIKO_IMAGE)
.description(`Change the kaniko image (repository/image:tag) to use when building in kaniko mode.`),
namespace: joi
.string()
.allow(null)
.default(defaultSystemNamespace)
.description(
deline`
Change the kaniko image (repository/image:tag) to use when building in kaniko mode.
`
dedent`
Choose the namespace where the Kaniko pods will be run. Set to \`null\` to use the project namespace.
**IMPORTANT: The default namespace will change to the project namespace instead of the garden-system namespace in an upcoming release!**
`
),
extraFlags: joi.array().items(joi.string()).description(deline`
Specify extra flags to use when building the container image with kaniko.
Flags set on container module take precedence over these.`),
nodeSelector: joiStringMap(joi.string()).description(
dedent`
Exposes the \`nodeSelector\` field on the PodSpec of the Kaniko pods. This allows you to constrain the Kaniko pods to only run on particular nodes.
[See here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) for the official Kubernetes guide to assigning Pods to nodes.
`
),
})
.default(() => {})
.description("Configuration options for the `kaniko` build mode."),
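
From the user's side, the new `kaniko.namespace` and `kaniko.nodeSelector` options added above would be set roughly as follows; a sketch as a TypeScript object matching the schema (the node label is hypothetical, and real projects configure this in YAML):

```ts
const kanikoSettings = {
  buildMode: "kaniko",
  kaniko: {
    image: DEFAULT_KANIKO_IMAGE,
    // null opts into running Kaniko pods in the project namespace,
    // ahead of the default change flagged in the description above.
    namespace: null,
    // Pin Kaniko pods to a dedicated build node pool (hypothetical label).
    nodeSelector: { "example.com/node-pool": "builders" },
    extraFlags: ["--cache=true"], // a standard kaniko executor flag
  },
}
```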
2 changes: 1 addition & 1 deletion core/src/plugins/kubernetes/constants.ts
@@ -17,7 +17,7 @@ export const MAX_CONFIGMAP_DATA_SIZE = 1024 * 1024 // max ConfigMap data size is
// the outputs field, so we cap at 250kB.
export const MAX_RUN_RESULT_LOG_LENGTH = 250 * 1024

export const dockerAuthSecretName = "builder-docker-config"
export const systemDockerAuthSecretName = "builder-docker-config"
export const dockerAuthSecretKey = ".dockerconfigjson"
export const inClusterRegistryHostname = "127.0.0.1:5000"

155 changes: 5 additions & 150 deletions core/src/plugins/kubernetes/container/build/build.ts
@@ -6,25 +6,15 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

import split2 = require("split2")
import { ContainerModule } from "../../../container/config"
import { containerHelpers } from "../../../container/helpers"
import { getDockerBuildFlags } from "../../../container/build"
import { GetBuildStatusParams, BuildStatus } from "../../../../types/plugin/module/getBuildStatus"
import { BuildModuleParams, BuildResult } from "../../../../types/plugin/module/build"
import { inClusterRegistryHostname, dockerDaemonContainerName, rsyncPort } from "../../constants"
import { posix } from "path"
import { KubeApi } from "../../api"
import { KubernetesProvider, ContainerBuildMode } from "../../config"
import { BuildError, ConfigurationError } from "../../../../exceptions"
import { LogLevel } from "../../../../logger/log-node"
import { renderOutputStream } from "../../../../util/util"
import { getSystemNamespace } from "../../namespace"
import chalk = require("chalk")
import { getKanikoBuildStatus, runKaniko, kanikoBuildFailed, getKanikoFlags } from "./kaniko"
import { getClusterDockerBuildStatus, getDockerDaemonPodRunner } from "./cluster-docker"
import { getKanikoBuildStatus, kanikoBuild } from "./kaniko"
import { clusterDockerBuild, getClusterDockerBuildStatus } from "./cluster-docker"
import { getLocalBuildStatus, localBuild } from "./local"
import { BuildStatusHandler, BuildHandler, syncToBuildSync, sharedBuildSyncDeploymentName } from "./common"
import { BuildStatusHandler, BuildHandler } from "./common"
import { buildkitBuildHandler, getBuildkitBuildStatus } from "./buildkit"

export async function k8sGetContainerBuildStatus(params: GetBuildStatusParams<ContainerModule>): Promise<BuildStatus> {
@@ -62,144 +52,9 @@ const buildStatusHandlers: { [mode in ContainerBuildMode]: BuildStatusHandler } =
"kaniko": getKanikoBuildStatus,
}

const remoteBuild: BuildHandler = async (params) => {
const { ctx, module, log } = params
const provider = <KubernetesProvider>ctx.provider
const systemNamespace = await getSystemNamespace(ctx, provider, log)
const api = await KubeApi.factory(log, ctx, provider)

const localId = containerHelpers.getLocalImageId(module, module.version)
const deploymentImageId = containerHelpers.getDeploymentImageId(
module,
module.version,
provider.config.deploymentRegistry
)
const dockerfile = module.spec.dockerfile || "Dockerfile"

const { contextPath } = await syncToBuildSync({
...params,
api,
namespace: systemNamespace,
deploymentName: sharedBuildSyncDeploymentName,
rsyncPort,
})

log.setState(`Building image ${localId}...`)

let buildLog = ""

// Stream debug log to a status line
const stdout = split2()
const statusLine = log.placeholder({ level: LogLevel.verbose })

stdout.on("error", () => {})
stdout.on("data", (line: Buffer) => {
statusLine.setState(renderOutputStream(line.toString()))
})

if (provider.config.buildMode === "cluster-docker") {
// Prepare the build command
const dockerfilePath = posix.join(contextPath, dockerfile)

let args = [
"docker",
"build",
"-t",
deploymentImageId,
"-f",
dockerfilePath,
contextPath,
...getDockerBuildFlags(module),
]

// Execute the build
const containerName = dockerDaemonContainerName
const buildTimeout = module.spec.build.timeout

if (provider.config.clusterDocker && provider.config.clusterDocker.enableBuildKit) {
args = ["/bin/sh", "-c", "DOCKER_BUILDKIT=1 " + args.join(" ")]
}

const runner = await getDockerDaemonPodRunner({ api, ctx, provider, systemNamespace })

const buildRes = await runner.exec({
log,
command: args,
timeoutSec: buildTimeout,
containerName,
stdout,
buffer: true,
})

buildLog = buildRes.log

// Push the image to the registry
log.setState({ msg: `Pushing image ${localId} to registry...` })

const dockerCmd = ["docker", "push", deploymentImageId]
const pushArgs = ["/bin/sh", "-c", dockerCmd.join(" ")]

const pushRes = await runner.exec({
log,
command: pushArgs,
timeoutSec: 300,
containerName,
stdout,
buffer: true,
})

buildLog += pushRes.log
} else if (provider.config.buildMode === "kaniko") {
// build with Kaniko
const args = [
"--context",
"dir://" + contextPath,
"--dockerfile",
dockerfile,
"--destination",
deploymentImageId,
...getKanikoFlags(module.spec.extraFlags, provider.config.kaniko?.extraFlags),
]

if (provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) {
// The in-cluster registry is not exposed, so we don't configure TLS on it.
args.push("--insecure")
}

args.push(...getDockerBuildFlags(module))

// Execute the build
const buildRes = await runKaniko({
ctx,
provider,
log,
namespace: systemNamespace,
module,
args,
outputStream: stdout,
})
buildLog = buildRes.log

if (kanikoBuildFailed(buildRes)) {
throw new BuildError(`Failed building module ${chalk.bold(module.name)}:\n\n${buildLog}`, { buildLog })
}
} else {
throw new ConfigurationError("Uknown build mode", { buildMode: provider.config.buildMode })
}

log.silly(buildLog)

return {
buildLog,
fetched: false,
fresh: true,
version: module.version.versionString,
}
}

const buildHandlers: { [mode in ContainerBuildMode]: BuildHandler } = {
"local-docker": localBuild,
"cluster-buildkit": buildkitBuildHandler,
"cluster-docker": remoteBuild,
"kaniko": remoteBuild,
"cluster-docker": clusterDockerBuild,
"kaniko": kanikoBuild,
}
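
For reference, a sketch of how this dispatch table is consumed: the provider's configured `buildMode` selects the handler, mirroring how `k8sGetContainerBuildStatus` picks from `buildStatusHandlers` above. The function body here is assumed for illustration, since the commit view truncates the file:

```ts
export async function k8sBuildContainer(params: BuildModuleParams<ContainerModule>): Promise<BuildResult> {
  const provider = <KubernetesProvider>params.ctx.provider
  const handler = buildHandlers[provider.config.buildMode]
  return handler(params)
}
```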
(Diff truncated: the remaining 31 changed files are not shown.)
