Commit

feat: add pull command
Mitchell Friedman committed Mar 19, 2020
1 parent 11a3f29 commit 5bfab26
Showing 5 changed files with 318 additions and 30 deletions.
215 changes: 215 additions & 0 deletions garden-service/src/plugins/kubernetes/commands/pull-image.ts
@@ -0,0 +1,215 @@
/*
* Copyright (C) 2018-2020 Garden Technologies, Inc. <[email protected]>
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

import fs from "fs"
import tmp from "tmp-promise"
import { KubernetesPluginContext, KubernetesProvider } from "../config"
import { PluginError, ParameterError } from "../../../exceptions"
import { PluginCommand } from "../../../types/plugin/command"
import chalk from "chalk"
import { Module } from "../../../types/module"
import { findByNames } from "../../../util/util"
import { filter, map } from "lodash"
import { KubeApi } from "../api"
import { LogEntry } from "../../../logger/log-entry"
import { containerHelpers } from "../../container/helpers"
import { RuntimeError } from "../../../exceptions"
import { PodRunner } from "../run"
import { inClusterRegistryHostname } from "../constants"
import { getAppNamespace, getSystemNamespace } from "../namespace"
import { makePodName, skopeoImage, getSkopeoContainer, getDockerAuthVolume } from "../util"

export const pullImage: PluginCommand = {
name: "pull-image",
description: "Pull images from a remote cluster",
title: "Pull images from a remote cluster",
resolveModules: true,

handler: async ({ ctx, args, log, modules }) => {
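// `args` is treated as a list of module names to pull; an empty list selects every
// module in the project (each selected module must be a container module with a
// Dockerfile, see findModules/ensureAllModulesValid below).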
const result = {}
const k8sCtx = ctx as KubernetesPluginContext
const provider = k8sCtx.provider

if (provider.config.buildMode === "local-docker") {
throw new PluginError(`Cannot pull images with buildMode=local-docker`, {
provider,
})
}

const modulesToPull = findModules(modules, args)
log.info({ msg: chalk.cyan(`\nPulling images for ${modulesToPull.length} modules`) })

await pullModules(k8sCtx, modulesToPull, log)

log.info({ msg: chalk.green("\nDone!"), status: "success" })

return { result }
},
}

function findModules(modules: Module[], names: string[]): Module[] {
let foundModules: Module[]

if (!names || names.length === 0) {
foundModules = modules
} else {
foundModules = findByNames(names, modules, "modules")
}

ensureAllModulesValid(foundModules)

return foundModules
}

function ensureAllModulesValid(modules: Module[]) {
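// Only container-compatible modules that have a Dockerfile can be pulled; anything
// else is reported as a parameter error below.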
const invalidModules = filter(modules, (module) => {
return !module.compatibleTypes.includes("container") || !containerHelpers.hasDockerfile(module)
})

if (invalidModules.length > 0) {
const invalidModuleNames = map(invalidModules, (module) => {
return module.name
})

throw new ParameterError(chalk.red(`Modules ${chalk.white(invalidModuleNames)} are not container modules with a Dockerfile.`), {
invalidModuleNames,
compatibleTypes: "container",
})
}
}

async function pullModules(ctx: KubernetesPluginContext, modules: Module[], log: LogEntry) {
await Promise.all(
modules.map(async (module) => {
const remoteId = await containerHelpers.getPublicImageId(module)
log.debug({ msg: chalk.cyan(`Pulling image ${remoteId}`) })
await pullModule(ctx, module, log)
log.info({ msg: chalk.green(`\nPulled module: ${module.name}`) })
})
)
}

async function pullModule(ctx: KubernetesPluginContext, module: Module, log: LogEntry) {
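// When the in-cluster registry is used, the image is pulled and re-tagged with the
// local docker CLI; otherwise it is copied out of the deployment registry via a
// skopeo pod (see pullFromExternalRegistry below).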
if (ctx.provider.config.deploymentRegistry?.hostname === inClusterRegistryHostname) {
await pullFromInClusterRegistry(module, log)
} else {
await pullFromExternalRegistry(ctx, module, log)
}
}

async function pullFromInClusterRegistry(module: Module, log: LogEntry) {
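// Roughly (with hypothetical image IDs), this amounts to:
//   docker pull my-org/api:v-abc123
//   docker tag my-org/api:v-abc123 api:v-abc123
// i.e. pull the published image and re-tag it under the local image ID if the two differ.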
const localId = await containerHelpers.getLocalImageId(module)
const remoteId = await containerHelpers.getPublicImageId(module)

await containerHelpers.dockerCli(module.buildPath, ["pull", remoteId], log)

if (localId !== remoteId) {
await containerHelpers.dockerCli(module.buildPath, ["tag", remoteId, localId], log)
}
}

async function pullFromExternalRegistry(ctx: KubernetesPluginContext, module: Module, log: LogEntry) {
const api = await KubeApi.factory(log, ctx.provider)
const namespace = await getAppNamespace(ctx, log, ctx.provider)
const podName = makePodName("skopeo", namespace, module.name)
const systemNamespace = await getSystemNamespace(ctx.provider, log)
const imageId = await containerHelpers.getDeploymentImageId(module, ctx.provider.config.deploymentRegistry)
const tarName = `${module.name}-${module.version.versionString}`

const skopeoCommand = [
"skopeo",
"--command-timeout=300s",
"--insecure-policy",
"copy",
`docker://${imageId}`,
`docker-archive:${tarName}`,
]
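// With hypothetical values filled in, the assembled command looks something like:
//   skopeo --command-timeout=300s --insecure-policy copy \
//     docker://registry.example.com/my-project/api:v-abc123 docker-archive:api-v-abc123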

try {
const runner = await launchSkopeoContainer(ctx.provider, api, podName, systemNamespace, module, log)
await pullImageFromRegistry(runner, skopeoCommand.join(" "), log)
await importImage(module, runner, tarName, imageId, log)
} catch (err) {
throw new RuntimeError(`Failed pulling image for module ${module.name} with image id ${imageId}`, {
err,
imageId,
})
}
}

async function importImage(module: Module, runner: PodRunner, tarName: string, imageId: string, log: LogEntry) {
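// Streams the docker-archive tarball out of the skopeo container (via `cat`), writes it
// to a local temp file, and then loads it into the local docker daemon with `docker import`.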
const sourcePath = `/${tarName}`
const getOutputCommand = ["cat", sourcePath]
const tmpFile = await tmp.file()

const writeStream = fs.createWriteStream(tmpFile.path)

await runner.spawn({
command: getOutputCommand,
container: "skopeo",
ignoreError: false,
log,
stdout: writeStream,
})

const args = ["import", tmpFile.path, imageId]
await containerHelpers.dockerCli(module.buildPath, args, log)
}

async function pullImageFromRegistry(runner: PodRunner, command: string, log: LogEntry) {
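// Runs the skopeo copy inside the pod; the resulting docker-archive tarball is left on the
// container's filesystem and is read back out by importImage() above.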
// TODO: make this timeout configurable
await runner.exec({
command: ["sh", "-c", command],
container: "skopeo",
ignoreError: false,
log,
timeout: 60 * 1000 * 5, // 5 minutes,
})
}

async function launchSkopeoContainer(
provider: KubernetesProvider,
api: KubeApi,
podName: string,
systemNamespace: string,
module: Module,
log: LogEntry
) {
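// The skopeo container itself just sleeps; the copy command and the tarball read are
// exec'd/spawned into it afterwards by the callers above.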
const sleepCommand = "sleep 86400"
const runner = new PodRunner({
api,
podName,
provider,
image: skopeoImage,
module,
namespace: systemNamespace,
spec: {
shareProcessNamespace: true,
volumes: [
// Mount the docker auth secret, so skopeo can inspect private registries.
getDockerAuthVolume(),
],
containers: [getSkopeoContainer(sleepCommand)],
},
})

const { pod, state, debugLog } = await runner.start({
log,
ignoreError: false,
})

if (state !== "ready") {
throw new RuntimeError("Failed to start skopeo container", {
pod,
state,
debugLog,
})
}

return runner
}
38 changes: 10 additions & 28 deletions garden-service/src/plugins/kubernetes/container/build.ts
@@ -13,12 +13,19 @@ import { containerHelpers } from "../../container/helpers"
import { buildContainerModule, getContainerBuildStatus, getDockerBuildFlags } from "../../container/build"
import { GetBuildStatusParams, BuildStatus } from "../../../types/plugin/module/getBuildStatus"
import { BuildModuleParams, BuildResult } from "../../../types/plugin/module/build"
import { millicpuToString, megabytesToString, getRunningPodInDeployment, makePodName } from "../util"
import { RSYNC_PORT, dockerAuthSecretName, dockerAuthSecretKey, inClusterRegistryHostname } from "../constants"
import {
millicpuToString,
megabytesToString,
getRunningPodInDeployment,
makePodName,
getSkopeoContainer,
} from "../util"
import { RSYNC_PORT, dockerAuthSecretName, inClusterRegistryHostname } from "../constants"
import { posix, resolve } from "path"
import { KubeApi } from "../api"
import { kubectl } from "../kubectl"
import { LogEntry } from "../../../logger/log-entry"
import { getDockerAuthVolume } from "../util"
import { KubernetesProvider, ContainerBuildMode, KubernetesPluginContext, KubernetesConfig } from "../config"
import { PluginError, InternalError, RuntimeError, BuildError } from "../../../exceptions"
import { PodRunner } from "../run"
@@ -37,7 +44,6 @@ const dockerDaemonDeploymentName = "garden-docker-daemon"
const dockerDaemonContainerName = "docker-daemon"

const kanikoImage = "gcr.io/kaniko-project/executor:debug-v0.19.0"
const skopeoImage = "gardendev/skopeo:1.41.0-1"

const registryPort = 5000

@@ -176,21 +182,7 @@ const buildStatusHandlers: { [mode in ContainerBuildMode]: BuildStatusHandler } = {
// Mount the docker auth secret, so skopeo can inspect private registries.
getDockerAuthVolume(),
],
containers: [
{
name: "skopeo",
image: skopeoImage,
command: ["sh", "-c", commandStr],
volumeMounts: [
{
name: dockerAuthSecretName,
mountPath: "/root/.docker",
readOnly: true,
},
],
},
getSocatContainer(registryHostname),
],
containers: [getSkopeoContainer(commandStr), getSocatContainer(registryHostname)],
},
})

@@ -613,16 +605,6 @@ function isLocalHostname(hostname: string) {
return hostname === "localhost" || hostname.startsWith("127.")
}

function getDockerAuthVolume() {
return {
name: dockerAuthSecretName,
secret: {
secretName: dockerAuthSecretName,
items: [{ key: dockerAuthSecretKey, path: "config.json" }],
},
}
}

function getSocatContainer(registryHostname: string) {
return {
name: "proxy",
3 changes: 2 additions & 1 deletion garden-service/src/plugins/kubernetes/kubernetes.ts
@@ -23,6 +23,7 @@ import { configSchema } from "./config"
import { ConfigurationError } from "../../exceptions"
import { cleanupClusterRegistry } from "./commands/cleanup-cluster-registry"
import { clusterInit } from "./commands/cluster-init"
import { pullImage } from "./commands/pull-image"
import { uninstallGardenServices } from "./commands/uninstall-garden-services"
import { joi, joiIdentifier } from "../../config/common"
import { resolve } from "path"
@@ -189,7 +190,7 @@ export const gardenPlugin = createGardenPlugin({
`,
configSchema,
outputsSchema,
commands: [cleanupClusterRegistry, clusterInit, removeTillerCmd, uninstallGardenServices],
commands: [cleanupClusterRegistry, clusterInit, removeTillerCmd, uninstallGardenServices, pullImage],
handlers: {
configureProvider,
getEnvironmentStatus,
54 changes: 54 additions & 0 deletions garden-service/src/plugins/kubernetes/run.ts
@@ -539,6 +539,60 @@ export class PodRunner extends PodRunnerParams {
return { proc: this.proc, pod, state, debugLog }
}

async spawn(params: ExecParams) {
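// Spawns a `kubectl exec` process in the running Pod and streams its output to the
// provided `stdout` stream (used by the pull-image command to pipe an image tarball to
// disk). Like exec(), this must be called after start().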
const { log, command, container, ignoreError, input, stdout, stderr, timeout } = params

if (!this.proc) {
throw new PodRunnerError(`Attempting to spawn a command in the Pod before starting it`, { command })
}

// TODO: use API library
const args = ["exec", "-i", this.podName, "-c", container || this.spec.containers[0].name, "--", ...command]

const startedAt = new Date()

const proc = await kubectl.spawn({
args,
namespace: this.namespace,
ignoreError,
input,
log,
provider: this.provider,
stdout,
stderr,
timeout,
})

return new Promise((_resolve, reject) => {
proc.on("close", (code) => {
if (code === 0) {
_resolve({
moduleName: this.module.name,
command,
version: this.module.version.versionString,
startedAt,
completedAt: new Date(),
log: "", // TODO: what here.
success: code === 0,
})
return
}

reject(
new RuntimeError(`Failed to spawn kubectl process with code ${code}`, {
code,
})
)
})

proc.on("error", (err) => {
!proc.killed && proc.kill()
reject(err)
})

stdout && proc.stdout?.pipe(stdout)
})
}

/**
* Executes a command in the running Pod. Must be called after `start()`.
*/
