fix(k8s): backwards-compatible deployment logic
This fixes an issue when using newer versions of `kubectl` with older
cluster versions.

When `kubectl apply --prune` is used against a cluster that serves
API versions of certain resources (most commonly `Ingress` and
`CronJob`) that differ from the ones `kubectl` expects, it fails:
pruning can only handle the resource mappings hardcoded into the
running version of `kubectl`.

See: https://github.com/kubernetes/kubectl/blob/0a152f10/pkg/cmd/apply/prune.go#L176-L192

Since the `--prune` option for `kubectl apply` currently isn't
backwards-compatible, we reimplement the pruning logic in our
Kubernetes plugin so that pruning works with both newer and older
versions of Kubernetes.

This really should be fixed in `kubectl` proper. In fact, simply
including resource mappings for older/beta API versions and adding the
appropriate error handling for missing API/resource versions to the
pruning logic would be enough to make `kubectl apply --prune`
backwards-compatible.

If/when we have time, we should try to contribute a fix to `kubectl`
along these lines.
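
In outline, the pruning logic added here works like this (a simplified
TypeScript sketch of the approach, with the resource type trimmed down
to the fields the pruning code actually touches):

```ts
// Simplified sketch of the pruning decision implemented in this commit.
interface PrunableResource {
  apiVersion: string
  kind: string
  metadata: { name: string; annotations?: { [key: string]: string } }
}

function findResourcesToPrune(
  deployed: PrunableResource[], // resources in the namespace matching the prune labels
  applied: PrunableResource[] // the manifests that were just applied
): PrunableResource[] {
  return (
    deployed
      // Only consider resources created/updated via `kubectl apply`
      // (this mirrors how `kubectl apply --prune` itself behaves)...
      .filter((r) => r.metadata.annotations?.["kubectl.kubernetes.io/last-applied-configuration"])
      // ...and prune those that no applied manifest matches by kind + name.
      .filter((r) => !applied.some((m) => m.kind === r.kind && m.metadata.name === r.metadata.name))
  )
}
```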
thsig committed Nov 5, 2021
1 parent 85e2b97 commit b960b9c
Showing 10 changed files with 210 additions and 33 deletions.
41 changes: 40 additions & 1 deletion core/src/plugins/kubernetes/api.ts
@@ -11,6 +11,7 @@
import { IncomingMessage } from "http"
import { Agent } from "https"
import { ReadStream } from "tty"
import Bluebird from "bluebird"
import chalk from "chalk"
import httpStatusCodes from "http-status-codes"
import {
@@ -43,7 +44,7 @@ import { readFile } from "fs-extra"
import { lookup } from "dns-lookup-cache"

import { Omit, safeDumpYaml, StringCollector, sleep } from "../../util/util"
import { omitBy, isObject, isPlainObject, keyBy } from "lodash"
import { omitBy, isObject, isPlainObject, keyBy, flatten } from "lodash"
import { GardenBaseError, RuntimeError, ConfigurationError } from "../../exceptions"
import {
KubernetesResource,
@@ -435,6 +436,44 @@ export class KubeApi {
return list
}

/**
* Fetches all resources in the namespace matching the provided API version + kind pairs, optionally filtered by
* `labelSelector`.
*
* Useful when resources of several kinds need to be fetched at once.
*/
async listResourcesForKinds({
log,
namespace,
versionedKinds,
labelSelector,
}: {
log: LogEntry
namespace: string
versionedKinds: { apiVersion: string; kind: string }[]
labelSelector?: { [label: string]: string }
}): Promise<KubernetesResource[]> {
const resources = await Bluebird.map(versionedKinds, async ({ apiVersion, kind }) => {
try {
const resourceListForKind = await this.listResources({
log,
apiVersion,
kind,
namespace,
labelSelector,
})
return resourceListForKind.items
} catch (err) {
if (err.statusCode === 404) {
// Then this resource version + kind is not available in the cluster.
return []
}
throw err
}
})
return flatten(resources)
}

async replace({
log,
resource,
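For illustration, a hypothetical call site for the new method could look
like this (the namespace, kinds, and label values below are made up for
the example, not taken from this commit):

```ts
// Hypothetical usage of KubeApi.listResourcesForKinds (illustrative values only).
const ingresses = await api.listResourcesForKinds({
  log,
  namespace: "my-namespace",
  versionedKinds: [
    // Old and new Ingress API versions can be queried in a single call...
    { apiVersion: "extensions/v1beta1", kind: "Ingress" },
    { apiVersion: "networking.k8s.io/v1", kind: "Ingress" },
  ],
  labelSelector: { "example.com/service": "my-service" },
})
// ...and any version the cluster doesn't serve results in a 404, which the
// method swallows by returning an empty list for that kind.
```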
@@ -255,6 +255,7 @@ async function runRegistryGarbageCollection(ctx: KubernetesPluginContext, api: K
try {
await apply({
ctx,
api,
log,
provider,
manifests: [modifiedDeployment],
@@ -301,6 +302,7 @@

await apply({
ctx,
api,
log,
provider,
manifests: [writableRegistry],
30 changes: 17 additions & 13 deletions core/src/plugins/kubernetes/container/deployment.ts
@@ -48,11 +48,13 @@ export async function deployContainerService(
): Promise<ContainerServiceStatus> {
const { ctx, service, log, devMode } = params
const { deploymentStrategy } = params.ctx.provider.config
const k8sCtx = <KubernetesPluginContext>ctx
const api = await KubeApi.factory(log, k8sCtx, k8sCtx.provider)

if (deploymentStrategy === "blue-green") {
await deployContainerServiceBlueGreen(params)
await deployContainerServiceBlueGreen({ ...params, api })
} else {
await deployContainerServiceRolling(params)
await deployContainerServiceRolling({ ...params, api })
}

const status = await getContainerServiceStatus(params)
@@ -103,15 +105,16 @@ export async function startContainerDevSync({
})
}

export async function deployContainerServiceRolling(params: DeployServiceParams<ContainerModule>) {
const { ctx, service, runtimeContext, log, devMode, hotReload } = params
export async function deployContainerServiceRolling(params: DeployServiceParams<ContainerModule> & { api: KubeApi }) {
const { ctx, api, service, runtimeContext, log, devMode, hotReload } = params
const k8sCtx = <KubernetesPluginContext>ctx

const namespaceStatus = await getAppNamespaceStatus(k8sCtx, log, k8sCtx.provider)
const namespace = namespaceStatus.namespaceName

const { manifests } = await createContainerManifests({
ctx: k8sCtx,
api,
log,
service,
runtimeContext,
@@ -121,9 +124,9 @@ export async function deployContainerServiceRolling(params: DeployServiceParams<
})

const provider = k8sCtx.provider
const pruneSelector = gardenAnnotationKey("service") + "=" + service.name
const pruneLabels = { [gardenAnnotationKey("service")]: service.name }

await apply({ log, ctx, provider, manifests, namespace, pruneSelector })
await apply({ log, ctx, api, provider, manifests, namespace, pruneLabels })

await waitForResources({
namespace,
@@ -136,15 +139,16 @@ export async function deployContainerServiceRolling(params: DeployServiceParams<
})
}

export async function deployContainerServiceBlueGreen(params: DeployServiceParams<ContainerModule>) {
const { ctx, service, runtimeContext, log, devMode, hotReload } = params
export async function deployContainerServiceBlueGreen(params: DeployServiceParams<ContainerModule> & { api: KubeApi }) {
const { ctx, api, service, runtimeContext, log, devMode, hotReload } = params
const k8sCtx = <KubernetesPluginContext>ctx
const namespaceStatus = await getAppNamespaceStatus(k8sCtx, log, k8sCtx.provider)
const namespace = namespaceStatus.namespaceName

// Create all the resource manifests for the Garden service which will be deployed
const { manifests } = await createContainerManifests({
ctx: k8sCtx,
api,
log,
service,
runtimeContext,
@@ -154,7 +158,6 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam
})

const provider = k8sCtx.provider
const api = await KubeApi.factory(log, ctx, provider)

// Retrieve the k8s service referring to the Garden service which is already deployed
const currentService = (await api.core.listNamespacedService(namespace)).items.filter(
@@ -167,7 +170,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam
if (!isServiceAlreadyDeployed) {
// No service found, no need to execute a blue-green deployment
// Just apply all the resources for the Garden service
await apply({ log, ctx, provider, manifests, namespace })
await apply({ log, ctx, api, provider, manifests, namespace })
await waitForResources({
namespace,
ctx,
@@ -187,7 +190,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam
const filteredManifests = manifests.filter((manifest) => manifest.kind !== "Service")

// Apply new Deployment manifest (deploy the Green version)
await apply({ log, ctx, provider, manifests: filteredManifests, namespace })
await apply({ log, ctx, api, provider, manifests: filteredManifests, namespace })
await waitForResources({
namespace,
ctx,
@@ -223,7 +226,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam
// If the result is outdated it means something in the Service definition itself changed
// and we need to apply the whole Service manifest. Otherwise we just patch it.
if (result.state === "outdated") {
await apply({ log, ctx, provider, manifests: [patchedServiceManifest], namespace })
await apply({ log, ctx, api, provider, manifests: [patchedServiceManifest], namespace })
} else {
await api.core.patchNamespacedService(service.name, namespace, servicePatchBody)
}
@@ -255,6 +258,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam

export async function createContainerManifests({
ctx,
api,
log,
service,
runtimeContext,
@@ -263,6 +267,7 @@ export async function createContainerManifests({
blueGreen,
}: {
ctx: PluginContext
api: KubeApi
log: LogEntry
service: ContainerService
runtimeContext: RuntimeContext
@@ -274,7 +279,6 @@ export async function createContainerManifests({
const provider = k8sCtx.provider
const { production } = ctx
const namespace = await getAppNamespace(k8sCtx, log, provider)
const api = await KubeApi.factory(log, ctx, provider)
const ingresses = await createIngressResources(api, provider, namespace, service, log)
const workload = await createWorkloadManifest({
api,
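Note the switch above from a preformatted `pruneSelector` string to a
structured `pruneLabels` map: the plugin now lists resources through the
Kubernetes API instead of passing a selector string to `kubectl apply
--prune --selector`. For equality-based selectors, the two forms are
interchangeable, as this sketch shows (hypothetical helper, not part of
the commit):

```ts
// Sketch: serializing a label map back into the selector-string form that
// was previously passed to `kubectl apply --prune --selector`.
// Only equality-based matches are handled here.
function toSelectorString(pruneLabels: { [label: string]: string }): string {
  return Object.entries(pruneLabels)
    .map(([label, value]) => `${label}=${value}`)
    .join(",")
}

// toSelectorString({ "example.com/service": "my-service" })
// => "example.com/service=my-service"
```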
1 change: 1 addition & 0 deletions core/src/plugins/kubernetes/container/status.ts
@@ -48,6 +48,7 @@ export async function getContainerServiceStatus({
// FIXME: [objects, matched] and ingresses can be run in parallel
const { workload, manifests } = await createContainerManifests({
ctx: k8sCtx,
api,
log,
service,
runtimeContext,
6 changes: 4 additions & 2 deletions core/src/plugins/kubernetes/helm/deployment.ts
@@ -22,6 +22,7 @@ import { getServiceResource, getServiceResourceSpec } from "../util"
import { getModuleNamespace, getModuleNamespaceStatus } from "../namespace"
import { getHotReloadSpec, configureHotReload, getHotReloadContainerName } from "../hot-reload/helpers"
import { configureDevMode, startDevModeSync } from "../dev-mode"
import { KubeApi } from "../api"

export async function deployHelmService({
ctx,
@@ -38,6 +39,7 @@

const k8sCtx = ctx as KubernetesPluginContext
const provider = k8sCtx.provider
const api = await KubeApi.factory(log, ctx, provider)

const namespaceStatus = await getModuleNamespaceStatus({
ctx: k8sCtx,
@@ -104,15 +106,15 @@
spec: service.spec.devMode,
containerName: service.spec.devMode?.containerName,
})
await apply({ log, ctx, provider, manifests: [serviceResource], namespace })
await apply({ log, ctx, api, provider, manifests: [serviceResource], namespace })
} else if (hotReload && hotReloadSpec && serviceResourceSpec && serviceResource) {
configureHotReload({
target: serviceResource,
hotReloadSpec,
hotReloadArgs: serviceResourceSpec.hotReloadArgs,
containerName: getHotReloadContainerName(module),
})
await apply({ log, ctx, provider, manifests: [serviceResource], namespace })
await apply({ log, ctx, api, provider, manifests: [serviceResource], namespace })
}

// FIXME: we should get these objects from the cluster, and not from the local `helm template` command, because
7 changes: 4 additions & 3 deletions core/src/plugins/kubernetes/integrations/cert-manager.ts
@@ -229,7 +229,7 @@ export async function setupCertManager({ ctx, provider, log, status }: SetupCert
const customResourcesPath = join(STATIC_DIR, "kubernetes", "system", "cert-manager", "cert-manager-crd.yaml")
const crd = yaml.safeLoadAll((await readFile(customResourcesPath)).toString()).filter((x) => x)
entry.setState("Installing Custom Resources...")
await apply({ log, ctx, provider, manifests: crd, validate: false })
await apply({ log, ctx, api, provider, manifests: crd, validate: false })

const waitForCertManagerPods: WaitForResourcesParams = {
ctx,
@@ -250,6 +250,7 @@
msg: `Processing certificates...`,
status: "active",
})
const api = await KubeApi.factory(log, ctx, provider)
const issuers: any[] = []
const certificates: any[] = []
const secretNames: string[] = []
@@ -279,10 +280,10 @@

if (issuers.length > 0) {
certsLog.setState("Creating Issuers...")
await apply({ log, ctx, provider, manifests: issuers })
await apply({ log, ctx, api, provider, manifests: issuers })
certsLog.setState("Issuers created.")

await apply({ log, ctx, provider, manifests: certificates, namespace })
await apply({ log, ctx, api, provider, manifests: certificates, namespace })
certsLog.setState("Creating Certificates...")

const certificateNames = certificates.map((cert) => cert.metadata.name)
68 changes: 64 additions & 4 deletions core/src/plugins/kubernetes/kubectl.ts
@@ -16,15 +16,40 @@ import { gardenAnnotationKey } from "../../util/string"
import { hashManifest } from "./util"
import { PluginToolSpec } from "../../types/plugin/tools"
import { PluginContext } from "../../plugin-context"
import { KubeApi } from "./api"

// Corresponds to the default prune whitelist in `kubectl`.
// See: https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/apply/prune.go#L176-L192
const versionedPruneKinds = [
{ apiVersion: "v1", kind: "ConfigMap" },
{ apiVersion: "v1", kind: "Endpoints" },
{ apiVersion: "v1", kind: "Namespace" },
{ apiVersion: "v1", kind: "PersistentVolumeClaim" },
{ apiVersion: "v1", kind: "PersistentVolume" },
{ apiVersion: "v1", kind: "Pod" },
{ apiVersion: "v1", kind: "ReplicationController" },
{ apiVersion: "v1", kind: "Secret" },
{ apiVersion: "v1", kind: "Service" },
{ apiVersion: "batch/v1", kind: "Job" },
{ apiVersion: "batch/v1", kind: "CronJob" },
{ apiVersion: "batch/v1beta1", kind: "CronJob" },
{ apiVersion: "extensions/v1beta1", kind: "Ingress" },
{ apiVersion: "networking.k8s.io/v1", kind: "Ingress" },
{ apiVersion: "apps/v1", kind: "DaemonSet" },
{ apiVersion: "apps/v1", kind: "Deployment" },
{ apiVersion: "apps/v1", kind: "ReplicaSet" },
{ apiVersion: "apps/v1", kind: "StatefulSet" },
]

export interface ApplyParams {
log: LogEntry
ctx: PluginContext
api: KubeApi
provider: KubernetesProvider
manifests: KubernetesResource[]
namespace?: string
dryRun?: boolean
pruneSelector?: string
pruneLabels?: { [label: string]: string }
validate?: boolean
}

@@ -33,11 +58,12 @@ export const KUBECTL_DEFAULT_TIMEOUT = 300
export async function apply({
log,
ctx,
api,
provider,
manifests,
dryRun = false,
namespace,
pruneSelector,
pruneLabels,
validate = true,
}: ApplyParams) {
// Hash the raw input and add as an annotation on each manifest (this is helpful beyond kubectl's own annotation,
@@ -53,16 +79,50 @@
manifest.metadata.annotations[gardenAnnotationKey("manifest-hash")] = await hashManifest(manifest)
}

// The `--prune` option for `kubectl apply` currently isn't backwards-compatible, so here, we essentially
// reimplement the pruning logic. This enables us to prune resources in a way that works for newer and older
// versions of Kubernetes, while still being able to use an up-to-date version of `kubectl`.
//
// This really should be fixed in `kubectl` proper. In fact, simply including resource mappings for older/beta API
// versions and adding the appropriate error handling for missing API/resource versions to the pruning logic would
// be enough to make `kubectl apply --prune` backwards-compatible.
let resourcesToPrune: KubernetesResource[] = []
if (namespace && pruneLabels) {
// Fetch all deployed resources in the namespace matching `pruneLabels` (for all resource kinds represented in
// `versionedPruneKinds` - see its definition above).
const resourcesForLabels = await api.listResourcesForKinds({
log,
namespace,
versionedKinds: versionedPruneKinds,
labelSelector: pruneLabels,
})

// We only prune resources that were created/updated via `kubectl apply` (this is how `kubectl apply --prune` works)
// and that don't match any of the applied manifests by kind and name.
resourcesToPrune = resourcesForLabels
.filter((r) => r.metadata.annotations?.["kubectl.kubernetes.io/last-applied-configuration"])
.filter((r) => !manifests.find((m) => m.kind === r.kind && m.metadata.name === r.metadata.name))
}

const input = Buffer.from(encodeYamlMulti(manifests))

let args = ["apply"]
dryRun && args.push("--dry-run")
pruneSelector && args.push("--prune", "--selector", pruneSelector)
args.push("--output=json", "-f", "-")
!validate && args.push("--validate=false")

const result = await kubectl(ctx, provider).stdout({ log, namespace, args, input })

if (namespace && resourcesToPrune.length > 0) {
await deleteResources({
log,
ctx,
provider,
namespace,
resources: resourcesToPrune,
})
}

try {
return JSON.parse(result)
} catch (_) {
@@ -232,7 +292,7 @@ export const kubectlSpec: PluginToolSpec = {
platform: "windows",
architecture: "amd64",
url: "https://storage.googleapis.com/kubernetes-release/release/v1.22.3/bin/windows/amd64/kubectl.exe",
sha256: "c1c148569b1aa500fc46151756c497d7fbbff0789f316d7be444ace1dc793593"
sha256: "c1c148569b1aa500fc46151756c497d7fbbff0789f316d7be444ace1dc793593",
},
],
}
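
Taken together, a caller that wants pruning now passes the API client and
a label map, roughly like this (a sketch with placeholder values;
`manifests` is assumed to be a previously built manifest list):

```ts
// Sketch of the updated apply() contract (placeholder namespace and labels).
await apply({
  log,
  ctx,
  api, // now a required parameter, so apply() can list prune candidates itself
  provider,
  manifests,
  namespace: "my-namespace",
  pruneLabels: { "example.com/service": "my-service" },
})
// After the apply, labeled resources in the namespace that carry the
// `last-applied-configuration` annotation but don't appear in `manifests`
// are deleted via deleteResources().
```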