From b960b9c69857805deaad1cbe301444c97d5fe309 Mon Sep 17 00:00:00 2001 From: Thorarinn Sigurdsson Date: Sat, 30 Oct 2021 13:27:08 -0700 Subject: [PATCH] fix(k8s): backwards-compatible deployment logic This fixes an issue when using newer versions of `kubectl` with older cluster versions. When `kubectl apply --prune` is run against a cluster that serves different API versions of certain resources than the ones `kubectl` knows about (most commonly `Ingress` and `CronJob`), it will fail, because `kubectl apply --prune` can only handle the resource kinds hardcoded into the current version of `kubectl`. See: https://github.com/kubernetes/kubectl/blob/0a152f10/pkg/cmd/apply/prune.go#L176-L192 Since the `--prune` option for `kubectl apply` currently isn't backwards-compatible, we essentially reimplement the pruning logic in our Kubernetes plugin, so that resources can be pruned in a way that works with both newer and older versions of Kubernetes. This really should be fixed in `kubectl` proper. In fact, simply including resource mappings for older/beta API versions and adding the appropriate error handling for missing API/resource versions to the pruning logic would be enough to make `kubectl apply --prune` backwards-compatible. If/when we have time, we should try to contribute a fix to `kubectl` along these lines. --- core/src/plugins/kubernetes/api.ts | 41 +++++++++- .../commands/cleanup-cluster-registry.ts | 2 + .../kubernetes/container/deployment.ts | 30 ++++---- .../plugins/kubernetes/container/status.ts | 1 + .../src/plugins/kubernetes/helm/deployment.ts | 6 +- .../kubernetes/integrations/cert-manager.ts | 7 +- core/src/plugins/kubernetes/kubectl.ts | 68 ++++++++++++++++- .../kubernetes/kubernetes-module/handlers.ts | 10 +-- .../kubernetes/container/deployment.ts | 74 ++++++++++++++++++- .../plugins/kubernetes/container/ingress.ts | 4 +- 10 files changed, 210 insertions(+), 33 deletions(-) diff --git a/core/src/plugins/kubernetes/api.ts b/core/src/plugins/kubernetes/api.ts index a1ef38c485..415d74f6e7 100644 --- a/core/src/plugins/kubernetes/api.ts +++ b/core/src/plugins/kubernetes/api.ts @@ -11,6 +11,7 @@ import { IncomingMessage } from "http" import { Agent } from "https" import { ReadStream } from "tty" +import Bluebird from "bluebird" import chalk from "chalk" import httpStatusCodes from "http-status-codes" import { @@ -43,7 +44,7 @@ import { readFile } from "fs-extra" import { lookup } from "dns-lookup-cache" import { Omit, safeDumpYaml, StringCollector, sleep } from "../../util/util" -import { omitBy, isObject, isPlainObject, keyBy } from "lodash" +import { omitBy, isObject, isPlainObject, keyBy, flatten } from "lodash" import { GardenBaseError, RuntimeError, ConfigurationError } from "../../exceptions" import { KubernetesResource, @@ -435,6 +436,44 @@ export class KubeApi { return list } + /** + * Fetches all resources in the namespace matching the provided API version + kind pairs, optionally filtered by + * `labelSelector`. + * + * Useful when resources of several kinds need to be fetched at once.
+ */ + async listResourcesForKinds({ + log, + namespace, + versionedKinds, + labelSelector, + }: { + log: LogEntry + namespace: string + versionedKinds: { apiVersion: string; kind: string }[] + labelSelector?: { [label: string]: string } + }): Promise { + const resources = await Bluebird.map(versionedKinds, async ({ apiVersion, kind }) => { + try { + const resourceListForKind = await this.listResources({ + log, + apiVersion, + kind, + namespace, + labelSelector, + }) + return resourceListForKind.items + } catch (err) { + if (err.statusCode === 404) { + // Then this resource version + kind is not available in the cluster. + return [] + } + throw err + } + }) + return flatten(resources) + } + async replace({ log, resource, diff --git a/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts b/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts index 599c8ab88a..49078aba03 100644 --- a/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts +++ b/core/src/plugins/kubernetes/commands/cleanup-cluster-registry.ts @@ -255,6 +255,7 @@ async function runRegistryGarbageCollection(ctx: KubernetesPluginContext, api: K try { await apply({ ctx, + api, log, provider, manifests: [modifiedDeployment], @@ -301,6 +302,7 @@ async function runRegistryGarbageCollection(ctx: KubernetesPluginContext, api: K await apply({ ctx, + api, log, provider, manifests: [writableRegistry], diff --git a/core/src/plugins/kubernetes/container/deployment.ts b/core/src/plugins/kubernetes/container/deployment.ts index c112692ef7..a7c946fa85 100644 --- a/core/src/plugins/kubernetes/container/deployment.ts +++ b/core/src/plugins/kubernetes/container/deployment.ts @@ -48,11 +48,13 @@ export async function deployContainerService( ): Promise { const { ctx, service, log, devMode } = params const { deploymentStrategy } = params.ctx.provider.config + const k8sCtx = ctx + const api = await KubeApi.factory(log, k8sCtx, k8sCtx.provider) if (deploymentStrategy === "blue-green") { - await deployContainerServiceBlueGreen(params) + await deployContainerServiceBlueGreen({ ...params, api }) } else { - await deployContainerServiceRolling(params) + await deployContainerServiceRolling({ ...params, api }) } const status = await getContainerServiceStatus(params) @@ -103,8 +105,8 @@ export async function startContainerDevSync({ }) } -export async function deployContainerServiceRolling(params: DeployServiceParams) { - const { ctx, service, runtimeContext, log, devMode, hotReload } = params +export async function deployContainerServiceRolling(params: DeployServiceParams & { api: KubeApi }) { + const { ctx, api, service, runtimeContext, log, devMode, hotReload } = params const k8sCtx = ctx const namespaceStatus = await getAppNamespaceStatus(k8sCtx, log, k8sCtx.provider) @@ -112,6 +114,7 @@ export async function deployContainerServiceRolling(params: DeployServiceParams< const { manifests } = await createContainerManifests({ ctx: k8sCtx, + api, log, service, runtimeContext, @@ -121,9 +124,9 @@ export async function deployContainerServiceRolling(params: DeployServiceParams< }) const provider = k8sCtx.provider - const pruneSelector = gardenAnnotationKey("service") + "=" + service.name + const pruneLabels = { [gardenAnnotationKey("service")]: service.name } - await apply({ log, ctx, provider, manifests, namespace, pruneSelector }) + await apply({ log, ctx, api, provider, manifests, namespace, pruneLabels }) await waitForResources({ namespace, @@ -136,8 +139,8 @@ export async function deployContainerServiceRolling(params: 
DeployServiceParams< }) } -export async function deployContainerServiceBlueGreen(params: DeployServiceParams) { - const { ctx, service, runtimeContext, log, devMode, hotReload } = params +export async function deployContainerServiceBlueGreen(params: DeployServiceParams & { api: KubeApi }) { + const { ctx, api, service, runtimeContext, log, devMode, hotReload } = params const k8sCtx = ctx const namespaceStatus = await getAppNamespaceStatus(k8sCtx, log, k8sCtx.provider) const namespace = namespaceStatus.namespaceName @@ -145,6 +148,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam // Create all the resource manifests for the Garden service which will be deployed const { manifests } = await createContainerManifests({ ctx: k8sCtx, + api, log, service, runtimeContext, @@ -154,7 +158,6 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam }) const provider = k8sCtx.provider - const api = await KubeApi.factory(log, ctx, provider) // Retrieve the k8s service referring to the Garden service which is already deployed const currentService = (await api.core.listNamespacedService(namespace)).items.filter( @@ -167,7 +170,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam if (!isServiceAlreadyDeployed) { // No service found, no need to execute a blue-green deployment // Just apply all the resources for the Garden service - await apply({ log, ctx, provider, manifests, namespace }) + await apply({ log, ctx, api, provider, manifests, namespace }) await waitForResources({ namespace, ctx, @@ -187,7 +190,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam const filteredManifests = manifests.filter((manifest) => manifest.kind !== "Service") // Apply new Deployment manifest (deploy the Green version) - await apply({ log, ctx, provider, manifests: filteredManifests, namespace }) + await apply({ log, ctx, api, provider, manifests: filteredManifests, namespace }) await waitForResources({ namespace, ctx, @@ -223,7 +226,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam // If the result is outdated it means something in the Service definition itself changed // and we need to apply the whole Service manifest. Otherwise we just patch it. 
if (result.state === "outdated") { - await apply({ log, ctx, provider, manifests: [patchedServiceManifest], namespace }) + await apply({ log, ctx, api, provider, manifests: [patchedServiceManifest], namespace }) } else { await api.core.patchNamespacedService(service.name, namespace, servicePatchBody) } @@ -255,6 +258,7 @@ export async function deployContainerServiceBlueGreen(params: DeployServiceParam export async function createContainerManifests({ ctx, + api, log, service, runtimeContext, @@ -263,6 +267,7 @@ export async function createContainerManifests({ blueGreen, }: { ctx: PluginContext + api: KubeApi log: LogEntry service: ContainerService runtimeContext: RuntimeContext @@ -274,7 +279,6 @@ export async function createContainerManifests({ const provider = k8sCtx.provider const { production } = ctx const namespace = await getAppNamespace(k8sCtx, log, provider) - const api = await KubeApi.factory(log, ctx, provider) const ingresses = await createIngressResources(api, provider, namespace, service, log) const workload = await createWorkloadManifest({ api, diff --git a/core/src/plugins/kubernetes/container/status.ts b/core/src/plugins/kubernetes/container/status.ts index 78c3a87d60..8fe9a0f861 100644 --- a/core/src/plugins/kubernetes/container/status.ts +++ b/core/src/plugins/kubernetes/container/status.ts @@ -48,6 +48,7 @@ export async function getContainerServiceStatus({ // FIXME: [objects, matched] and ingresses can be run in parallel const { workload, manifests } = await createContainerManifests({ ctx: k8sCtx, + api, log, service, runtimeContext, diff --git a/core/src/plugins/kubernetes/helm/deployment.ts b/core/src/plugins/kubernetes/helm/deployment.ts index 596a03e428..c557a056dd 100644 --- a/core/src/plugins/kubernetes/helm/deployment.ts +++ b/core/src/plugins/kubernetes/helm/deployment.ts @@ -22,6 +22,7 @@ import { getServiceResource, getServiceResourceSpec } from "../util" import { getModuleNamespace, getModuleNamespaceStatus } from "../namespace" import { getHotReloadSpec, configureHotReload, getHotReloadContainerName } from "../hot-reload/helpers" import { configureDevMode, startDevModeSync } from "../dev-mode" +import { KubeApi } from "../api" export async function deployHelmService({ ctx, @@ -38,6 +39,7 @@ export async function deployHelmService({ const k8sCtx = ctx as KubernetesPluginContext const provider = k8sCtx.provider + const api = await KubeApi.factory(log, ctx, provider) const namespaceStatus = await getModuleNamespaceStatus({ ctx: k8sCtx, @@ -104,7 +106,7 @@ export async function deployHelmService({ spec: service.spec.devMode, containerName: service.spec.devMode?.containerName, }) - await apply({ log, ctx, provider, manifests: [serviceResource], namespace }) + await apply({ log, ctx, api, provider, manifests: [serviceResource], namespace }) } else if (hotReload && hotReloadSpec && serviceResourceSpec && serviceResource) { configureHotReload({ target: serviceResource, @@ -112,7 +114,7 @@ export async function deployHelmService({ hotReloadArgs: serviceResourceSpec.hotReloadArgs, containerName: getHotReloadContainerName(module), }) - await apply({ log, ctx, provider, manifests: [serviceResource], namespace }) + await apply({ log, ctx, api, provider, manifests: [serviceResource], namespace }) } // FIXME: we should get these objects from the cluster, and not from the local `helm template` command, because diff --git a/core/src/plugins/kubernetes/integrations/cert-manager.ts b/core/src/plugins/kubernetes/integrations/cert-manager.ts index e3f2a3065e..ad2b75644c 100644 --- 
a/core/src/plugins/kubernetes/integrations/cert-manager.ts +++ b/core/src/plugins/kubernetes/integrations/cert-manager.ts @@ -229,7 +229,7 @@ export async function setupCertManager({ ctx, provider, log, status }: SetupCert const customResourcesPath = join(STATIC_DIR, "kubernetes", "system", "cert-manager", "cert-manager-crd.yaml") const crd = yaml.safeLoadAll((await readFile(customResourcesPath)).toString()).filter((x) => x) entry.setState("Installing Custom Resources...") - await apply({ log, ctx, provider, manifests: crd, validate: false }) + await apply({ log, ctx, api, provider, manifests: crd, validate: false }) const waitForCertManagerPods: WaitForResourcesParams = { ctx, @@ -250,6 +250,7 @@ export async function setupCertManager({ ctx, provider, log, status }: SetupCert msg: `Processing certificates...`, status: "active", }) + const api = await KubeApi.factory(log, ctx, provider) const issuers: any[] = [] const certificates: any[] = [] const secretNames: string[] = [] @@ -279,10 +280,10 @@ export async function setupCertManager({ ctx, provider, log, status }: SetupCert if (issuers.length > 0) { certsLog.setState("Creating Issuers...") - await apply({ log, ctx, provider, manifests: issuers }) + await apply({ log, ctx, api, provider, manifests: issuers }) certsLog.setState("Issuers created.") - await apply({ log, ctx, provider, manifests: certificates, namespace }) + await apply({ log, ctx, api, provider, manifests: certificates, namespace }) certsLog.setState("Creating Certificates...") const certificateNames = certificates.map((cert) => cert.metadata.name) diff --git a/core/src/plugins/kubernetes/kubectl.ts b/core/src/plugins/kubernetes/kubectl.ts index 80ce52f142..36c4a945dd 100644 --- a/core/src/plugins/kubernetes/kubectl.ts +++ b/core/src/plugins/kubernetes/kubectl.ts @@ -16,15 +16,40 @@ import { gardenAnnotationKey } from "../../util/string" import { hashManifest } from "./util" import { PluginToolSpec } from "../../types/plugin/tools" import { PluginContext } from "../../plugin-context" +import { KubeApi } from "./api" + +// Corresponds to the default prune whitelist in `kubectl`. 
+// See: https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/apply/prune.go#L176-L192 +const versionedPruneKinds = [ + { apiVersion: "v1", kind: "ConfigMap" }, + { apiVersion: "v1", kind: "Endpoints" }, + { apiVersion: "v1", kind: "Namespace" }, + { apiVersion: "v1", kind: "PersistentVolumeClaim" }, + { apiVersion: "v1", kind: "PersistentVolume" }, + { apiVersion: "v1", kind: "Pod" }, + { apiVersion: "v1", kind: "ReplicationController" }, + { apiVersion: "v1", kind: "Secret" }, + { apiVersion: "v1", kind: "Service" }, + { apiVersion: "batch/v1", kind: "Job" }, + { apiVersion: "batch/v1", kind: "CronJob" }, + { apiVersion: "batch/v1beta1", kind: "CronJob" }, + { apiVersion: "extensions/v1beta1", kind: "Ingress" }, + { apiVersion: "networking.k8s.io/v1", kind: "Ingress" }, + { apiVersion: "apps/v1", kind: "DaemonSet" }, + { apiVersion: "apps/v1", kind: "Deployment" }, + { apiVersion: "apps/v1", kind: "ReplicaSet" }, + { apiVersion: "apps/v1", kind: "StatefulSet" }, +] export interface ApplyParams { log: LogEntry ctx: PluginContext + api: KubeApi provider: KubernetesProvider manifests: KubernetesResource[] namespace?: string dryRun?: boolean - pruneSelector?: string + pruneLabels?: { [label: string]: string } validate?: boolean } @@ -33,11 +58,12 @@ export const KUBECTL_DEFAULT_TIMEOUT = 300 export async function apply({ log, ctx, + api, provider, manifests, dryRun = false, namespace, - pruneSelector, + pruneLabels, validate = true, }: ApplyParams) { // Hash the raw input and add as an annotation on each manifest (this is helpful beyond kubectl's own annotation, @@ -53,16 +79,50 @@ export async function apply({ manifest.metadata.annotations[gardenAnnotationKey("manifest-hash")] = await hashManifest(manifest) } + // The `--prune` option for `kubectl apply` currently isn't backwards-compatible, so here, we essentially + // reimplement the pruning logic. This enables us to prune resources in a way that works for newer and older + // versions of Kubernetes, while still being able to use an up-to-date version of `kubectl`. + // + // This really should be fixed in `kubectl` proper. In fact, simply including resource mappings for older/beta API + // versions and adding the appropriate error handling for missing API/resource versions to the pruning logic would + // be enough to make `kubectl apply --prune` backwards-compatible. + let resourcesToPrune: KubernetesResource[] = [] + if (namespace && pruneLabels) { + // Fetch all deployed resources in the namespace matching `pruneLabels` (for all resource kinds represented in + // `versionedPruneKinds` - see its definition above). + const resourcesForLabels = await api.listResourcesForKinds({ + log, + namespace, + versionedKinds: versionedPruneKinds, + labelSelector: pruneLabels, + }) + + // We only prune resources that were created/updated via `kubectl apply` (this is how `kubectl apply --prune` works) + // and that don't match any of the applied manifests by kind and name.
+ resourcesToPrune = resourcesForLabels + .filter((r) => r.metadata.annotations?.["kubectl.kubernetes.io/last-applied-configuration"]) + .filter((r) => !manifests.find((m) => m.kind === r.kind && m.metadata.name === r.metadata.name)) + } + const input = Buffer.from(encodeYamlMulti(manifests)) let args = ["apply"] dryRun && args.push("--dry-run") - pruneSelector && args.push("--prune", "--selector", pruneSelector) args.push("--output=json", "-f", "-") !validate && args.push("--validate=false") const result = await kubectl(ctx, provider).stdout({ log, namespace, args, input }) + if (namespace && resourcesToPrune.length > 0) { + await deleteResources({ + log, + ctx, + provider, + namespace, + resources: resourcesToPrune, + }) + } + try { return JSON.parse(result) } catch (_) { @@ -232,7 +292,7 @@ export const kubectlSpec: PluginToolSpec = { platform: "windows", architecture: "amd64", url: "https://storage.googleapis.com/kubernetes-release/release/v1.22.3/bin/windows/amd64/kubectl.exe", - sha256: "c1c148569b1aa500fc46151756c497d7fbbff0789f316d7be444ace1dc793593" + sha256: "c1c148569b1aa500fc46151756c497d7fbbff0789f316d7be444ace1dc793593", }, ], } diff --git a/core/src/plugins/kubernetes/kubernetes-module/handlers.ts b/core/src/plugins/kubernetes/kubernetes-module/handlers.ts index a24a0cdd52..a00390d50a 100644 --- a/core/src/plugins/kubernetes/kubernetes-module/handlers.ts +++ b/core/src/plugins/kubernetes/kubernetes-module/handlers.ts @@ -164,7 +164,7 @@ export async function deployKubernetesService( if (namespaceManifests.length > 0) { // Don't prune namespaces - await apply({ log, ctx, provider, manifests: namespaceManifests }) + await apply({ log, ctx, api, provider, manifests: namespaceManifests }) await waitForResources({ namespace, ctx, @@ -178,7 +178,7 @@ export async function deployKubernetesService( let target: HotReloadableResource | undefined - const pruneSelector = getSelector(service) + const pruneLabels = { [gardenAnnotationKey("service")]: service.name } if (otherManifests.length > 0) { const prepareResult = await prepareManifestsForSync({ ctx: k8sCtx, @@ -192,7 +192,7 @@ export async function deployKubernetesService( target = prepareResult.target - await apply({ log, ctx, provider: k8sCtx.provider, manifests: prepareResult.manifests, pruneSelector }) + await apply({ log, ctx, api, provider: k8sCtx.provider, manifests: prepareResult.manifests, pruneLabels }) await waitForResources({ namespace, ctx, @@ -319,10 +319,6 @@ async function getServiceLogs(params: GetServiceLogsParams) { return streamK8sLogs({ ...params, provider, defaultNamespace: namespace, resources: manifests }) } -function getSelector(service: KubernetesService) { - return `${gardenAnnotationKey("service")}=${service.name}` -} - /** * Looks for a hot-reload or dev-mode target in a list of manifests. If found, the target is either * configured for hot-reloading/dev-mode or annotated with `dev-mode: false` and/or `hot-reload: false`. 
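
For reference, the pruning flow added to `apply()` in `kubectl.ts` above boils down to the following standalone sketch. The `MinimalApi` interface and the `selectResourcesToPrune` helper are simplified stand-ins invented for illustration; in the patch itself this logic lives inline in `apply()`, uses the new `KubeApi.listResourcesForKinds()` method, and hands the result to the existing `deleteResources()` helper once `kubectl apply` has succeeded.

import Bluebird from "bluebird"
import { flatten } from "lodash"

interface KubernetesResource {
  apiVersion: string
  kind: string
  metadata: { name: string; annotations?: { [key: string]: string } }
}

// Simplified stand-in for the real `KubeApi` used in the patch.
interface MinimalApi {
  listResources(params: {
    apiVersion: string
    kind: string
    namespace: string
    labelSelector?: { [label: string]: string }
  }): Promise<{ items: KubernetesResource[] }>
}

/**
 * Returns the deployed resources that `apply()` should prune: for every API version + kind pair, list the
 * resources matching `pruneLabels` (ignoring kinds the cluster doesn't serve), then keep only those that carry
 * kubectl's "last-applied-configuration" annotation and that are absent from the manifests being applied.
 */
async function selectResourcesToPrune({
  api,
  namespace,
  pruneLabels,
  versionedKinds,
  manifests,
}: {
  api: MinimalApi
  namespace: string
  pruneLabels: { [label: string]: string }
  versionedKinds: { apiVersion: string; kind: string }[]
  manifests: KubernetesResource[]
}): Promise<KubernetesResource[]> {
  const deployed = flatten(
    await Bluebird.map(versionedKinds, async ({ apiVersion, kind }) => {
      try {
        const res = await api.listResources({ apiVersion, kind, namespace, labelSelector: pruneLabels })
        return res.items
      } catch (err: any) {
        if (err.statusCode === 404) {
          // This apiVersion + kind isn't served by the cluster, so there's nothing to prune for it.
          return []
        }
        throw err
      }
    })
  )

  return deployed
    .filter((r) => r.metadata.annotations?.["kubectl.kubernetes.io/last-applied-configuration"])
    .filter((r) => !manifests.find((m) => m.kind === r.kind && m.metadata.name === r.metadata.name))
}

Treating a 404 from the list call as an empty result is what makes the whitelist safe to use against clusters that don't serve some of the listed API versions (for example `batch/v1beta1` CronJobs on newer clusters, or `networking.k8s.io/v1` Ingresses on older ones).
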
diff --git a/core/test/integ/src/plugins/kubernetes/container/deployment.ts b/core/test/integ/src/plugins/kubernetes/container/deployment.ts index 34f0ffbac3..825bb05caa 100644 --- a/core/test/integ/src/plugins/kubernetes/container/deployment.ts +++ b/core/test/integ/src/plugins/kubernetes/container/deployment.ts @@ -13,7 +13,7 @@ import { emptyRuntimeContext } from "../../../../../../src/runtime-context" import { KubeApi } from "../../../../../../src/plugins/kubernetes/api" import { createWorkloadManifest } from "../../../../../../src/plugins/kubernetes/container/deployment" import { KubernetesPluginContext, KubernetesProvider } from "../../../../../../src/plugins/kubernetes/config" -import { V1Secret } from "@kubernetes/client-node" +import { V1ConfigMap, V1Secret } from "@kubernetes/client-node" import { KubernetesResource } from "../../../../../../src/plugins/kubernetes/types" import { cloneDeep, keyBy } from "lodash" import { getContainerTestGarden } from "./container" @@ -25,6 +25,9 @@ import { kilobytesToString, millicpuToString } from "../../../../../../src/plugi import { getResourceRequirements } from "../../../../../../src/plugins/kubernetes/container/util" import { isConfiguredForDevMode } from "../../../../../../src/plugins/kubernetes/status/status" import { ContainerService } from "../../../../../../src/plugins/container/config" +import { apply } from "../../../../../../src/plugins/kubernetes/kubectl" +import { getAppNamespace } from "../../../../../../src/plugins/kubernetes/namespace" +import { gardenAnnotationKey } from "../../../../../../src/util/string" describe("kubernetes container deployment handlers", () => { let garden: Garden @@ -537,6 +540,75 @@ describe("kubernetes container deployment handlers", () => { ]) }) + it("should prune previously applied resources when deploying", async () => { + const log = garden.log + const service = graph.getService("simple-service") + const namespace = await getAppNamespace(ctx, log, provider) + + const mapToNotPruneKey = "should-not-be-pruned" + const mapToPruneKey = "should-be-pruned" + + const labels = { [gardenAnnotationKey("service")]: service.name } + + // This `ConfigMap` is created through `kubectl apply` below, which will add the + // "kubectl.kubernetes.io/last-applied-configuration" annotation. We don't prune resources that lack this + // annotation. + const configMapToPrune: KubernetesResource<V1ConfigMap> = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: mapToPruneKey, + annotations: { ...labels }, + labels: { ...labels }, + }, + data: {}, + } + + await apply({ log, ctx, api, provider, manifests: [configMapToPrune], namespace }) + + // Here, we create via the k8s API (not `kubectl apply`), so that unlike `configMapToPrune`, it won't acquire + // the "last applied" annotation. This means that it should *not* be pruned when we deploy the service, even + // though it has the service's label. + await api.core.createNamespacedConfigMap(namespace, { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: mapToNotPruneKey, + annotations: { ...labels }, + labels: { ...labels }, + }, + data: {}, + }) + + const deployTask = new DeployTask({ + garden, + graph, + log, + service, + force: true, + forceBuild: false, + devModeServiceNames: [], + hotReloadServiceNames: [], + }) + + await garden.processTasks([deployTask], { throwOnError: true }) + + // We expect this `ConfigMap` to still exist. + await api.core.readNamespacedConfigMap(mapToNotPruneKey, namespace) + + // ...and we expect this `ConfigMap` to have been deleted.
+ await expectError( + () => api.core.readNamespacedConfigMap(mapToPruneKey, namespace), + (err) => { + expect(stripAnsi(err.message)).to.match( + /Got error from Kubernetes API - configmaps "should-be-pruned" not found/ + ) + } + ) + + await api.core.deleteNamespacedConfigMap(mapToNotPruneKey, namespace) + }) + it("should ignore empty env vars in status check comparison", async () => { const service: ContainerService = graph.getService("simple-service") service.spec.env = { diff --git a/core/test/integ/src/plugins/kubernetes/container/ingress.ts b/core/test/integ/src/plugins/kubernetes/container/ingress.ts index d6b5317d9c..01eee24fd4 100644 --- a/core/test/integ/src/plugins/kubernetes/container/ingress.ts +++ b/core/test/integ/src/plugins/kubernetes/container/ingress.ts @@ -475,7 +475,7 @@ describe("createIngressResources", () => { if (ingress.apiVersion === "networking.k8s.io/v1") { expect(ingress.spec.ingressClassName).to.equal("nginx") - expect(ingress.metadata.annotations?.["kubernetes.io/ingress.class"]).to.be.empty + expect(ingress.metadata.annotations?.["kubernetes.io/ingress.class"]).to.be.undefined expect(ingress.spec.rules).to.eql([ { host: "my.domain.com", @@ -483,7 +483,7 @@ describe("createIngressResources", () => { paths: [ { path: "/", - pathType: "prefix", + pathType: "Prefix", backend: { service: { name: "my-service",