diff --git a/src/commands/network.mjs b/src/commands/network.mjs
index 1681d92e8..2c79df692 100644
--- a/src/commands/network.mjs
+++ b/src/commands/network.mjs
@@ -184,7 +184,7 @@ export class NetworkCommand extends BaseCommand {
       subTasks.push({
         title: `Check Node: ${chalk.yellow(nodeId)}`,
         task: () =>
-          self.k8.waitForPod(constants.POD_STATUS_RUNNING, [
+          self.k8.waitForPods([constants.POD_PHASE_RUNNING], [
             'fullstack.hedera.com/type=network-node',
             `fullstack.hedera.com/node-name=${nodeId}`
           ], 1, 60 * 15, 1000) // timeout 15 minutes
@@ -374,7 +374,7 @@ export class NetworkCommand extends BaseCommand {
       {
         title: 'Waiting for network pods to be ready',
         task: async (ctx, _) => {
-          await this.k8.waitForPod(constants.POD_STATUS_RUNNING, [
+          await this.k8.waitForPods([constants.POD_PHASE_RUNNING], [
             'fullstack.hedera.com/type=network-node'
           ], 1)
         }
diff --git a/src/commands/node.mjs b/src/commands/node.mjs
index f1304850c..efe8ce319 100644
--- a/src/commands/node.mjs
+++ b/src/commands/node.mjs
@@ -779,7 +779,7 @@ export class NodeCommand extends BaseCommand {
         throw new FullstackTestingError(`failed to stop portForward for podName ${podName} with localPort ${localPort}: ${e.message}`, e)
       }
       try {
-        await this.k8.recyclePodByLabels(podLabels, 50)
+        await this.k8.recyclePodByLabels(podLabels)
       } catch (e) {
         throw new FullstackTestingError(`failed to recycle pod for podName ${podName} with localPort ${localPort}: ${e.message}`, e)
       }
diff --git a/src/commands/relay.mjs b/src/commands/relay.mjs
index 9605407cc..1f988db67 100644
--- a/src/commands/relay.mjs
+++ b/src/commands/relay.mjs
@@ -159,7 +159,7 @@ export class RelayCommand extends BaseCommand {
 
         await self.chartManager.install(namespace, releaseName, chartPath, '', valuesArg)
 
-        await self.k8.waitForPod(constants.POD_STATUS_RUNNING, [
+        await self.k8.waitForPods([constants.POD_PHASE_RUNNING], [
           'app=hedera-json-rpc-relay',
           `app.kubernetes.io/instance=${releaseName}`
         ], 1, 900, 1000)
diff --git a/src/core/constants.mjs b/src/core/constants.mjs
index 8fb052de8..a9c5d6f2e 100644
--- a/src/core/constants.mjs
+++ b/src/core/constants.mjs
@@ -90,7 +90,7 @@ export const ACCOUNT_CREATE_BATCH_SIZE = process.env.ACCOUNT_CREATE_BATCH_SIZE |
 export const NODE_PROXY_USER_ID = process.env.NODE_PROXY_USER_ID || 'admin'
 export const NODE_PROXY_PASSWORD = process.env.NODE_PROXY_PASSWORD || 'adminpwd'
 
-export const POD_STATUS_RUNNING = 'Running'
+export const POD_PHASE_RUNNING = 'Running'
 export const POD_CONDITION_INITIALIZED = 'Initialized'
 export const POD_CONDITION_READY = 'Ready'
diff --git a/src/core/k8.mjs b/src/core/k8.mjs
index cf749f73b..bbef478c9 100644
--- a/src/core/k8.mjs
+++ b/src/core/k8.mjs
@@ -767,7 +767,7 @@ export class K8 {
     }
   }
 
-  async recyclePodByLabels (podLabels, maxAttempts = 50) {
+  async recyclePodByLabels (podLabels, maxAttempts = 30, delay = 2000, waitForPodMaxAttempts = 10, waitForPodDelay = 2000) {
     const podArray = await this.getPodsByLabel(podLabels)
     for (const pod of podArray) {
       const podName = pod.metadata.name
@@ -776,14 +776,17 @@ export class K8 {
 
     let attempts = 0
     while (attempts++ < maxAttempts) {
-      // wait longer for pods to be deleted and recreated when running in CI with high loads of parallel runners
-      const status = await this.waitForPod(constants.POD_STATUS_RUNNING, podLabels, 1, 120, 2000)
-      if (status) {
-        const newPods = await this.getPodsByLabel(podLabels)
-        if (newPods.length === podArray.length) return newPods
+      try {
+        const pods = await this.waitForPods([constants.POD_PHASE_RUNNING],
+          podLabels, 1, waitForPodMaxAttempts, waitForPodDelay)
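+        // the recycle is only considered complete once the number of recreated, running pods
+        // matches the original set fetched before deletion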
+        if (pods.length === podArray.length) {
+          return pods
+        }
+      } catch (e) {
+        this.logger.warn(`deleted pod still not running [${podLabels.join(',')}, attempt: ${attempts}/${maxAttempts}]`)
       }
-      await sleep(2000)
+      await sleep(delay)
     }
 
     throw new FullstackTestingError(`pods are not running after deletion with labels [${podLabels.join(',')}]`)
@@ -791,25 +794,25 @@ export class K8 {
 
   /**
    * Wait for pod
-   * @param status phase of the pod
+   * @param phases an array of acceptable phases of the pods
    * @param labels pod labels
    * @param podCount number of pod expected
    * @param maxAttempts maximum attempts to check
   * @param delay delay between checks in milliseconds
+   * @param podItemPredicate a predicate function to check the pod item
    * @return a Promise that checks the status of an array of pods
    */
-  async waitForPod (status = 'Running', labels = [], podCount = 1, maxAttempts = 10, delay = 500) {
+  async waitForPods (phases = [constants.POD_PHASE_RUNNING], labels = [], podCount = 1, maxAttempts = 10, delay = 500, podItemPredicate) {
     const ns = this._getNamespace()
-    const fieldSelector = `status.phase=${status}`
     const labelSelector = labels.join(',')
 
-    this.logger.debug(`WaitForPod [namespace:${ns}, fieldSector(${fieldSelector}, labelSelector: ${labelSelector}], maxAttempts: ${maxAttempts}`)
+    this.logger.debug(`WaitForPod [namespace:${ns}, labelSelector: ${labelSelector}], maxAttempts: ${maxAttempts}`)
 
     return new Promise((resolve, reject) => {
       let attempts = 0
 
-      const check = async () => {
-        this.logger.debug(`Checking for pod [namespace:${ns}, fieldSector(${fieldSelector}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`)
+      const check = async (resolve, reject) => {
+        this.logger.debug(`Checking for pod [namespace:${ns}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`)
 
         // wait for the pod to be available with the given status and labels
         const resp = await this.kubeClient.listNamespacedPod(
@@ -817,24 +820,39 @@ export class K8 {
           false,
           false,
           undefined,
-          fieldSelector,
+          undefined,
           labelSelector,
           podCount
         )
 
-        this.logger.debug(`${resp.body.items.length}/${podCount} pod found [namespace:${ns}, fieldSector(${fieldSelector}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`)
-        if (resp.body && resp.body.items && resp.body.items.length === podCount) {
-          return resolve(resp.body.items)
+        this.logger.debug(`${resp.body?.items?.length}/${podCount} pod found [namespace:${ns}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`)
+        if (resp.body?.items?.length === podCount) {
+          let phaseMatchCount = 0
+          let predicateMatchCount = 0
+
+          for (const item of resp.body.items) {
+            if (phases.includes(item.status?.phase)) {
+              phaseMatchCount++
+            }
+
+            if (podItemPredicate && podItemPredicate(item)) {
+              predicateMatchCount++
+            }
+          }
+
+          if (phaseMatchCount === podCount && (!podItemPredicate || (predicateMatchCount === podCount))) {
+            return resolve(resp.body.items)
+          }
         }
 
-        if (attempts++ < maxAttempts) {
-          setTimeout(check, delay)
+        if (++attempts < maxAttempts) {
+          setTimeout(() => check(resolve, reject), delay)
         } else {
-          return reject(new FullstackTestingError(`Expected number of pod (${podCount}) not found ${fieldSelector} ${labelSelector} [attempts = ${attempts}/${maxAttempts}]`))
+          return reject(new FullstackTestingError(`Expected number of pod (${podCount}) not found for labels: ${labelSelector}, phases: ${phases.join(',')} [attempts = ${attempts}/${maxAttempts}]`))
         }
       }
 
-      check()
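+      // run the first check immediately; subsequent attempts are re-scheduled via setTimeout until maxAttempts is exhausted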
+      check(resolve, reject)
     })
   }
@@ -848,7 +866,7 @@ export class K8 {
    */
   async waitForPodReady (labels = [], podCount = 1, maxAttempts = 10, delay = 500) {
     try {
-      return await this.waitForPodCondition(K8.PodReadyCondition, labels, podCount, maxAttempts, delay)
+      return await this.waitForPodConditions(K8.PodReadyCondition, labels, podCount, maxAttempts, delay)
     } catch (e) {
       throw new FullstackTestingError(`Pod not ready [maxAttempts = ${maxAttempts}]`, e)
     }
@@ -864,67 +882,28 @@ export class K8 {
    * @return {Promise}
    */
-  async waitForPodCondition (
+  async waitForPodConditions (
     conditionsMap, labels = [], podCount = 1, maxAttempts = 10, delay = 500) {
     if (!conditionsMap || conditionsMap.size === 0) throw new MissingArgumentError('pod conditions are required')
-    const ns = this._getNamespace()
-    const labelSelector = labels.join(',')
-
-    this.logger.debug(`WaitForCondition [namespace:${ns}, conditions = ${conditionsMap.toString()} labelSelector: ${labelSelector}], maxAttempts: ${maxAttempts}`)
-    return new Promise((resolve, reject) => {
-      let attempts = 0
-
-      const check = async () => {
-        this.logger.debug(`Checking for pod ready [namespace:${ns}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`)
-
-        // wait for the pod to be available with the given status and labels
-        let pods
-        try {
-          pods = await this.waitForPod(constants.POD_STATUS_RUNNING, labels, podCount, maxAttempts, delay)
-          this.logger.debug(`${pods.length}/${podCount} pod found [namespace:${ns}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`)
-
-          if (pods.length >= podCount) {
-            const podWithMatchedCondition = []
-
-            // check conditions
-            for (const pod of pods) {
-              let matchedCondition = 0
-              for (const cond of pod.status.conditions) {
-                for (const entry of conditionsMap.entries()) {
-                  const condType = entry[0]
-                  const condStatus = entry[1]
-                  if (cond.type === condType && cond.status === condStatus) {
-                    this.logger.debug(`Pod condition met for ${pod.metadata.name} [type: ${cond.type} status: ${cond.status}]`)
-                    matchedCondition++
-                  }
-                }
-
-                if (matchedCondition >= conditionsMap.size) {
-                  podWithMatchedCondition.push(pod)
-                  break
-                }
-              }
-            }
-
-            if (podWithMatchedCondition.length >= podCount) {
-              return resolve(podWithMatchedCondition)
+    return await this.waitForPods([constants.POD_PHASE_RUNNING], labels, podCount, maxAttempts, delay, (pod) => {
+      if (pod.status?.conditions?.length > 0) {
+        for (const cond of pod.status.conditions) {
+          for (const entry of conditionsMap.entries()) {
+            const condType = entry[0]
+            const condStatus = entry[1]
+            if (cond.type === condType && cond.status === condStatus) {
+              this.logger.debug(`Pod condition met for ${pod.metadata.name} [type: ${cond.type} status: ${cond.status}]`)
+              return true
            }
          }
-        } catch (e) {
-          this.logger.error(`Pod not found with expected conditions [maxAttempts = ${maxAttempts}], ${e.message}`, e)
-        }
-
-        if (attempts++ < maxAttempts) {
-          setTimeout(check, delay)
-        } else {
-          return reject(new FullstackTestingError(`Pod not found with expected conditions [maxAttempts = ${maxAttempts}]`))
        }
      }
-      check()
+      // condition not found
+      return false
     })
   }
diff --git a/test/e2e/core/k8_e2e.test.mjs b/test/e2e/core/k8_e2e.test.mjs
index b446bf2b1..bfb562adc 100644
--- a/test/e2e/core/k8_e2e.test.mjs
+++ b/test/e2e/core/k8_e2e.test.mjs
@@ -127,7 +127,7 @@ describe('K8', () => {
       'fullstack.hedera.com/type=network-node'
     ]
 
-    const pods = await k8.waitForPod(constants.POD_STATUS_RUNNING, labels, 1)
+    const pods = await k8.waitForPods([constants.POD_PHASE_RUNNING], labels, 1)
     expect(pods.length).toStrictEqual(1)
   })
 
@@ -149,7 +149,7 @@ describe('K8', () => {
       .set(constants.POD_CONDITION_INITIALIZED, constants.POD_CONDITION_STATUS_TRUE)
       .set(constants.POD_CONDITION_POD_SCHEDULED, constants.POD_CONDITION_STATUS_TRUE)
       .set(constants.POD_CONDITION_READY, constants.POD_CONDITION_STATUS_TRUE)
-    const pods = await k8.waitForPodCondition(conditions, labels, 1)
+    const pods = await k8.waitForPodConditions(conditions, labels, 1)
     expect(pods.length).toStrictEqual(1)
   })
 
diff --git a/test/unit/core/k8.test.mjs b/test/unit/core/k8.test.mjs
new file mode 100644
index 000000000..82bb59b6a
--- /dev/null
+++ b/test/unit/core/k8.test.mjs
@@ -0,0 +1,136 @@
+/**
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+import { afterAll, beforeAll, describe, expect, it, jest } from '@jest/globals'
+import { constants, K8 } from '../../../src/core/index.mjs'
+import { getTestConfigManager, testLogger } from '../../test_util.js'
+import { flags } from '../../../src/commands/index.mjs'
+
+export function listNamespacedPodMockSetup (k8, numOfFailures, result) {
+  for (let i = 0; i < numOfFailures - 1; i++) {
+    k8.kubeClient.listNamespacedPod.mockReturnValueOnce(Promise.resolve({
+      body: {
+        items: []
+      }
+    }))
+  }
+  k8.kubeClient.listNamespacedPod.mockReturnValueOnce(Promise.resolve({
+    body: {
+      items: result
+    }
+  }))
+}
+describe('K8 Unit Tests', () => {
+  const argv = { }
+  const expectedResult = [
+    {
+      metadata: { name: 'pod' },
+      status: {
+        phase: constants.POD_PHASE_RUNNING,
+        conditions: [
+          {
+            type: constants.POD_CONDITION_READY,
+            status: constants.POD_CONDITION_STATUS_TRUE
+          }
+        ]
+      }
+    }
+  ]
+  const k8InitSpy = jest.spyOn(K8.prototype, 'init').mockImplementation(() => {})
+  const k8GetPodsByLabelSpy = jest.spyOn(K8.prototype, 'getPodsByLabel').mockResolvedValue(expectedResult)
+  let k8
+
+  beforeAll(() => {
+    argv[flags.namespace.name] = 'namespace'
+    const configManager = getTestConfigManager('k8-solo.config')
+    configManager.update(argv, true)
+    k8 = new K8(configManager, testLogger)
+    k8.kubeClient = {
+      listNamespacedPod: jest.fn(),
+      deleteNamespacedPod: jest.fn()
+    }
+  })
+
+  afterAll(() => {
+    k8InitSpy.mockRestore()
+    k8GetPodsByLabelSpy.mockRestore()
+  })
+
+  it('waitForPods with first time failure, later success', async () => {
+    const maxNumOfFailures = 500
+    listNamespacedPodMockSetup(k8, maxNumOfFailures, expectedResult)
+
+    const result = await k8.waitForPods([constants.POD_PHASE_RUNNING], ['labels'], 1, maxNumOfFailures, 0)
+    expect(result).toBe(expectedResult)
+  })
+
+  it('waitForPodConditions with first time failure, later success', async () => {
+    const maxNumOfFailures = 500
+    listNamespacedPodMockSetup(k8, maxNumOfFailures, expectedResult)
+
+    const result = await k8.waitForPodConditions(K8.PodReadyCondition, ['labels'], 1, maxNumOfFailures, 0)
+    expect(result).not.toBeNull()
+    expect(result[0]).toBe(expectedResult[0])
+  })
+
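+  // pods that are missing status or condition data never satisfy the phase/readiness checks,
+  // so the following cases are expected to exhaust all attempts and reject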
+  it('waitForPodConditions with partial pod data', async () => {
+    const expectedResult = [
+      {
+        metadata: { name: 'pod' }
+      }
+    ]
+
+    const maxNumOfFailures = 5
+    listNamespacedPodMockSetup(k8, maxNumOfFailures, expectedResult)
+
+    try {
+      await k8.waitForPodConditions(K8.PodReadyCondition, ['labels'], 1, maxNumOfFailures, 0)
+    } catch (e) {
+      expect(e).not.toBeNull()
+      expect(e.message).toContain('Expected number of pod (1) not found for labels: labels, phases: Running [attempts = ')
+    }
+  })
+
+  it('waitForPodConditions with no conditions', async () => {
+    const expectedResult = [
+      {
+        metadata: { name: 'pod' },
+        status: {
+          phase: constants.POD_PHASE_RUNNING
+        }
+      }
+    ]
+
+    const maxNumOfFailures = 5
+    listNamespacedPodMockSetup(k8, maxNumOfFailures, expectedResult)
+
+    try {
+      await k8.waitForPodConditions(K8.PodReadyCondition, ['labels'], 1, maxNumOfFailures, 0)
+    } catch (e) {
+      expect(e).not.toBeNull()
+      expect(e.message).toContain('Expected number of pod (1) not found for labels: labels, phases: Running [attempts = ')
+    }
+  })
+
+  it('recyclePodByLabels with first time failure, later success', async () => {
+    const waitForPodMaxAttempts = 120
+    const numOfFailures = waitForPodMaxAttempts * 2
+    listNamespacedPodMockSetup(k8, numOfFailures, expectedResult)
+
+    const result = await k8.recyclePodByLabels(['labels'], 2, 0, waitForPodMaxAttempts, 0)
+    expect(result[0]).toBe(expectedResult[0])
+  })
+})