diff --git a/src/commands/cluster/configs.ts b/src/commands/cluster/configs.ts
index f579e6670..2aea57685 100644
--- a/src/commands/cluster/configs.ts
+++ b/src/commands/cluster/configs.ts
@@ -77,6 +77,7 @@ export const resetConfigBuilder = async function (argv, ctx, task) {
     });

     if (!confirm) {
+      // eslint-disable-next-line n/no-process-exit
       process.exit(0);
     }
   }
diff --git a/src/commands/cluster/index.ts b/src/commands/cluster/index.ts
index 016224b54..1c1fd474d 100644
--- a/src/commands/cluster/index.ts
+++ b/src/commands/cluster/index.ts
@@ -32,7 +32,7 @@ export class ClusterCommand extends BaseCommand {
   constructor(opts: Opts) {
     super(opts);

-    this.handlers = new ClusterCommandHandlers(this, new ClusterCommandTasks(this), this.remoteConfigManager);
+    this.handlers = new ClusterCommandHandlers(this, new ClusterCommandTasks(this, this.k8), this.remoteConfigManager);
   }

   getCommandDefinition() {
diff --git a/src/commands/cluster/tasks.ts b/src/commands/cluster/tasks.ts
index 79c298c0e..fca930eff 100644
--- a/src/commands/cluster/tasks.ts
+++ b/src/commands/cluster/tasks.ts
@@ -24,11 +24,16 @@ import * as constants from '../../core/constants.js';
 import path from 'path';
 import chalk from 'chalk';
 import {ListrLease} from '../../core/lease/listr_lease.js';
+import {type K8} from '../../core/k8.js';
+import {ListrEnquirerPromptAdapter} from '@listr2/prompt-adapter-enquirer';

 export class ClusterCommandTasks {
   private readonly parent: BaseCommand;

-  constructor(parent) {
+  constructor(
+    parent,
+    private readonly k8: K8,
+  ) {
     this.parent = parent;
   }

@@ -131,9 +136,9 @@ export class ClusterCommandTasks {
     let valuesArg = chartDir ? `-f ${path.join(chartDir, 'solo-cluster-setup', 'values.yaml')}` : '';

     valuesArg += ` --set cloud.prometheusStack.enabled=${prometheusStackEnabled}`;
-    valuesArg += ` --set cloud.minio.enabled=${minioEnabled}`;
     valuesArg += ` --set cloud.certManager.enabled=${certManagerEnabled}`;
     valuesArg += ` --set cert-manager.installCRDs=${certManagerCrdsEnabled}`;
+    valuesArg += ` --set cloud.minio.enabled=${minioEnabled}`;

     if (certManagerEnabled && !certManagerCrdsEnabled) {
       this.parent.logger.showUser(
@@ -246,13 +251,15 @@ export class ClusterCommandTasks {
         const cluster = this.parent.getK8().getKubeConfig().getCurrentCluster();
         this.parent.logger.showJSON(`Cluster Information (${cluster.name})`, cluster);
         this.parent.logger.showUser('\n');
-      } catch (e: Error | any) {
+      } catch (e: Error | unknown) {
         this.parent.logger.showUserError(e);
       }
     });
   }

   prepareChartValues(argv) {
+    const self = this;
+
     return new Task(
       'Prepare chart values',
       async (ctx: any, task: ListrTaskWrapper) => {
@@ -261,6 +268,40 @@ export class ClusterCommandTasks {
           constants.SOLO_TESTING_CHART_URL,
           constants.SOLO_CLUSTER_SETUP_CHART,
         );
+
+        // if minio is already present, don't deploy it
+        if (ctx.config.deployMinio && (await self.k8.isMinioInstalled(ctx.config.clusterSetupNamespace))) {
+          ctx.config.deployMinio = false;
+        }
+
+        // if prometheus is found, don't deploy it
+        if (
+          ctx.config.deployPrometheusStack &&
+          (await self.k8.isPrometheusInstalled(ctx.config.clusterSetupNamespace))
+        ) {
+          ctx.config.deployPrometheusStack = false;
+        }
+
+        // if cert manager is installed, don't deploy it
+        if (
+          (ctx.config.deployCertManager || ctx.config.deployCertManagerCrds) &&
+          (await self.k8.isCertManagerInstalled())
+        ) {
+          ctx.config.deployCertManager = false;
+          ctx.config.deployCertManagerCrds = false;
+        }
+
+        // If all are already present or not wanted, skip installation
+        if (
+          !ctx.config.deployPrometheusStack &&
+          !ctx.config.deployMinio &&
+          !ctx.config.deployCertManager &&
+          !ctx.config.deployCertManagerCrds
+        ) {
+          ctx.isChartInstalled = true;
+          return;
+        }
+
         ctx.valuesArg = this.prepareValuesArg(
           ctx.config.chartDir,
           ctx.config.deployPrometheusStack,
@@ -287,7 +328,7 @@ export class ClusterCommandTasks {
           await parent
             .getChartManager()
             .install(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART, ctx.chartPath, version, valuesArg);
-        } catch (e: Error | any) {
+        } catch (e: Error | unknown) {
           // if error, uninstall the chart and rethrow the error
           parent.logger.debug(
             `Error on installing ${constants.SOLO_CLUSTER_SETUP_CHART}. attempting to rollback by uninstalling the chart`,
           );
           try {
             await parent.getChartManager().uninstall(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART);
-          } catch (ex) {
+          } catch {
             // ignore error during uninstall since we are doing the best-effort uninstall here
           }

@@ -319,10 +360,28 @@ export class ClusterCommandTasks {
   uninstallClusterChart(argv) {
     const parent = this.parent;
+    const self = this;
+
     return new Task(
       `Uninstall '${constants.SOLO_CLUSTER_SETUP_CHART}' chart`,
       async (ctx: any, task: ListrTaskWrapper) => {
         const clusterSetupNamespace = ctx.config.clusterSetupNamespace;
+
+        if (!argv.force && (await self.k8.isRemoteConfigPresentInAnyNamespace())) {
+          const confirm = await task.prompt(ListrEnquirerPromptAdapter).run({
+            type: 'toggle',
+            default: false,
+            message:
+              'There is remote config for one of the deployments. ' +
+              'Are you sure you would like to uninstall the cluster?',
+          });
+
+          if (!confirm) {
+            // eslint-disable-next-line n/no-process-exit
+            process.exit(0);
+          }
+        }
+
         await parent.getChartManager().uninstall(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART);

         if (argv.dev) {
           await this.showInstalledChartList(clusterSetupNamespace);
diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts
index b028d16ab..15f1dc70c 100644
--- a/src/commands/mirror_node.ts
+++ b/src/commands/mirror_node.ts
@@ -128,46 +128,38 @@ export class MirrorNodeCommand extends BaseCommand {
   /**
    * @param config
    * @param config.tlsClusterIssuerType - must be one of - acme-staging, acme-prod, or self-signed
-   * @param config.enableHederaExplorerTls
    * @param config.namespace - used for classname ingress class name prefix
    * @param config.hederaExplorerTlsLoadBalancerIp - can be an empty string
    * @param config.hederaExplorerTlsHostName
    */
-  private prepareSoloChartSetupValuesArg(config: MirrorNodeDeployConfigClass) {
-    if (!config.enableHederaExplorerTls) return '';
-
-    const {
-      tlsClusterIssuerType,
-      enableHederaExplorerTls,
-      namespace,
-      hederaExplorerTlsLoadBalancerIp,
-      hederaExplorerTlsHostName,
-    } = config;
+  private async prepareSoloChartSetupValuesArg(config: MirrorNodeDeployConfigClass) {
+    const {tlsClusterIssuerType, namespace, hederaExplorerTlsLoadBalancerIp, hederaExplorerTlsHostName} = config;

     let valuesArg = '';

-    if (enableHederaExplorerTls) {
-      if (!['acme-staging', 'acme-prod', 'self-signed'].includes(tlsClusterIssuerType)) {
-        throw new Error(
-          `Invalid TLS cluster issuer type: ${tlsClusterIssuerType}, must be one of: "acme-staging", "acme-prod", or "self-signed"`,
-        );
-      }
+    if (!['acme-staging', 'acme-prod', 'self-signed'].includes(tlsClusterIssuerType)) {
+      throw new Error(
+        `Invalid TLS cluster issuer type: ${tlsClusterIssuerType}, must be one of: "acme-staging", "acme-prod", or "self-signed"`,
+      );
+    }

+    // Install ingress controller only if it's not already present
+    if (!(await this.k8.isIngressControllerInstalled())) {
       valuesArg += ' --set ingress.enabled=true';
       valuesArg += ' --set haproxyIngressController.enabled=true';
       valuesArg += ` --set ingressClassName=${namespace}-hedera-explorer-ingress-class`;
       valuesArg += ` --set-json 'ingress.hosts[0]={"host":"${hederaExplorerTlsHostName}","paths":[{"path":"/","pathType":"Prefix"}]}'`;
+    }

-      if (hederaExplorerTlsLoadBalancerIp !== '') {
-        valuesArg += ` --set haproxy-ingress.controller.service.loadBalancerIP=${hederaExplorerTlsLoadBalancerIp}`;
-      }
+    if (hederaExplorerTlsLoadBalancerIp !== '') {
+      valuesArg += ` --set haproxy-ingress.controller.service.loadBalancerIP=${hederaExplorerTlsLoadBalancerIp}`;
+    }

-      if (tlsClusterIssuerType === 'self-signed') {
-        valuesArg += ' --set selfSignedClusterIssuer.enabled=true';
-      } else {
-        valuesArg += ' --set acmeClusterIssuer.enabled=true';
-        valuesArg += ` --set certClusterIssuerType=${tlsClusterIssuerType}`;
-      }
+    if (tlsClusterIssuerType === 'self-signed') {
+      valuesArg += ' --set selfSignedClusterIssuer.enabled=true';
+    } else {
+      valuesArg += ' --set acmeClusterIssuer.enabled=true';
+      valuesArg += ` --set certClusterIssuerType=${tlsClusterIssuerType}`;
     }

     return valuesArg;
@@ -314,7 +306,7 @@ export class MirrorNodeCommand extends BaseCommand {
             constants.SOLO_CLUSTER_SETUP_CHART,
           );

-          const soloChartSetupValuesArg = self.prepareSoloChartSetupValuesArg(config);
+          const soloChartSetupValuesArg = await self.prepareSoloChartSetupValuesArg(config);
           await self.chartManager.upgrade(
             clusterSetupNamespace,
             constants.SOLO_CLUSTER_SETUP_CHART,
diff --git a/src/core/config/remote/remote_config_manager.ts b/src/core/config/remote/remote_config_manager.ts
index 884893056..d611d357a 100644
--- a/src/core/config/remote/remote_config_manager.ts
+++ b/src/core/config/remote/remote_config_manager.ts
@@ -231,7 +231,7 @@ export class RemoteConfigManager {
    * @returns the remote configuration data.
    * @throws {@link SoloError} if the ConfigMap could not be read and the error is not a 404 status.
    */
-  private async getConfigMap(): Promise {
+  public async getConfigMap(): Promise {
     try {
       return await this.k8.getNamespacedConfigMap(constants.SOLO_REMOTE_CONFIGMAP_NAME);
     } catch (error: any) {
diff --git a/src/core/constants.ts b/src/core/constants.ts
index 75d8c4ad8..775ab775e 100644
--- a/src/core/constants.ts
+++ b/src/core/constants.ts
@@ -35,6 +35,7 @@ export const ROOT_CONTAINER = 'root-container';
 export const SOLO_REMOTE_CONFIGMAP_NAME = 'solo-remote-config';
 export const SOLO_REMOTE_CONFIGMAP_LABELS = {'solo.hedera.com/type': 'remote-config'};
 export const SOLO_REMOTE_CONFIG_MAX_COMMAND_IN_HISTORY = 50;
+export const SOLO_REMOTE_CONFIGMAP_LABEL_SELECTOR = 'solo.hedera.com/type=remote-config';

 // --------------- Hedera network and node related constants --------------------------------------------------------------------
 export const HEDERA_CHAIN_ID = process.env.SOLO_CHAIN_ID || '298';
diff --git a/src/core/k8.ts b/src/core/k8.ts
index ea7a8b6f0..ad95e4f18 100644
--- a/src/core/k8.ts
+++ b/src/core/k8.ts
@@ -33,11 +33,12 @@ import * as constants from './constants.js';
 import {ConfigManager} from './config_manager.js';
 import {SoloLogger} from './logging.js';
 import {type PodName, type TarCreateFilter} from '../types/aliases.js';
-import type {ExtendedNetServer, LocalContextObject} from '../types/index.js';
+import type {ExtendedNetServer, LocalContextObject, Optional} from '../types/index.js';
 import {HEDERA_HAPI_PATH, ROOT_CONTAINER, SOLO_LOGS_DIR} from './constants.js';
 import {Duration} from './time/duration.js';
 import {inject, injectable} from 'tsyringe-neo';
 import {patchInject} from './container_helper.js';
+import type {Namespace} from './config/remote/types.js';

 interface TDirectoryData {
   directory: boolean;
@@ -65,6 +66,7 @@ export class K8 {
   private kubeConfig!: k8s.KubeConfig;
   kubeClient!: k8s.CoreV1Api;
   private coordinationApiClient: k8s.CoordinationV1Api;
+  private networkingApi: k8s.NetworkingV1Api;

   constructor(
     @inject(ConfigManager) private readonly configManager?: ConfigManager,
@@ -93,6 +95,7 @@
     }

     this.kubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api);
+    this.networkingApi = this.kubeConfig.makeApiClient(k8s.NetworkingV1Api);
     this.coordinationApiClient = this.kubeConfig.makeApiClient(k8s.CoordinationV1Api);

     return this; // to enable chaining
@@ -104,7 +107,7 @@
    * @param [filters] - an object with metadata fields and value
    * @returns a list of items that match the filters
    */
-  applyMetadataFilter(items: (object | any)[], filters = {}) {
+  applyMetadataFilter(items: (object | any)[], filters: Record = {}) {
     if (!filters) throw new MissingArgumentError('filters are required');

     const matched = [];
@@ -135,7 +138,7 @@
    * @param items - list of items
    * @param [filters] - an object with metadata fields and value
    */
-  filterItem(items: (object | any)[], filters = {}) {
+  filterItem(items: (object | any)[], filters: Record = {}) {
     const filtered = this.applyMetadataFilter(items, filters);
     if (filtered.length > 1) throw new SoloError('multiple items found with filters', {filters});
     return filtered[0];
@@ -171,8 +174,7 @@
     if (resp.body && resp.body.items) {
       const namespaces: string[] = [];
       resp.body.items.forEach(item => {
-        // @ts-ignore
-        namespaces.push(item.metadata.name as string);
+        namespaces.push(item.metadata!.name);
       });

       return namespaces;
@@ -400,7 +402,7 @@
       }

       return items;
-    } catch (e: Error | any) {
+    } catch (e) {
       throw new SoloError(`unable to check path in '${podName}':${containerName}' - ${destPath}: ${e.message}`, e);
     }
   }
@@ -427,12 +429,10 @@
           for (const entry of filterMap.entries()) {
             const field = entry[0];
             const value = entry[1];
-            // @ts-ignore
             this.logger.debug(
               `Checking file ${podName}:${containerName} ${destPath}; ${field} expected ${value}, found ${item[field]}`,
               {filters},
             );
-            // @ts-ignore
             if (`${value}` !== `${item[field]}`) {
               found = false;
               break;
@@ -445,7 +445,7 @@
           }
         }
       }
-    } catch (e: Error | any) {
+    } catch (e) {
       const error = new SoloError(
         `unable to check file in '${podName}':${containerName}' - ${destPath}: ${e.message}`,
         e,
@@ -644,7 +644,7 @@
           self.registerErrorStreamOnError(localContext, messagePrefix, inputPassthroughStream);
         });
-      } catch (e: Error | any) {
+      } catch (e) {
         const errorMessage = `${messagePrefix} failed to upload file: ${e.message}`;
         self.logger.error(errorMessage, e);
         throw new SoloError(errorMessage, e);
@@ -778,7 +778,7 @@
               localContext,
               `${messagePrefix} files did not match, srcFileSize=${srcFileSize}, stat.size=${stat?.size}`,
             );
-          } catch (e: Error | any) {
+          } catch {
             return self.exitWithError(localContext, `${messagePrefix} failed to complete download`);
           }
         });
@@ -789,7 +789,7 @@
           self.registerErrorStreamOnError(localContext, messagePrefix, outputFileStream);
         });
-      } catch (e: Error | any) {
+      } catch (e) {
         const errorMessage = `${messagePrefix}failed to download file: ${e.message}`;
         self.logger.error(errorMessage, e);
         throw new SoloError(errorMessage, e);
@@ -991,7 +991,7 @@
         if (isPortOpen) {
           return;
         }
-      } catch (e: Error | any) {
+      } catch {
         return;
       }
       await sleep(Duration.ofMillis(timeout));
@@ -1007,7 +1007,7 @@
     podCount = 1,
     maxAttempts = constants.PODS_RUNNING_MAX_ATTEMPTS,
     delay = constants.PODS_RUNNING_DELAY,
-    podItemPredicate?: (items: k8s.V1Pod) => any,
+    podItemPredicate?: (items: k8s.V1Pod) => boolean,
   ): Promise {
     const ns = this._getNamespace();
     const labelSelector = labels.join(',');
@@ -1017,7 +1017,7 @@
     return new Promise((resolve, reject) => {
       let attempts = 0;

-      const check = async (resolve: (items: k8s.V1Pod[]) => void, reject: (reason?: any) => void) => {
+      const check = async (resolve: (items: k8s.V1Pod[]) => void, reject: (reason?: Error) => void) => {
         // wait for the pod to be available with the given status and labels
         try {
           const resp = await this.kubeClient.listNamespacedPod(
@@ -1055,7 +1055,7 @@
               return resolve(resp.body.items);
             }
           }
-        } catch (e: Error | any) {
+        } catch (e) {
           this.logger.info('Error occurred while waiting for pods, retrying', e);
         }

@@ -1084,7 +1084,7 @@
   async waitForPodReady(labels: string[] = [], podCount = 1, maxAttempts = 10, delay = 500) {
     try {
       return await this.waitForPodConditions(K8.PodReadyCondition, labels, podCount, maxAttempts, delay);
-    } catch (e: Error | any) {
+    } catch (e: Error | unknown) {
       throw new SoloError(`Pod not ready [maxAttempts = ${maxAttempts}]`, e);
     }
   }
@@ -1263,7 +1263,7 @@
     namespace: string,
     secretType: string,
     data: Record,
-    labels: any,
+    labels: Optional>,
     recreate: boolean,
   ) {
     if (recreate) {
@@ -1287,7 +1287,7 @@
       const resp = await this.kubeClient.createNamespacedSecret(namespace, v1Secret);

       return resp.response.statusCode === StatusCodes.CREATED;
-    } catch (e: Error | any) {
+    } catch (e) {
       throw new SoloError(
         `failed to create secret ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`,
         e,
       );
@@ -1345,7 +1345,7 @@
       const resp = await this.kubeClient.createNamespacedConfigMap(namespace, configMap);

       return resp.response.statusCode === StatusCodes.CREATED;
-    } catch (e: Error | any) {
+    } catch (e) {
       throw new SoloError(
         `failed to create configmap ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`,
         e,
@@ -1377,7 +1377,7 @@
       const resp = await this.kubeClient.replaceNamespacedConfigMap(name, namespace, configMap);

       return resp.response.statusCode === StatusCodes.CREATED;
-    } catch (e: Error | any) {
+    } catch (e) {
       throw new SoloError(
         `failed to create configmap ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`,
         e,
@@ -1390,7 +1390,7 @@
       const resp = await this.kubeClient.deleteNamespacedConfigMap(name, namespace);

       return resp.response.statusCode === StatusCodes.CREATED;
-    } catch (e: Error | any) {
+    } catch (e) {
       throw new SoloError(
         `failed to create configmap ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`,
         e,
@@ -1399,6 +1399,7 @@
   }

   // --------------------------------------- LEASES --------------------------------------- //
+
   async createNamespacedLease(namespace: string, leaseName: string, holderName: string, durationSeconds = 20) {
     const lease = new k8s.V1Lease();

@@ -1471,6 +1472,100 @@
     return body as k8s.V1Status;
   }

+  // --------------------------------------- Pod Identifiers --------------------------------------- //
+
+  /**
+   * Check if cert-manager is installed inside any namespace.
+   * @returns if cert-manager is found
+   */
+  public async isCertManagerInstalled(): Promise<boolean> {
+    try {
+      const pods = await this.kubeClient.listPodForAllNamespaces(undefined, undefined, undefined, 'app=cert-manager');
+
+      return pods.body.items.length > 0;
+    } catch (e) {
+      this.logger.error('Failed to find cert-manager:', e);
+
+      return false;
+    }
+  }
+
+  /**
+   * Check if minio is installed inside the namespace.
+   * @returns if minio is found
+   */
+  public async isMinioInstalled(namespace: Namespace): Promise<boolean> {
+    try {
+      // TODO DETECT THE OPERATOR
+      const pods = await this.kubeClient.listNamespacedPod(
+        namespace,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        'app=minio',
+      );
+
+      return pods.body.items.length > 0;
+    } catch (e) {
+      this.logger.error('Failed to find minio:', e);
+
+      return false;
+    }
+  }
+
+  /**
+   * Check if the ingress controller is installed inside any namespace.
+   * @returns if ingress controller is found
+   */
+  public async isIngressControllerInstalled(): Promise<boolean> {
+    try {
+      const response = await this.networkingApi.listIngressClass();
+
+      return response.body.items.length > 0;
+    } catch (e) {
+      this.logger.error('Failed to find ingress controller:', e);
+
+      return false;
+    }
+  }
+
+  public async isRemoteConfigPresentInAnyNamespace() {
+    try {
+      const configmaps = await this.kubeClient.listConfigMapForAllNamespaces(
+        undefined,
+        undefined,
+        undefined,
+        constants.SOLO_REMOTE_CONFIGMAP_LABEL_SELECTOR,
+      );
+
+      return configmaps.body.items.length > 0;
+    } catch (e) {
+      this.logger.error('Failed to find remote config:', e);
+
+      return false;
+    }
+  }
+
+  public async isPrometheusInstalled(namespace: Namespace) {
+    try {
+      const pods = await this.kubeClient.listNamespacedPod(
+        namespace,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        'app.kubernetes.io/name=prometheus',
+      );
+
+      return pods.body.items.length > 0;
+    } catch (e) {
+      this.logger.error('Failed to find prometheus:', e);
+
+      return false;
+    }
+  }
+
   /* ------------- Utilities ------------- */

   /**
@@ -1480,7 +1575,11 @@
    *
    * @throws SoloError - if the status code is not OK
    */
-  private handleKubernetesClientError(response: http.IncomingMessage, error: Error | any, errorMessage: string): void {
+  private handleKubernetesClientError(
+    response: http.IncomingMessage,
+    error: Error | unknown,
+    errorMessage: string,
+  ): void {
     const statusCode = +response?.statusCode || StatusCodes.INTERNAL_SERVER_ERROR;

     if (statusCode <= StatusCodes.ACCEPTED) return;
@@ -1490,7 +1589,7 @@
     throw new SoloError(errorMessage, errorMessage, {statusCode: statusCode});
   }

-  private _getNamespace() {
+  private _getNamespace(): Namespace {
     const ns = this.configManager.getFlag(flags.namespace);
     if (!ns) throw new MissingArgumentError('namespace is not set');
     return ns;
@@ -1582,7 +1681,7 @@
       ]);
       await this.execContainer(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${scriptName}`);
       await this.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/data/${podName}.zip`, targetDir);
-    } catch (e: Error | any) {
+    } catch (e: Error | unknown) {
       // not throw error here, so we can continue to finish downloading logs from other pods
       // and also delete namespace in the end
       this.logger.error(`${constants.NODE_LOG_FAILURE_MSG} ${podName}`, e);
@@ -1592,7 +1691,6 @@
   /**
    * Download state files from a pod
-   * @param k8 - an instance of core/K8
    * @param namespace - the namespace of the network
    * @param nodeAlias - the pod name
    * @returns a promise that resolves when the state files are downloaded
@@ -1618,7 +1716,7 @@
       const zipCommand = `tar -czf ${HEDERA_HAPI_PATH}/${podName}-state.zip -C ${HEDERA_HAPI_PATH}/data/saved .`;
       await this.execContainer(podName, ROOT_CONTAINER, zipCommand);
       await this.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${podName}-state.zip`, targetDir);
-    } catch (e: Error | any) {
+    } catch (e: Error | unknown) {
       this.logger.error(`failed to download state from pod ${podName}`, e);
       this.logger.showUser(`Failed to download state from pod ${podName}` + e);
     }
diff --git a/test/unit/commands/cluster.test.ts b/test/unit/commands/cluster.test.ts
index db90391f1..a8a8dffef 100644
--- a/test/unit/commands/cluster.test.ts
+++ b/test/unit/commands/cluster.test.ts
@@ -61,7 +61,11 @@ import {stringify} from 'yaml';
 const getBaseCommandOpts = () => ({
   logger: sinon.stub(),
   helm: sinon.stub(),
-  k8: sinon.stub(),
+  k8: {
+    isMinioInstalled: sinon.stub().returns(false),
+    isPrometheusInstalled: sinon.stub().returns(false),
+    isCertManagerInstalled: sinon.stub().returns(false),
+  },
   chartManager: sinon.stub(),
   configManager: sinon.stub(),
   depManager: sinon.stub(),
@@ -153,6 +157,9 @@ describe('ClusterCommand unit tests', () => {
       {cluster: 'cluster-2', user: 'user-2', name: 'context-2', namespace: 'deployment-2'},
       {cluster: 'cluster-3', user: 'user-3', name: 'context-3', namespace: 'deployment-3'},
     ]);
+    k8Stub.isMinioInstalled.returns(Promise.resolve(true));
+    k8Stub.isPrometheusInstalled.returns(Promise.resolve(true));
+    k8Stub.isCertManagerInstalled.returns(Promise.resolve(true));
     const kubeConfigStub = sandbox.createStubInstance(KubeConfig);
     kubeConfigStub.getCurrentContext.returns('context-from-kubeConfig');
     kubeConfigStub.getCurrentCluster.returns({
@@ -199,7 +206,7 @@ describe('ClusterCommand unit tests', () => {
   describe('updateLocalConfig', () => {
     async function runUpdateLocalConfigTask(opts) {
       command = new ClusterCommand(opts);
-      tasks = new ClusterCommandTasks(command);
+      tasks = new ClusterCommandTasks(command, opts.k8);
       const taskObj = tasks.updateLocalConfig({});
       await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper);
       return command;
@@ -337,7 +344,7 @@ describe('ClusterCommand unit tests', () => {
   describe('selectContext', () => {
     async function runSelectContextTask(opts) {
       command = new ClusterCommand(opts);
-      tasks = new ClusterCommandTasks(command);
+      tasks = new ClusterCommandTasks(command, opts.k8);
       const taskObj = tasks.selectContext({});
       await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper);
       return command;
diff --git a/test/unit/commands/network.test.ts b/test/unit/commands/network.test.ts
index cf11457cc..e57f71fbf 100644
--- a/test/unit/commands/network.test.ts
+++ b/test/unit/commands/network.test.ts
@@ -77,6 +77,10 @@ describe('NetworkCommand unit tests', () => {
     opts.k8.waitForPodReady = sinon.stub();
     opts.k8.waitForPods = sinon.stub();
     opts.k8.readNamespacedLease = sinon.stub();
+    opts.k8.isMinioInstalled = sinon.stub();
+    opts.k8.isPrometheusInstalled = sinon.stub();
+    opts.k8.isCertManagerInstalled = sinon.stub();
+    opts.k8.logger = opts.logger;

     container.registerInstance(K8, opts.k8);