diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index 86affe1d2..89636d2db 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -21,6 +21,7 @@ import * as path from 'node:path'; import {type Optional, type SoloListrTask} from '../types/index.js'; import * as Base64 from 'js-base64'; import {type NamespaceName} from '../core/kube/namespace_name.js'; +import {PodRef} from '../core/kube/pod_ref.js'; interface MirrorNodeDeployConfigClass { chartDirectory: string; @@ -357,8 +358,9 @@ export class MirrorNodeCommand extends BaseCommand { } const postgresPodName = PodName.of(pods[0].metadata.name); const postgresContainerName = 'postgresql'; + const postgresPodRef = PodRef.of(namespace, postgresPodName); const mirrorEnvVars = await self.k8.execContainer( - postgresPodName, + postgresPodRef, postgresContainerName, '/bin/bash -c printenv', ); @@ -376,7 +378,7 @@ export class MirrorNodeCommand extends BaseCommand { 'HEDERA_MIRROR_IMPORTER_DB_NAME', ); - await self.k8.execContainer(postgresPodName, postgresContainerName, [ + await self.k8.execContainer(postgresPodRef, postgresContainerName, [ 'psql', `postgresql://${HEDERA_MIRROR_IMPORTER_DB_OWNER}:${HEDERA_MIRROR_IMPORTER_DB_OWNERPASSWORD}@localhost:5432/${HEDERA_MIRROR_IMPORTER_DB_NAME}`, '-c', diff --git a/src/commands/node/configs.ts b/src/commands/node/configs.ts index 6187b7520..3b6c15271 100644 --- a/src/commands/node/configs.ts +++ b/src/commands/node/configs.ts @@ -16,6 +16,7 @@ import {type PodName} from '../../core/kube/pod_name.js'; import {type NetworkNodeServices} from '../../core/network_node_services.js'; import {type NodeAddConfigClass} from './node_add_config.js'; import {type NamespaceName} from '../../core/kube/namespace_name.js'; +import {type PodRef} from '../../core/kube/pod_ref.js'; export const PREPARE_UPGRADE_CONFIGS_NAME = 'prepareUpgradeConfig'; export const DOWNLOAD_GENERATED_FILES_CONFIGS_NAME = 'downloadGeneratedFilesConfig'; @@ -83,7 +84,7 @@ export const upgradeConfigBuilder = async function (argv, ctx, task, shouldLoadN 'existingNodeAliases', 'keysDir', 'nodeClient', - 'podNames', + 'podRefs', 'stagingDir', 'stagingKeysDir', ]) as NodeUpgradeConfigClass; @@ -118,7 +119,7 @@ export const updateConfigBuilder = async function (argv, ctx, task, shouldLoadNo 'freezeAdminPrivateKey', 'keysDir', 'nodeClient', - 'podNames', + 'podRefs', 'serviceMap', 'stagingDir', 'stagingKeysDir', @@ -161,7 +162,7 @@ export const deleteConfigBuilder = async function (argv, ctx, task, shouldLoadNo 'freezeAdminPrivateKey', 'keysDir', 'nodeClient', - 'podNames', + 'podRefs', 'serviceMap', 'stagingDir', 'stagingKeysDir', @@ -206,7 +207,7 @@ export const addConfigBuilder = async function (argv, ctx, task, shouldLoadNodeC 'keysDir', 'lastStateZipPath', 'nodeClient', - 'podNames', + 'podRefs', 'serviceMap', 'stagingDir', 'stagingKeysDir', @@ -269,7 +270,7 @@ export const statesConfigBuilder = function (argv, ctx, task) { }; export const refreshConfigBuilder = async function (argv, ctx, task) { - ctx.config = this.getConfig(REFRESH_CONFIGS_NAME, argv.flags, ['nodeAliases', 'podNames']) as NodeRefreshConfigClass; + ctx.config = this.getConfig(REFRESH_CONFIGS_NAME, argv.flags, ['nodeAliases', 'podRefs']) as NodeRefreshConfigClass; ctx.config.nodeAliases = helpers.parseNodeAliases(ctx.config.nodeAliasesUnparsed); @@ -323,7 +324,7 @@ export const startConfigBuilder = async function (argv, ctx, task) { }; export const setupConfigBuilder = async function (argv, ctx, task) { - const config = 
this.getConfig(SETUP_CONFIGS_NAME, argv.flags, ['nodeAliases', 'podNames']) as NodeSetupConfigClass; + const config = this.getConfig(SETUP_CONFIGS_NAME, argv.flags, ['nodeAliases', 'podRefs']) as NodeSetupConfigClass; config.nodeAliases = helpers.parseNodeAliases(config.nodeAliasesUnparsed); @@ -349,7 +350,7 @@ export interface NodeRefreshConfigClass { nodeAliasesUnparsed: string; releaseTag: string; nodeAliases: NodeAliases; - podNames: Record; + podRefs: Record; getUnusedConfigs: () => string[]; } @@ -365,12 +366,6 @@ export interface NodeKeysConfigClass { getUnusedConfigs: () => string[]; } -export interface NodeStopConfigClass { - namespace: string; - nodeAliases: NodeAliases; - podNames: Record; -} - export interface NodeStartConfigClass { app: string; cacheDir: string; @@ -378,7 +373,7 @@ export interface NodeStartConfigClass { namespace: string; nodeAliases: NodeAliases; stagingDir: string; - podNames: Record; + podRefs: Record; nodeAliasesUnparsed: string; } @@ -401,7 +396,7 @@ export interface NodeDeleteConfigClass { freezeAdminPrivateKey: string; keysDir: string; nodeClient: any; - podNames: Record; + podRefs: Record; serviceMap: Map; stagingDir: string; stagingKeysDir: string; @@ -421,7 +416,7 @@ export interface NodeSetupConfigClass { nodeAliasesUnparsed: string; releaseTag: string; nodeAliases: NodeAliases; - podNames: object; + podRefs: Record; getUnusedConfigs: () => string[]; } @@ -444,7 +439,7 @@ export interface NodeUpgradeConfigClass { freezeAdminPrivateKey: PrivateKey | string; keysDir: string; nodeClient: any; - podNames: Record; + podRefs: Record; stagingDir: string; stagingKeysDir: string; treasuryKey: PrivateKey; @@ -479,7 +474,7 @@ export interface NodeUpdateConfigClass { freezeAdminPrivateKey: PrivateKey | string; keysDir: string; nodeClient: any; - podNames: Record; + podRefs: Record; serviceMap: Map; stagingDir: string; stagingKeysDir: string; diff --git a/src/commands/node/helper.ts b/src/commands/node/helper.ts index 31c4af993..781d2252d 100644 --- a/src/commands/node/helper.ts +++ b/src/commands/node/helper.ts @@ -42,7 +42,7 @@ export class NodeHelper { config.existingNodeAliases = ctxData.existingNodeAliases; config.allNodeAliases = ctxData.existingNodeAliases; ctx.upgradeZipHash = ctxData.upgradeZipHash; - config.podNames = {}; + config.podRefs = {}; } /** @@ -106,7 +106,7 @@ export class NodeHelper { config.existingNodeAliases = ctxData.existingNodeAliases; config.allNodeAliases = ctxData.allNodeAliases; ctx.upgradeZipHash = ctxData.upgradeZipHash; - config.podNames = {}; + config.podRefs = {}; } /** @@ -135,6 +135,6 @@ export class NodeHelper { config.gossipPrivateKey = ctxData.gossipPrivateKey; config.allNodeAliases = ctxData.allNodeAliases; ctx.upgradeZipHash = ctxData.upgradeZipHash; - config.podNames = {}; + config.podRefs = {}; } } diff --git a/src/commands/node/node_add_config.ts b/src/commands/node/node_add_config.ts index c8af249e1..c1c69d57a 100644 --- a/src/commands/node/node_add_config.ts +++ b/src/commands/node/node_add_config.ts @@ -2,7 +2,7 @@ * SPDX-License-Identifier: Apache-2.0 */ import {type NodeAlias, type NodeAliases} from '../../types/aliases.js'; -import {type PodName} from '../../core/kube/pod_name.js'; +import {type PodRef} from '../../core/kube/pod_ref.js'; import {type NetworkNodeServices} from '../../core/network_node_services.js'; import {type PrivateKey} from '@hashgraph/sdk'; @@ -32,7 +32,7 @@ export interface NodeAddConfigClass { keysDir: string; lastStateZipPath: string; nodeClient: any; - podNames: Record; + podRefs: 
Record; serviceMap: Map; treasuryKey: PrivateKey; stagingDir: string; diff --git a/src/commands/node/tasks.ts b/src/commands/node/tasks.ts index 75d093f81..aef5c44da 100644 --- a/src/commands/node/tasks.ts +++ b/src/commands/node/tasks.ts @@ -62,6 +62,7 @@ import {type BaseCommand} from '../base.js'; import {type NodeAddConfigClass} from './node_add_config.js'; import {GenesisNetworkDataConstructor} from '../../core/genesis_network_models/genesis_network_data_constructor.js'; import {type NamespaceName} from '../../core/kube/namespace_name.js'; +import {PodRef} from '../../core/kube/pod_ref.js'; export class NodeCommandTasks { private readonly accountManager: AccountManager; @@ -187,7 +188,7 @@ export class NodeCommandTasks { _uploadPlatformSoftware( nodeAliases: NodeAliases, - podNames: any, + podRefs: Record, task: ListrTaskWrapper, localBuildPath: string, ) { @@ -209,7 +210,7 @@ export class NodeCommandTasks { let localDataLibBuildPath: string; for (const nodeAlias of nodeAliases) { - const podName = podNames[nodeAlias]; + const podRef = podRefs[nodeAlias]; if (buildPathMap.has(nodeAlias)) { localDataLibBuildPath = buildPathMap.get(nodeAlias); } else { @@ -229,7 +230,7 @@ export class NodeCommandTasks { return !(path.includes('data/keys') || path.includes('data/config')); }; await self.k8.copyTo( - podName, + podRef, constants.ROOT_CONTAINER, localDataLibBuildPath, `${constants.HEDERA_HAPI_PATH}`, @@ -239,7 +240,7 @@ export class NodeCommandTasks { const testJsonFiles: string[] = this.configManager.getFlag(flags.appConfig)!.split(','); for (const jsonFile of testJsonFiles) { if (fs.existsSync(jsonFile)) { - await self.k8.copyTo(podName, constants.ROOT_CONTAINER, jsonFile, `${constants.HEDERA_HAPI_PATH}`); + await self.k8.copyTo(podRef, constants.ROOT_CONTAINER, jsonFile, `${constants.HEDERA_HAPI_PATH}`); } } } @@ -255,17 +256,17 @@ export class NodeCommandTasks { _fetchPlatformSoftware( nodeAliases: NodeAliases, - podNames: Record, + podRefs: Record, releaseTag: string, task: ListrTaskWrapper, platformInstaller: PlatformInstaller, ) { const subTasks = []; for (const nodeAlias of nodeAliases) { - const podName = podNames[nodeAlias]; + const podRef = podRefs[nodeAlias]; subTasks.push({ title: `Update node: ${chalk.yellow(nodeAlias)} [ platformVersion = ${releaseTag} ]`, - task: async () => await platformInstaller.fetchPlatform(podName, releaseTag), + task: async () => await platformInstaller.fetchPlatform(podRef, releaseTag), }); } @@ -303,7 +304,7 @@ export class NodeCommandTasks { if (enableDebugger) { await sleep(Duration.ofHours(1)); } - ctx.config.podNames[nodeAlias] = await this._checkNetworkNodeActiveness( + ctx.config.podRefs[nodeAlias] = await this._checkNetworkNodeActiveness( namespace, nodeAlias, task, @@ -334,9 +335,10 @@ export class NodeCommandTasks { maxAttempts = constants.NETWORK_NODE_ACTIVE_MAX_ATTEMPTS, delay = constants.NETWORK_NODE_ACTIVE_DELAY, timeout = constants.NETWORK_NODE_ACTIVE_TIMEOUT, - ) { + ): Promise { nodeAlias = nodeAlias.trim() as NodeAlias; const podName = Templates.renderNetworkPodName(nodeAlias); + const podRef = PodRef.of(namespace, podName); task.title = `${title} - status ${chalk.yellow('STARTING')}, attempt ${chalk.blueBright(`0/${maxAttempts}`)}`; let attempt = 0; @@ -350,7 +352,7 @@ export class NodeCommandTasks { }, timeout); try { - const response = await this.k8.execContainer(podName, constants.ROOT_CONTAINER, [ + const response = await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, [ 'bash', '-c', 'curl -s 
http://localhost:9999/metrics | grep platform_PlatformStatus | grep -v \\#', @@ -404,7 +406,7 @@ await sleep(Duration.ofSeconds(2)); // delaying prevents - gRPC service error - return podName; + return podRef; } /** Return task to check if node proxies are ready */ @@ -673,10 +675,11 @@ : config.existingNodeAliases[0]; const nodeFullyQualifiedPodName = Templates.renderNetworkPodName(nodeAlias); + const podRef = PodRef.of(config.namespace, nodeFullyQualifiedPodName); // copy the config.txt file from the node1 upgrade directory await self.k8.copyFrom( - nodeFullyQualifiedPodName, + podRef, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/config.txt`, config.stagingDir, @@ -684,20 +687,20 @@ // if directory data/upgrade/current/data/keys does not exist, then use data/upgrade/current let keyDir = `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/data/keys`; - if (!(await self.k8.hasDir(nodeFullyQualifiedPodName, constants.ROOT_CONTAINER, keyDir))) { + if (!(await self.k8.hasDir(podRef, constants.ROOT_CONTAINER, keyDir))) { keyDir = `${constants.HEDERA_HAPI_PATH}/data/upgrade/current`; } - const signedKeyFiles = ( - await self.k8.listDir(nodeFullyQualifiedPodName, constants.ROOT_CONTAINER, keyDir) - ).filter(file => file.name.startsWith(constants.SIGNING_KEY_PREFIX)); - await self.k8.execContainer(nodeFullyQualifiedPodName, constants.ROOT_CONTAINER, [ + const signedKeyFiles = (await self.k8.listDir(podRef, constants.ROOT_CONTAINER, keyDir)).filter(file => file.name.startsWith(constants.SIGNING_KEY_PREFIX), ); + await self.k8.execContainer(podRef, constants.ROOT_CONTAINER, [ 'bash', '-c', `mkdir -p ${constants.HEDERA_HAPI_PATH}/data/keys_backup && cp -r ${keyDir} ${constants.HEDERA_HAPI_PATH}/data/keys_backup/`, ]); for (const signedKeyFile of signedKeyFiles) { await self.k8.copyFrom( - nodeFullyQualifiedPodName, + podRef, constants.ROOT_CONTAINER, `${keyDir}/${signedKeyFile.name}`, `${config.keysDir}`, @@ -706,13 +709,13 @@ if ( await self.k8.hasFile( - nodeFullyQualifiedPodName, + podRef, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/application.properties`, ) ) { await self.k8.copyFrom( - nodeFullyQualifiedPodName, + podRef, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/application.properties`, `${config.stagingDir}/templates`, @@ -731,6 +734,7 @@ const nodeAlias = ctx.config.nodeAliases[0]; const nodeFullyQualifiedPodName = Templates.renderNetworkPodName(nodeAlias); + const podRef = PodRef.of(config.namespace, nodeFullyQualifiedPodName); // find all files under ${constants.HEDERA_HAPI_PATH}/data/upgrade/current/ const upgradeDirectories = [ `${constants.HEDERA_HAPI_PATH}/data/upgrade/current`, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/data/apps`, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/data/libs`, ]; for (const upgradeDir of upgradeDirectories) { // check if directory upgradeDir exists in root container - if (!(await self.k8.hasDir(nodeFullyQualifiedPodName, constants.ROOT_CONTAINER, upgradeDir))) { + if (!(await self.k8.hasDir(podRef, constants.ROOT_CONTAINER, upgradeDir))) { continue; } - const files = await self.k8.listDir(nodeFullyQualifiedPodName, constants.ROOT_CONTAINER, upgradeDir); + const files = await self.k8.listDir(podRef, constants.ROOT_CONTAINER, upgradeDir); // iterate all files and copy them to the staging directory for (const file of files) { if (file.name.endsWith('.mf')) { @@ -754,7 +758,7 @@ export class 
NodeCommandTasks { } this.logger.debug(`Copying file: ${file.name}`); await self.k8.copyFrom( - nodeFullyQualifiedPodName, + podRef, constants.ROOT_CONTAINER, `${upgradeDir}/${file.name}`, `${config.stagingDir}`, @@ -773,7 +777,7 @@ export class NodeCommandTasks { ): Listr { if (!ctx.config) ctx.config = {}; - ctx.config.podNames = {}; + ctx.config.podRefs = {}; const subTasks = []; const self = this; @@ -782,7 +786,7 @@ export class NodeCommandTasks { title: `Check network pod: ${chalk.yellow(nodeAlias)}`, task: async (ctx: any) => { try { - ctx.config.podNames[nodeAlias] = await self.checkNetworkNodePod( + ctx.config.podRefs[nodeAlias] = await self.checkNetworkNodePod( ctx.config.namespace, nodeAlias, maxAttempts, @@ -805,13 +809,14 @@ export class NodeCommandTasks { /** Check if the network node pod is running */ async checkNetworkNodePod( - namespace: string, + namespace: NamespaceName, nodeAlias: NodeAlias, maxAttempts = constants.PODS_RUNNING_MAX_ATTEMPTS, delay = constants.PODS_RUNNING_DELAY, ) { nodeAlias = nodeAlias.trim() as NodeAlias; const podName = Templates.renderNetworkPodName(nodeAlias); + const podRef = PodRef.of(namespace, podName); try { await this.k8.waitForPods( @@ -822,7 +827,7 @@ export class NodeCommandTasks { delay, ); - return podName; + return podRef; } catch (e: Error | any) { throw new SoloError(`no pod found for nodeAlias: ${nodeAlias}`, e); } @@ -852,19 +857,19 @@ export class NodeCommandTasks { const zipFile = config.stateFile; self.logger.debug(`zip file: ${zipFile}`); for (const nodeAlias of ctx.config.nodeAliases) { - const podName = ctx.config.podNames[nodeAlias]; - self.logger.debug(`Uploading state files to pod ${podName}`); - await self.k8.copyTo(podName, constants.ROOT_CONTAINER, zipFile, `${constants.HEDERA_HAPI_PATH}/data`); + const podRef = ctx.config.podRefs[nodeAlias]; + self.logger.debug(`Uploading state files to pod ${podRef.podName.name}`); + await self.k8.copyTo(podRef, constants.ROOT_CONTAINER, zipFile, `${constants.HEDERA_HAPI_PATH}/data`); self.logger.info( - `Deleting the previous state files in pod ${podName} directory ${constants.HEDERA_HAPI_PATH}/data/saved`, + `Deleting the previous state files in pod ${podRef.podName.name} directory ${constants.HEDERA_HAPI_PATH}/data/saved`, ); - await self.k8.execContainer(podName, constants.ROOT_CONTAINER, [ + await self.k8.execContainer(podRef, constants.ROOT_CONTAINER, [ 'rm', '-rf', `${constants.HEDERA_HAPI_PATH}/data/saved/*`, ]); - await self.k8.execContainer(podName, constants.ROOT_CONTAINER, [ + await self.k8.execContainer(podRef, constants.ROOT_CONTAINER, [ 'tar', '-xvf', `${constants.HEDERA_HAPI_PATH}/data/${path.basename(zipFile)}`, @@ -887,19 +892,22 @@ export class NodeCommandTasks { fetchPlatformSoftware(aliasesField: string) { const self = this; return new Task('Fetch platform software into network nodes', (ctx: any, task: ListrTaskWrapper) => { - const {podNames, releaseTag, localBuildPath} = ctx.config; + const {podRefs, releaseTag, localBuildPath} = ctx.config; if (localBuildPath !== '') { - return self._uploadPlatformSoftware(ctx.config[aliasesField], podNames, task, localBuildPath); + return self._uploadPlatformSoftware(ctx.config[aliasesField], podRefs, task, localBuildPath); } - return self._fetchPlatformSoftware(ctx.config[aliasesField], podNames, releaseTag, task, this.platformInstaller); + return self._fetchPlatformSoftware(ctx.config[aliasesField], podRefs, releaseTag, task, this.platformInstaller); }); } populateServiceMap() { return new Task('Populate serviceMap', async 
(ctx: any, task: ListrTaskWrapper) => { ctx.config.serviceMap = await this.accountManager.getNodeServiceMap(ctx.config.namespace); - ctx.config.podNames[ctx.config.nodeAlias] = ctx.config.serviceMap.get(ctx.config.nodeAlias).nodePodName; + ctx.config.podRefs[ctx.config.nodeAlias] = PodRef.of( + ctx.config.namespace, + ctx.config.serviceMap.get(ctx.config.nodeAlias).nodePodName, + ); }); } @@ -916,10 +924,10 @@ export class NodeCommandTasks { const subTasks = []; for (const nodeAlias of ctx.config[nodeAliasesProperty]) { - const podName = ctx.config.podNames[nodeAlias]; + const podRef = ctx.config.podRefs[nodeAlias]; subTasks.push({ title: `Node: ${chalk.yellow(nodeAlias)}`, - task: () => this.platformInstaller.taskSetup(podName, ctx.config.stagingDir, isGenesis), + task: () => this.platformInstaller.taskSetup(podRef, ctx.config.stagingDir, isGenesis), }); } @@ -997,11 +1005,11 @@ export class NodeCommandTasks { // ctx.config.allNodeAliases = ctx.config.existingNodeAliases for (const nodeAlias of nodeAliases) { - const podName = config.podNames[nodeAlias]; + const podRef = config.podRefs[nodeAlias]; subTasks.push({ title: `Start node: ${chalk.yellow(nodeAlias)}`, task: async () => { - await this.k8.execContainer(podName, constants.ROOT_CONTAINER, ['systemctl', 'restart', 'network-node']); + await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, ['systemctl', 'restart', 'network-node']); }, }); } @@ -1021,9 +1029,9 @@ export class NodeCommandTasks { return new Task( 'Enable port forwarding for JVM debugger', async (ctx: any, task: ListrTaskWrapper) => { - const podName = PodName.of(`network-${ctx.config.debugNodeAlias}-0`); - this.logger.debug(`Enable port forwarding for JVM debugger on pod ${podName.name}`); - await this.k8.portForward(podName, constants.JVM_DEBUG_PORT, constants.JVM_DEBUG_PORT); + const podRef = PodRef.of(ctx.config.namespace, PodName.of(`network-${ctx.config.debugNodeAlias}-0`)); + this.logger.debug(`Enable port forwarding for JVM debugger on pod ${podRef.podName.name}`); + await this.k8.portForward(podRef, constants.JVM_DEBUG_PORT, constants.JVM_DEBUG_PORT); }, (ctx: any) => !ctx.config.debugNodeAlias, ); @@ -1146,11 +1154,11 @@ export class NodeCommandTasks { if (!ctx.config.skipStop) { await this.accountManager.close(); for (const nodeAlias of ctx.config.nodeAliases) { - const podName = ctx.config.podNames[nodeAlias]; + const podRef = ctx.config.podRefs[nodeAlias]; subTasks.push({ title: `Stop node: ${chalk.yellow(nodeAlias)}`, task: async () => - await this.k8.execContainer(podName, constants.ROOT_CONTAINER, 'systemctl stop network-node'), + await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, 'systemctl stop network-node'), }); } } @@ -1179,11 +1187,11 @@ export class NodeCommandTasks { const config: NodeRefreshConfigClass = ctx.config; const subTasks = []; for (const nodeAlias of config.nodeAliases) { - const podName = config.podNames[nodeAlias]; + const podRef = config.podRefs[nodeAlias]; subTasks.push({ title: `Node: ${chalk.yellow(nodeAlias)}`, task: async () => - await this.k8.execContainer(podName, constants.ROOT_CONTAINER, [ + await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, [ 'bash', '-c', `rm -rf ${constants.HEDERA_HAPI_PATH}/data/saved/*`, @@ -1551,7 +1559,7 @@ export class NodeCommandTasks { return new Task('Kill nodes', async (ctx: any, task: ListrTaskWrapper) => { const config = ctx.config; for (const service of config.serviceMap.values()) { - await this.k8.killPod(service.nodePodName, config.namespace); + await 
this.k8.killPod(PodRef.of(config.namespace, service.nodePodName)); } }); } @@ -1565,15 +1573,15 @@ export class NodeCommandTasks { config.serviceMap = await this.accountManager.getNodeServiceMap(config.namespace); for (const service of config.serviceMap.values()) { - await this.k8.killPod(service.nodePodName, config.namespace); + await this.k8.killPod(PodRef.of(config.namespace, service.nodePodName)); } // again, the pod names will change after the pods are killed config.serviceMap = await this.accountManager.getNodeServiceMap(config.namespace); - config.podNames = {}; + config.podRefs = {}; for (const service of config.serviceMap.values()) { - config.podNames[service.nodeAlias] = service.nodePodName; + config.podRefs[service.nodeAlias] = PodRef.of(service.namespace, service.nodePodName); } }, ); @@ -1617,19 +1625,15 @@ export class NodeCommandTasks { return new Task('Download last state from an existing node', async (ctx, task) => { const config = ctx.config; const node1FullyQualifiedPodName = Templates.renderNetworkPodName(config.existingNodeAliases[0]); + const podRef = PodRef.of(config.namespace, node1FullyQualifiedPodName); const upgradeDirectory = `${constants.HEDERA_HAPI_PATH}/data/saved/com.hedera.services.ServicesMain/0/123`; // zip the contents of the newest folder on node1 within /opt/hgcapp/services-hedera/HapiApp2.0/data/saved/com.hedera.services.ServicesMain/0/123/ - const zipFileName = await this.k8.execContainer(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, [ + const zipFileName = await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, [ 'bash', '-c', `cd ${upgradeDirectory} && mapfile -t states < <(ls -1t .) && jar cf "\${states[0]}.zip" -C "\${states[0]}" . && echo -n \${states[0]}.zip`, ]); - await this.k8.copyFrom( - node1FullyQualifiedPodName, - constants.ROOT_CONTAINER, - `${upgradeDirectory}/${zipFileName}`, - config.stagingDir, - ); + await this.k8.copyFrom(podRef, constants.ROOT_CONTAINER, `${upgradeDirectory}/${zipFileName}`, config.stagingDir); config.lastStateZipPath = path.join(config.stagingDir, zipFileName); }); } @@ -1640,22 +1644,14 @@ export class NodeCommandTasks { async (ctx: any, task: ListrTaskWrapper) => { const config = ctx.config; const newNodeFullyQualifiedPodName = Templates.renderNetworkPodName(config.nodeAlias); + const podRef = PodRef.of(config.namespace, newNodeFullyQualifiedPodName); const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias); const savedStateDir = config.lastStateZipPath.match(/\/(\d+)\.zip$/)[1]; const savedStatePath = `${constants.HEDERA_HAPI_PATH}/data/saved/com.hedera.services.ServicesMain/${nodeId}/123/${savedStateDir}`; - await this.k8.execContainer(newNodeFullyQualifiedPodName, constants.ROOT_CONTAINER, [ - 'bash', - '-c', - `mkdir -p ${savedStatePath}`, - ]); - await this.k8.copyTo( - newNodeFullyQualifiedPodName, - constants.ROOT_CONTAINER, - config.lastStateZipPath, - savedStatePath, - ); - await this.platformInstaller.setPathPermission(newNodeFullyQualifiedPodName, constants.HEDERA_HAPI_PATH); - await this.k8.execContainer(newNodeFullyQualifiedPodName, constants.ROOT_CONTAINER, [ + await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, ['bash', '-c', `mkdir -p ${savedStatePath}`]); + await this.k8.copyTo(podRef, constants.ROOT_CONTAINER, config.lastStateZipPath, savedStatePath); + await this.platformInstaller.setPathPermission(podRef, constants.HEDERA_HAPI_PATH); + await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, [ 'bash', '-c', `cd ${savedStatePath} && jar xf 
${path.basename(config.lastStateZipPath)} && rm -f ${path.basename(config.lastStateZipPath)}`, diff --git a/src/core/account_manager.ts b/src/core/account_manager.ts index ecf288dd1..abfad1d24 100644 --- a/src/core/account_manager.ts +++ b/src/core/account_manager.ts @@ -38,6 +38,7 @@ import {Duration} from './time/duration.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from './container_helper.js'; import {type NamespaceName} from './kube/namespace_name.js'; +import {PodRef} from './kube/pod_ref.js'; const REASON_FAILED_TO_GET_KEYS = 'failed to get keys for accountId'; const REASON_SKIPPED = 'skipped since it does not have a genesis key'; @@ -337,7 +338,13 @@ export class AccountManager { const targetPort = localPort; if (this._portForwards.length < totalNodes) { - this._portForwards.push(await this.k8.portForward(networkNodeService.haProxyPodName, localPort, port)); + this._portForwards.push( + await this.k8.portForward( + PodRef.of(networkNodeService.namespace, networkNodeService.haProxyPodName), + localPort, + port, + ), + ); } this.logger.debug(`using local host port forward: ${host}:${targetPort}`); diff --git a/src/core/kube/k8.ts b/src/core/kube/k8.ts index b3972c2bc..a6454f256 100644 --- a/src/core/kube/k8.ts +++ b/src/core/kube/k8.ts @@ -3,7 +3,7 @@ */ import type * as k8s from '@kubernetes/client-node'; import {type TarCreateFilter} from '../../types/aliases.js'; -import {type PodName} from './pod_name.js'; +import {type PodRef} from './pod_ref.js'; import {type ExtendedNetServer, type Optional} from '../../types/index.js'; import {type TDirectoryData} from './t_directory_data.js'; import {type V1Lease} from '@kubernetes/client-node'; @@ -46,10 +46,10 @@ export interface K8 { hasNamespace(namespace: NamespaceName): Promise; /** - * Get a podName by name - * @param name - podName name + * Get a pod by PodRef + * @param podRef - the pod reference */ - getPodByName(name: PodName): Promise; + getPodByName(podRef: PodRef): Promise; /** * Get pods by labels @@ -98,46 +98,46 @@ export interface K8 { * name: config.txt * }] * - * @param podName - * @param containerName + * @param podRef - the pod reference + * @param containerName - the container name * @param destPath - path inside the container * @returns a promise that returns array of directory entries, custom object */ - listDir(podName: PodName, containerName: string, destPath: string): Promise; + listDir(podRef: PodRef, containerName: string, destPath: string): Promise; /** * Check if a filepath exists in the container - * @param podName - * @param containerName + * @param podRef - the pod reference + * @param containerName - the container name * @param destPath - path inside the container * @param [filters] - an object with metadata fields and value */ - hasFile(podName: PodName, containerName: string, destPath: string, filters?: object): Promise; + hasFile(podRef: PodRef, containerName: string, destPath: string, filters?: object): Promise; /** * Check if a directory path exists in the container - * @param podName - * @param containerName + * @param podRef - the pod reference + * @param containerName - the container name * @param destPath - path inside the container */ - hasDir(podName: PodName, containerName: string, destPath: string): Promise; + hasDir(podRef: PodRef, containerName: string, destPath: string): Promise; - mkdir(podName: PodName, containerName: string, destPath: string): Promise; + mkdir(podRef: PodRef, containerName: string, destPath: string): Promise; /** * Copy a file into a 
container * * It overwrites any existing file inside the container at the destination directory * - * @param podName - * @param containerName + * @param podRef - the pod reference + * @param containerName - the container name * @param srcPath - source file path on the local machine * @param destDir - destination directory in the container * @param [filter] - the filter to pass to tar to keep or skip files or directories * @returns a Promise that performs the copy operation */ copyTo( - podName: PodName, + podRef: PodRef, containerName: string, srcPath: string, destDir: string, @@ -149,29 +149,32 @@ * * It overwrites any existing file at the destination directory * - * @param podName - * @param containerName + * @param podRef - the pod reference + * @param containerName - the container name * @param srcPath - source file path in the container * @param destDir - destination directory on the local machine */ - copyFrom(podName: PodName, containerName: string, srcPath: string, destDir: string): Promise; + copyFrom(podRef: PodRef, containerName: string, srcPath: string, destDir: string): Promise; /** * Invoke sh command within a container and return the console output as string - * @param podName - * @param containerName + * @param podRef - the pod reference + * @param containerName - the container name * @param command - sh commands as an array to be run within the containerName (e.g. 'ls -la /opt/hgcapp') * @returns console output as string */ - execContainer(podName: PodName, containerName: string, command: string | string[]): Promise; + execContainer(podRef: PodRef, containerName: string, command: string | string[]): Promise; /** * Port forward a port from a pod to localhost * * This simple server just forwards traffic from itself to a service running in kubernetes * -> localhost:localPort -> port-forward-tunnel -> kubernetes-pod:targetPort + * @param podRef - the pod reference + * @param localPort - the local port to forward to + * @param podPort - the pod port to forward from */ - portForward(podName: PodName, localPort: number, podPort: number): Promise; + portForward(podRef: PodRef, localPort: number, podPort: number): Promise; /** * Stop the port forwarder server @@ -346,10 +349,9 @@ /** * Kill a pod by name and namespace; it will check every 1 second until the pod is no longer found. * Can throw a SoloError if there is an error while deleting the pod. 
- * @param podName - the name of the pod - * @param namespace - the namespace of the pod + * @param podRef - the pod reference */ - killPod(podName: PodName, namespace: NamespaceName): Promise; + killPod(podRef: PodRef): Promise; /** * Download log files from all network pods and save to local solo log directory diff --git a/src/core/kube/k8_client.ts b/src/core/kube/k8_client.ts index 682128699..ae871ba39 100644 --- a/src/core/kube/k8_client.ts +++ b/src/core/kube/k8_client.ts @@ -32,6 +32,7 @@ import {type Namespaces} from './namespaces.js'; import {NamespaceName} from './namespace_name.js'; import K8ClientClusters from './k8_client/k8_client_clusters.js'; import {type Clusters} from './clusters.js'; +import {PodRef} from './pod_ref.js'; /** * A kubernetes API wrapper class providing custom functionalities required by solo @@ -182,9 +183,9 @@ export class K8Client implements K8 { return namespaces.some(namespaces => namespaces.equals(namespace)); } - public async getPodByName(podName: PodName): Promise { - const ns = this.getNamespace(); - const fieldSelector = `metadata.name=${podName.name}`; + public async getPodByName(podRef: PodRef): Promise { + const ns = podRef.namespaceName; + const fieldSelector = `metadata.name=${podRef.podName.name}`; const resp = await this.kubeClient.listNamespacedPod( ns.name, undefined, @@ -199,7 +200,7 @@ Duration.ofMinutes(5).toMillis(), ); - return this.filterItem(resp.body.items, {name: podName.name}); + return this.filterItem(resp.body.items, {name: podRef.podName.name}); } public async getPodsByLabel(labels: string[] = []) { @@ -284,7 +285,7 @@ return this.cachedContexts; } - public async listDir(podName: PodName, containerName: string, destPath: string) { + public async listDir(podRef: PodRef, containerName: string, destPath: string) { // TODO future, return the following // return this.pods.byName(podName).listDir(containerName, destPath); // byName(podName) can use an underlying cache to avoid multiple calls to the API @@ -305,7 +306,7 @@ // below implementation moves to K8Pod class; current usage would still compile. 
try { - const output = (await this.execContainer(podName, containerName, ['ls', '-la', destPath])) as string; + const output = (await this.execContainer(podRef, containerName, ['ls', '-la', destPath])) as string; if (!output) return []; // parse the output and return the entries @@ -339,17 +340,20 @@ export class K8Client implements K8 { return items; } catch (e) { - throw new SoloError(`unable to check path in '${podName}':${containerName}' - ${destPath}: ${e.message}`, e); + throw new SoloError( + `unable to check path in '${podRef.podName.name}':${containerName}' - ${destPath}: ${e.message}`, + e, + ); } } - public async hasFile(podName: PodName, containerName: string, destPath: string, filters: object = {}) { + public async hasFile(podRef: PodRef, containerName: string, destPath: string, filters: object = {}) { const parentDir = path.dirname(destPath); const fileName = path.basename(destPath); const filterMap = new Map(Object.entries(filters)); try { - const entries = await this.listDir(podName, containerName, parentDir); + const entries = await this.listDir(podRef, containerName, parentDir); for (const item of entries) { if (item.name === fileName && !item.directory) { @@ -359,7 +363,7 @@ export class K8Client implements K8 { const field = entry[0]; const value = entry[1]; this.logger.debug( - `Checking file ${podName}:${containerName} ${destPath}; ${field} expected ${value}, found ${item[field]}`, + `Checking file ${podRef.podName.name}:${containerName} ${destPath}; ${field} expected ${value}, found ${item[field]}`, {filters}, ); if (`${value}` !== `${item[field]}`) { @@ -369,14 +373,14 @@ export class K8Client implements K8 { } if (found) { - this.logger.debug(`File check succeeded ${podName}:${containerName} ${destPath}`, {filters}); + this.logger.debug(`File check succeeded ${podRef.podName.name}:${containerName} ${destPath}`, {filters}); return true; } } } } catch (e) { const error = new SoloError( - `unable to check file in '${podName}':${containerName}' - ${destPath}: ${e.message}`, + `unable to check file in '${podRef.podName.name}':${containerName}' - ${destPath}: ${e.message}`, e, ); this.logger.error(error.message, error); @@ -386,9 +390,9 @@ export class K8Client implements K8 { return false; } - public async hasDir(podName: PodName, containerName: string, destPath: string) { + public async hasDir(podRef: PodRef, containerName: string, destPath: string) { return ( - (await this.execContainer(podName, containerName, [ + (await this.execContainer(podRef, containerName, [ 'bash', '-c', '[[ -d "' + destPath + '" ]] && echo -n "true" || echo -n "false"', @@ -396,8 +400,8 @@ export class K8Client implements K8 { ); } - public mkdir(podName: PodName, containerName: string, destPath: string) { - return this.execContainer(podName, containerName, ['bash', '-c', 'mkdir -p "' + destPath + '"']); + public mkdir(podRef: PodRef, containerName: string, destPath: string) { + return this.execContainer(podRef, containerName, ['bash', '-c', 'mkdir -p "' + destPath + '"']); } private exitWithError(localContext: LocalContextObject, errorMessage: string) { @@ -476,22 +480,22 @@ export class K8Client implements K8 { } public async copyTo( - podName: PodName, + podRef: PodRef, containerName: string, srcPath: string, destDir: string, filter: TarCreateFilter | undefined = undefined, ) { const self = this; - const namespace = this.getNamespace(); + const namespace = podRef.namespaceName; const guid = uuid4(); - const messagePrefix = `copyTo[${podName},${guid}]: `; + const messagePrefix = 
`copyTo[${podRef.podName.name},${guid}]: `; - if (!(await self.getPodByName(podName))) throw new IllegalArgumentError(`Invalid pod ${podName}`); + if (!(await self.getPodByName(podRef))) throw new IllegalArgumentError(`Invalid pod ${podRef.podName.name}`); self.logger.info(`${messagePrefix}[srcPath=${srcPath}, destDir=${destDir}]`); - if (!(await this.hasDir(podName, containerName, destDir))) { + if (!(await this.hasDir(podRef, containerName, destDir))) { throw new SoloError(`invalid destination path: ${destDir}`); } @@ -523,7 +527,7 @@ export class K8Client implements K8 { execInstance .exec( namespace.name, - podName.name, + podRef.podName.name, containerName, command, null, @@ -565,18 +569,18 @@ export class K8Client implements K8 { } } - public async copyFrom(podName: PodName, containerName: string, srcPath: string, destDir: string) { + public async copyFrom(podRef: PodRef, containerName: string, srcPath: string, destDir: string) { const self = this; - const namespace = self.getNamespace(); + const namespace = podRef.namespaceName; const guid = uuid4(); - const messagePrefix = `copyFrom[${podName},${guid}]: `; + const messagePrefix = `copyFrom[${podRef.podName.name},${guid}]: `; - if (!(await self.getPodByName(podName))) throw new IllegalArgumentError(`Invalid pod ${podName}`); + if (!(await self.getPodByName(podRef))) throw new IllegalArgumentError(`Invalid pod ${podRef.podName.name}`); self.logger.info(`${messagePrefix}[srcPath=${srcPath}, destDir=${destDir}]`); // get stat for source file in the container - let entries = await self.listDir(podName, containerName, srcPath); + let entries = await self.listDir(podRef, containerName, srcPath); if (entries.length !== 1) { throw new SoloError(`${messagePrefix}invalid source path: ${srcPath}`); } @@ -586,7 +590,7 @@ export class K8Client implements K8 { path.dirname(srcPath), entries[0].name.substring(entries[0].name.indexOf(' -> ') + 4), ); - entries = await self.listDir(podName, containerName, redirectSrcPath); + entries = await self.listDir(podRef, containerName, redirectSrcPath); if (entries.length !== 1) { throw new SoloError(`${messagePrefix}invalid source path: ${redirectSrcPath}`); } @@ -631,7 +635,7 @@ export class K8Client implements K8 { execInstance .exec( namespace.name, - podName.name, + podRef.podName.name, containerName, command, outputFileStream, @@ -698,13 +702,13 @@ export class K8Client implements K8 { } } - public async execContainer(podName: PodName, containerName: string, command: string | string[]) { + public async execContainer(podRef: PodRef, containerName: string, command: string | string[]) { const self = this; - const namespace = self.getNamespace(); + const namespace = podRef.namespaceName; const guid = uuid4(); - const messagePrefix = `execContainer[${podName},${guid}]:`; + const messagePrefix = `execContainer[${podRef.podName.name},${guid}]:`; - if (!(await self.getPodByName(podName))) throw new IllegalArgumentError(`Invalid pod ${podName}`); + if (!(await self.getPodByName(podRef))) throw new IllegalArgumentError(`Invalid pod ${podRef.podName.name}`); if (!command) throw new MissingArgumentError('command cannot be empty'); if (!Array.isArray(command)) { @@ -717,7 +721,7 @@ export class K8Client implements K8 { const localContext = {} as LocalContextObject; localContext.reject = reject; const execInstance = new k8s.Exec(self.kubeConfig); - const tmpFile = self.tempFileFor(`${podName}-output.txt`); + const tmpFile = self.tempFileFor(`${podRef.podName.name}-output.txt`); const outputFileStream = 
fs.createWriteStream(tmpFile); const outputPassthroughStream = new stream.PassThrough({highWaterMark: 10 * 1024 * 1024}); const errPassthroughStream = new stream.PassThrough(); @@ -737,7 +741,7 @@ export class K8Client implements K8 { execInstance .exec( namespace.name, - podName.name, + podRef.podName.name, containerName, command, outputFileStream, @@ -777,22 +781,24 @@ export class K8Client implements K8 { }); } - public async portForward(podName: PodName, localPort: number, podPort: number) { + public async portForward(podRef: PodRef, localPort: number, podPort: number) { try { - this.logger.debug(`Creating port-forwarder for ${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`); - const ns = this.getNamespace(); + this.logger.debug( + `Creating port-forwarder for ${podRef.podName.name}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`, + ); + const ns = podRef.namespaceName; const forwarder = new k8s.PortForward(this.kubeConfig, false); const server = (await net.createServer(socket => { - forwarder.portForward(ns.name, podName.name, [podPort], socket, null, socket, 3); + forwarder.portForward(ns.name, podRef.podName.name, [podPort], socket, null, socket, 3); })) as ExtendedNetServer; // add info for logging - server.info = `${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`; + server.info = `${podRef.podName.name}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`; server.localPort = localPort; this.logger.debug(`Starting port-forwarder [${server.info}]`); return server.listen(localPort, constants.LOCAL_HOST); } catch (e) { - const message = `failed to start port-forwarder [${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}]: ${e.message}`; + const message = `failed to start port-forwarder [${podRef.podName.name}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}]: ${e.message}`; this.logger.error(message, e); throw new SoloError(message, e); } @@ -1467,17 +1473,23 @@ export class K8Client implements K8 { } } - public async killPod(podName: PodName, namespace: NamespaceName) { + public async killPod(podRef: PodRef) { try { - const result = await this.kubeClient.deleteNamespacedPod(podName.name, namespace.name, undefined, undefined, 1); + const result = await this.kubeClient.deleteNamespacedPod( + podRef.podName.name, + podRef.namespaceName.name, + undefined, + undefined, + 1, + ); if (result.response.statusCode !== StatusCodes.OK) { throw new SoloError( - `Failed to delete pod ${podName} in namespace ${namespace}: statusCode: ${result.response.statusCode}`, + `Failed to delete pod ${podRef.podName.name} in namespace ${podRef.namespaceName.name}: statusCode: ${result.response.statusCode}`, ); } let podExists = true; while (podExists) { - const pod = await this.getPodByName(podName); + const pod = await this.getPodByName(podRef); if (!pod?.metadata?.deletionTimestamp) { podExists = false; } else { @@ -1485,7 +1497,7 @@ export class K8Client implements K8 { } } } catch (e) { - const errorMessage = `Failed to delete pod ${podName} in namespace ${namespace}: ${e.message}`; + const errorMessage = `Failed to delete pod ${podRef.podName.name} in namespace ${podRef.namespaceName.name}: ${e.message}`; if (e.body?.code === StatusCodes.NOT_FOUND || e.response?.body?.code === StatusCodes.NOT_FOUND) { this.logger.info(`Pod not found: ${errorMessage}`, e); return; @@ -1508,39 +1520,39 @@ export class K8Client implements K8 { const promises = []; for (const pod of pods) { - promises.push(this.getNodeLog(pod, namespace.name, timeString)); + 
promises.push(this.getNodeLog(pod, namespace, timeString)); } return await Promise.all(promises); } - private async getNodeLog(pod: V1Pod, namespace: string, timeString: string) { - const podName = PodName.of(pod.metadata!.name); + private async getNodeLog(pod: V1Pod, namespace: NamespaceName, timeString: string) { + const podRef = PodRef.of(namespace, PodName.of(pod.metadata!.name)); this.logger.debug(`getNodeLogs(${pod.metadata.name}): begin...`); - const targetDir = path.join(SOLO_LOGS_DIR, namespace, timeString); + const targetDir = path.join(SOLO_LOGS_DIR, namespace.name, timeString); try { if (!fs.existsSync(targetDir)) { fs.mkdirSync(targetDir, {recursive: true}); } const scriptName = 'support-zip.sh'; const sourcePath = path.join(constants.RESOURCES_DIR, scriptName); // script source path - await this.copyTo(podName, ROOT_CONTAINER, sourcePath, `${HEDERA_HAPI_PATH}`); + await this.copyTo(podRef, ROOT_CONTAINER, sourcePath, `${HEDERA_HAPI_PATH}`); await sleep(Duration.ofSeconds(3)); // wait for the script to sync to the file system - await this.execContainer(podName, ROOT_CONTAINER, [ + await this.execContainer(podRef, ROOT_CONTAINER, [ 'bash', '-c', `sync ${HEDERA_HAPI_PATH} && sudo chown hedera:hedera ${HEDERA_HAPI_PATH}/${scriptName}`, ]); - await this.execContainer(podName, ROOT_CONTAINER, [ + await this.execContainer(podRef, ROOT_CONTAINER, [ 'bash', '-c', `sudo chmod 0755 ${HEDERA_HAPI_PATH}/${scriptName}`, ]); - await this.execContainer(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${scriptName}`); - await this.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/data/${podName}.zip`, targetDir); + await this.execContainer(podRef, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${scriptName}`); + await this.copyFrom(podRef, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/data/${podRef.podName.name}.zip`, targetDir); } catch (e: Error | unknown) { // not throw error here, so we can continue to finish downloading logs from other pods // and also delete namespace in the end - this.logger.error(`${constants.NODE_LOG_FAILURE_MSG} ${podName}`, e); + this.logger.error(`${constants.NODE_LOG_FAILURE_MSG} ${podRef}`, e); } this.logger.debug(`getNodeLogs(${pod.metadata.name}): ...end`); } @@ -1560,25 +1572,25 @@ export class K8Client implements K8 { // get length of pods const promises = []; for (const pod of pods) { - promises.push(this.getNodeState(pod, namespace.name)); + promises.push(this.getNodeState(pod, namespace)); } return await Promise.all(promises); } - public async getNodeState(pod: V1Pod, namespace: string) { - const podName = PodName.of(pod.metadata!.name); + public async getNodeState(pod: V1Pod, namespace: NamespaceName) { + const podRef = PodRef.of(namespace, PodName.of(pod.metadata!.name)); this.logger.debug(`getNodeState(${pod.metadata.name}): begin...`); - const targetDir = path.join(SOLO_LOGS_DIR, namespace); + const targetDir = path.join(SOLO_LOGS_DIR, namespace.name); try { if (!fs.existsSync(targetDir)) { fs.mkdirSync(targetDir, {recursive: true}); } - const zipCommand = `tar -czf ${HEDERA_HAPI_PATH}/${podName}-state.zip -C ${HEDERA_HAPI_PATH}/data/saved .`; - await this.execContainer(podName, ROOT_CONTAINER, zipCommand); - await this.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${podName}-state.zip`, targetDir); + const zipCommand = `tar -czf ${HEDERA_HAPI_PATH}/${podRef.podName.name}-state.zip -C ${HEDERA_HAPI_PATH}/data/saved .`; + await this.execContainer(podRef, ROOT_CONTAINER, zipCommand); + await this.copyFrom(podRef, ROOT_CONTAINER, 
`${HEDERA_HAPI_PATH}/${podRef.podName.name}-state.zip`, targetDir); } catch (e: Error | unknown) { - this.logger.error(`failed to download state from pod ${podName}`, e); - this.logger.showUser(`Failed to download state from pod ${podName}` + e); + this.logger.error(`failed to download state from pod ${podRef.podName.name}`, e); + this.logger.showUser(`Failed to download state from pod ${podRef.podName.name}` + e); } this.logger.debug(`getNodeState(${pod.metadata.name}): ...end`); } diff --git a/src/core/kube/kube_errors.ts b/src/core/kube/kube_errors.ts index de91fbae7..2ea0ee767 100644 --- a/src/core/kube/kube_errors.ts +++ b/src/core/kube/kube_errors.ts @@ -6,8 +6,10 @@ import {SoloError} from '../errors.js'; const RFC_1123_POSTFIX = (prefix: string) => `${prefix} is invalid, must be a valid RFC-1123 DNS label. \` + "A DNS 1123 label must consist of lower case alphanumeric characters, '-' " + "or '.', and must start and end with an alphanumeric character.`; + export class NamespaceNameInvalidError extends SoloError { public static NAMESPACE_NAME_INVALID = (name: string) => RFC_1123_POSTFIX(`Namespace name '${name}'`); + /** * Instantiates a new error with a message and an optional cause. * @@ -34,3 +36,31 @@ export class PodNameInvalidError extends SoloError { super(PodNameInvalidError.POD_NAME_INVALID(podName), cause, meta); } } + +export class MissingNamespaceNameError extends SoloError { + public static MISSING_NAMESPACE_NAME = 'Namespace name is required.'; + + /** + * Instantiates a new error with a message and an optional cause. + * + * @param cause - optional underlying cause of the error. + * @param meta - optional metadata to be reported. + */ + public constructor(cause: Error | any = {}, meta: any = {}) { + super(MissingNamespaceNameError.MISSING_NAMESPACE_NAME, cause, meta); + } +} + +export class MissingPodNameError extends SoloError { + public static MISSING_POD_NAME = 'Pod name is required.'; + + /** + * Instantiates a new error with a message and an optional cause. + * + * @param cause - optional underlying cause of the error. + * @param meta - optional metadata to be reported. + */ + public constructor(cause: Error | any = {}, meta: any = {}) { + super(MissingPodNameError.MISSING_POD_NAME, cause, meta); + } +} diff --git a/src/core/kube/pod_ref.ts b/src/core/kube/pod_ref.ts new file mode 100644 index 000000000..ccedb1626 --- /dev/null +++ b/src/core/kube/pod_ref.ts @@ -0,0 +1,51 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + */ +import {MissingNamespaceNameError, MissingPodNameError} from './kube_errors.js'; +import {type PodName} from './pod_name.js'; +import {type NamespaceName} from './namespace_name.js'; + +/** + * Represents a Kubernetes pod reference which includes the namespace name and pod name. + */ +export class PodRef { + private constructor( + public readonly namespaceName: NamespaceName, + public readonly podName: PodName, + ) { + if (!namespaceName) { + throw new MissingNamespaceNameError(); + } + if (!podName) { + throw new MissingPodNameError(); + } + } + + /** + * Creates a pod reference. + * @param namespace The namespace name. + * @param podName The pod name. + */ + public static of(namespace: NamespaceName, podName: PodName): PodRef { + return new PodRef(namespace, podName); + } + + /** + * Compares this instance with another PodRef. + * @param other The other PodRef instance. + * @returns true if both instances have the same namespace name and pod name. 
+ */ + public equals(other: PodRef): boolean { + return ( + other instanceof PodRef && this.namespaceName.equals(other.namespaceName) && this.podName.equals(other.podName) + ); + } + + /** + * Allows implicit conversion to a string. + * @returns The pod reference as a string. + */ + public toString(): string { + return `{namespaceName: ${this.namespaceName}, podName: ${this.podName}}`; + } +} diff --git a/src/core/platform_installer.ts b/src/core/platform_installer.ts index b50a0cf50..f09877fd1 100644 --- a/src/core/platform_installer.ts +++ b/src/core/platform_installer.ts @@ -15,12 +15,12 @@ import chalk from 'chalk'; import {SoloLogger} from './logging.js'; import {type NodeAlias, type NodeAliases} from '../types/aliases.js'; -import {type PodName} from './kube/pod_name.js'; import {Duration} from './time/duration.js'; import {sleep} from './helpers.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from './container_helper.js'; import {type NamespaceName} from './kube/namespace_name.js'; +import {type PodRef} from './kube/pod_ref.js'; /** PlatformInstaller installs platform code in the root-container of a network pod */ @injectable() export class PlatformInstaller { @@ -81,24 +81,24 @@ } /** Fetch and extract platform code into the container */ - async fetchPlatform(podName: PodName, tag: string) { - if (!podName) throw new MissingArgumentError('podName is required'); + async fetchPlatform(podRef: PodRef, tag: string) { + if (!podRef) throw new MissingArgumentError('podRef is required'); if (!tag) throw new MissingArgumentError('tag is required'); try { const scriptName = 'extract-platform.sh'; const sourcePath = path.join(constants.RESOURCES_DIR, scriptName); // script source path - await this.copyFiles(podName, [sourcePath], constants.HEDERA_USER_HOME_DIR); + await this.copyFiles(podRef, [sourcePath], constants.HEDERA_USER_HOME_DIR); // wait a few seconds before calling the script to avoid "No such file" error await sleep(Duration.ofSeconds(2)); const extractScript = path.join(constants.HEDERA_USER_HOME_DIR, scriptName); // inside the container - await this.k8.execContainer(podName, constants.ROOT_CONTAINER, `chmod +x ${extractScript}`); - await this.k8.execContainer(podName, constants.ROOT_CONTAINER, [extractScript, tag]); + await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, `chmod +x ${extractScript}`); + await this.k8.execContainer(podRef, constants.ROOT_CONTAINER, [extractScript, tag]); return true; } catch (e: Error | any) { - const message = `failed to extract platform code in this pod '${podName.name}': ${e.message}`; + const message = `failed to extract platform code in this pod '${podRef.podName.name}': ${e.message}`; this.logger.error(message, e); throw new SoloError(message, e); } @@ -106,13 +106,13 @@ /** * Copy a list of files to a directory in the container - * @param podName + * @param podRef - pod reference * @param srcFiles - list of source files * @param destDir - destination directory * @param [container] - name of the container * @returns a list of paths of the copied files inside the container */ - async copyFiles(podName: PodName, srcFiles: string[], destDir: string, container = constants.ROOT_CONTAINER) { + async copyFiles(podRef: PodRef, srcFiles: string[], destDir: string, container = constants.ROOT_CONTAINER) { try { const copiedFiles: string[] = []; for (const srcPath of srcFiles) { if (!fs.existsSync(srcPath)) { throw new SoloError(`file does not exist: ${srcPath}`); } - if (!(await 
this.k8.hasDir(podName, container, destDir))) { - await this.k8.mkdir(podName, container, destDir); + if (!(await this.k8.hasDir(podRef, container, destDir))) { + await this.k8.mkdir(podRef, container, destDir); } - this.logger.debug(`Copying file into ${podName}: ${srcPath} -> ${destDir}`); - await this.k8.copyTo(podName, container, srcPath, destDir); + this.logger.debug(`Copying file into ${podRef.podName.name}: ${srcPath} -> ${destDir}`); + await this.k8.copyTo(podRef, container, srcPath, destDir); const fileName = path.basename(srcPath); copiedFiles.push(path.join(destDir, fileName)); @@ -135,7 +135,7 @@ export class PlatformInstaller { return copiedFiles; } catch (e: Error | any) { - throw new SoloError(`failed to copy files to pod '${podName.name}': ${e.message}`, e); + throw new SoloError(`failed to copy files to pod '${podRef.podName.name}': ${e.message}`, e); } } @@ -225,22 +225,22 @@ export class PlatformInstaller { } async setPathPermission( - podName: PodName, + podRef: PodRef, destPath: string, mode = '0755', recursive = true, container = constants.ROOT_CONTAINER, ) { - if (!podName) throw new MissingArgumentError('podName is required'); + if (!podRef) throw new MissingArgumentError('podRef is required'); if (!destPath) throw new MissingArgumentError('destPath is required'); const recursiveFlag = recursive ? '-R' : ''; - await this.k8.execContainer(podName, container, [ + await this.k8.execContainer(podRef, container, [ 'bash', '-c', `chown ${recursiveFlag} hedera:hedera ${destPath} 2>/dev/null || true`, ]); - await this.k8.execContainer(podName, container, [ + await this.k8.execContainer(podRef, container, [ 'bash', '-c', `chmod ${recursiveFlag} ${mode} ${destPath} 2>/dev/null || true`, @@ -249,35 +249,35 @@ export class PlatformInstaller { return true; } - async setPlatformDirPermissions(podName: PodName) { + async setPlatformDirPermissions(podRef: PodRef) { const self = this; - if (!podName) throw new MissingArgumentError('podName is required'); + if (!podRef) throw new MissingArgumentError('podRef is required'); try { const destPaths = [constants.HEDERA_HAPI_PATH, constants.HEDERA_HGCAPP_DIR]; for (const destPath of destPaths) { - await self.setPathPermission(podName, destPath); + await self.setPathPermission(podRef, destPath); } return true; } catch (e: Error | any) { - throw new SoloError(`failed to set permission in '${podName}'`, e); + throw new SoloError(`failed to set permission in '${podRef.podName.name}'`, e); } } /** Return a list of task to perform node directory setup */ - taskSetup(podName: PodName, stagingDir: string, isGenesis: boolean) { + taskSetup(podRef: PodRef, stagingDir: string, isGenesis: boolean) { const self = this; return new Listr( [ { title: 'Copy configuration files', - task: async () => await self.copyConfigurationFiles(stagingDir, podName, isGenesis), + task: async () => await self.copyConfigurationFiles(stagingDir, podRef, isGenesis), }, { title: 'Set file permissions', - task: async () => await self.setPlatformDirPermissions(podName), + task: async () => await self.setPlatformDirPermissions(podRef), }, ], { @@ -292,14 +292,14 @@ export class PlatformInstaller { /** * Copy configuration files to the network consensus node pod * @param stagingDir - staging directory path - * @param podName - network consensus node pod name + * @param podRef - pod reference * @param isGenesis - true if this is `solo node setup` and we are at genesis * @private */ - private async copyConfigurationFiles(stagingDir: string, podName: PodName, isGenesis: boolean) { 
+  private async copyConfigurationFiles(stagingDir: string, podRef: PodRef, isGenesis: boolean) {
     if (isGenesis) {
       const genesisNetworkJson = [path.join(stagingDir, 'genesis-network.json')];
-      await this.copyFiles(podName, genesisNetworkJson, `${constants.HEDERA_HAPI_PATH}/data/config`);
+      await this.copyFiles(podRef, genesisNetworkJson, `${constants.HEDERA_HAPI_PATH}/data/config`);
     }
   }
 
diff --git a/test/e2e/commands/mirror_node.test.ts b/test/e2e/commands/mirror_node.test.ts
index 8f9a05e5d..856935c5e 100644
--- a/test/e2e/commands/mirror_node.test.ts
+++ b/test/e2e/commands/mirror_node.test.ts
@@ -23,6 +23,7 @@ import {PackageDownloader} from '../../../src/core/package_downloader.js';
 import {Duration} from '../../../src/core/time/duration.js';
 import {ExplorerCommand} from '../../../src/commands/explorer.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 const testName = 'mirror-cmd-e2e';
 const namespace = NamespaceName.of(testName);
@@ -105,7 +106,7 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin
       const pods = await k8.getPodsByLabel(['app.kubernetes.io/component=hedera-explorer']);
       const explorerPod = pods[0];
 
-      portForwarder = await k8.portForward(PodName.of(explorerPod.metadata.name), 8_080, 8_080);
+      portForwarder = await k8.portForward(PodRef.of(namespace, PodName.of(explorerPod.metadata.name)), 8_080, 8_080);
       await sleep(Duration.ofSeconds(2));
 
       // check if mirror node api server is running
diff --git a/test/e2e/commands/network.test.ts b/test/e2e/commands/network.test.ts
index 1fcc867d3..2561f8ff0 100644
--- a/test/e2e/commands/network.test.ts
+++ b/test/e2e/commands/network.test.ts
@@ -15,6 +15,7 @@ import {Flags as flags} from '../../../src/commands/flags.js';
 import {Duration} from '../../../src/core/time/duration.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
 import {PodName} from '../../../src/core/kube/pod_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 describe('NetworkCommand', () => {
   const testName = 'network-cmd-e2e';
@@ -70,10 +71,9 @@ describe('NetworkCommand', () => {
       expect(await networkCmd.deploy(argv)).to.be.true;
       // check pod names should match expected values
-      await expect(k8.getPodByName(PodName.of('network-node1-0'))).eventually.to.have.nested.property(
-        'metadata.name',
-        'network-node1-0',
-      );
+      await expect(
+        k8.getPodByName(PodRef.of(namespace, PodName.of('network-node1-0'))),
+      ).eventually.to.have.nested.property('metadata.name', 'network-node1-0');
 
       // get list of pvc using k8 listPvcsByNamespace function and print to log
       const pvcs = await k8.listPvcsByNamespace(namespace);
       networkCmd.logger.showList('PVCs', pvcs);
diff --git a/test/e2e/commands/node_delete.test.ts b/test/e2e/commands/node_delete.test.ts
index 05137f74f..96be85bed 100644
--- a/test/e2e/commands/node_delete.test.ts
+++ b/test/e2e/commands/node_delete.test.ts
@@ -19,6 +19,7 @@ import {PodName} from '../../../src/core/kube/pod_name.js';
 import * as NodeCommandConfigs from '../../../src/commands/node/configs.js';
 import {Duration} from '../../../src/core/time/duration.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 const namespace = NamespaceName.of('node-delete');
 const deleteNodeAlias = 'node1';
@@ -81,7 +82,7 @@ e2eTestSuite(
       const pods = await k8.getPodsByLabel(['solo.hedera.com/type=network-node']);
       const podName = PodName.of(pods[0].metadata.name);
       const tmpDir = getTmpDir();
-      await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
+      await k8.copyFrom(PodRef.of(namespace, podName), ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
       const configTxt = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8');
       console.log('config.txt:', configTxt);
       expect(configTxt).not.to.contain(deleteNodeAlias);
diff --git a/test/e2e/commands/node_update.test.ts b/test/e2e/commands/node_update.test.ts
index 480ccc499..8fc7e2220 100644
--- a/test/e2e/commands/node_update.test.ts
+++ b/test/e2e/commands/node_update.test.ts
@@ -21,6 +21,7 @@ import {PodName} from '../../../src/core/kube/pod_name.js';
 import * as NodeCommandConfigs from '../../../src/commands/node/configs.js';
 import {Duration} from '../../../src/core/time/duration.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 const defaultTimeout = Duration.ofMinutes(2).toMillis();
 const namespace = NamespaceName.of('node-update');
@@ -71,12 +72,7 @@ e2eTestSuite(
 
     it('cache current version of private keys', async () => {
       existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace);
-      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
-        existingServiceMap,
-        namespace,
-        k8,
-        getTmpDir(),
-      );
+      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir());
     }).timeout(defaultTimeout);
 
     it('should succeed with init command', async () => {
@@ -118,12 +114,7 @@ e2eTestSuite(
     accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId);
 
     it('signing key and tls key should not match previous one', async () => {
-      const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
-        existingServiceMap,
-        namespace,
-        k8,
-        getTmpDir(),
-      );
+      const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir());
 
       for (const [nodeAlias, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) {
         const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeAlias);
@@ -150,7 +141,7 @@ e2eTestSuite(
       const pods = await k8.getPodsByLabel(['solo.hedera.com/type=network-node']);
       const podName = PodName.of(pods[0].metadata.name);
       const tmpDir = getTmpDir();
-      await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
+      await k8.copyFrom(PodRef.of(namespace, podName), ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
       const configTxt = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8');
       console.log('config.txt:', configTxt);
 
diff --git a/test/e2e/commands/node_upgrade.test.ts b/test/e2e/commands/node_upgrade.test.ts
index 9fab88967..af8bb90d9 100644
--- a/test/e2e/commands/node_upgrade.test.ts
+++ b/test/e2e/commands/node_upgrade.test.ts
@@ -13,6 +13,7 @@ import {PodName} from '../../../src/core/kube/pod_name.js';
 import fs from 'fs';
 import {Zippy} from '../../../src/core/zippy.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 const namespace = NamespaceName.of('node-upgrade');
 const argv = getDefaultArgv();
@@ -86,7 +87,12 @@ e2eTestSuite(
       const tmpDir = getTmpDir();
       const pods = await k8.getPodsByLabel(['solo.hedera.com/type=network-node']);
       const podName = PodName.of(pods[0].metadata.name);
-      await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/data/upgrade/current/version.txt`, tmpDir);
+      await k8.copyFrom(
+        PodRef.of(namespace, podName),
+        ROOT_CONTAINER,
+        `${HEDERA_HAPI_PATH}/data/upgrade/current/version.txt`,
+        tmpDir,
+      );
 
       // compare the version.txt
       const version = fs.readFileSync(`${tmpDir}/version.txt`, 'utf8');
diff --git a/test/e2e/commands/separate_node_add.test.ts b/test/e2e/commands/separate_node_add.test.ts
index 2246045f7..7bd253bb6 100644
--- a/test/e2e/commands/separate_node_add.test.ts
+++ b/test/e2e/commands/separate_node_add.test.ts
@@ -76,12 +76,7 @@ e2eTestSuite(
     it('cache current version of private keys', async () => {
       // @ts-ignore
       existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(namespace);
-      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
-        existingServiceMap,
-        namespace,
-        k8,
-        getTmpDir(),
-      );
+      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir());
     }).timeout(defaultTimeout);
 
     it('should succeed with init command', async () => {
@@ -112,12 +107,7 @@ e2eTestSuite(
     accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace);
 
     it('existing nodes private keys should not have changed', async () => {
-      const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
-        existingServiceMap,
-        namespace,
-        k8,
-        getTmpDir(),
-      );
+      const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir());
 
       for (const [nodeAlias, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) {
         const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeAlias);
diff --git a/test/e2e/commands/separate_node_delete.test.ts b/test/e2e/commands/separate_node_delete.test.ts
index 1b14ba564..0d9d8814c 100644
--- a/test/e2e/commands/separate_node_delete.test.ts
+++ b/test/e2e/commands/separate_node_delete.test.ts
@@ -20,6 +20,7 @@ import {PodName} from '../../../src/core/kube/pod_name.js';
 import * as NodeCommandConfigs from '../../../src/commands/node/configs.js';
 import {Duration} from '../../../src/core/time/duration.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 const namespace = NamespaceName.of('node-delete-separate');
 const nodeAlias = 'node1' as NodeAlias;
@@ -93,8 +94,9 @@ e2eTestSuite(
       // read config.txt file from first node, read config.txt line by line, it should not contain value of nodeAlias
       const pods = await k8.getPodsByLabel(['solo.hedera.com/type=network-node']);
       const podName = PodName.of(pods[0].metadata.name);
+      const podRef = PodRef.of(namespace, podName);
       const tmpDir = getTmpDir();
-      await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
+      await k8.copyFrom(podRef, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
       const configTxt = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8');
       console.log('config.txt:', configTxt);
       expect(configTxt).not.to.contain(nodeAlias);
diff --git a/test/e2e/commands/separate_node_update.test.ts b/test/e2e/commands/separate_node_update.test.ts
index c0f60db51..009c5f7a2 100644
--- a/test/e2e/commands/separate_node_update.test.ts
+++ b/test/e2e/commands/separate_node_update.test.ts
@@ -21,6 +21,7 @@ import {PodName} from '../../../src/core/kube/pod_name.js';
 import * as NodeCommandConfigs from '../../../src/commands/node/configs.js';
 import {Duration} from '../../../src/core/time/duration.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 const defaultTimeout = Duration.ofMinutes(2).toMillis();
 const namespace = NamespaceName.of('node-update-separate');
@@ -71,12 +72,7 @@ e2eTestSuite(
 
     it('cache current version of private keys', async () => {
       existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace);
-      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
-        existingServiceMap,
-        namespace,
-        k8,
-        getTmpDir(),
-      );
+      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir());
     }).timeout(Duration.ofMinutes(8).toMillis());
 
     it('should succeed with init command', async () => {
@@ -129,12 +125,7 @@ e2eTestSuite(
     accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId);
 
     it('signing key and tls key should not match previous one', async () => {
-      const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
-        existingServiceMap,
-        namespace,
-        k8,
-        getTmpDir(),
-      );
+      const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir());
 
       for (const [nodeAlias, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) {
         const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeAlias);
@@ -160,8 +151,9 @@ e2eTestSuite(
       // read config.txt file from first node, read config.txt line by line, it should not contain value of newAccountId
       const pods = await k8.getPodsByLabel(['solo.hedera.com/type=network-node']);
       const podName = PodName.of(pods[0].metadata.name);
+      const podRef = PodRef.of(namespace, podName);
       const tmpDir = getTmpDir();
-      await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
+      await k8.copyFrom(podRef, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir);
       const configTxt = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8');
       console.log('config.txt:', configTxt);
 
diff --git a/test/e2e/commands/separate_node_upgrade.test.ts b/test/e2e/commands/separate_node_upgrade.test.ts
index 4ffcd2136..e34f01409 100644
--- a/test/e2e/commands/separate_node_upgrade.test.ts
+++ b/test/e2e/commands/separate_node_upgrade.test.ts
@@ -13,6 +13,7 @@ import {PodName} from '../../../src/core/kube/pod_name.js';
 import fs from 'fs';
 import {Zippy} from '../../../src/core/zippy.js';
 import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
 
 const namespace = NamespaceName.of('node-upgrade');
 const argv = getDefaultArgv();
@@ -92,7 +93,8 @@ e2eTestSuite(
       const tmpDir = getTmpDir();
       const pods = await k8.getPodsByLabel(['solo.hedera.com/type=network-node']);
       const podName = PodName.of(pods[0].metadata.name);
-      await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/data/upgrade/current/version.txt`, tmpDir);
+      const podRef = PodRef.of(namespace, podName);
+      await k8.copyFrom(podRef, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/data/upgrade/current/version.txt`, tmpDir);
 
       // compare the version.txt
       const version = fs.readFileSync(`${tmpDir}/version.txt`, 'utf8');
diff --git a/test/e2e/e2e_node_util.ts b/test/e2e/e2e_node_util.ts
index 7211e8b85..af27aab5a 100644
--- a/test/e2e/e2e_node_util.ts
+++ b/test/e2e/e2e_node_util.ts
@@ -24,6 +24,7 @@ import {Duration} from '../../src/core/time/duration.js';
 import {container} from 'tsyringe-neo';
 import {NamespaceName} from '../../src/core/kube/namespace_name.js';
 import {PodName} from '../../src/core/kube/pod_name.js';
+import {PodRef} from '../../src/core/kube/pod_ref.js';
 
 export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag = HEDERA_PLATFORM_VERSION_TAG) {
   const namespace = NamespaceName.of(testName);
@@ -100,7 +101,7 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag
 
           const podName = await nodeRefreshTestSetup(argv, testName, k8, nodeAlias);
           if (mode === 'kill') {
-            await k8.killPod(podName, namespace);
+            await k8.killPod(PodRef.of(namespace, podName));
           } else if (mode === 'stop') {
             expect(await nodeCmd.handlers.stop(argv)).to.be.true;
             await sleep(Duration.ofSeconds(20)); // give time for node to stop and update its logs
@@ -124,7 +125,7 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag
         it(`${nodeAlias} should be running`, async () => {
           try {
             // @ts-ignore to access tasks which is a private property
-            expect((await nodeCmd.tasks.checkNetworkNodePod(namespace, nodeAlias)).name).to.equal(
+            expect((await nodeCmd.tasks.checkNetworkNodePod(namespace, nodeAlias)).podName.name).to.equal(
               `network-${nodeAlias}-0`,
             );
           } catch (e) {
diff --git a/test/e2e/integration/core/account_manager.test.ts b/test/e2e/integration/core/account_manager.test.ts
index dc0867748..11059e5dc 100644
--- a/test/e2e/integration/core/account_manager.test.ts
+++ b/test/e2e/integration/core/account_manager.test.ts
@@ -12,6 +12,7 @@ import {Duration} from '../../../../src/core/time/duration.js';
 import {type K8} from '../../../../src/core/kube/k8.js';
 import {type AccountManager} from '../../../../src/core/account_manager.js';
 import {NamespaceName} from '../../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../../src/core/kube/pod_ref.js';
 
 const namespace = NamespaceName.of('account-mngr-e2e');
 const argv = getDefaultArgv();
@@ -56,6 +57,7 @@ e2eTestSuite(
       const localHost = '127.0.0.1';
       const podName = PodName.of('minio-console'); // use a svc that is less likely to be used by other tests
+      const podRef: PodRef = PodRef.of(namespace, podName);
       const podPort = 9_090;
       const localPort = 19_090;
 
@@ -67,7 +69,7 @@ e2eTestSuite(
 
       // ports should be opened
       // @ts-expect-error - TS2341: Property _portForwards is private and only accessible within class AccountManager
-      accountManager._portForwards.push(await k8.portForward(podName, localPort, podPort));
+      accountManager._portForwards.push(await k8.portForward(podRef, localPort, podPort));
 
       // ports should be closed
       await accountManager.close();
diff --git a/test/e2e/integration/core/k8_e2e.test.ts b/test/e2e/integration/core/k8_e2e.test.ts
index acf9854bd..c16befaa1 100644
--- a/test/e2e/integration/core/k8_e2e.test.ts
+++ b/test/e2e/integration/core/k8_e2e.test.ts
@@ -36,20 +36,15 @@ import {Duration} from '../../../../src/core/time/duration.js';
 import {container} from 'tsyringe-neo';
 import {type K8Client} from '../../../../src/core/kube/k8_client.js';
 import {NamespaceName} from '../../../../src/core/kube/namespace_name.js';
+import {PodRef} from '../../../../src/core/kube/pod_ref.js';
 
 const defaultTimeout = Duration.ofMinutes(2).toMillis();
 
-async function createPod(
-  podName: PodName,
-  containerName: string,
-  podLabelValue: string,
-  testNamespace: NamespaceName,
-  k8: K8Client,
-): Promise<void> {
+async function createPod(podRef: PodRef, containerName: string, podLabelValue: string, k8: K8Client): Promise<void> {
   const v1Pod = new V1Pod();
   const v1Metadata = new V1ObjectMeta();
-  v1Metadata.name = podName.name;
-  v1Metadata.namespace = testNamespace.name;
+  v1Metadata.name = podRef.podName.name;
+  v1Metadata.namespace = podRef.namespaceName.name;
   v1Metadata.labels = {app: podLabelValue};
   v1Pod.metadata = v1Metadata;
   const v1Container = new V1Container();
@@ -64,7 +59,7 @@ async function createPod(
   const v1Spec = new V1PodSpec();
   v1Spec.containers = [v1Container];
   v1Pod.spec = v1Spec;
-  await k8.kubeClient.createNamespacedPod(testNamespace.name, v1Pod);
+  await k8.kubeClient.createNamespacedPod(podRef.namespaceName.name, v1Pod);
 }
 
 describe('K8', () => {
@@ -74,6 +69,7 @@ describe('K8', () => {
   const testNamespace = NamespaceName.of('k8-e2e');
   const argv = [];
   const podName = PodName.of(`test-pod-${uuid4()}`);
+  const podRef = PodRef.of(testNamespace, podName);
   const containerName = 'alpine';
   const podLabelValue = `test-${uuid4()}`;
   const serviceName = `test-service-${uuid4()}`;
@@ -86,7 +82,7 @@ describe('K8', () => {
     if (!(await k8.hasNamespace(testNamespace))) {
       await k8.createNamespace(testNamespace);
     }
-    await createPod(podName, containerName, podLabelValue, testNamespace, k8);
+    await createPod(podRef, containerName, podLabelValue, k8);
     const v1Svc = new V1Service();
     const v1SvcMetadata = new V1ObjectMeta();
     v1SvcMetadata.name = serviceName;
@@ -109,7 +105,7 @@ describe('K8', () => {
   after(async function () {
     this.timeout(defaultTimeout);
     try {
-      await k8.killPod(podName, testNamespace);
+      await k8.killPod(PodRef.of(testNamespace, podName));
       argv[flags.namespace.name] = constants.SOLO_SETUP_NAMESPACE.name;
       configManager.update(argv);
     } catch (e) {
@@ -158,7 +154,7 @@ describe('K8', () => {
   it('should be able to check if a path is directory inside a container', async () => {
     const pods = await k8.getPodsByLabel([`app=${podLabelValue}`]);
     const podName = PodName.of(pods[0].metadata.name);
-    expect(await k8.hasDir(podName, containerName, '/tmp')).to.be.true;
+    expect(await k8.hasDir(PodRef.of(testNamespace, podName), containerName, '/tmp')).to.be.true;
   }).timeout(defaultTimeout);
 
   const testCases = ['test/data/pem/keys/a-private-node0.pem', 'test/data/build-v0.54.0-alpha.4.zip'];
@@ -177,10 +173,10 @@ describe('K8', () => {
      const originalStat = fs.statSync(localFilePath);
 
       // upload the file
-      expect(await k8.copyTo(podName, containerName, localFilePath, remoteTmpDir)).to.be.true;
+      expect(await k8.copyTo(podRef, containerName, localFilePath, remoteTmpDir)).to.be.true;
 
       // download the same file
-      expect(await k8.copyFrom(podName, containerName, remoteFilePath, localTmpDir)).to.be.true;
+      expect(await k8.copyFrom(podRef, containerName, remoteFilePath, localTmpDir)).to.be.true;
       const downloadedFilePath = path.join(localTmpDir, fileName);
       const downloadedFileData = fs.readFileSync(downloadedFilePath);
       const downloadedFileHash = crypto.createHash('sha384').update(downloadedFileData).digest('hex');
@@ -190,7 +186,7 @@ describe('K8', () => {
       expect(downloadedFileHash, 'downloaded file hash should match original file hash').to.equal(originalFileHash);
 
       // rm file inside the container
-      await k8.execContainer(podName, containerName, ['rm', '-f', remoteFilePath]);
+      await k8.execContainer(podRef, containerName, ['rm', '-f', remoteFilePath]);
 
       fs.rmdirSync(localTmpDir, {recursive: true});
     }).timeout(defaultTimeout);
@@ -200,25 +196,27 @@ describe('K8', () => {
     const podName = Templates.renderNetworkPodName('node1');
     const localPort = +constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT;
     try {
-      k8.portForward(podName, localPort, +constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT).then(server => {
-        expect(server).not.to.be.null;
+      k8.portForward(PodRef.of(testNamespace, podName), localPort, +constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT).then(
+        server => {
+          expect(server).not.to.be.null;
 
-        // client
-        const s = new net.Socket();
-        s.on('ready', async () => {
-          s.destroy();
-          await k8.stopPortForward(server);
-          done();
-        });
+          // client
+          const s = new net.Socket();
+          s.on('ready', async () => {
+            s.destroy();
+            await k8.stopPortForward(server);
+            done();
+          });
 
-        s.on('error', async e => {
-          s.destroy();
-          await k8.stopPortForward(server);
-          done(new SoloError(`could not connect to local port '${localPort}': ${e.message}`, e));
-        });
+          s.on('error', async e => {
+            s.destroy();
+            await k8.stopPortForward(server);
+            done(new SoloError(`could not connect to local port '${localPort}': ${e.message}`, e));
+          });
 
-        s.connect(localPort);
-      });
+          s.connect(localPort);
+        },
+      );
     } catch (e) {
       testLogger.showUserError(e);
       expect.fail();
@@ -229,7 +227,7 @@ describe('K8', () => {
   it('should be able to cat a file inside the container', async () => {
     const pods = await k8.getPodsByLabel([`app=${podLabelValue}`]);
     const podName = PodName.of(pods[0].metadata.name);
-    const output = await k8.execContainer(podName, containerName, ['cat', '/etc/hostname']);
+    const output = await k8.execContainer(PodRef.of(testNamespace, podName), containerName, ['cat', '/etc/hostname']);
     expect(output.indexOf(podName.name)).to.equal(0);
   }).timeout(defaultTimeout);
 
@@ -259,9 +257,10 @@ describe('K8', () => {
   it('should be able to kill a pod', async () => {
     const podName = PodName.of(`test-pod-${uuid4()}`);
+    const podRef = PodRef.of(testNamespace, podName);
     const podLabelValue = `test-${uuid4()}`;
-    await createPod(podName, containerName, podLabelValue, testNamespace, k8);
-    await k8.killPod(podName, testNamespace);
+    await createPod(podRef, containerName, podLabelValue, k8);
+    await k8.killPod(podRef);
     const newPods = await k8.getPodsByLabel([`app=${podLabelValue}`]);
     expect(newPods).to.have.lengthOf(0);
   });
diff --git a/test/e2e/integration/core/platform_installer_e2e.test.ts b/test/e2e/integration/core/platform_installer_e2e.test.ts
index f40d56fcd..11d5f4e15 100644
--- a/test/e2e/integration/core/platform_installer_e2e.test.ts
+++ b/test/e2e/integration/core/platform_installer_e2e.test.ts
@@ -16,6 +16,7 @@ import {type AccountManager} from '../../../../src/core/account_manager.js';
 import {type PlatformInstaller} from '../../../../src/core/platform_installer.js';
 import {NamespaceName} from '../../../../src/core/kube/namespace_name.js';
 import {PodName} from '../../../../src/core/kube/pod_name.js';
+import {PodRef} from '../../../../src/core/kube/pod_ref.js';
 
 const defaultTimeout = Duration.ofSeconds(20).toMillis();
@@ -48,6 +49,7 @@ e2eTestSuite(
     let accountManager: AccountManager;
     let installer: PlatformInstaller;
     const podName = PodName.of('network-node1-0');
+    const podRef = PodRef.of(namespace, podName);
     const packageVersion = 'v0.42.5';
 
     before(() => {
@@ -75,24 +77,27 @@ e2eTestSuite(
     it('should fail with invalid pod', async () => {
       try {
         // @ts-ignore
-        await installer.fetchPlatform('', packageVersion);
+        await installer.fetchPlatform(null, packageVersion);
         throw new Error(); // fail-safe, should not reach here
       } catch (e) {
-        expect(e.message).to.include('podName is required');
+        expect(e.message).to.include('podRef is required');
       }
 
       try {
         // @ts-ignore
-        await installer.fetchPlatform('INVALID', packageVersion);
+        await installer.fetchPlatform(
+          PodRef.of(NamespaceName.of('valid-namespace'), PodName.of('INVALID_POD')),
+          packageVersion,
+        );
         throw new Error(); // fail-safe, should not reach here
       } catch (e) {
-        expect(e.message).to.include('failed to extract platform code in this pod');
+        expect(e.message).to.include('must be a valid RFC-1123 DNS label');
       }
     }).timeout(defaultTimeout);
 
     it('should fail with invalid tag', async () => {
       try {
-        await installer.fetchPlatform(podName, 'INVALID');
+        await installer.fetchPlatform(podRef, 'INVALID');
         throw new Error(); // fail-safe, should not reach here
       } catch (e) {
         expect(e.message).to.include('curl: (22) The requested URL returned error: 404');
@@ -100,9 +105,9 @@ e2eTestSuite(
     }).timeout(defaultTimeout);
 
     it('should succeed with valid tag and pod', async () => {
-      expect(await installer.fetchPlatform(podName, packageVersion)).to.be.true;
+      expect(await installer.fetchPlatform(podRef, packageVersion)).to.be.true;
       const outputs = await k8.execContainer(
-        podName,
+        podRef,
         constants.ROOT_CONTAINER,
         `ls -la ${constants.HEDERA_HAPI_PATH}`,
       );
diff --git a/test/test_add.ts b/test/test_add.ts
index d727adfdd..28913f210 100644
--- a/test/test_add.ts
+++ b/test/test_add.ts
@@ -76,12 +76,7 @@ export function testNodeAdd(
 
     it('cache current version of private keys', async () => {
       existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace);
-      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
-        existingServiceMap,
-        namespace,
-        k8,
-        getTmpDir(),
-      );
+      existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir());
     }).timeout(defaultTimeout);
 
     it('should succeed with init command', async () => {
@@ -106,7 +101,6 @@ export function testNodeAdd(
     it('existing nodes private keys should not have changed', async () => {
       const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
         existingServiceMap,
-        namespace,
         k8,
         getTmpDir(),
       );
diff --git a/test/test_util.ts b/test/test_util.ts
index 9bd4d8ccc..e9fd59c40 100644
--- a/test/test_util.ts
+++ b/test/test_util.ts
@@ -48,6 +48,7 @@ import {Duration} from '../src/core/time/duration.js';
 import {container} from 'tsyringe-neo';
 import {resetTestContainer} from './test_container.js';
 import {NamespaceName} from '../src/core/kube/namespace_name.js';
+import {PodRef} from '../src/core/kube/pod_ref.js';
 
 export const TEST_CLUSTER = 'solo-e2e';
 export const HEDERA_PLATFORM_VERSION_TAG = HEDERA_PLATFORM_VERSION;
@@ -389,7 +390,6 @@ export function accountCreationShouldSucceed(
 
 export async function getNodeAliasesPrivateKeysHash(
   networkNodeServicesMap: Map<NodeAlias, NetworkNodeServices>,
-  namespace: NamespaceName,
   k8: K8,
   destDir: string,
 ) {
@@ -404,6 +404,7 @@ export async function getNodeAliasesPrivateKeysHash(
       fs.mkdirSync(uniqueNodeDestDir, {recursive: true});
     }
     await addKeyHashToMap(
+      networkNodeServices.namespace,
       k8,
       nodeAlias,
       dataKeysDir,
@@ -411,13 +412,22 @@ export async function getNodeAliasesPrivateKeysHash(
       keyHashMap,
       Templates.renderGossipPemPrivateKeyFile(nodeAlias),
     );
-    await addKeyHashToMap(k8, nodeAlias, tlsKeysDir, uniqueNodeDestDir, keyHashMap, 'hedera.key');
+    await addKeyHashToMap(
+      networkNodeServices.namespace,
+      k8,
+      nodeAlias,
+      tlsKeysDir,
+      uniqueNodeDestDir,
+      keyHashMap,
+      'hedera.key',
+    );
     nodeKeyHashMap.set(nodeAlias, keyHashMap);
   }
   return nodeKeyHashMap;
 }
 
 async function addKeyHashToMap(
+  namespace: NamespaceName,
   k8: K8,
   nodeAlias: NodeAlias,
   keyDir: string,
@@ -426,7 +436,7 @@ async function addKeyHashToMap(
   privateKeyFileName: string,
 ) {
   await k8.copyFrom(
-    Templates.renderNetworkPodName(nodeAlias),
+    PodRef.of(namespace, Templates.renderNetworkPodName(nodeAlias)),
     ROOT_CONTAINER,
     path.join(keyDir, privateKeyFileName),
     uniqueNodeDestDir,
@@ -436,26 +446,6 @@ async function addKeyHashToMap(
   keyHashMap.set(privateKeyFileName, crypto.createHash('sha256').update(keyString).digest('base64'));
 }
 
-export function getK8Instance(configManager: ConfigManager) {
-  try {
-    return container.resolve('K8');
-    // TODO: return a mock without running the init within constructor after we convert to Mocha, Jest ESModule mocks are broke.
-  } catch (e) {
-    if (!(e instanceof SoloError)) {
-      throw e;
-    }
-
-    // Set envs
-    process.env.SOLO_CLUSTER_NAME = 'solo-e2e';
-    process.env.SOLO_NAMESPACE = 'solo-e2e';
-    process.env.SOLO_CLUSTER_SETUP_NAMESPACE = 'solo-setup';
-
-    // Create cluster
-    execSync(`kind create cluster --name "${process.env.SOLO_CLUSTER_NAME}"`, {stdio: 'inherit'});
-    return container.resolve('K8');
-  }
-}
-
 export const testLocalConfigData = {
   userEmailAddress: 'john.doe@example.com',
   deployments: {
diff --git a/test/unit/core/platform_installer.test.ts b/test/unit/core/platform_installer.test.ts
index dfbd1c0a2..afbf896b0 100644
--- a/test/unit/core/platform_installer.test.ts
+++ b/test/unit/core/platform_installer.test.ts
@@ -12,6 +12,8 @@ import {PlatformInstaller} from '../../../src/core/platform_installer.js';
 import {IllegalArgumentError, MissingArgumentError} from '../../../src/core/errors.js';
 import {PodName} from '../../../src/core/kube/pod_name.js';
 import {container} from 'tsyringe-neo';
+import {PodRef} from '../../../src/core/kube/pod_ref.js';
+import {NamespaceName} from '../../../src/core/kube/namespace_name.js';
 
 describe('PackageInstaller', () => {
   let installer: PlatformInstaller;
@@ -77,10 +79,15 @@ describe('PackageInstaller', () => {
   describe('extractPlatform', () => {
     it('should fail for missing pod name', async () => {
-      await expect(installer.fetchPlatform(null as PodName, 'v0.42.5')).to.be.rejectedWith(MissingArgumentError);
+      await expect(installer.fetchPlatform(null as PodRef, 'v0.42.5')).to.be.rejectedWith(MissingArgumentError);
     });
 
     it('should fail for missing tag', async () => {
-      await expect(installer.fetchPlatform(PodName.of('network-node1-0'), '')).to.be.rejectedWith(MissingArgumentError);
+      await expect(
+        installer.fetchPlatform(
+          PodRef.of(NamespaceName.of('platform-installer-test'), PodName.of('network-node1-0')),
+          '',
+        ),
+      ).to.be.rejectedWith(MissingArgumentError);
     });
   });
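
The call sites in this diff exercise only a small surface of the new PodRef type: the static PodRef.of(namespaceName, podName) factory plus the podName and namespaceName accessors (for example podRef.podName.name in PlatformInstaller and podRef.namespaceName.name in createPod above). A minimal sketch consistent with those usages follows; it is an assumption-based reconstruction, not the actual contents of src/core/kube/pod_ref.ts, and the import paths and error type are likewise assumed.

// Hypothetical reconstruction of the PodRef surface used in this diff.
import {MissingArgumentError} from '../errors.js';
import {type NamespaceName} from './namespace_name.js';
import {type PodName} from './pod_name.js';

export class PodRef {
  private constructor(
    public readonly namespaceName: NamespaceName,
    public readonly podName: PodName,
  ) {}

  /** Builds a namespaced reference to a pod, e.g. PodRef.of(namespace, PodName.of('network-node1-0')). */
  public static of(namespaceName: NamespaceName, podName: PodName): PodRef {
    if (!namespaceName) throw new MissingArgumentError('namespaceName is required');
    if (!podName) throw new MissingArgumentError('podName is required');
    return new PodRef(namespaceName, podName);
  }
}

The e2e expectation above, where fetchPlatform with PodName.of('INVALID_POD') rejects with 'must be a valid RFC-1123 DNS label', suggests that name validation happens when the PodName (or NamespaceName) value is constructed rather than in PodRef itself.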