diff --git a/deployment/cdk/opensearch-service-migration/default-values.json b/deployment/cdk/opensearch-service-migration/default-values.json index 4a8114a52..3d15ff04a 100644 --- a/deployment/cdk/opensearch-service-migration/default-values.json +++ b/deployment/cdk/opensearch-service-migration/default-values.json @@ -1,5 +1,6 @@ { "engineVersion": "OS_2.9", + "targetClusterVersion": "OS_2.9", "domainName": "os-service-domain", "tlsSecurityPolicy": "TLS_1_2", "enforceHTTPS": true, @@ -15,4 +16,4 @@ "trafficReplayerServiceEnabled": false, "otelCollectorEnabled": true, "dpPipelineTemplatePath": "./dp_pipeline_template.yaml" -} \ No newline at end of file +} diff --git a/deployment/cdk/opensearch-service-migration/lib/common-utilities.ts b/deployment/cdk/opensearch-service-migration/lib/common-utilities.ts index a98acd60b..619b3acc4 100644 --- a/deployment/cdk/opensearch-service-migration/lib/common-utilities.ts +++ b/deployment/cdk/opensearch-service-migration/lib/common-utilities.ts @@ -7,6 +7,7 @@ import { ICertificate } from "aws-cdk-lib/aws-certificatemanager"; import { IStringParameter, StringParameter } from "aws-cdk-lib/aws-ssm"; import * as forge from 'node-forge'; import * as yargs from 'yargs'; +import { ClusterYaml } from "./migration-services-yaml"; // parseAndMergeArgs, see @common-utilities.test.ts for an example of different cases @@ -314,3 +315,125 @@ export enum MigrationSSMParameter { TRAFFIC_STREAM_SOURCE_ACCESS_SECURITY_GROUP_ID = 'trafficStreamSourceAccessSecurityGroupId', VPC_ID = 'vpcId', } + + +export class ClusterNoAuth {}; + +export class ClusterSigV4Auth { + region?: string; + serviceSigningName?: string; + constructor({region, serviceSigningName: service}: {region: string, serviceSigningName: string}) { + this.region = region; + this.serviceSigningName = service; + } +} + +export class ClusterBasicAuth { + username: string; + password?: string; + password_from_secret_arn?: string; + + constructor({ + username, + password, + 
password_from_secret_arn, + }: { + username: string; + password?: string; + password_from_secret_arn?: string; + }) { + this.username = username; + this.password = password; + this.password_from_secret_arn = password_from_secret_arn; + + // Validation: Exactly one of password or password_from_secret_arn must be provided + if ((password && password_from_secret_arn) || (!password && !password_from_secret_arn)) { + throw new Error('Exactly one of password or password_from_secret_arn must be provided'); + } + } +} + +export class ClusterAuth { + basicAuth?: ClusterBasicAuth + noAuth?: ClusterNoAuth + sigv4?: ClusterSigV4Auth + + constructor({basicAuth, noAuth, sigv4}: {basicAuth?: ClusterBasicAuth, noAuth?: ClusterNoAuth, sigv4?: ClusterSigV4Auth}) { + this.basicAuth = basicAuth; + this.noAuth = noAuth; + this.sigv4 = sigv4; + } + + validate() { + const numDefined = (this.basicAuth? 1 : 0) + (this.noAuth? 1 : 0) + (this.sigv4? 1 : 0) + if (numDefined != 1) { + throw new Error(`Exactly one authentication method can be defined. 
${numDefined} are currently set.`) + } + } + + toDict() { + if (this.basicAuth) { + return {basic_auth: this.basicAuth}; + } + if (this.noAuth) { + return {no_auth: ""}; + } + if (this.sigv4) { + return {sigv4: this.sigv4}; + } + return {}; + } +} + +function getBasicClusterAuth(basicAuthObject: { [key: string]: any }): ClusterBasicAuth { + // Destructure and validate the input object + const { username, password, passwordFromSecretArn } = basicAuthObject; + // Ensure the required 'username' field is present + if (typeof username !== 'string' || !username) { + throw new Error('Invalid input: "username" must be a non-empty string'); + } + // Ensure that exactly one of 'password' or 'passwordFromSecretArn' is provided + const hasPassword = typeof password === 'string' && password.trim() !== ''; + const hasPasswordFromSecretArn = typeof passwordFromSecretArn === 'string' && passwordFromSecretArn.trim() !== ''; + if ((hasPassword && hasPasswordFromSecretArn) || (!hasPassword && !hasPasswordFromSecretArn)) { + throw new Error('Exactly one of "password" or "passwordFromSecretArn" must be provided'); + } + return new ClusterBasicAuth({ + username, + password: hasPassword ? password : undefined, + password_from_secret_arn: hasPasswordFromSecretArn ? 
passwordFromSecretArn : undefined, + }); +} + +function getSigV4ClusterAuth(sigv4AuthObject: { [key: string]: any }): ClusterSigV4Auth { + // Destructure and validate the input object + const { serviceSigningName, region } = sigv4AuthObject; + + // Create and return the ClusterSigV4Auth object + return new ClusterSigV4Auth({serviceSigningName, region}); +} + +// Function to parse and validate auth object +function parseAuth(json: any): ClusterAuth | null { + if (json.type === 'basic' && typeof json.username === 'string' && (typeof json.password === 'string' || typeof json.passwordFromSecretArn === 'string') && !(typeof json.password === 'string' && typeof json.passwordFromSecretArn === 'string')) { + return new ClusterAuth({basicAuth: getBasicClusterAuth(json)}); + } else if (json.type === 'sigv4' && typeof json.region === 'string' && typeof json.serviceSigningName === 'string') { + return new ClusterAuth({sigv4: getSigV4ClusterAuth(json)}); + } else if (json.type === 'none') { + return new ClusterAuth({noAuth: new ClusterNoAuth()}); + } + return null; // Invalid auth type +} + +export function parseClusterDefinition(json: any): ClusterYaml { + const endpoint = json.endpoint + const version = json.version + if (!endpoint) { + throw new Error('Missing required field in cluster definition: endpoint') + } + const auth = parseAuth(json.auth) + if (!auth) { + throw new Error(`Invalid auth type when parsing cluster definition: ${json.auth.type}`) + } + return new ClusterYaml({endpoint, version, auth}) +} \ No newline at end of file diff --git a/deployment/cdk/opensearch-service-migration/lib/migration-services-yaml.ts b/deployment/cdk/opensearch-service-migration/lib/migration-services-yaml.ts index 2027173a0..c456d494b 100644 --- a/deployment/cdk/opensearch-service-migration/lib/migration-services-yaml.ts +++ b/deployment/cdk/opensearch-service-migration/lib/migration-services-yaml.ts @@ -1,34 +1,26 @@ +import { EngineVersion } from 'aws-cdk-lib/aws-opensearchservice'; 
+import { ClusterAuth } from './common-utilities'; import * as yaml from 'yaml'; -export class ClusterBasicAuth { - username: string; - password?: string; - password_from_secret_arn?: string; - - constructor({ - username, - password, - password_from_secret_arn, - }: { - username: string; - password?: string; - password_from_secret_arn?: string; - }) { - this.username = username; - this.password = password; - this.password_from_secret_arn = password_from_secret_arn; - - // Validation: Exactly one of password or password_from_secret_arn must be provided - if ((password && password_from_secret_arn) || (!password && !password_from_secret_arn)) { - throw new Error('Exactly one of password or password_from_secret_arn must be provided'); - } - } -} - export class ClusterYaml { endpoint: string = ''; - no_auth?: string | null; - basic_auth?: ClusterBasicAuth | null; + version?: EngineVersion; + auth: ClusterAuth; + + constructor({endpoint, auth, version} : {endpoint: string, auth: ClusterAuth, version?: EngineVersion}) { + this.endpoint = endpoint; + this.auth = auth; + this.version = version; + } + toDict() { + return { + endpoint: this.endpoint, + ...this.auth.toDict(), + // TODO: figure out how version should be incorporated + // https://opensearch.atlassian.net/browse/MIGRATIONS-1951 + // version: this.version?.version + }; + } } export class MetricsSourceYaml { @@ -142,8 +134,8 @@ export class ServicesYaml { stringify(): string { return yaml.stringify({ - source_cluster: this.source_cluster, - target_cluster: this.target_cluster, + source_cluster: this.source_cluster?.toDict(), + target_cluster: this.target_cluster?.toDict(), metrics_source: this.metrics_source, backfill: this.backfill?.toDict(), snapshot: this.snapshot?.toDict(), diff --git a/deployment/cdk/opensearch-service-migration/lib/opensearch-domain-stack.ts b/deployment/cdk/opensearch-service-migration/lib/opensearch-domain-stack.ts index 002ee8eaa..eab4807df 100644 --- 
a/deployment/cdk/opensearch-service-migration/lib/opensearch-domain-stack.ts +++ b/deployment/cdk/opensearch-service-migration/lib/opensearch-domain-stack.ts @@ -14,7 +14,8 @@ import {AnyPrincipal, Effect, PolicyStatement} from "aws-cdk-lib/aws-iam"; import {ILogGroup, LogGroup} from "aws-cdk-lib/aws-logs"; import {ISecret, Secret} from "aws-cdk-lib/aws-secretsmanager"; import {StackPropsExt} from "./stack-composer"; -import { ClusterBasicAuth, ClusterYaml } from "./migration-services-yaml"; +import { ClusterYaml } from "./migration-services-yaml"; +import { ClusterAuth, ClusterBasicAuth, ClusterNoAuth } from "./common-utilities" import { MigrationSSMParameter, createMigrationStringParameter, getMigrationStringParameterValue } from "./common-utilities"; @@ -115,15 +116,15 @@ export class OpenSearchDomainStack extends Stack { } } - generateTargetClusterYaml(domain: Domain, adminUserName: string | undefined, adminUserSecret: ISecret|undefined) { - let targetCluster = new ClusterYaml() - targetCluster.endpoint = `https://${domain.domainEndpoint}:443`; + generateTargetClusterYaml(domain: Domain, adminUserName: string | undefined, adminUserSecret: ISecret|undefined, version: EngineVersion) { + let clusterAuth = new ClusterAuth({}); if (adminUserName) { - targetCluster.basic_auth = new ClusterBasicAuth({ username: adminUserName, password_from_secret_arn: adminUserSecret?.secretArn }) + clusterAuth.basicAuth = new ClusterBasicAuth({ username: adminUserName, password_from_secret_arn: adminUserSecret?.secretArn }) } else { - targetCluster.no_auth = '' + clusterAuth.noAuth = new ClusterNoAuth(); } - this.targetClusterYaml = targetCluster; + this.targetClusterYaml = new ClusterYaml({endpoint: `https://${domain.domainEndpoint}:443`, auth: clusterAuth, version}) + } constructor(scope: Construct, id: string, props: OpensearchDomainStackProps) { @@ -229,6 +230,6 @@ export class OpenSearchDomainStack extends Stack { }); this.createSSMParameters(domain, adminUserName, 
adminUserSecret, props.stage, deployId) - this.generateTargetClusterYaml(domain, adminUserName, adminUserSecret) + this.generateTargetClusterYaml(domain, adminUserName, adminUserSecret, props.version) } } diff --git a/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts b/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts index a67ad6eb5..42d4a0995 100644 --- a/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts +++ b/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts @@ -16,7 +16,7 @@ import { import {StreamingSourceType} from "../streaming-source-type"; import {LogGroup, RetentionDays} from "aws-cdk-lib/aws-logs"; import {Fn, RemovalPolicy} from "aws-cdk-lib"; -import {MetadataMigrationYaml, ServicesYaml} from "../migration-services-yaml"; +import {ClusterYaml, MetadataMigrationYaml, ServicesYaml} from "../migration-services-yaml"; import {ELBTargetGroup, MigrationServiceCore} from "./migration-service-core"; import { OtelCollectorSidecar } from "./migration-otel-collector-sidecar"; import { SharedLogFileSystem } from "../components/shared-log-file-system"; @@ -33,7 +33,7 @@ export interface MigrationConsoleProps extends StackPropsExt { readonly targetGroups?: ELBTargetGroup[], readonly servicesYaml: ServicesYaml, readonly otelCollectorEnabled?: boolean, - readonly sourceClusterDisabled?: boolean, + readonly sourceCluster?: ClusterYaml, } export class MigrationConsoleStack extends MigrationServiceCore { @@ -151,10 +151,6 @@ export class MigrationConsoleStack extends MigrationServiceCore { ...props, parameter: MigrationSSMParameter.OS_CLUSTER_ENDPOINT, }); - const sourceClusterEndpoint = props.sourceClusterDisabled ? 
null : getMigrationStringParameterValue(this, { - ...props, - parameter: MigrationSSMParameter.SOURCE_CLUSTER_ENDPOINT, - }); const brokerEndpoints = props.streamingSourceType != StreamingSourceType.DISABLED ? getMigrationStringParameterValue(this, { ...props, @@ -236,18 +232,15 @@ export class MigrationConsoleStack extends MigrationServiceCore { ] }) - const getSecretsPolicy = props.servicesYaml.target_cluster.basic_auth?.password_from_secret_arn ? - getTargetPasswordAccessPolicy(props.servicesYaml.target_cluster.basic_auth.password_from_secret_arn) : null; + const getTargetSecretsPolicy = props.servicesYaml.target_cluster.auth.basicAuth?.password_from_secret_arn ? + getTargetPasswordAccessPolicy(props.servicesYaml.target_cluster.auth.basicAuth?.password_from_secret_arn) : null; + + const getSourceSecretsPolicy = props.servicesYaml.source_cluster?.auth.basicAuth?.password_from_secret_arn ? + getTargetPasswordAccessPolicy(props.servicesYaml.source_cluster?.auth.basicAuth?.password_from_secret_arn) : null; // Upload the services.yaml file to Parameter Store let servicesYaml = props.servicesYaml - if (!props.sourceClusterDisabled && sourceClusterEndpoint) { - servicesYaml.source_cluster = { - 'endpoint': sourceClusterEndpoint, - // TODO: We're not currently supporting auth here, this may need to be handled on the migration console - 'no_auth': '' - } - } + servicesYaml.source_cluster = props.sourceCluster servicesYaml.metadata_migration = new MetadataMigrationYaml(); if (props.otelCollectorEnabled) { const otelSidecarEndpoint = OtelCollectorSidecar.getOtelLocalhostEndpoint(); @@ -277,7 +270,9 @@ export class MigrationConsoleStack extends MigrationServiceCore { const openSearchServerlessPolicy = createOpenSearchServerlessIAMAccessPolicy(this.partition, this.region, this.account) let servicePolicies = [sharedLogFileSystem.asPolicyStatement(), openSearchPolicy, openSearchServerlessPolicy, ecsUpdateServicePolicy, clusterTasksPolicy, listTasksPolicy, 
artifactS3PublishPolicy, describeVPCPolicy, getSSMParamsPolicy, getMetricsPolicy, - ...(getSecretsPolicy ? [getSecretsPolicy] : []) // only add secrets policy if it's non-null + // only add secrets policies if they're non-null + ...(getTargetSecretsPolicy ? [getTargetSecretsPolicy] : []), + ...(getSourceSecretsPolicy ? [getSourceSecretsPolicy] : []) ] if (props.streamingSourceType === StreamingSourceType.AWS_MSK) { const mskAdminPolicies = this.createMSKAdminIAMPolicies(props.stage, props.defaultDeployId) diff --git a/deployment/cdk/opensearch-service-migration/lib/service-stacks/reindex-from-snapshot-stack.ts b/deployment/cdk/opensearch-service-migration/lib/service-stacks/reindex-from-snapshot-stack.ts index 358952db2..17a6502f9 100644 --- a/deployment/cdk/opensearch-service-migration/lib/service-stacks/reindex-from-snapshot-stack.ts +++ b/deployment/cdk/opensearch-service-migration/lib/service-stacks/reindex-from-snapshot-stack.ts @@ -11,9 +11,10 @@ import { createOpenSearchServerlessIAMAccessPolicy, getTargetPasswordAccessPolicy, getMigrationStringParameterValue, - parseAndMergeArgs + parseAndMergeArgs, + ClusterAuth } from "../common-utilities"; -import { ClusterYaml, RFSBackfillYaml, SnapshotYaml } from "../migration-services-yaml"; +import { RFSBackfillYaml, SnapshotYaml } from "../migration-services-yaml"; import { OtelCollectorSidecar } from "./migration-otel-collector-sidecar"; import { SharedLogFileSystem } from "../components/shared-log-file-system"; @@ -23,7 +24,7 @@ export interface ReindexFromSnapshotProps extends StackPropsExt { readonly fargateCpuArch: CpuArchitecture, readonly extraArgs?: string, readonly otelCollectorEnabled: boolean, - readonly clusterAuthDetails: ClusterYaml + readonly clusterAuthDetails: ClusterAuth } export class ReindexFromSnapshotStack extends MigrationServiceCore { @@ -68,24 +69,25 @@ export class ReindexFromSnapshotStack extends MigrationServiceCore { }); const s3Uri = 
`s3://migration-artifacts-${this.account}-${props.stage}-${this.region}/rfs-snapshot-repo`; let rfsCommand = `/rfs-app/runJavaWithClasspath.sh com.rfs.RfsMigrateDocuments --s3-local-dir /tmp/s3_files --s3-repo-uri ${s3Uri} --s3-region ${this.region} --snapshot-name rfs-snapshot --lucene-dir '/lucene' --target-host ${osClusterEndpoint}` + rfsCommand = props.clusterAuthDetails.sigv4 ? rfsCommand.concat(` --target-aws-service-signing-name ${props.clusterAuthDetails.sigv4.serviceSigningName} --target-aws-region ${props.clusterAuthDetails.sigv4.region}`) : rfsCommand rfsCommand = props.otelCollectorEnabled ? rfsCommand.concat(` --otel-collector-endpoint ${OtelCollectorSidecar.getOtelLocalhostEndpoint()}`) : rfsCommand rfsCommand = parseAndMergeArgs(rfsCommand, props.extraArgs); let targetUser = ""; let targetPassword = ""; let targetPasswordArn = ""; - if (props.clusterAuthDetails.basic_auth) { - targetUser = props.clusterAuthDetails.basic_auth.username, - targetPassword = props.clusterAuthDetails.basic_auth.password? props.clusterAuthDetails.basic_auth.password : "", - targetPasswordArn = props.clusterAuthDetails.basic_auth.password_from_secret_arn? 
props.clusterAuthDetails.basic_auth.password_from_secret_arn : "" + if (props.clusterAuthDetails.basicAuth) { + targetUser = props.clusterAuthDetails.basicAuth.username, + targetPassword = props.clusterAuthDetails.basicAuth.password || "", + targetPasswordArn = props.clusterAuthDetails.basicAuth.password_from_secret_arn || "" }; const sharedLogFileSystem = new SharedLogFileSystem(this, props.stage, props.defaultDeployId); const openSearchPolicy = createOpenSearchIAMAccessPolicy(this.partition, this.region, this.account); const openSearchServerlessPolicy = createOpenSearchServerlessIAMAccessPolicy(this.partition, this.region, this.account); let servicePolicies = [sharedLogFileSystem.asPolicyStatement(), artifactS3PublishPolicy, openSearchPolicy, openSearchServerlessPolicy]; - const getSecretsPolicy = props.clusterAuthDetails.basic_auth?.password_from_secret_arn ? - getTargetPasswordAccessPolicy(props.clusterAuthDetails.basic_auth.password_from_secret_arn) : null; + const getSecretsPolicy = props.clusterAuthDetails.basicAuth?.password_from_secret_arn ? 
+ getTargetPasswordAccessPolicy(props.clusterAuthDetails.basicAuth.password_from_secret_arn) : null; if (getSecretsPolicy) { servicePolicies.push(getSecretsPolicy); } diff --git a/deployment/cdk/opensearch-service-migration/lib/service-stacks/traffic-replayer-stack.ts b/deployment/cdk/opensearch-service-migration/lib/service-stacks/traffic-replayer-stack.ts index 8d40b2edb..8dcd964d2 100644 --- a/deployment/cdk/opensearch-service-migration/lib/service-stacks/traffic-replayer-stack.ts +++ b/deployment/cdk/opensearch-service-migration/lib/service-stacks/traffic-replayer-stack.ts @@ -1,11 +1,12 @@ import {StackPropsExt} from "../stack-composer"; import {IVpc, SecurityGroup} from "aws-cdk-lib/aws-ec2"; -import {CpuArchitecture, MountPoint, Volume} from "aws-cdk-lib/aws-ecs"; +import {CpuArchitecture} from "aws-cdk-lib/aws-ecs"; import {Construct} from "constructs"; import {join} from "path"; import {MigrationServiceCore} from "./migration-service-core"; import {Effect, PolicyStatement} from "aws-cdk-lib/aws-iam"; import { + ClusterAuth, MigrationSSMParameter, createMSKConsumerIAMPolicies, createOpenSearchIAMAccessPolicy, @@ -21,7 +22,7 @@ import { SharedLogFileSystem } from "../components/shared-log-file-system"; export interface TrafficReplayerProps extends StackPropsExt { readonly vpc: IVpc, - readonly enableClusterFGACAuth: boolean, + readonly clusterAuthDetails: ClusterAuth, readonly streamingSourceType: StreamingSourceType, readonly fargateCpuArch: CpuArchitecture, readonly addOnMigrationId?: string, @@ -80,15 +81,12 @@ export class TrafficReplayerStack extends MigrationServiceCore { const groupId = props.customKafkaGroupId ? 
props.customKafkaGroupId : `logging-group-${deployId}` let replayerCommand = `/runJavaWithClasspath.sh org.opensearch.migrations.replay.TrafficReplayer ${osClusterEndpoint} --insecure --kafka-traffic-brokers ${brokerEndpoints} --kafka-traffic-topic logging-traffic-topic --kafka-traffic-group-id ${groupId}` - if (props.enableClusterFGACAuth) { - const osUserAndSecret = getMigrationStringParameterValue(this, { - ...props, - parameter: MigrationSSMParameter.OS_USER_AND_SECRET_ARN, - }); - replayerCommand = replayerCommand.concat(` --auth-header-user-and-secret ${osUserAndSecret}`) + if (props.clusterAuthDetails.basicAuth) { + replayerCommand = replayerCommand.concat(` --auth-header-user-and-secret "${props.clusterAuthDetails.basicAuth.username} ${props.clusterAuthDetails.basicAuth.password_from_secret_arn}"`) } replayerCommand = props.streamingSourceType === StreamingSourceType.AWS_MSK ? replayerCommand.concat(" --kafka-traffic-enable-msk-auth") : replayerCommand replayerCommand = props.userAgentSuffix ? replayerCommand.concat(` --user-agent ${props.userAgentSuffix}`) : replayerCommand + replayerCommand = props.clusterAuthDetails.sigv4 ? replayerCommand.concat(` --sigv4-auth-header-service-region ${props.clusterAuthDetails.sigv4.serviceSigningName},${props.clusterAuthDetails.sigv4.region}`) : replayerCommand replayerCommand = props.otelCollectorEnabled ? 
replayerCommand.concat(` --otelCollectorEndpoint ${OtelCollectorSidecar.getOtelLocalhostEndpoint()}`) : replayerCommand replayerCommand = parseAndMergeArgs(replayerCommand, props.extraArgs); diff --git a/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts b/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts index fcd8eeb41..04d5b54d0 100644 --- a/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts +++ b/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts @@ -16,9 +16,9 @@ import {KafkaStack} from "./service-stacks/kafka-stack"; import {Application} from "@aws-cdk/aws-servicecatalogappregistry-alpha"; import {OpenSearchContainerStack} from "./service-stacks/opensearch-container-stack"; import {determineStreamingSourceType, StreamingSourceType} from "./streaming-source-type"; -import {MigrationSSMParameter, parseRemovalPolicy, validateFargateCpuArch} from "./common-utilities"; +import {MigrationSSMParameter, parseRemovalPolicy, validateFargateCpuArch, parseClusterDefinition, ClusterNoAuth, ClusterAuth} from "./common-utilities"; import {ReindexFromSnapshotStack} from "./service-stacks/reindex-from-snapshot-stack"; -import {ClusterBasicAuth, ServicesYaml} from "./migration-services-yaml"; +import {ClusterYaml, ServicesYaml} from "./migration-services-yaml"; export interface StackPropsExt extends StackProps { readonly stage: string, @@ -85,7 +85,7 @@ export class StackComposer { } else if (engineVersionString && engineVersionString.startsWith("ES_")) { version = EngineVersion.elasticsearch(engineVersionString.substring(3)) } else { - throw new Error("Engine version is not present or does not match the expected format, i.e. OS_1.3 or ES_7.9") + throw new Error(`Engine version (${engineVersionString}) is not present or does not match the expected format, i.e. 
OS_1.3 or ES_7.9`) } return version } @@ -144,9 +144,8 @@ export class StackComposer { const stage = this.getContextForType('stage', 'string', defaultValues, contextJSON) - let version: EngineVersion - const domainName = this.getContextForType('domainName', 'string', defaultValues, contextJSON) + const engineVersion = this.getContextForType('engineVersion', 'string', defaultValues, contextJSON) const domainAZCount = this.getContextForType('domainAZCount', 'number', defaultValues, contextJSON) const dataNodeType = this.getContextForType('dataNodeType', 'string', defaultValues, contextJSON) const dataNodeCount = this.getContextForType('dataNodeCount', 'number', defaultValues, contextJSON) @@ -202,17 +201,70 @@ export class StackComposer { const captureProxyExtraArgs = this.getContextForType('captureProxyExtraArgs', 'string', defaultValues, contextJSON) const elasticsearchServiceEnabled = this.getContextForType('elasticsearchServiceEnabled', 'boolean', defaultValues, contextJSON) const kafkaBrokerServiceEnabled = this.getContextForType('kafkaBrokerServiceEnabled', 'boolean', defaultValues, contextJSON) - const targetClusterEndpoint = this.getContextForType('targetClusterEndpoint', 'string', defaultValues, contextJSON) const fetchMigrationEnabled = this.getContextForType('fetchMigrationEnabled', 'boolean', defaultValues, contextJSON) const dpPipelineTemplatePath = this.getContextForType('dpPipelineTemplatePath', 'string', defaultValues, contextJSON) - const sourceClusterDisabled = this.getContextForType('sourceClusterDisabled', 'boolean', defaultValues, contextJSON) - const sourceClusterEndpoint = this.getContextForType('sourceClusterEndpoint', 'string', defaultValues, contextJSON) const osContainerServiceEnabled = this.getContextForType('osContainerServiceEnabled', 'boolean', defaultValues, contextJSON) const otelCollectorEnabled = this.getContextForType('otelCollectorEnabled', 'boolean', defaultValues, contextJSON) const reindexFromSnapshotServiceEnabled = 
this.getContextForType('reindexFromSnapshotServiceEnabled', 'boolean', defaultValues, contextJSON) const reindexFromSnapshotExtraArgs = this.getContextForType('reindexFromSnapshotExtraArgs', 'string', defaultValues, contextJSON) const albAcmCertArn = this.getContextForType('albAcmCertArn', 'string', defaultValues, contextJSON); + // We're in a transition state from an older model with limited, individually defined fields and heading towards objects + // that fully define the source and target cluster configurations. For the time being, we're supporting both. + const sourceClusterDisabledField = this.getContextForType('sourceClusterDisabled', 'boolean', defaultValues, contextJSON) + const sourceClusterEndpointField = this.getContextForType('sourceClusterEndpoint', 'string', defaultValues, contextJSON) + let sourceClusterDefinition = this.getContextForType('sourceCluster', 'object', defaultValues, contextJSON) + + if (!sourceClusterDefinition && (sourceClusterEndpointField || sourceClusterDisabledField)) { + console.warn("`sourceClusterDisabled` and `sourceClusterEndpoint` are being deprecated in favor of a `sourceCluster` object.") + console.warn("Please update your CDK context block to use the `sourceCluster` object.") + sourceClusterDefinition = { + "disabled": sourceClusterDisabledField, + "endpoint": sourceClusterEndpointField, + "auth": {"type": "none"} + } + } + const sourceClusterDisabled = sourceClusterDefinition?.disabled ? true : false + const sourceCluster = (sourceClusterDefinition && !sourceClusterDisabled) ? 
parseClusterDefinition(sourceClusterDefinition) : undefined + const sourceClusterEndpoint = sourceCluster?.endpoint + + const targetClusterEndpointField = this.getContextForType('targetClusterEndpoint', 'string', defaultValues, contextJSON) + let targetClusterDefinition = this.getContextForType('targetCluster', 'object', defaultValues, contextJSON) + const usePreexistingTargetCluster = !!(targetClusterEndpointField || targetClusterDefinition) + if (!targetClusterDefinition && usePreexistingTargetCluster) { + console.warn("`targetClusterEndpoint` is being deprecated in favor of a `targetCluster` object.") + console.warn("Please update your CDK context block to use the `targetCluster` object.") + let auth: any = {"type": "none"} + if (fineGrainedManagerUserName || fineGrainedManagerUserSecretManagerKeyARN) { + console.warn(`Use of ${fineGrainedManagerUserName} and ${fineGrainedManagerUserSecretManagerKeyARN} with a preexisting target cluster + will be deprecated in favor of using a \`targetCluster\` object. Please update your CDK context block.`) + auth = { + "type": "basic", + "username": fineGrainedManagerUserName, + "passwordFromSecretArn": fineGrainedManagerUserSecretManagerKeyARN + } + } + targetClusterDefinition = {"endpoint": targetClusterEndpointField, "auth": auth} + } + const targetCluster = usePreexistingTargetCluster ? 
parseClusterDefinition(targetClusterDefinition) : undefined + + // Ensure that target cluster username and password are not defined in multiple places + if (targetCluster && (fineGrainedManagerUserName || fineGrainedManagerUserSecretManagerKeyARN)) { + throw new Error("The `fineGrainedManagerUserName` and `fineGrainedManagerUserSecretManagerKeyARN` can only be used when a domain is being " + + "provisioned by this tooling, which is contraindicated by `targetCluster` being provided.") + } + + // Ensure that target version is not defined in multiple places, but `engineVersion` is set as a default value, so this is + // a warning instead of an error. + if (usePreexistingTargetCluster && engineVersion) { + console.warn("The `engineVersion` value will be ignored because it's only used when a domain is being provisioned by this tooling" + + " and in this case, `targetCluster` was provided to define an existing target cluster." + ) + } + + const targetClusterAuth = targetCluster?.auth + const targetVersion = this.getEngineVersion(targetCluster?.version || engineVersion) + const requiredFields: { [key: string]: any; } = {"stage":stage, "domainName":domainName} for (let key in requiredFields) { if (!requiredFields[key]) {
targetClusterEndpoint : "https://opensearch:9200" + let preexistingOrContainerTargetEndpoint + if (targetCluster && osContainerServiceEnabled) { + throw new Error("The following options are mutually exclusive as only one target cluster can be specified for a given deployment: [targetCluster, osContainerServiceEnabled]") + } else if (targetCluster || osContainerServiceEnabled) { + preexistingOrContainerTargetEndpoint = targetCluster?.endpoint || "https://opensearch:9200" } const fargateCpuArch = validateFargateCpuArch(defaultFargateCpuArch) @@ -239,9 +291,6 @@ export class StackComposer { streamingSourceType = StreamingSourceType.DISABLED } - const engineVersion = this.getContextForType('engineVersion', 'string', defaultValues, contextJSON) - version = this.getEngineVersion(engineVersion) - const tlsSecurityPolicyName = this.getContextForType('tlsSecurityPolicy', 'string', defaultValues, contextJSON) const tlsSecurityPolicy: TLSSecurityPolicy|undefined = tlsSecurityPolicyName ? TLSSecurityPolicy[tlsSecurityPolicyName as keyof typeof TLSSecurityPolicy] : undefined if (tlsSecurityPolicyName && !tlsSecurityPolicy) { @@ -259,8 +308,8 @@ export class StackComposer { trafficReplayerCustomUserAgent = trafficReplayerUserAgentSuffix ? 
trafficReplayerUserAgentSuffix : props.customReplayerUserAgent } - if (sourceClusterDisabled && (sourceClusterEndpoint || captureProxyESServiceEnabled || elasticsearchServiceEnabled || captureProxyServiceEnabled)) { - throw new Error("sourceClusterDisabled is mutually exclusive with [sourceClusterEndpoint, captureProxyESServiceEnabled, elasticsearchServiceEnabled, captureProxyServiceEnabled]"); + if (sourceClusterDisabled && (sourceCluster || captureProxyESServiceEnabled || elasticsearchServiceEnabled || captureProxyServiceEnabled)) { + throw new Error("Disabling the source cluster is mutually exclusive with specifying one via: [sourceCluster, captureProxyESServiceEnabled, elasticsearchServiceEnabled, captureProxyServiceEnabled]"); } const deployId = addOnMigrationDeployId ? addOnMigrationDeployId : defaultDeployId @@ -271,7 +320,7 @@ export class StackComposer { networkStack = new NetworkStack(scope, `networkStack-${deployId}`, { vpcId: vpcId, vpcAZCount: vpcAZCount, - targetClusterEndpoint: targetEndpoint, + targetClusterEndpoint: preexistingOrContainerTargetEndpoint, stackName: `OSMigrations-${stage}-${region}-${deployId}-NetworkInfra`, description: "This stack contains resources to create/manage networking for an OpenSearch Service domain", stage: stage, @@ -285,8 +334,8 @@ export class StackComposer { migrationAPIEnabled, sourceClusterDisabled, sourceClusterEndpoint, - targetClusterUsername: fineGrainedManagerUserName, - targetClusterPasswordSecretArn: fineGrainedManagerUserSecretManagerKeyARN, + targetClusterUsername: targetCluster ? targetClusterAuth?.basicAuth?.username : fineGrainedManagerUserName, + targetClusterPasswordSecretArn: targetCluster ? 
targetClusterAuth?.basicAuth?.password_from_secret_arn : fineGrainedManagerUserSecretManagerKeyARN, env: props.env }) this.stacks.push(networkStack) @@ -296,9 +345,9 @@ export class StackComposer { // There is an assumption here that for any deployment we will always have a target cluster, whether that be a // created Domain like below or an imported one let openSearchStack - if (!targetEndpoint) { + if (!preexistingOrContainerTargetEndpoint) { openSearchStack = new OpenSearchDomainStack(scope, `openSearchDomainStack-${deployId}`, { - version: version, + version: targetVersion, domainName: domainName, dataNodeInstanceType: dataNodeType, dataNodes: dataNodeCount, @@ -340,13 +389,8 @@ export class StackComposer { this.stacks.push(openSearchStack) servicesYaml.target_cluster = openSearchStack.targetClusterYaml; } else { - servicesYaml.target_cluster = { endpoint: targetEndpoint } - if (fineGrainedManagerUserName && fineGrainedManagerUserSecretManagerKeyARN) { - servicesYaml.target_cluster.basic_auth = new ClusterBasicAuth({username: fineGrainedManagerUserName, - password_from_secret_arn: fineGrainedManagerUserSecretManagerKeyARN - }) - } else { - servicesYaml.target_cluster.no_auth = "" + if (targetCluster) { + servicesYaml.target_cluster = targetCluster } } @@ -387,6 +431,10 @@ export class StackComposer { }) this.addDependentStacks(osContainerStack, [migrationStack]) this.stacks.push(osContainerStack) + servicesYaml.target_cluster = new ClusterYaml({ + endpoint: preexistingOrContainerTargetEndpoint || "", + auth: new ClusterAuth({noAuth: new ClusterNoAuth()}) + }) } let kafkaBrokerStack @@ -426,7 +474,7 @@ export class StackComposer { reindexFromSnapshotStack = new ReindexFromSnapshotStack(scope, "reindexFromSnapshotStack", { vpc: networkStack.vpc, extraArgs: reindexFromSnapshotExtraArgs, - clusterAuthDetails: servicesYaml.target_cluster, + clusterAuthDetails: servicesYaml.target_cluster?.auth, stackName: `OSMigrations-${stage}-${region}-ReindexFromSnapshot`, 
description: "This stack contains resources to assist migrating historical data, via Reindex from Snapshot, to a target cluster", stage: stage, @@ -465,7 +513,7 @@ export class StackComposer { if ((trafficReplayerServiceEnabled && networkStack && migrationStack) || (addOnMigrationDeployId && networkStack)) { trafficReplayerStack = new TrafficReplayerStack(scope, `traffic-replayer-${deployId}`, { vpc: networkStack.vpc, - enableClusterFGACAuth: trafficReplayerEnableClusterFGACAuth, + clusterAuthDetails: servicesYaml.target_cluster.auth, addOnMigrationDeployId: addOnMigrationDeployId, customKafkaGroupId: trafficReplayerGroupId, userAgentSuffix: trafficReplayerCustomUserAgent, @@ -560,7 +608,7 @@ export class StackComposer { migrationAPIEnabled: migrationAPIEnabled, servicesYaml: servicesYaml, migrationAPIAllowedHosts: migrationAPIAllowedHosts, - sourceClusterDisabled, + sourceCluster, stackName: `OSMigrations-${stage}-${region}-MigrationConsole`, description: "This stack contains resources for the Migration Console ECS service", stage: stage, diff --git a/deployment/cdk/opensearch-service-migration/options.md b/deployment/cdk/opensearch-service-migration/options.md index 1591048ab..f66f43875 100644 --- a/deployment/cdk/opensearch-service-migration/options.md +++ b/deployment/cdk/opensearch-service-migration/options.md @@ -1,8 +1,10 @@ ## Configuration Options + These tables list all CDK context configuration values a user can specify for this project. These will normally be added to the `cdk.context.json` file in the same directory as this markdown file. 
### Migration Service Options + | Name | Type | Example | Description | | ------------------------------------ | ------- | -------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | migrationAssistanceEnabled | boolean | true | Flag which controls deploying common Migration Service resources such as MSK, EFS, and an ECS cluster. **Note**: This option must be enabled to use any Migration service | @@ -10,8 +12,6 @@ These tables list all CDK context configuration values a user can specify for th | defaultFargateCpuArch | string | "X86_64", "ARM64" | Provide a default CPU architecture that should be used for all containers. Defaults to using `process.arch` to determine the proper architecture to use | | replayerOutputEFSRemovalPolicy | string | "DESTROY" | Policy to apply when the Replayer output EFS filesystem is removed from the CloudFormation stack | | artifactBucketRemovalPolicy | string | "RETAIN" | Policy to apply when the artifact S3 bucket is removed from the CloudFormation stack | -| captureProxyESServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack | -| captureProxyESExtraArgs | string | `"--suppressCaptureForHeaderMatch user-agent .*elastic-java/7.17.0.*"` | Extra arguments to provide to the Capture Proxy command. 
This includes available arguments specified by the [Capture Proxy](../../../TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/CaptureProxy.java). [^1] | | migrationConsoleServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack | | migrationAPIEnabled | boolean | true | **Experimental** flag for enabling an API on the Migration Console for controlling migration actions | | migrationAPIAllowedHosts | string | "test-endpoint1, localhost" | Comma delimited string of host or domain names that the API will serve. Other domains will receive a 400: bad request error | @@ -19,34 +19,71 @@ These tables list all CDK context configuration values a user can specify for th | trafficReplayerServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack | | captureProxyServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack | | captureProxyExtraArgs | string | `"--suppressCaptureForHeaderMatch user-agent .*elastic-java/7.17.0.*"` | Extra arguments to provide to the Capture Proxy command. This includes available arguments specified by the [Capture Proxy](../../../TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/CaptureProxy.java). [^1] | -| elasticsearchServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack | | kafkaBrokerServiceEnabled | boolean | false | Enable deploying the given service, via a new CloudFormation stack. **This stack is experimental and should only be used for development** | -| osContainerServiceEnabled | boolean | false | Enable deploying the given service, via a new CloudFormation stack. | | otelCollectorEnabled | boolean | true | Enable deploying an otel-collector within each service that uses it, in order to publish data to CloudWatch and X-Ray. 
| | trafficReplayerEnableClusterFGACAuth | boolean | true | Use the configured FGAC manager user for the OpenSearch Domain for auth of replayed requests. **Note**: This is only applicable if this CDK has setup an OpenSearch Domain
and a FGAC user was setup either by the `fineGrainedManagerUserSecretManagerKeyARN` or `enableDemoAdmin` option | | trafficReplayerGroupId | string | "logging-group-default" | The Kafka consumer group ID the Replayer will use, if not specified a default ID will be used | | trafficReplayerUserAgentSuffix | string | "AWS/test/v1.0.0" | A custom user agent that will be provided to the Replayer using the `--user-agent` flag. This will append the provided user agent to any existing user agents when requests are made to the target cluster. This setting could also be specified with the `trafficReplayerExtraArgs` option | | trafficReplayerExtraArgs | string | "--sigv4-auth-header-service-region es,us-east-1 --speedup-factor 5" | Extra arguments to provide to the Replayer command. This includes auth header options and other parameters supported by the [Traffic Replayer](../../../TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java). [^1] | | trafficReplayerMaxUptime | string | "P1D" | The maximum uptime for the Traffic Replayer service, specified in ISO 8601 duration format. This controls how long the Traffic Replayer will run before automatically shutting down. Example values: "PT1H" (hourly), "P1D" (daily). When this duration is reached, ECS will initiate the startup of a new Traffic Replayer task to ensure continuous operation. This mechanism ensures that the Traffic Replayer service can manage its resources effectively and prevent issues associated with long running processes. Set to the greater of the given value or 5 minutes. When not specified, the replayer will run continuously. | -| sourceClusterDisabled | boolean | true | Disable configuring a source cluster in any way. This is incompatible with specifying any type of capture proxy or a source cluster endpoint. It's suitable for backfill migrations using ReindexFromSnapshot from an already-existing snapshot. 
| -| sourceClusterEndpoint | string | `"https://my-source-cluster.com:443"` | The URI of the source cluster from that the migration will reference. **Note**: if this is not provided and elasticsearchService or captureProxyESService is enabled, the migration will reference a uri for that service. | | albAcmCertArn | string | `"arn:aws:acm:us-east-1:12345678912:certificate/abc123de-4888-4fa7-a508-3811e2d49fc3"` | The ACM certificate ARN to use for the ALB. If not specified, and an alb is required based on other deployment parameters, a custom resource will be deployed to create one. If creation must happen locally, a script has been provded to create and upload a cert and can be invoked with `npm run create-acm-cert` and will return the uploaded cert arn. | | targetClusterProxyServiceEnabled | boolean | false | Enable a non-capturing proxy to use a load balancer against a managed OpenSearch cluster. This is needed to enable alb cuttover to a managed opensearch cluster as there is no way to directly route an ALB listener to an AWS Managed OpenSearch Service. If this is specified an ALB will be deployed with listener. | -### Fetch Migration Service Options -| Name | Type | Example | Description | -| ---------------------- | ------- | --------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| fetchMigrationEnabled | boolean | false | Creates ECS resources to enable the kick off of historical Fetch Migration tasks from the Migration Console | -| dpPipelineTemplatePath | string | "path/to/config.yaml" | Path to a local Data Prepper pipeline configuration YAML file that Fetch Migration will use to derive source and target cluster endpoints and other settings. Default value is the included template file i.e. 
[dp_pipeline_template.yaml](dp_pipeline_template.yaml) | +### Cluster Definition Options + +| Name | Type | Example | Description | +| ------------- | ------ | -------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------- | +| sourceCluster | object | {"endpoint": "https://source-cluster.com", "auth": {"type": "basic", "username": "admin", "password": "admin"}} | A json object defining the source cluster endpoint, auth type, and other details. See below for detailed options. | +| targetCluster | object | {"endpoint": "https://target-cluster.com", "auth": {"type": "sigv4", "serviceSigningName": "aoss", "region": "us-east-1"}} | A json object defining the target cluster endpoint, auth type, and other details. See below for detailed options. | + +#### Structure of the cluster objects + +If no source cluster is being configured, the source cluster object should be `{"disabled": true}` and no other fields are necessary. + +In all other cases, the required components of each cluster object are: + +- `endpoint` -- the fully specified endpoint for the cluster +- `auth` -- what authorization strategy the cluster has. The supported options are: + 1. No auth: `{"type": "none"}` + 2. Sigv4 Signing: `{"type": "sigv4", "region": "us-east-1", "serviceSigningName": "es"}` The serviceSigningName is `es` for Elasticsearch and OpenSearch managed service domains, and `aoss` for Amazon OpenSearch Serverless + 3. Basic auth with plaintext password (only supported for the source cluster and not recommended): `{"type": "basic", "username": "admin", "password": "admin123"}` + 4. 
Basic auth with password in secrets manager (recommended): `{"type": "basic", "username": "admin", "passwordFromSecretArn": "arn:aws:secretsmanager:us-east-1:12345678912:secret:master-user-os-pass-123abc"}` + +The optional component is: + +- `version` -- the Elasticsearch or OpenSearch version of the cluster, in the format of `OS_x.y` or `ES_x.y` ### Reindex from Snapshot (RFS) Service Options + | Name | Type | Example | Description | | --------------------------------- | ------- | -------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | reindexFromSnapshotServiceEnabled | boolean | true | Create resources for deploying and configuring the RFS ECS service | | reindexFromSnapshotExtraArgs | string | "--target-aws-region us-east-1 --target-aws-service-signing-name es" | Extra arguments to provide to the Document Migration command with space separation. See [RFS Arguments](../../../DocumentsFromSnapshotMigration/README.md#Arguments). [^1] | | sourceClusterEndpoint | string | `"https://source-cluster.elb.us-east-1.endpoint.com"` | The endpoint for the source cluster from which RFS will take a snapshot | +### VPC Options + +| Name | Type | Example | Description | +| ---------- | ------- | ----------------------- | :---------------------------------------------------------------------------------------------------------------------------------------- | +| vpcEnabled | boolean | true | Enable VPC to place Domain and Migration resources in. If a `vpcId` is not provided a new VPC will be created | +| vpcId | string | "vpc-123456789abcdefgh" | Specify an existing VPC to place the domain inside of | +| vpcAZCount | number | 2 | The number of Availability Zones for the created VPC. 
**Note**: Only applicable if creating a new VPC (thus `vpcId` must not be provided) | + +### MSK(Kafka) Options + +| Name | Type | Example | Description | +| -------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| mskBrokersPerAZCount | number | 1 | The number of broker nodes per MSK Availability Zone | +| mskSubnetIds | string array | ["subnet-123456789abcdefgh", "subnet-223456789abcdefgh"] | Specify the subnet IDs of an existing VPC to place MSK brokers in. **NOTE** MSK currently requires either 2 or 3 subnets to be specified and EACH subnet should use a different Availability Zone. | +| mskAZCount | number | 2 | The number of Availability Zones for the MSK cluster to use. **NOTE** This value must be 2 or 3 | +| mskARN (Not currently available) | string | `"arn:aws:kafka:us-east-2:12345678912:cluster/msk-cluster-test/81fbae45-5d25-44bb-aff0-108e71cc079b-7"` | Supply an existing MSK cluster ARN to use. **NOTE** As MSK is using an L1 construct this is not currently available for use | + +## Options being deprecated + +A number of options are currently available but deprecated. While they function now, we do not recommend using them, and they may be removed without warning in a future version. 
+ ### OpenSearch Domain Options + | Name | Type | Example | Description | | ----------------------------------------------- | ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | engineVersion | string | "OS_1.3" | The Elasticsearch/OpenSearch version that your domain will leverage. In the format of `OS_x.y` or `ES_x.y` | @@ -80,26 +117,35 @@ These tables list all CDK context configuration values a user can specify for th | openAccessPolicyEnabled | boolean | false | Applies an open access policy to the Domain. **NOTE**: This setting should only be used for Domains placed within a VPC, and is applicable to many use cases where access controlled by Security Groups on the VPC is sufficient. 
| | domainRemovalPolicy | string | "RETAIN" | Policy to apply when the domain is removed from the CloudFormation stack | +### Fetch Migration Service Options + +| Name | Type | Example | Description | +| ---------------------- | ------- | --------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| fetchMigrationEnabled | boolean | false | Creates ECS resources to enable the kick off of historical Fetch Migration tasks from the Migration Console | +| dpPipelineTemplatePath | string | "path/to/config.yaml" | Path to a local Data Prepper pipeline configuration YAML file that Fetch Migration will use to derive source and target cluster endpoints and other settings. Default value is the included template file i.e. [dp_pipeline_template.yaml](dp_pipeline_template.yaml) | + ### Imported Target Cluster Options + | Name | Type | Example | Description | | --------------------- | ------ | ------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | targetClusterEndpoint | string | `"https://vpc-demo-opensearch-cluster-cv6hggdb66ybpk4kxssqt6zdhu.us-west-2.es.amazonaws.com:443"` | The target cluster endpoint which will be used to replay captured traffic against. 
This endpoint must be in the same VPC as the Migration services to allow for proper linking | +### Imported Source Cluster Options -### VPC Options -| Name | Type | Example | Description | -| ---------- | ------- | ----------------------- | :---------------------------------------------------------------------------------------------------------------------------------------- | -| vpcEnabled | boolean | true | Enable VPC to place Domain and Migration resources in. If a `vpcId` is not provided a new VPC will be created | -| vpcId | string | "vpc-123456789abcdefgh" | Specify an existing VPC to place the domain inside of | -| vpcAZCount | number | 2 | The number of Availability Zones for the created VPC. **Note**: Only applicable if creating a new VPC (thus `vpcId` must not be provided) | +| Name | Type | Example | Description | +| --------------------- | ------- | ------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| sourceClusterDisabled | boolean | true | Disable configuring a source cluster in any way. This is incompatible with specifying any type of capture proxy or a source cluster endpoint. It's suitable for backfill migrations using ReindexFromSnapshot from an already-existing snapshot. | +| sourceClusterEndpoint | string | `"https://my-source-cluster.com:443"` | The URI of the source cluster that the migration will reference. **Note**: if this is not provided and elasticsearchService or captureProxyESService is enabled, the migration will reference a URI for that service. 
| -### MSK(Kafka) Options -| Name | Type | Example | Description | -| -------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| mskBrokersPerAZCount | number | 1 | The number of broker nodes per MSK Availability Zone | -| mskSubnetIds | string array | ["subnet-123456789abcdefgh", "subnet-223456789abcdefgh"] | Specify the subnet IDs of an existing VPC to place MSK brokers in. **NOTE** MSK currently requires either 2 or 3 subnets to be specified and EACH subnet should use a different Availability Zone. | -| mskAZCount | number | 2 | The number of Availability Zones for the MSK cluster to use. **NOTE** This value must be 2 or 3 | -| mskARN (Not currently available) | string | `"arn:aws:kafka:us-east-2:12345678912:cluster/msk-cluster-test/81fbae45-5d25-44bb-aff0-108e71cc079b-7"` | Supply an existing MSK cluster ARN to use. **NOTE** As MSK is using an L1 construct this is not currently available for use | + +### Other Options + +| Name | Type | Example | Description | +| ---------------------------- | ------- | ---------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| captureProxyESServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack | +| captureProxyESExtraArgs | string | `"--suppressCaptureForHeaderMatch user-agent .*elastic-java/7.17.0.*"` | Extra arguments to provide to the Capture Proxy command. 
This includes available arguments specified by the [Capture Proxy](../../../TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/CaptureProxy.java). [^1] | +| elasticsearchServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack | +| osContainerServiceEnabled | boolean | false | Enable deploying the given service, via a new CloudFormation stack. | diff --git a/deployment/cdk/opensearch-service-migration/test/common-utilities.test.ts b/deployment/cdk/opensearch-service-migration/test/common-utilities.test.ts index 5bcadfa53..6c7a3659c 100644 --- a/deployment/cdk/opensearch-service-migration/test/common-utilities.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/common-utilities.test.ts @@ -1,5 +1,5 @@ import {CpuArchitecture} from "aws-cdk-lib/aws-ecs"; -import {parseAndMergeArgs, validateFargateCpuArch} from "../lib/common-utilities"; +import {parseAndMergeArgs, parseClusterDefinition, validateFargateCpuArch} from "../lib/common-utilities"; import {describe, test, expect} from '@jest/globals'; describe('validateFargateCpuArch', () => { @@ -114,4 +114,49 @@ describe('validateFargateCpuArch', () => { expect(result).toBe('node script.js'); }); + + test('parseClusterDefinition with basic auth parameters', () => { + const clusterDefinition = { + endpoint: 'https://target-cluster', + auth: { + type: 'basic', + username: 'admin', + passwordFromSecretArn: 'arn:aws:secretsmanager:us-east-1:12345678912:secret:master-user-os-pass-123abc' + } + } + const parsed = parseClusterDefinition(clusterDefinition); + expect(parsed).toBeDefined(); + expect(parsed.endpoint).toBe(clusterDefinition.endpoint); + expect(parsed.auth.basicAuth).toBeDefined(); + expect(parsed.auth.basicAuth?.username).toBe(clusterDefinition.auth.username); + expect(parsed.auth.basicAuth?.password_from_secret_arn).toBe(clusterDefinition.auth.passwordFromSecretArn); + }) + + test('parseClusterDefinition 
with no auth', () => { + const clusterDefinition = { + endpoint: 'XXXXXXXXXXXXXXXXXXXXXX', + auth: {"type": "none"} + } + const parsed = parseClusterDefinition(clusterDefinition); + expect(parsed).toBeDefined(); + expect(parsed.endpoint).toBe(clusterDefinition.endpoint); + expect(parsed.auth.noAuth).toBeDefined(); + }) + + test('parseClusterDefinition with sigv4 auth', () => { + const clusterDefinition = { + endpoint: 'XXXXXXXXXXXXXXXXXXXXXX', + auth: { + type: 'sigv4', + region: 'us-east-1', + serviceSigningName: 'es' + } + } + const parsed = parseClusterDefinition(clusterDefinition); + expect(parsed).toBeDefined(); + expect(parsed.endpoint).toBe(clusterDefinition.endpoint); + expect(parsed.auth.sigv4).toBeDefined(); + expect(parsed.auth.sigv4?.region).toBe(clusterDefinition.auth.region); + expect(parsed.auth.sigv4?.serviceSigningName).toBe(clusterDefinition.auth.serviceSigningName); + }) }) diff --git a/deployment/cdk/opensearch-service-migration/test/fetch-migration-stack.test.ts b/deployment/cdk/opensearch-service-migration/test/fetch-migration-stack.test.ts index 7f19c069b..ab402a717 100644 --- a/deployment/cdk/opensearch-service-migration/test/fetch-migration-stack.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/fetch-migration-stack.test.ts @@ -39,7 +39,10 @@ describe('FetchMigrationStack Tests', () => { migrationAssistanceEnabled: true, migrationConsoleServiceEnabled: true, fetchMigrationEnabled: true, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } }; const stacks = createStackComposer(contextOptions); @@ -66,7 +69,10 @@ describe('FetchMigrationStack Tests', () => { vpcEnabled: true, migrationAssistanceEnabled: true, migrationConsoleServiceEnabled: true, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } }; const stacks = createStackComposer(contextOptions); diff --git 
a/deployment/cdk/opensearch-service-migration/test/migration-services-yaml.test.ts b/deployment/cdk/opensearch-service-migration/test/migration-services-yaml.test.ts index e7103348b..e1284a185 100644 --- a/deployment/cdk/opensearch-service-migration/test/migration-services-yaml.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/migration-services-yaml.test.ts @@ -1,5 +1,6 @@ import { ContainerImage } from "aws-cdk-lib/aws-ecs"; -import { ClusterBasicAuth, ClusterYaml, RFSBackfillYaml, ServicesYaml, SnapshotYaml } from "../lib/migration-services-yaml" +import { ClusterAuth, ClusterBasicAuth, ClusterNoAuth } from "../lib/common-utilities" +import { ClusterYaml, RFSBackfillYaml, ServicesYaml, SnapshotYaml } from "../lib/migration-services-yaml" import { Template, Capture, Match } from "aws-cdk-lib/assertions"; import { MigrationConsoleStack } from "../lib/service-stacks/migration-console-stack"; import { createStackComposer } from "./test-utils"; @@ -27,9 +28,23 @@ describe('Migration Services YAML Tests', () => { expect(yaml).toBe("metrics_source:\n cloudwatch:\n"); }); + test('Test ClusterAuth.toDict', () => { + const clusterAuth = new ClusterAuth({noAuth: new ClusterNoAuth()}); + const dict = clusterAuth.toDict(); + expect(dict).toEqual({no_auth: ""}); + + const basicAuth = new ClusterAuth({basicAuth: new ClusterBasicAuth({username: "XXX", password: "123"})}); + const basicAuthDict = basicAuth.toDict(); + expect(basicAuthDict).toEqual({basic_auth: {username: "XXX", password: "123"}}); + }) + test('Test servicesYaml with target cluster can be stringified', () => { let servicesYaml = new ServicesYaml(); - const cluster: ClusterYaml = { 'endpoint': 'https://abc.com', 'no_auth': '' }; + + const cluster = new ClusterYaml({ + 'endpoint': 'https://abc.com', + auth: new ClusterAuth({noAuth: new ClusterNoAuth()}) + }); servicesYaml.target_cluster = cluster; expect(servicesYaml.target_cluster).toBeDefined(); @@ -39,13 +54,17 @@ describe('Migration Services YAML 
Tests', () => { test('Test servicesYaml with source and target cluster can be stringified', () => { let servicesYaml = new ServicesYaml(); - const targetCluster: ClusterYaml = { 'endpoint': 'https://abc.com', 'no_auth': '' }; + const targetCluster = new ClusterYaml({ + 'endpoint': 'https://abc.com', + auth: new ClusterAuth({noAuth: new ClusterNoAuth()}) + }); servicesYaml.target_cluster = targetCluster; const sourceClusterUser = "abc"; const sourceClusterPassword = "XXXXX"; - const sourceCluster: ClusterYaml = { 'endpoint': 'https://xyz.com:9200', - 'basic_auth': new ClusterBasicAuth({ username: sourceClusterUser, password: sourceClusterPassword }) - }; + const basicAuth = new ClusterBasicAuth({ username: sourceClusterUser, password: sourceClusterPassword }); + const sourceCluster = new ClusterYaml({ 'endpoint': 'https://xyz.com:9200', + 'auth': new ClusterAuth({basicAuth: basicAuth}), + }); servicesYaml.source_cluster = sourceCluster; expect(servicesYaml.target_cluster).toBeDefined(); @@ -66,7 +85,6 @@ describe('Migration Services YAML Tests', () => { rfsBackfillYaml.ecs.aws_region = region; servicesYaml.backfill = rfsBackfillYaml; - expect(servicesYaml.backfill).toBeDefined(); expect(servicesYaml.backfill).toBeDefined(); expect(servicesYaml.backfill instanceof RFSBackfillYaml).toBeTruthy(); @@ -100,12 +118,15 @@ describe('Migration Services YAML Tests', () => { expect(s3SnapshotDict).not.toHaveProperty("fs"); }); -test('Test that services yaml parameter is created by migration console stack', () => { +test('Test that services yaml parameter is created by migration console stack with target domain creation', () => { const contextOptions = { vpcEnabled: true, migrationAssistanceEnabled: true, migrationConsoleServiceEnabled: true, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + }, reindexFromSnapshotServiceEnabled: true, trafficReplayerServiceEnabled: true, 
fineGrainedManagerUserName: "admin", @@ -147,4 +168,59 @@ test('Test that services yaml parameter is created by migration console stack', expect(Object.keys(parsedFromYaml).length).toEqual(expectedFields.length) expect(new Set(Object.keys(parsedFromYaml))).toEqual(new Set(expectedFields)) }); + + + test('Test that services yaml parameter is created by migration console stack with provided target domain', () => { + const contextOptions = { + vpcEnabled: true, + migrationAssistanceEnabled: true, + migrationConsoleServiceEnabled: true, + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + }, + targetCluster: { + "endpoint": "https://target-cluster", + "auth": { + "type": "basic", + "username": "admin", + "passwordFromSecretArn": "arn:aws:secretsmanager:us-east-1:12345678912:secret:master-user-os-pass-123abc" + } + }, + reindexFromSnapshotServiceEnabled: true, + trafficReplayerServiceEnabled: true, + } + + const stacks = createStackComposer(contextOptions) + + const migrationConsoleStack: MigrationConsoleStack = (stacks.stacks.filter((s) => s instanceof MigrationConsoleStack)[0]) as MigrationConsoleStack + const migrationConsoleStackTemplate = Template.fromStack(migrationConsoleStack) + + const valueCapture = new Capture(); + migrationConsoleStackTemplate.hasResourceProperties("AWS::SSM::Parameter", { + Type: "String", + Name: Match.stringLikeRegexp("/migration/.*/.*/servicesYamlFile"), + Value: valueCapture, + }); + const value = valueCapture.asObject() + expect(value).toBeDefined(); + expect(value['Fn::Join']).toBeInstanceOf(Array); + expect(value['Fn::Join'][1]).toBeInstanceOf(Array) + // join the strings together to get the yaml file contents + const yamlFileContents = value['Fn::Join'][1].join('') + expect(yamlFileContents).toContain('source_cluster') + expect(yamlFileContents).toContain('target_cluster') + + expect(yamlFileContents).toContain('basic_auth') + expect(yamlFileContents).toContain(`username: 
${contextOptions.targetCluster.auth.username}`) + expect(yamlFileContents).toContain(`password_from_secret_arn: ${contextOptions.targetCluster.auth.passwordFromSecretArn}`) + expect(yamlFileContents).toContain('metrics_source:\n cloudwatch:') + expect(yamlFileContents).toContain('kafka') + // Validates that the file can be parsed as valid yaml and has the expected fields + const parsedFromYaml = yaml.parse(yamlFileContents); + // Validates that the file has the expected fields + const expectedFields = ['source_cluster', 'target_cluster', 'metrics_source', 'backfill', 'snapshot', 'metadata_migration', 'replay', 'kafka']; + expect(Object.keys(parsedFromYaml).length).toEqual(expectedFields.length) + expect(new Set(Object.keys(parsedFromYaml))).toEqual(new Set(expectedFields)) + }); }); diff --git a/deployment/cdk/opensearch-service-migration/test/network-stack.test.ts b/deployment/cdk/opensearch-service-migration/test/network-stack.test.ts index c3c8fa912..384336b24 100644 --- a/deployment/cdk/opensearch-service-migration/test/network-stack.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/network-stack.test.ts @@ -37,7 +37,10 @@ describe('NetworkStack Tests', () => { vpcEnabled: true, // This setting could be left out, but provides clarity into the subnets for this test case vpcAZCount: 2, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } const openSearchStacks = createStackComposer(contextOptions) @@ -60,7 +63,10 @@ describe('NetworkStack Tests', () => { addOnMigrationDeployId: "junit-addon", vpcEnabled: true, vpcAZCount: 2, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } const stacks = createStackComposer(contextOptions) diff --git a/deployment/cdk/opensearch-service-migration/test/opensearch-domain-stack.test.ts 
b/deployment/cdk/opensearch-service-migration/test/opensearch-domain-stack.test.ts index 3fe436d47..79f57c088 100644 --- a/deployment/cdk/opensearch-service-migration/test/opensearch-domain-stack.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/opensearch-domain-stack.test.ts @@ -57,7 +57,10 @@ describe('OpenSearch Domain Stack Tests', () => { vpcSecurityGroupIds: ["sg-123456789abcdefgh", "sg-223456789abcdefgh"], domainAZCount: 3, domainRemovalPolicy: "DESTROY", - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } const openSearchStacks = createStackComposer(contextOptions) @@ -110,7 +113,10 @@ describe('OpenSearch Domain Stack Tests', () => { vpcSecurityGroupIds: "[\"sg-123456789abcdefgh\", \"sg-223456789abcdefgh\"]", domainAZCount: "3", domainRemovalPolicy: "DESTROY", - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } const openSearchStacks = createStackComposer(contextOptions) @@ -143,7 +149,10 @@ describe('OpenSearch Domain Stack Tests', () => { enforceHTTPS: true, encryptionAtRestEnabled: true, nodeToNodeEncryptionEnabled: true, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } const openSearchStacks = createStackComposer(contextOptions) @@ -163,7 +172,10 @@ describe('OpenSearch Domain Stack Tests', () => { enforceHTTPS: "true", encryptionAtRestEnabled: "true", nodeToNodeEncryptionEnabled: "true", - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } const openSearchStacks = createStackComposer(contextOptions) diff --git a/deployment/cdk/opensearch-service-migration/test/reindex-from-snapshot-stack.test.ts b/deployment/cdk/opensearch-service-migration/test/reindex-from-snapshot-stack.test.ts index 
93cd91cd6..8b668f54c 100644 --- a/deployment/cdk/opensearch-service-migration/test/reindex-from-snapshot-stack.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/reindex-from-snapshot-stack.test.ts @@ -38,7 +38,10 @@ describe('ReindexFromSnapshotStack Tests', () => { test('ReindexFromSnapshotStack creates expected resources', () => { const contextOptions = { vpcEnabled: true, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + }, reindexFromSnapshotServiceEnabled: true, stage: 'unit-test', migrationAssistanceEnabled: true, @@ -75,7 +78,10 @@ describe('ReindexFromSnapshotStack Tests', () => { test('ReindexFromSnapshotStack sets correct RFS command', () => { const contextOptions = { vpcEnabled: true, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + }, reindexFromSnapshotServiceEnabled: true, stage: 'unit-test', migrationAssistanceEnabled: true, @@ -136,12 +142,83 @@ describe('ReindexFromSnapshotStack Tests', () => { ]); }); + test('ReindexFromSnapshotStack sets correct command for sigv4 auth', () => { + const contextOptions = { + vpcEnabled: true, + reindexFromSnapshotServiceEnabled: true, + stage: 'unit-test', + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + }, + targetCluster: { + "endpoint": "https://target-cluster", + "auth": {"type": "sigv4", "region": "eu-west-1", "serviceSigningName": "aoss"} + }, + migrationAssistanceEnabled: true, + }; + + + const stacks = createStackComposer(contextOptions); + const reindexStack = stacks.stacks.find(s => s instanceof ReindexFromSnapshotStack) as ReindexFromSnapshotStack; + expect(reindexStack).toBeDefined(); + const template = Template.fromStack(reindexStack); + + const taskDefinitionCapture = new Capture(); + template.hasResourceProperties('AWS::ECS::TaskDefinition', { + ContainerDefinitions: 
taskDefinitionCapture, + }); + + const containerDefinitions = taskDefinitionCapture.asArray(); + expect(containerDefinitions.length).toBe(1); + expect(containerDefinitions[0].Command).toEqual([ + '/bin/sh', + '-c', + '/rfs-app/entrypoint.sh' + ]); + expect(containerDefinitions[0].Environment).toEqual([ + { + Name: 'RFS_COMMAND', + Value: { + "Fn::Join": [ + "", + [ "/rfs-app/runJavaWithClasspath.sh com.rfs.RfsMigrateDocuments --s3-local-dir /tmp/s3_files --s3-repo-uri s3://migration-artifacts-test-account-unit-test-us-east-1/rfs-snapshot-repo --s3-region us-east-1 --snapshot-name rfs-snapshot --lucene-dir '/lucene' --target-host ", + { + "Ref": "SsmParameterValuemigrationunittestdefaultosClusterEndpointC96584B6F00A464EAD1953AFF4B05118Parameter", + }, + "--target-aws-service-signing-name aoss --target-aws-region eu-west-1", + ], + ], + } + }, + { + Name: 'RFS_TARGET_USER', + Value: '' + }, + { + Name: 'RFS_TARGET_PASSWORD', + Value: '' + }, + { + Name: 'RFS_TARGET_PASSWORD_ARN', + Value: '' + }, + { + Name: 'SHARED_LOGS_DIR_PATH', + Value: '/shared-logs-output/reindex-from-snapshot-default' + } + ]); + }); + test('ReindexFromSnapshotStack sets correct YAML configurations', () => { const contextOptions = { vpcEnabled: true, reindexFromSnapshotServiceEnabled: true, stage: 'unit-test', - sourceClusterEndpoint: 'https://test-cluster', + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + }, migrationAssistanceEnabled: true, }; @@ -163,7 +240,10 @@ describe('ReindexFromSnapshotStack Tests', () => { vpcEnabled: true, reindexFromSnapshotServiceEnabled: true, stage: 'unit-test', - sourceClusterEndpoint: 'https://test-cluster', + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + }, reindexFromSnapshotExtraArgs: '--custom-arg value --flag --snapshot-name custom-snapshot', migrationAssistanceEnabled: true, }; diff --git a/deployment/cdk/opensearch-service-migration/test/resources/sample-context-file.json 
b/deployment/cdk/opensearch-service-migration/test/resources/sample-context-file.json index 172e5c56e..274ecb476 100644 --- a/deployment/cdk/opensearch-service-migration/test/resources/sample-context-file.json +++ b/deployment/cdk/opensearch-service-migration/test/resources/sample-context-file.json @@ -5,6 +5,9 @@ "migrationAssistanceEnabled": true, "kafkaBrokerServiceEnabled": true, "otelCollectorEnabled": false, - "sourceClusterEndpoint": "https://test-cluster" + "sourceCluster": { + "endpoint": "https://test-cluster", + "auth": { "type": "none" } + } } -} \ No newline at end of file +} diff --git a/deployment/cdk/opensearch-service-migration/test/stack-composer-ordering.test.ts b/deployment/cdk/opensearch-service-migration/test/stack-composer-ordering.test.ts index 4d68b684b..29def817e 100644 --- a/deployment/cdk/opensearch-service-migration/test/stack-composer-ordering.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/stack-composer-ordering.test.ts @@ -116,7 +116,10 @@ describe('Stack Composer Ordering Tests', () => { "otelCollectorEnabled": false, "osContainerServiceEnabled": false, "reindexFromSnapshotServiceEnabled": false, - "sourceClusterEndpoint": "https://test-cluster", + "sourceCluster": { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } const stacks = createStackComposer(contextOptions) diff --git a/deployment/cdk/opensearch-service-migration/test/stack-composer.test.ts b/deployment/cdk/opensearch-service-migration/test/stack-composer.test.ts index 55e234466..c3ff6c34c 100644 --- a/deployment/cdk/opensearch-service-migration/test/stack-composer.test.ts +++ b/deployment/cdk/opensearch-service-migration/test/stack-composer.test.ts @@ -217,7 +217,10 @@ describe('Stack Composer Tests', () => { migrationAssistanceEnabled: true, vpcEnabled: true, migrationConsoleServiceEnabled: true, - sourceClusterEndpoint: "https://test-cluster", + sourceCluster: { + "endpoint": "https://test-cluster", + "auth": {"type": "none"} + } } 
const openSearchStacks = createStackComposer(contextOptions) @@ -270,7 +273,9 @@ describe('Stack Composer Tests', () => { test('Test that a context with no source cluster details succeeds if sourceClusterDisabled', () => { const sourceClusterDisabledContextOptions = { - sourceClusterDisabled: true, + sourceCluster: { + "disabled": true + }, otelCollectorEnabled: true, migrationAssistanceEnabled: true, vpcEnabled: true, @@ -281,7 +286,6 @@ describe('Stack Composer Tests', () => { expect(openSearchStacks.stacks).toHaveLength(4) const sourceClusterNotExplicitlyDisabledContextOptions = { - sourceClusterDisabled: false, otelCollectorEnabled: true, migrationAssistanceEnabled: true, vpcEnabled: true, @@ -291,8 +295,10 @@ describe('Stack Composer Tests', () => { expect(sourceClusterNotExplicitlyDisabledCreateStackFunc).toThrow() const sourceClusterDisabledWithEndpointContextOptions = { - sourceClusterDisabled: true, - sourceClusterEndpoint: "XXXXXXXXXXXXXXXXXXXX", + sourceCluster: { + "disabled": true, + "endpoint": "XXXXXXXXXXXXXXXXXXXX" + }, otelCollectorEnabled: true, migrationAssistanceEnabled: true, vpcEnabled: true, @@ -301,4 +307,26 @@ describe('Stack Composer Tests', () => { let sourceClusterDisabledWithEndpointCreateStackFunc = () => createStackComposer(sourceClusterDisabledWithEndpointContextOptions) expect (sourceClusterDisabledWithEndpointCreateStackFunc).toThrow() }) + + + test('Test backwards compatibility of source/target cluster params', () => { + // This is effectively a smoke test with the "old-style" flat source and target cluster parameters. 
+ const contextOptions = { + vpcEnabled: true, + migrationAssistanceEnabled: true, + migrationConsoleServiceEnabled: true, + sourceClusterEndpoint: "https://test-cluster", + reindexFromSnapshotServiceEnabled: true, + trafficReplayerServiceEnabled: true, + fineGrainedManagerUserName: "admin", + fineGrainedManagerUserSecretManagerKeyARN: "arn:aws:secretsmanager:us-east-1:12345678912:secret:master-user-os-pass-123abc", + nodeToNodeEncryptionEnabled: true, // required if FGAC is being used + encryptionAtRestEnabled: true, // required if FGAC is being used + enforceHTTPS: true // required if FGAC is being used + } + + const stacks = createStackComposer(contextOptions) + expect(stacks.stacks).toHaveLength(6) + + }); })