From aea8f3b4f1bf80d5ffa390fee2986d864cd842c5 Mon Sep 17 00:00:00 2001 From: Kaizen Conroy <36202692+kaizencc@users.noreply.github.com> Date: Thu, 23 Jan 2025 18:17:52 -0500 Subject: [PATCH 1/8] feat(s3): throw `ValidationError` instead of untyped errors (#33109) ### Issue `aws-s3*` for #32569 ### Description of changes ValidationErrors everywhere ### Describe any new or updated permissions being added n/a ### Description of how you validated changes Existing tests. Exemptions granted as this is basically a refactor of existing code. ### Checklist - [x] My code adheres to the [CONTRIBUTING GUIDE](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md) and [DESIGN GUIDELINES](https://github.com/aws/aws-cdk/blob/main/docs/DESIGN_GUIDELINES.md) ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/aws-cdk-lib/.eslintrc.js | 7 +++++++ packages/aws-cdk-lib/aws-s3-assets/lib/asset.ts | 3 ++- .../aws-cdk-lib/aws-s3-assets/lib/compat.ts | 3 ++- .../aws-s3-assets/test/custom-synthesis.test.ts | 3 ++- .../aws-s3-deployment/lib/bucket-deployment.ts | 17 +++++++++-------- .../aws-s3-deployment/lib/render-data.ts | 11 ++++++----- .../aws-cdk-lib/aws-s3-deployment/lib/source.ts | 9 +++++---- .../test/bucket-deployment.test.ts | 3 ++- .../aws-s3-notifications/lib/lambda.ts | 7 ++++--- 9 files changed, 39 insertions(+), 24 deletions(-) diff --git a/packages/aws-cdk-lib/.eslintrc.js b/packages/aws-cdk-lib/.eslintrc.js index 7525de988518a..ca7a7961c60ee 100644 --- a/packages/aws-cdk-lib/.eslintrc.js +++ b/packages/aws-cdk-lib/.eslintrc.js @@ -26,6 +26,13 @@ const enableNoThrowDefaultErrorIn = [ 'aws-ssmincidents', 'aws-ssmquicksetup', 'aws-synthetics', + 'aws-s3-assets', + 'aws-s3-deployment', + 'aws-s3-notifications', + 'aws-s3express', + 'aws-s3objectlambda', + 'aws-s3outposts', + 'aws-s3tables', ]; baseConfig.overrides.push({ files: enableNoThrowDefaultErrorIn.map(m => `./${m}/lib/**`), diff --git a/packages/aws-cdk-lib/aws-s3-assets/lib/asset.ts b/packages/aws-cdk-lib/aws-s3-assets/lib/asset.ts index 6f07fffa6d9c0..945d45ae1206e 100644 --- a/packages/aws-cdk-lib/aws-s3-assets/lib/asset.ts +++ b/packages/aws-cdk-lib/aws-s3-assets/lib/asset.ts @@ -6,6 +6,7 @@ import * as iam from '../../aws-iam'; import * as kms from '../../aws-kms'; import * as s3 from '../../aws-s3'; import * as cdk from '../../core'; +import { ValidationError } from '../../core/lib/errors'; import * as cxapi from '../../cx-api'; export interface AssetOptions extends CopyOptions, cdk.FileCopyOptions, cdk.AssetOptions { @@ -143,7 +144,7 @@ export class Asset extends Construct implements cdk.IAsset { super(scope, id); if (!props.path) { - throw new Error('Asset path cannot be empty'); + throw new ValidationError('Asset path cannot be empty', this); } this.isBundled = props.bundling != null; diff --git a/packages/aws-cdk-lib/aws-s3-assets/lib/compat.ts b/packages/aws-cdk-lib/aws-s3-assets/lib/compat.ts index b5eac2276ed94..35f10c0ba1e72 100644 --- a/packages/aws-cdk-lib/aws-s3-assets/lib/compat.ts +++ b/packages/aws-cdk-lib/aws-s3-assets/lib/compat.ts @@ -1,5 +1,6 @@ import { FollowMode } from '../../assets'; import { SymlinkFollowMode } from '../../core'; +import { UnscopedValidationError } from '../../core/lib/errors'; export function toSymlinkFollow(follow?: FollowMode): SymlinkFollowMode | undefined { if (!follow) { @@ -12,6 +13,6 @@ export function toSymlinkFollow(follow?: FollowMode): SymlinkFollowMode | undefi case 
FollowMode.BLOCK_EXTERNAL: return SymlinkFollowMode.BLOCK_EXTERNAL; case FollowMode.EXTERNAL: return SymlinkFollowMode.EXTERNAL; default: - throw new Error(`unknown follow mode: ${follow}`); + throw new UnscopedValidationError(`unknown follow mode: ${follow}`); } } diff --git a/packages/aws-cdk-lib/aws-s3-assets/test/custom-synthesis.test.ts b/packages/aws-cdk-lib/aws-s3-assets/test/custom-synthesis.test.ts index b78b52bc131ec..6074ea50b9b93 100644 --- a/packages/aws-cdk-lib/aws-s3-assets/test/custom-synthesis.test.ts +++ b/packages/aws-cdk-lib/aws-s3-assets/test/custom-synthesis.test.ts @@ -7,6 +7,7 @@ import * as path from 'path'; import { Template } from '../../assertions'; import { StackSynthesizer, FileAssetSource, FileAssetLocation, DockerImageAssetSource, DockerImageAssetLocation, ISynthesisSession, App, Stack, AssetManifestBuilder, CfnParameter, CfnResource } from '../../core'; +import { UnscopedValidationError } from '../../core/lib/errors'; import { AssetManifestArtifact } from '../../cx-api'; import { Asset } from '../lib'; @@ -84,7 +85,7 @@ class CustomSynthesizer extends StackSynthesizer { addDockerImageAsset(asset: DockerImageAssetSource): DockerImageAssetLocation { void(asset); - throw new Error('Docker images are not supported here'); + throw new UnscopedValidationError('Docker images are not supported here'); } synthesize(session: ISynthesisSession): void { diff --git a/packages/aws-cdk-lib/aws-s3-deployment/lib/bucket-deployment.ts b/packages/aws-cdk-lib/aws-s3-deployment/lib/bucket-deployment.ts index 4fb7d0b40cae6..9a8348c04a95a 100644 --- a/packages/aws-cdk-lib/aws-s3-deployment/lib/bucket-deployment.ts +++ b/packages/aws-cdk-lib/aws-s3-deployment/lib/bucket-deployment.ts @@ -11,6 +11,7 @@ import * as lambda from '../../aws-lambda'; import * as logs from '../../aws-logs'; import * as s3 from '../../aws-s3'; import * as cdk from '../../core'; +import { ValidationError } from '../../core/lib/errors'; import { BucketDeploymentSingletonFunction } from '../../custom-resource-handlers/dist/aws-s3-deployment/bucket-deployment-provider.generated'; import { AwsCliLayer } from '../../lambda-layer-awscli'; @@ -304,17 +305,17 @@ export class BucketDeployment extends Construct { if (props.distributionPaths) { if (!props.distribution) { - throw new Error('Distribution must be specified if distribution paths are specified'); + throw new ValidationError('Distribution must be specified if distribution paths are specified', this); } if (!cdk.Token.isUnresolved(props.distributionPaths)) { if (!props.distributionPaths.every(distributionPath => cdk.Token.isUnresolved(distributionPath) || distributionPath.startsWith('/'))) { - throw new Error('Distribution paths must start with "/"'); + throw new ValidationError('Distribution paths must start with "/"', this); } } } if (props.useEfs && !props.vpc) { - throw new Error('Vpc must be specified if useEfs is set'); + throw new ValidationError('Vpc must be specified if useEfs is set', this); } this.destinationBucket = props.destinationBucket; @@ -376,7 +377,7 @@ export class BucketDeployment extends Construct { }); const handlerRole = handler.role; - if (!handlerRole) { throw new Error('lambda.SingletonFunction should have created a Role'); } + if (!handlerRole) { throw new ValidationError('lambda.SingletonFunction should have created a Role', this); } this.handlerRole = handlerRole; this.sources = props.sources.map((source: ISource) => source.bind(this, { handlerRole: this.handlerRole })); @@ -455,7 +456,7 @@ export class BucketDeployment extends 
Construct { // '/this/is/a/random/key/prefix/that/is/a/lot/of/characters/do/we/think/that/it/will/ever/be/this/long?????' // better to throw an error here than wait for CloudFormation to fail if (!cdk.Token.isUnresolved(tagKey) && tagKey.length > 128) { - throw new Error('The BucketDeployment construct requires that the "destinationKeyPrefix" be <=104 characters.'); + throw new ValidationError('The BucketDeployment construct requires that the "destinationKeyPrefix" be <=104 characters.', this); } /* @@ -567,7 +568,7 @@ export class BucketDeployment extends Construct { // configurations since we have a singleton. if (memoryLimit) { if (cdk.Token.isUnresolved(memoryLimit)) { - throw new Error("Can't use tokens when specifying 'memoryLimit' since we use it to identify the singleton custom resource handler."); + throw new ValidationError("Can't use tokens when specifying 'memoryLimit' since we use it to identify the singleton custom resource handler.", this); } uuid += `-${memoryLimit.toString()}MiB`; @@ -578,7 +579,7 @@ export class BucketDeployment extends Construct { // configurations since we have a singleton. if (ephemeralStorageSize) { if (ephemeralStorageSize.isUnresolved()) { - throw new Error("Can't use tokens when specifying 'ephemeralStorageSize' since we use it to identify the singleton custom resource handler."); + throw new ValidationError("Can't use tokens when specifying 'ephemeralStorageSize' since we use it to identify the singleton custom resource handler.", this); } uuid += `-${ephemeralStorageSize.toMebibytes().toString()}MiB`; @@ -660,7 +661,7 @@ export class DeployTimeSubstitutedFile extends BucketDeployment { constructor(scope: Construct, id: string, props: DeployTimeSubstitutedFileProps) { if (!fs.existsSync(props.source)) { - throw new Error(`No file found at 'source' path ${props.source}`); + throw new ValidationError(`No file found at 'source' path ${props.source}`, scope); } // Makes substitutions on the file let fileData = fs.readFileSync(props.source, 'utf-8'); diff --git a/packages/aws-cdk-lib/aws-s3-deployment/lib/render-data.ts b/packages/aws-cdk-lib/aws-s3-deployment/lib/render-data.ts index 28f588d156d97..f04b0a9600cc0 100644 --- a/packages/aws-cdk-lib/aws-s3-deployment/lib/render-data.ts +++ b/packages/aws-cdk-lib/aws-s3-deployment/lib/render-data.ts @@ -1,5 +1,6 @@ import { Construct } from 'constructs'; import { Stack } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; export interface Content { readonly text: string; @@ -22,7 +23,7 @@ export function renderData(scope: Construct, data: string): Content { } if (typeof(obj) !== 'object') { - throw new Error(`Unexpected: after resolve() data must either be a string or a CloudFormation intrinsic. Got: ${JSON.stringify(obj)}`); + throw new ValidationError(`Unexpected: after resolve() data must either be a string or a CloudFormation intrinsic. 
Got: ${JSON.stringify(obj)}`, scope); } let markerIndex = 0; @@ -35,7 +36,7 @@ export function renderData(scope: Construct, data: string): Content { const parts = fnJoin[1]; if (sep !== '') { - throw new Error(`Unexpected "Fn::Join", expecting separator to be an empty string but got "${sep}"`); + throw new ValidationError(`Unexpected "Fn::Join", expecting separator to be an empty string but got "${sep}"`, scope); } for (const part of parts) { @@ -49,13 +50,13 @@ export function renderData(scope: Construct, data: string): Content { continue; } - throw new Error(`Unexpected "Fn::Join" part, expecting string or object but got ${typeof (part)}`); + throw new ValidationError(`Unexpected "Fn::Join" part, expecting string or object but got ${typeof (part)}`, scope); } } else if (obj.Ref || obj['Fn::GetAtt'] || obj['Fn::Select']) { addMarker(obj); } else { - throw new Error('Unexpected: Expecting `resolve()` to return "Fn::Join", "Ref" or "Fn::GetAtt"'); + throw new ValidationError('Unexpected: Expecting `resolve()` to return "Fn::Join", "Ref" or "Fn::GetAtt"', scope); } function addMarker(part: Ref | GetAtt | FnSelect) { @@ -63,7 +64,7 @@ export function renderData(scope: Construct, data: string): Content { const acceptedCfnFns = ['Ref', 'Fn::GetAtt', 'Fn::Select']; if (keys.length !== 1 || !acceptedCfnFns.includes(keys[0])) { const stringifiedAcceptedCfnFns = acceptedCfnFns.map((fn) => `"${fn}"`).join(' or '); - throw new Error(`Invalid CloudFormation reference. Key must start with any of ${stringifiedAcceptedCfnFns}. Got ${JSON.stringify(part)}`); + throw new ValidationError(`Invalid CloudFormation reference. Key must start with any of ${stringifiedAcceptedCfnFns}. Got ${JSON.stringify(part)}`, scope); } const marker = `<>`; diff --git a/packages/aws-cdk-lib/aws-s3-deployment/lib/source.ts b/packages/aws-cdk-lib/aws-s3-deployment/lib/source.ts index c6815194aa843..857e0092371d7 100644 --- a/packages/aws-cdk-lib/aws-s3-deployment/lib/source.ts +++ b/packages/aws-cdk-lib/aws-s3-deployment/lib/source.ts @@ -6,6 +6,7 @@ import * as iam from '../../aws-iam'; import * as s3 from '../../aws-s3'; import * as s3_assets from '../../aws-s3-assets'; import { FileSystem, Stack } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; /** * Source information. 
@@ -98,9 +99,9 @@ export class Source { */ public static bucket(bucket: s3.IBucket, zipObjectKey: string): ISource { return { - bind: (_: Construct, context?: DeploymentSourceContext) => { + bind: (scope: Construct, context?: DeploymentSourceContext) => { if (!context) { - throw new Error('To use a Source.bucket(), context must be provided'); + throw new ValidationError('To use a Source.bucket(), context must be provided', scope); } bucket.grantRead(context.handlerRole); @@ -121,7 +122,7 @@ export class Source { return { bind(scope: Construct, context?: DeploymentSourceContext): SourceConfig { if (!context) { - throw new Error('To use a Source.asset(), context must be provided'); + throw new ValidationError('To use a Source.asset(), context must be provided', scope); } let id = 1; @@ -133,7 +134,7 @@ export class Source { ...options, }); if (!asset.isZipArchive) { - throw new Error('Asset path must be either a .zip file or a directory'); + throw new ValidationError('Asset path must be either a .zip file or a directory', scope); } asset.grantRead(context.handlerRole); diff --git a/packages/aws-cdk-lib/aws-s3-deployment/test/bucket-deployment.test.ts b/packages/aws-cdk-lib/aws-s3-deployment/test/bucket-deployment.test.ts index 886102419b05f..8aa5bea01e1f1 100644 --- a/packages/aws-cdk-lib/aws-s3-deployment/test/bucket-deployment.test.ts +++ b/packages/aws-cdk-lib/aws-s3-deployment/test/bucket-deployment.test.ts @@ -9,6 +9,7 @@ import * as logs from '../../aws-logs'; import * as s3 from '../../aws-s3'; import * as sns from '../../aws-sns'; import * as cdk from '../../core'; +import { UnscopedValidationError } from '../../core/lib/errors'; import * as cxapi from '../../cx-api'; import * as s3deploy from '../lib'; @@ -1678,7 +1679,7 @@ function readDataFile(casm: cxapi.CloudAssembly, relativePath: string): string { } } - throw new Error(`File ${relativePath} not found in any of the assets of the assembly`); + throw new UnscopedValidationError(`File ${relativePath} not found in any of the assets of the assembly`); } test('DeployTimeSubstitutedFile allows custom role to be supplied', () => { diff --git a/packages/aws-cdk-lib/aws-s3-notifications/lib/lambda.ts b/packages/aws-cdk-lib/aws-s3-notifications/lib/lambda.ts index e6159ff8c22aa..45a7a44773f63 100644 --- a/packages/aws-cdk-lib/aws-s3-notifications/lib/lambda.ts +++ b/packages/aws-cdk-lib/aws-s3-notifications/lib/lambda.ts @@ -3,6 +3,7 @@ import * as iam from '../../aws-iam'; import * as lambda from '../../aws-lambda'; import * as s3 from '../../aws-s3'; import { CfnResource, Names, Stack } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; /** * Use a Lambda function as a bucket notification destination @@ -11,12 +12,12 @@ export class LambdaDestination implements s3.IBucketNotificationDestination { constructor(private readonly fn: lambda.IFunction) { } - public bind(_scope: Construct, bucket: s3.IBucket): s3.BucketNotificationDestinationConfig { + public bind(scope: Construct, bucket: s3.IBucket): s3.BucketNotificationDestinationConfig { const permissionId = `AllowBucketNotificationsTo${Names.nodeUniqueId(this.fn.permissionsNode)}`; if (!(bucket instanceof Construct)) { - throw new Error(`LambdaDestination for function ${Names.nodeUniqueId(this.fn.permissionsNode)} can only be configured on a - bucket construct (Bucket ${bucket.bucketName})`); + throw new ValidationError(`LambdaDestination for function ${Names.nodeUniqueId(this.fn.permissionsNode)} can only be configured on a + bucket construct (Bucket 
${bucket.bucketName})`, scope); } if (bucket.node.tryFindChild(permissionId) === undefined) { From dc0cb8d2952e556d2fbfdee20729e2688f14bc94 Mon Sep 17 00:00:00 2001 From: Mohamed Elasmar <71043312+moelasmar@users.noreply.github.com> Date: Thu, 23 Jan 2025 18:17:40 -0800 Subject: [PATCH 2/8] chore(sync): sync main branch with v2-release branch (#33114) Fixing the commits conflict between main and v2-release branches --- packages/aws-cdk-lib/cx-api/FEATURE_FLAGS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/aws-cdk-lib/cx-api/FEATURE_FLAGS.md b/packages/aws-cdk-lib/cx-api/FEATURE_FLAGS.md index 955542ef2b803..3255976d583d1 100644 --- a/packages/aws-cdk-lib/cx-api/FEATURE_FLAGS.md +++ b/packages/aws-cdk-lib/cx-api/FEATURE_FLAGS.md @@ -86,7 +86,7 @@ Flags come in three types: | [@aws-cdk/aws-route53-targets:userPoolDomainNameMethodWithoutCustomResource](#aws-cdkaws-route53-targetsuserpooldomainnamemethodwithoutcustomresource) | When enabled, use a new method for DNS Name of user pool domain target without creating a custom resource. | 2.174.0 | (fix) | | [@aws-cdk/aws-ecs:disableEcsImdsBlocking](#aws-cdkaws-ecsdisableecsimdsblocking) | When set to true, CDK synth will throw exception if canContainersAccessInstanceRole is false. **IMPORTANT: See [details.](#aws-cdkaws-ecsdisableEcsImdsBlocking)** | 2.175.0 | (temporary) | | [@aws-cdk/aws-ecs:enableImdsBlockingDeprecatedFeature](#aws-cdkaws-ecsenableimdsblockingdeprecatedfeature) | When set to true along with canContainersAccessInstanceRole=false in ECS cluster, new updated commands will be added to UserData to block container accessing IMDS. **Applicable to Linux only. IMPORTANT: See [details.](#aws-cdkaws-ecsenableImdsBlockingDeprecatedFeature)** | 2.175.0 | (temporary) | -| [@aws-cdk/aws-elasticloadbalancingV2:albDualstackWithoutPublicIpv4SecurityGroupRulesDefault](#aws-cdkaws-elasticloadbalancingv2albdualstackwithoutpublicipv4securitygrouprulesdefault) | When enabled, the default security group ingress rules will allow IPv6 ingress from anywhere | V2NEXT | (fix) | +| [@aws-cdk/aws-elasticloadbalancingV2:albDualstackWithoutPublicIpv4SecurityGroupRulesDefault](#aws-cdkaws-elasticloadbalancingv2albdualstackwithoutpublicipv4securitygrouprulesdefault) | When enabled, the default security group ingress rules will allow IPv6 ingress from anywhere | 2.176.0 | (fix) | | [@aws-cdk/aws-iam:oidcRejectUnauthorizedConnections](#aws-cdkaws-iamoidcrejectunauthorizedconnections) | When enabled, the default behaviour of OIDC provider will reject unauthorized connections | V2NEXT | (fix) | From dd34d2e3286048eb5079d93b743c444c4ee1e9bf Mon Sep 17 00:00:00 2001 From: Hogan Bobertz Date: Fri, 24 Jan 2025 00:02:35 -0500 Subject: [PATCH 3/8] feat(apigatewayv2-authorizers): throw `ValidationError` instead of untyped errors (#33076) ### Issue `aws-apigatewayv2-authorizers` for #32569 ### Description of changes ValidationErrors everywhere ### Describe any new or updated permissions being added n/a ### Description of how you validated changes Existing tests. Exemptions granted as this is basically a refactor of existing code. 
### Checklist - [x] My code adheres to the [CONTRIBUTING GUIDE](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md) and [DESIGN GUIDELINES](https://github.com/aws/aws-cdk/blob/main/docs/DESIGN_GUIDELINES.md) ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/aws-cdk-lib/.eslintrc.js | 1 + .../aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/jwt.ts | 3 ++- .../aws-apigatewayv2-authorizers/lib/http/lambda.ts | 5 +++-- .../aws-apigatewayv2-authorizers/lib/http/user-pool.ts | 3 ++- .../aws-apigatewayv2-authorizers/lib/websocket/lambda.ts | 3 ++- 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/packages/aws-cdk-lib/.eslintrc.js b/packages/aws-cdk-lib/.eslintrc.js index ca7a7961c60ee..eebbf16821843 100644 --- a/packages/aws-cdk-lib/.eslintrc.js +++ b/packages/aws-cdk-lib/.eslintrc.js @@ -25,6 +25,7 @@ const enableNoThrowDefaultErrorIn = [ 'aws-ssmcontacts', 'aws-ssmincidents', 'aws-ssmquicksetup', + 'aws-apigatewayv2-authorizers', 'aws-synthetics', 'aws-s3-assets', 'aws-s3-deployment', diff --git a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/jwt.ts b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/jwt.ts index 483c9705aaf21..e004a6e140834 100644 --- a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/jwt.ts +++ b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/jwt.ts @@ -5,6 +5,7 @@ import { HttpRouteAuthorizerConfig, IHttpRouteAuthorizer, } from '../../../aws-apigatewayv2'; +import { UnscopedValidationError } from '../../../core/lib/errors'; /** * Properties to initialize HttpJwtAuthorizer. @@ -59,7 +60,7 @@ export class HttpJwtAuthorizer implements IHttpRouteAuthorizer { */ public get authorizerId(): string { if (!this.authorizer) { - throw new Error( + throw new UnscopedValidationError( 'Cannot access authorizerId until authorizer is attached to a HttpRoute', ); } diff --git a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/lambda.ts b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/lambda.ts index 68b8ba4cf938a..26a47ef321f09 100644 --- a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/lambda.ts +++ b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/lambda.ts @@ -10,6 +10,7 @@ import { import { ServicePrincipal } from '../../../aws-iam'; import { IFunction } from '../../../aws-lambda'; import { Stack, Duration, Names } from '../../../core'; +import { UnscopedValidationError, ValidationError } from '../../../core/lib/errors'; /** * Specifies the type responses the lambda returns @@ -90,7 +91,7 @@ export class HttpLambdaAuthorizer implements IHttpRouteAuthorizer { */ public get authorizerId(): string { if (!this.authorizer) { - throw new Error( + throw new UnscopedValidationError( 'Cannot access authorizerId until authorizer is attached to a HttpRoute', ); } @@ -99,7 +100,7 @@ export class HttpLambdaAuthorizer implements IHttpRouteAuthorizer { public bind(options: HttpRouteAuthorizerBindOptions): HttpRouteAuthorizerConfig { if (this.httpApi && (this.httpApi.apiId !== options.route.httpApi.apiId)) { - throw new Error('Cannot attach the same authorizer to multiple Apis'); + throw new ValidationError('Cannot attach the same authorizer to multiple Apis', options.scope); } if (!this.authorizer) { diff --git a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/user-pool.ts b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/user-pool.ts index 123a3df1ad294..5cb0099080f6a 100644 
--- a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/user-pool.ts +++ b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/http/user-pool.ts @@ -1,6 +1,7 @@ import { HttpAuthorizer, HttpAuthorizerType, HttpRouteAuthorizerBindOptions, HttpRouteAuthorizerConfig, IHttpRouteAuthorizer } from '../../../aws-apigatewayv2'; import { IUserPool, IUserPoolClient } from '../../../aws-cognito'; import { Stack } from '../../../core'; +import { UnscopedValidationError } from '../../../core/lib/errors'; /** * Properties to initialize HttpUserPoolAuthorizer. @@ -59,7 +60,7 @@ export class HttpUserPoolAuthorizer implements IHttpRouteAuthorizer { */ public get authorizerId(): string { if (!this.authorizer) { - throw new Error( + throw new UnscopedValidationError( 'Cannot access authorizerId until authorizer is attached to a HttpRoute', ); } diff --git a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts index 3a2d9bb611e82..073f69aafc56a 100644 --- a/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts +++ b/packages/aws-cdk-lib/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts @@ -10,6 +10,7 @@ import { import { ServicePrincipal } from '../../../aws-iam'; import { IFunction } from '../../../aws-lambda'; import { Stack, Names } from '../../../core'; +import { ValidationError } from '../../../core/lib/errors'; /** * Properties to initialize WebSocketTokenAuthorizer. @@ -49,7 +50,7 @@ export class WebSocketLambdaAuthorizer implements IWebSocketRouteAuthorizer { public bind(options: WebSocketRouteAuthorizerBindOptions): WebSocketRouteAuthorizerConfig { if (this.webSocketApi && (this.webSocketApi.apiId !== options.route.webSocketApi.apiId)) { - throw new Error('Cannot attach the same authorizer to multiple Apis'); + throw new ValidationError('Cannot attach the same authorizer to multiple Apis', options.scope); } if (!this.authorizer) { From c0a2cbf7360b74dce55c709d54566c8e01112a4f Mon Sep 17 00:00:00 2001 From: Rico Hermans Date: Fri, 24 Jan 2025 10:51:16 +0100 Subject: [PATCH 4/8] chore: turn check_suite into check_run (#33125) Investigation has shown that `check_suite` only triggers on main repo events, never on forks. Let's see what `check_run` does. 
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .github/workflows/check-suite-test.yaml | 26 ++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/check-suite-test.yaml b/.github/workflows/check-suite-test.yaml index e11b1c9f7b1c4..c8719917d7701 100644 --- a/.github/workflows/check-suite-test.yaml +++ b/.github/workflows/check-suite-test.yaml @@ -1,25 +1,25 @@ -name: Check Suite Logger +name: Check Run Logger on: - check_suite: + check_run: types: [completed] jobs: - log-check-suite: + log-check-run: runs-on: ubuntu-latest steps: - - name: Log check suite event details + - name: Log check run event details run: | - echo "Check Suite ID: ${{ github.event.check_suite.id }}" - echo "Status: ${{ github.event.check_suite.status }}" - echo "Conclusion: ${{ github.event.check_suite.conclusion }}" - echo "URL: ${{ github.event.check_suite.url }}" - echo "Head Branch: ${{ github.event.check_suite.head_branch }}" - echo "Head SHA: ${{ github.event.check_suite.head_sha }}" + echo "Check Run ID: ${{ github.event.check_run.id }}" + echo "Status: ${{ github.event.check_run.status }}" + echo "Conclusion: ${{ github.event.check_run.conclusion }}" + echo "URL: ${{ github.event.check_run.url }}" + echo "Head Branch: ${{ github.event.check_run.head_branch }}" + echo "Head SHA: ${{ github.event.check_run.head_sha }}" echo "Repository: ${{ github.event.repository.full_name }}" echo "Sender: ${{ github.event.sender.login }}" - echo "Created At: ${{ github.event.check_suite.created_at }}" - echo "Updated At: ${{ github.event.check_suite.updated_at }}" - echo "Pull Requests: ${{ toJson(github.event.check_suite.pull_requests) }}" \ No newline at end of file + echo "Created At: ${{ github.event.check_run.created_at }}" + echo "Updated At: ${{ github.event.check_run.updated_at }}" + echo "Pull Requests: ${{ toJson(github.event.check_run.pull_requests) }}" From d95add33fc3fe355752ff56cd37381cb808890b6 Mon Sep 17 00:00:00 2001 From: Momo Kornher Date: Fri, 24 Jan 2025 11:04:19 +0000 Subject: [PATCH 5/8] fix(cli): trace output (-vv) is useless when files are uploaded (#33104) ### Reason for this change When running the CDK CLI in trace mode (with `-vv`), e.g. `cdk deploy -vv` we log all SDK calls and their inputs. For file uploads, the input contains the buffer of the file to be uploaded. This results in really bad output: ``` [20:14:31] [sdk info] S3.PutObject({"Bucket":"cdk-hnb659fds-assets-us-east-1","Key":"220c435f80d2dd2cfc5dd0e8e29bfbb6ec58892bd813f5d6a008fdda38f6c02f.zip","Body":{"type":"Buffer","data":[80,75,3,4,20,0,8,0,8,0,0,0,33,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,100,97,116,97,46,116,120,116,75,73,44,73,52,227,2,0,80,75,7,8,165,92,116,66,8,0,0,0,6,0,0,0,80,75,1,2,45,3,20,0,8,0,8,0,0,0,33,0,165,92,116,66,8,0,0,0,6,0,0,0,8,0,0,0,0,0,0,0,0,0,32,0,164,129,0,0,0,0,100,97,116,97,46,116,120,116,80,75,5,6,0,0,0,0,1,0,1,0,54,0,0,0,62,0,0,0,0,0]},"ContentType":"application/zip","ChecksumAlgorithm":"SHA256","ServerSideEncryption":"aws:kms"}) -> OK ``` Now in this case we are only uploading 138 bytes. Imagine what this log looks like for files of 10MiB or 100MiB. Reportedly this also crashed every terminal. ### Description of changes There's no need to log the buffer bytes. 
Instead replace it with a string indicating it's a buffer and the buffer length: ``` [20:14:31] [sdk info] S3.PutObject({"Body":""}) -> OK ``` ### Describe any new or updated permissions being added n/a ### Description of how you validated changes Tests ### Checklist - [x] My code adheres to the [CONTRIBUTING GUIDE](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md) and [DESIGN GUIDELINES](https://github.com/aws/aws-cdk/blob/main/docs/DESIGN_GUIDELINES.md) ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../aws-cdk/lib/api/aws-auth/sdk-logger.ts | 3 +- packages/aws-cdk/lib/serialize.ts | 33 +++++++++++++++++++ packages/aws-cdk/lib/util/bytes.ts | 21 ++++++++++++ packages/aws-cdk/test/util/bytes.test.ts | 18 ++++++++++ packages/aws-cdk/test/util/serialize.test.ts | 7 ++++ 5 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 packages/aws-cdk/lib/util/bytes.ts create mode 100644 packages/aws-cdk/test/util/bytes.test.ts create mode 100644 packages/aws-cdk/test/util/serialize.test.ts diff --git a/packages/aws-cdk/lib/api/aws-auth/sdk-logger.ts b/packages/aws-cdk/lib/api/aws-auth/sdk-logger.ts index ce1b981bab0d1..9a05d10283f31 100644 --- a/packages/aws-cdk/lib/api/aws-auth/sdk-logger.ts +++ b/packages/aws-cdk/lib/api/aws-auth/sdk-logger.ts @@ -1,6 +1,7 @@ import { inspect } from 'util'; import { Logger } from '@smithy/types'; import { trace } from '../../logging'; +import { replacerBufferWithInfo } from '../../serialize'; export class SdkToCliLogger implements Logger { public trace(..._content: any[]) { @@ -105,7 +106,7 @@ function formatApiCall(content: any): string | undefined { parts.push(`[${content.metadata?.attempts} attempts, ${content.metadata?.totalRetryDelay}ms retry]`); } - parts.push(`${service}.${api}(${JSON.stringify(content.input)})`); + parts.push(`${service}.${api}(${JSON.stringify(content.input, replacerBufferWithInfo)})`); if (isSdkApiCallSuccess(content)) { parts.push('-> OK'); diff --git a/packages/aws-cdk/lib/serialize.ts b/packages/aws-cdk/lib/serialize.ts index 9f310c95b3f78..7a6e836268036 100644 --- a/packages/aws-cdk/lib/serialize.ts +++ b/packages/aws-cdk/lib/serialize.ts @@ -1,4 +1,5 @@ import * as fs from 'fs-extra'; +import { formatBytes } from './util/bytes'; import * as yaml_cfn from './util/yaml-cfn'; /** @@ -51,3 +52,35 @@ export function obscureTemplate(template: any = {}) { return template; } + +/** + * Detects a buffer that has been converted to a JSON-like object + * In Node, `Buffer`s have `toJSON()` method that converts the buffer + * into a JS object that can be JSON stringified. + * Unfortunately this conversion happens before the replacer is called, + * so normal means of detecting a `Buffer` object don't work anymore. + * @see https://github.com/nodejs/node-v0.x-archive/issues/5110 + */ +function isJsonBuffer(value: any): value is { + type: 'Buffer'; + data: number[]; +} { + return typeof value === 'object' + && 'type' in value + && value.type === 'Buffer' + && 'data' in value + && Array.isArray(value.data); +} + +/** + * A JSON.stringify() replacer that converts Buffers into a string with information + * Use this if you plan to print out JSON stringified objects that may contain a Buffer. + * Without this, large buffers (think: Megabytes) will completely fill up the output + * and even crash the system. 
+ */ +export function replacerBufferWithInfo(_key: any, value: any): any { + if (isJsonBuffer(value)) { + return ``; + } + return value; +} diff --git a/packages/aws-cdk/lib/util/bytes.ts b/packages/aws-cdk/lib/util/bytes.ts new file mode 100644 index 0000000000000..22226d049b05d --- /dev/null +++ b/packages/aws-cdk/lib/util/bytes.ts @@ -0,0 +1,21 @@ +/** + * Format bytes as a human readable string + * + * @param bytes Number of bytes to format + * @param decimals Number of decimal places to show (default 2) + * @returns Formatted string with appropriate unit suffix + */ +export function formatBytes(bytes: number, decimals: number = 2): string { + decimals = decimals < 0 ? 0 : decimals; + + if (bytes === 0) { + return '0 Bytes'; + } + + const k = 1024; + const sizes = ['Bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']; + + const i = Math.floor(Math.log(bytes) / Math.log(k)); + + return `${parseFloat((bytes / Math.pow(k, i)).toFixed(decimals))} ${sizes[i]}`; +} diff --git a/packages/aws-cdk/test/util/bytes.test.ts b/packages/aws-cdk/test/util/bytes.test.ts new file mode 100644 index 0000000000000..9292af4f338ff --- /dev/null +++ b/packages/aws-cdk/test/util/bytes.test.ts @@ -0,0 +1,18 @@ +import { formatBytes } from '../../lib/util/bytes'; + +test.each([ + [0, '0 Bytes'], + [10, '10 Bytes'], + [1024, '1 KiB'], + [10.5 * 1024 * 1024, '10.5 MiB'], +])('converts %s bytes to %s', (bytes: number, expected: string) => { + expect(formatBytes(bytes)).toEqual(expected); +}); + +test('can format many decimals', () => { + expect(formatBytes(10.51234 * 1024 * 1024, 5)).toEqual('10.51234 MiB'); +}); + +test('can deal with negative decimals', () => { + expect(formatBytes(10.5 * 1024 * 1024, -1)).toEqual('11 MiB'); +}); diff --git a/packages/aws-cdk/test/util/serialize.test.ts b/packages/aws-cdk/test/util/serialize.test.ts new file mode 100644 index 0000000000000..65eab98b00cb4 --- /dev/null +++ b/packages/aws-cdk/test/util/serialize.test.ts @@ -0,0 +1,7 @@ +import { replacerBufferWithInfo } from '../../lib/serialize'; + +test('converts buffer to information', () => { + const res = JSON.stringify({ data: Buffer.from('test data') }, replacerBufferWithInfo); + + expect(res).toEqual('{"data":""}'); +}); From 4eb87b6531f7053e174a5fa77ab8e7ab4d896535 Mon Sep 17 00:00:00 2001 From: Rico Hermans Date: Fri, 24 Jan 2025 12:38:30 +0100 Subject: [PATCH 6/8] chore: remove check_run workflow (#33127) This triggers a bazillion times. Let's save some money. 
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .github/workflows/check-suite-test.yaml | 25 ------------------------- 1 file changed, 25 deletions(-) delete mode 100644 .github/workflows/check-suite-test.yaml diff --git a/.github/workflows/check-suite-test.yaml b/.github/workflows/check-suite-test.yaml deleted file mode 100644 index c8719917d7701..0000000000000 --- a/.github/workflows/check-suite-test.yaml +++ /dev/null @@ -1,25 +0,0 @@ - -name: Check Run Logger - -on: - check_run: - types: [completed] - -jobs: - log-check-run: - runs-on: ubuntu-latest - - steps: - - name: Log check run event details - run: | - echo "Check Run ID: ${{ github.event.check_run.id }}" - echo "Status: ${{ github.event.check_run.status }}" - echo "Conclusion: ${{ github.event.check_run.conclusion }}" - echo "URL: ${{ github.event.check_run.url }}" - echo "Head Branch: ${{ github.event.check_run.head_branch }}" - echo "Head SHA: ${{ github.event.check_run.head_sha }}" - echo "Repository: ${{ github.event.repository.full_name }}" - echo "Sender: ${{ github.event.sender.login }}" - echo "Created At: ${{ github.event.check_run.created_at }}" - echo "Updated At: ${{ github.event.check_run.updated_at }}" - echo "Pull Requests: ${{ toJson(github.event.check_run.pull_requests) }}" From 34ae99779050ee1e982df29c93815fcc81ec6924 Mon Sep 17 00:00:00 2001 From: Rico Hermans Date: Fri, 24 Jan 2025 13:08:25 +0100 Subject: [PATCH 7/8] chore: make the PR linter check CodeCov statuses (#33128) This has the advantage that failing CodeCov checks can be ignored with a label. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .github/workflows/pr-linter.yml | 11 +- tools/@aws-cdk/prlint/constants.ts | 11 ++ tools/@aws-cdk/prlint/github.ts | 32 ++++ tools/@aws-cdk/prlint/index.ts | 14 +- tools/@aws-cdk/prlint/lint.ts | 26 +++- tools/@aws-cdk/prlint/linter-base.ts | 29 +++- tools/@aws-cdk/prlint/results.ts | 20 ++- tools/@aws-cdk/prlint/test/lint.test.ts | 138 ++++++++++-------- .../@aws-cdk/prlint/test/linter-base.test.ts | 51 +++---- tools/@aws-cdk/prlint/test/octomock.ts | 36 +++++ 10 files changed, 274 insertions(+), 94 deletions(-) create mode 100644 tools/@aws-cdk/prlint/test/octomock.ts diff --git a/.github/workflows/pr-linter.yml b/.github/workflows/pr-linter.yml index e45b6c7f95ece..438f96793a5fb 100644 --- a/.github/workflows/pr-linter.yml +++ b/.github/workflows/pr-linter.yml @@ -11,11 +11,19 @@ on: - opened - synchronize - reopened + + # Triggered from a separate job when a review is added workflow_run: workflows: [PR Linter Trigger] types: - completed - status: + + # Trigger when a status is updated (CodeBuild leads to statuses) + status: {} + + # Trigger when a check suite is completed (GitHub actions and CodeCov create checks) + check_suite: + types: [completed] jobs: download-if-workflow-run: @@ -57,6 +65,7 @@ jobs: pull-requests: write statuses: read issues: read + checks: read runs-on: ubuntu-latest needs: download-if-workflow-run steps: diff --git a/tools/@aws-cdk/prlint/constants.ts b/tools/@aws-cdk/prlint/constants.ts index 0d81d7dc46133..e0e92881a0e44 100644 --- a/tools/@aws-cdk/prlint/constants.ts +++ b/tools/@aws-cdk/prlint/constants.ts @@ -16,3 +16,14 @@ export enum Exemption { REQUEST_EXEMPTION = 'pr-linter/exemption-requested', CODECOV = "pr-linter/exempt-codecov", } + +const CODECOV_PREFIX = 'codecov/'; + +export const CODECOV_CHECKS = [ + 
`${CODECOV_PREFIX}patch`, + `${CODECOV_PREFIX}patch/packages/aws-cdk`, + `${CODECOV_PREFIX}patch/packages/aws-cdk-lib/core`, + `${CODECOV_PREFIX}project`, + `${CODECOV_PREFIX}project/packages/aws-cdk`, + `${CODECOV_PREFIX}project/packages/aws-cdk-lib/core` +]; \ No newline at end of file diff --git a/tools/@aws-cdk/prlint/github.ts b/tools/@aws-cdk/prlint/github.ts index 17ddc07858147..8e162cba46ae5 100644 --- a/tools/@aws-cdk/prlint/github.ts +++ b/tools/@aws-cdk/prlint/github.ts @@ -1,9 +1,11 @@ import { Endpoints } from '@octokit/types'; import { StatusEvent } from '@octokit/webhooks-definitions/schema'; +import type { components } from '@octokit/openapi-types'; export type GitHubPr = Endpoints['GET /repos/{owner}/{repo}/pulls/{pull_number}']['response']['data']; +export type CheckRun = components['schemas']['check-run']; export interface GitHubComment { id: number; @@ -31,3 +33,33 @@ export interface GitHubLabel { export interface GitHubFile { readonly filename: string; } + + +/** + * Combine all check run conclusions + * + * Returns `success` if they all return a positive result, `failure` if + * one of them failed for some reason, and `waiting` if the result isn't available + * yet. + */ +export function summarizeRunConclusions(conclusions: Array): 'success' | 'failure' | 'waiting' { + for (const concl of conclusions) { + switch (concl) { + case null: + case undefined: + case 'action_required': + return 'waiting'; + + case 'failure': + case 'cancelled': + case 'timed_out': + return 'failure'; + + case 'neutral': + case 'skipped': + case 'success': + break; + } + } + return 'success'; +} \ No newline at end of file diff --git a/tools/@aws-cdk/prlint/index.ts b/tools/@aws-cdk/prlint/index.ts index f79a2ce5ab036..00f502b0fb2f8 100644 --- a/tools/@aws-cdk/prlint/index.ts +++ b/tools/@aws-cdk/prlint/index.ts @@ -1,6 +1,6 @@ import * as github from '@actions/github'; import { Octokit } from '@octokit/rest'; -import { StatusEvent, PullRequestEvent } from '@octokit/webhooks-definitions/schema'; +import { StatusEvent, PullRequestEvent, CheckSuiteEvent } from '@octokit/webhooks-definitions/schema'; import { PullRequestLinter } from './lint'; import { LinterActions } from './linter-base'; import { DEFAULT_LINTER_LOGIN } from './constants'; @@ -25,11 +25,12 @@ async function run() { const number = await determinePrNumber(client); if (!number) { - if (github.context.eventName === 'status') { - console.error(`Could not find PR belonging to status event, but that's not unusual. Skipping.`); + if (['check_suite', 'status'].includes(github.context.eventName)) { + console.error(`Could not find PR belonging to event, but that's not unusual. Skipping.`); process.exit(0); } - throw new Error(`Could not find PR number from event: ${github.context.eventName}`); + + throw new Error(`Could not determine a PR number from either \$PR_NUMBER or \$PR_SHA or ${github.context.eventName}: ${JSON.stringify(github.context.payload)}`); } const prLinter = new PullRequestLinter({ @@ -87,7 +88,12 @@ async function determinePrNumber(client: Octokit): Promise { if (!sha && github.context.eventName === 'status') { sha = (github.context.payload as StatusEvent)?.sha; } + if (!sha && github.context.eventName === 'check_suite') { + // For a check_suite event, take the SHA and try to find a PR for it. 
+ sha = (github.context.payload as CheckSuiteEvent)?.check_suite.head_sha; + } if (!sha) { + return undefined; throw new Error(`Could not determine a SHA from either \$PR_SHA or ${JSON.stringify(github.context.payload)}`); } diff --git a/tools/@aws-cdk/prlint/lint.ts b/tools/@aws-cdk/prlint/lint.ts index c9c3ddd3f7718..b7779ed3794a7 100644 --- a/tools/@aws-cdk/prlint/lint.ts +++ b/tools/@aws-cdk/prlint/lint.ts @@ -1,9 +1,9 @@ import * as path from 'path'; import { findModulePath, moduleStability } from './module'; import { breakingModules } from './parser'; -import { GitHubFile, GitHubPr, Review } from "./github"; +import { CheckRun, GitHubFile, GitHubPr, Review, summarizeRunConclusions } from "./github"; import { TestResult, ValidationCollector } from './results'; -import { CODE_BUILD_CONTEXT, Exemption } from './constants'; +import { CODE_BUILD_CONTEXT, CODECOV_CHECKS, Exemption } from './constants'; import { StatusEvent } from '@octokit/webhooks-definitions/schema'; import { LinterActions, mergeLinterActions, PR_FROM_MAIN_ERROR, PullRequestLinterBase } from './linter-base'; @@ -253,6 +253,28 @@ export class PullRequestLinter extends PullRequestLinterBase { ], }); + if (pr.base.ref === 'main') { + // Only check CodeCov for PRs targeting 'main' + const runs = await this.checkRuns(sha); + const codeCovRuns = CODECOV_CHECKS.map(c => runs[c] as CheckRun | undefined); + + validationCollector.validateRuleSet({ + exemption: () => hasLabel(pr, Exemption.CODECOV), + testRuleSet: [{ + test: () => { + const summary = summarizeRunConclusions(codeCovRuns.map(r => r?.conclusion)); + console.log('CodeCov Summary:', summary); + + switch (summary) { + case 'failure': return TestResult.failure('CodeCov is indicating a drop in code coverage'); + case 'waiting': return TestResult.failure('Still waiting for CodeCov results (make sure the build is passing first)'); + case 'success': return TestResult.success(); + } + }, + }], + }); + } + // We always delete all comments; in the future we will just communicate via reviews. ret.deleteComments = await this.findExistingPRLinterComments(); diff --git a/tools/@aws-cdk/prlint/linter-base.ts b/tools/@aws-cdk/prlint/linter-base.ts index 17bb51a534f21..58b7988c4068e 100644 --- a/tools/@aws-cdk/prlint/linter-base.ts +++ b/tools/@aws-cdk/prlint/linter-base.ts @@ -1,5 +1,5 @@ import { Octokit } from '@octokit/rest'; -import { GitHubComment, GitHubPr, Review } from "./github"; +import { CheckRun, GitHubComment, GitHubPr, Review } from "./github"; export const PR_FROM_MAIN_ERROR = 'Pull requests from `main` branch of a fork cannot be accepted. Please reopen this contribution from another branch on your fork. For more information, see https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md#step-4-pull-request.'; @@ -263,6 +263,33 @@ export class PullRequestLinterBase { } } + /** + * Return the check run results for the given SHA + * + * It *seems* like this only returns completed check runs. That is, in-progress + * runs are not included. 
+ */ + public async checkRuns(sha: string): Promise<{[name: string]: CheckRun}> { + const response = await this.client.paginate(this.client.checks.listForRef, { + owner: this.prParams.owner, + repo: this.prParams.repo, + ref: sha, + }); + + const ret: {[name: string]: CheckRun} = {}; + for (const c of response) { + if (!c.started_at) { + continue; + } + + // Insert if this is the first time seeing this check, or the current DT is newer + if (!ret[c.name] || ret[c.name].started_at!.localeCompare(c.started_at) > 1) { + ret[c.name] = c; + } + } + return ret; + } + /** * Delete a comment */ diff --git a/tools/@aws-cdk/prlint/results.ts b/tools/@aws-cdk/prlint/results.ts index 7784637a2f4a4..bcf9d41b66e3e 100644 --- a/tools/@aws-cdk/prlint/results.ts +++ b/tools/@aws-cdk/prlint/results.ts @@ -8,7 +8,25 @@ import { GitHubFile, GitHubPr } from "./github"; */ export class TestResult { /** - * Create a test result from a potential failure + * Return a successful test result + */ + public static success() { + return new TestResult(); + } + + /** + * Return an unconditionally failing test result + */ + public static failure(message: string) { + const ret = new TestResult(); + ret.assessFailure(true, message); + return ret; + } + + /** + * Create a test result from a POTENTIAL failure + * + * If `failureCondition` is true, this will return a failure, otherwise it will return a success. */ public static fromFailure(failureCondition: boolean, failureMessage: string): TestResult { const ret = new TestResult(); diff --git a/tools/@aws-cdk/prlint/test/lint.test.ts b/tools/@aws-cdk/prlint/test/lint.test.ts index 33f0a0871df70..d26299b3090bf 100644 --- a/tools/@aws-cdk/prlint/test/lint.test.ts +++ b/tools/@aws-cdk/prlint/test/lint.test.ts @@ -1,8 +1,9 @@ import * as path from 'path'; import { GitHubFile, GitHubLabel, GitHubPr } from '../github'; -import { CODE_BUILD_CONTEXT } from '../constants'; +import { CODE_BUILD_CONTEXT, CODECOV_CHECKS } from '../constants'; import { PullRequestLinter } from '../lint'; import { StatusEvent } from '@octokit/webhooks-definitions/schema'; +import { createOctomock, OctoMock } from './octomock'; let mockRemoveLabel = jest.fn(); let mockAddLabel = jest.fn(); @@ -1210,79 +1211,96 @@ describe('for any PR', () => { ], })); }); -}); - -function configureMock(pr: Subset, prFiles?: GitHubFile[], existingComments?: Array<{ id: number, login: string, body: string }>): PullRequestLinter { - const pullsClient = { - get(_props: { _owner: string, _repo: string, _pull_number: number, _user: { _login: string} }) { - return { data: { ...pr, base: { ref: 'main', ...pr?.base }, head: { sha: 'ABC', ...pr?.head }} }; - }, - - listFiles(_props: { _owner: string, _repo: string, _pull_number: number }) { - return { data: prFiles ?? 
[] }; - }, - - createReview(errorMessage: string) { - return { - promise: () => mockCreateReview(errorMessage), - }; - }, - - listReviews: mockListReviews, - dismissReview() {}, + test('missing CodeCov runs lead to a failure', async () => { + // GIVEN + const prLinter = configureMock(ARBITRARY_PR, ARBITRARY_FILES); + prLinter.octomock.checks.listForRef.mockReturnValue({ data: [] }); - updateReview() { }, - - update() {}, - }; + // WHEN + const result = await prLinter.validatePullRequestTarget(); - const issuesClient = { - createComment() {}, - - deleteComment() {}, + // THEN + expect(result.requestChanges?.failures).toContainEqual( + expect.stringContaining('Still waiting for CodeCov results'), + ); + }); - listComments() { - const data = [{ id: 1000, user: { login: 'aws-cdk-automation' }, body: 'The pull request linter fails with the following errors:' }]; - if (existingComments) { - existingComments.forEach(comment => data.push({ id: comment.id, user: { login: comment.login }, body: comment.body })); - } - return { data }; - }, + test('failing CodeCov runs lead to a failure', async () => { + // GIVEN + const prLinter = configureMock(ARBITRARY_PR, ARBITRARY_FILES); + prLinter.octomock.checks.listForRef.mockReturnValue({ + data: CODECOV_CHECKS.map((name, i) => ({ + name, + // All are success except one + conclusion: i == 0 ? 'failure' : 'success', + started_at: '2020-01-01T00:00:00Z', + })), + }); - removeLabel: mockRemoveLabel, - addLabels: mockAddLabel, - }; + // WHEN + const result = await prLinter.validatePullRequestTarget(); - const reposClient = { - listCommitStatusesForRef() { - return { - data: [{ - context: CODE_BUILD_CONTEXT, - state: 'success', - }], - }; - }, - }; + // THEN + expect(result.requestChanges?.failures).toContainEqual( + expect.stringContaining('CodeCov is indicating a drop in code coverage'), + ); + }); +}); - const searchClient = { - issuesAndPullRequests() {}, - }; - return new PullRequestLinter({ +function configureMock(pr: Subset, prFiles?: GitHubFile[], existingComments?: Array<{ id: number, login: string, body: string }>): PullRequestLinter & { octomock: OctoMock } { + const octomock = createOctomock(); + + octomock.pulls.get.mockImplementation((_props: { _owner: string, _repo: string, _pull_number: number, _user: { _login: string} }) => ({ + data: { ...pr, base: { ref: 'main', ...pr?.base }, head: { sha: 'ABC', ...pr?.head }}, + })); + octomock.pulls.listFiles.mockImplementation((_props: { _owner: string, _repo: string, _pull_number: number }) => ({ + data: prFiles ?? [], + })); + octomock.pulls.createReview.mockImplementation((errorMessage) => { + return { + promise: () => mockCreateReview(errorMessage), + }; + }); + octomock.pulls.listReviews = mockListReviews; + octomock.issues.listComments.mockImplementation(() => { + const data = [{ id: 1000, user: { login: 'aws-cdk-automation' }, body: 'The pull request linter fails with the following errors:' }]; + if (existingComments) { + existingComments.forEach(comment => data.push({ id: comment.id, user: { login: comment.login }, body: comment.body })); + } + return { data }; + }); + octomock.issues.addLabels = mockAddLabel; + octomock.issues.removeLabel = mockRemoveLabel; + octomock.repos.listCommitStatusesForRef.mockImplementation(() => ({ + data: [{ + context: CODE_BUILD_CONTEXT, + state: 'success', + }], + })); + + // We need to pretend that all CodeCov checks are passing by default, otherwise + // the linter will complain about these even in tests that aren't testing for this. 
+ octomock.checks.listForRef.mockImplementation(() => ({ + data: CODECOV_CHECKS.map(name => ({ + name, + conclusion: 'success', + started_at: '2020-01-01T00:00:00Z', + })), + })); + + const linter = new PullRequestLinter({ owner: 'aws', repo: 'aws-cdk', number: 1000, linterLogin: 'aws-cdk-automation', // hax hax - client: { - pulls: pullsClient as any, - issues: issuesClient as any, - search: searchClient as any, - repos: reposClient as any, - paginate: (method: any, args: any) => { return method(args).data; }, - } as any, + client: octomock as any, }); + // Stick the octomock on the linter, that will be useful in tests + (linter as any).octomock = octomock; + return linter as any; } /** diff --git a/tools/@aws-cdk/prlint/test/linter-base.test.ts b/tools/@aws-cdk/prlint/test/linter-base.test.ts index 403c8a028cbae..d424e3a3b4662 100644 --- a/tools/@aws-cdk/prlint/test/linter-base.test.ts +++ b/tools/@aws-cdk/prlint/test/linter-base.test.ts @@ -8,32 +8,9 @@ */ import { PullRequestLinterBase } from "../linter-base"; +import { createOctomock } from "./octomock"; -const octomock = { - pulls: { - get: jest.fn(), - listFiles: jest.fn(), - createReview: jest.fn(), - listReviews: jest.fn(), - dismissReview: jest.fn(), - updateReview: jest.fn(), - update: jest.fn(), - }, - issues: { - createComment: jest.fn(), - deleteComment: jest.fn(), - listComments: jest.fn(), - removeLabel: jest.fn(), - addLabels: jest.fn(), - }, - search: { - issuesAndPullRequests: jest.fn(), - }, - repos: { - listCommitStatusesForRef: jest.fn(), - }, - paginate: async (method: any, args: any) => { return (await method(args)).data; }, -}; +const octomock = createOctomock(); const linter = new PullRequestLinterBase({ client: octomock as any, @@ -111,3 +88,27 @@ test('dismissing a review dismisses and changes the text of all previous reviews review_id: 3333, })); }); + +test('checks with duplicate names are combined so the latest wins', async () => { + // GIVEN + octomock.checks.listForRef.mockReturnValue({ + data: [ + { + name: 'some-check', + conclusion: 'success', + started_at: '2025-01-24T02:00:00Z', + }, + { + name: 'some-check', + conclusion: 'failure', + started_at: '2025-01-24T01:00:00Z', + }, + ], + }); + + // WHEN + const runs = await linter.checkRuns('asdf'); + + // THEN + expect(runs['some-check'].conclusion).toBe('success'); +}); \ No newline at end of file diff --git a/tools/@aws-cdk/prlint/test/octomock.ts b/tools/@aws-cdk/prlint/test/octomock.ts new file mode 100644 index 0000000000000..d24e2578be079 --- /dev/null +++ b/tools/@aws-cdk/prlint/test/octomock.ts @@ -0,0 +1,36 @@ + +/** + * Create a mock object for Octokit + */ +export function createOctomock() { + return { + pulls: { + get: jest.fn(), + listFiles: jest.fn(), + createReview: jest.fn(), + listReviews: jest.fn(), + dismissReview: jest.fn(), + updateReview: jest.fn(), + update: jest.fn(), + }, + issues: { + createComment: jest.fn(), + deleteComment: jest.fn(), + listComments: jest.fn(), + removeLabel: jest.fn(), + addLabels: jest.fn(), + }, + search: { + issuesAndPullRequests: jest.fn(), + }, + repos: { + listCommitStatusesForRef: jest.fn(), + }, + checks: { + listForRef: jest.fn(), + }, + paginate: async (method: any, args: any) => { return (await method(args)).data; }, + }; +} + +export type OctoMock = ReturnType; \ No newline at end of file From 5e0f16d5f5782784a7b572caa6531460bb4eed50 Mon Sep 17 00:00:00 2001 From: Kaizen Conroy <36202692+kaizencc@users.noreply.github.com> Date: Fri, 24 Jan 2025 07:40:12 -0500 Subject: [PATCH 8/8] feat(route53): 
throw `ValidationError` instead of untyped errors (#33110) ### Issue `aws-route53*` for #32569 ### Description of changes ValidationErrors everywhere ### Describe any new or updated permissions being added n/a ### Description of how you validated changes Existing tests. Exemptions granted as this is basically a refactor of existing code. ### Checklist - [x] My code adheres to the [CONTRIBUTING GUIDE](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md) and [DESIGN GUIDELINES](https://github.com/aws/aws-cdk/blob/main/docs/DESIGN_GUIDELINES.md) ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/aws-cdk-lib/.eslintrc.js | 7 +++++++ .../lib/website-redirect.ts | 7 ++++--- .../lib/api-gateway-domain-name.ts | 3 ++- .../lib/bucket-website-target.ts | 9 +++++---- .../lib/elastic-beanstalk-environment-target.ts | 7 ++++--- .../aws-route53-targets/lib/route53-record.ts | 5 +++-- .../aws-cdk-lib/aws-route53/lib/geo-location.ts | 8 +++++--- .../aws-cdk-lib/aws-route53/lib/hosted-zone.ts | 13 +++++++------ .../aws-cdk-lib/aws-route53/lib/record-set.ts | 17 +++++++++-------- .../lib/vpc-endpoint-service-domain-name.ts | 4 ++-- 10 files changed, 48 insertions(+), 32 deletions(-) diff --git a/packages/aws-cdk-lib/.eslintrc.js b/packages/aws-cdk-lib/.eslintrc.js index eebbf16821843..49c329dbd1b17 100644 --- a/packages/aws-cdk-lib/.eslintrc.js +++ b/packages/aws-cdk-lib/.eslintrc.js @@ -27,6 +27,13 @@ const enableNoThrowDefaultErrorIn = [ 'aws-ssmquicksetup', 'aws-apigatewayv2-authorizers', 'aws-synthetics', + 'aws-route53', + 'aws-route53-patterns', + 'aws-route53-targets', + 'aws-route53profiles', + 'aws-route53recoverycontrol', + 'aws-route53recoveryreadiness', + 'aws-route53resolver', 'aws-s3-assets', 'aws-s3-deployment', 'aws-s3-notifications', diff --git a/packages/aws-cdk-lib/aws-route53-patterns/lib/website-redirect.ts b/packages/aws-cdk-lib/aws-route53-patterns/lib/website-redirect.ts index aedd43a9f5117..a9ba11d9aca74 100644 --- a/packages/aws-cdk-lib/aws-route53-patterns/lib/website-redirect.ts +++ b/packages/aws-cdk-lib/aws-route53-patterns/lib/website-redirect.ts @@ -5,6 +5,7 @@ import { ARecord, AaaaRecord, IHostedZone, RecordTarget } from '../../aws-route5 import { CloudFrontTarget } from '../../aws-route53-targets'; import { BlockPublicAccess, Bucket, RedirectProtocol } from '../../aws-s3'; import { ArnFormat, RemovalPolicy, Stack, Token, FeatureFlags } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; import { md5hash } from '../../core/lib/helpers-internal'; import { ROUTE53_PATTERNS_USE_CERTIFICATE } from '../../cx-api'; @@ -61,7 +62,7 @@ export class HttpsRedirect extends Construct { if (props.certificate) { const certificateRegion = Stack.of(this).splitArn(props.certificate.certificateArn, ArnFormat.SLASH_RESOURCE_NAME).region; if (!Token.isUnresolved(certificateRegion) && certificateRegion !== 'us-east-1') { - throw new Error(`The certificate must be in the us-east-1 region and the certificate you provided is in ${certificateRegion}.`); + throw new ValidationError(`The certificate must be in the us-east-1 region and the certificate you provided is in ${certificateRegion}.`, this); } } const redirectCert = props.certificate ?? 
this.createCertificate(domainNames, props.zone); @@ -123,10 +124,10 @@ export class HttpsRedirect extends Construct { const stack = Stack.of(this); const parent = stack.node.scope; if (!parent) { - throw new Error(`Stack ${stack.stackId} must be created in the scope of an App or Stage`); + throw new ValidationError(`Stack ${stack.stackId} must be created in the scope of an App or Stage`, this); } if (Token.isUnresolved(stack.region)) { - throw new Error(`When ${ROUTE53_PATTERNS_USE_CERTIFICATE} is enabled, a region must be defined on the Stack`); + throw new ValidationError(`When ${ROUTE53_PATTERNS_USE_CERTIFICATE} is enabled, a region must be defined on the Stack`, this); } if (stack.region !== 'us-east-1') { const stackId = `certificate-redirect-stack-${stack.node.addr}`; diff --git a/packages/aws-cdk-lib/aws-route53-targets/lib/api-gateway-domain-name.ts b/packages/aws-cdk-lib/aws-route53-targets/lib/api-gateway-domain-name.ts index 5e20b107156db..b0ea117dd1a16 100644 --- a/packages/aws-cdk-lib/aws-route53-targets/lib/api-gateway-domain-name.ts +++ b/packages/aws-cdk-lib/aws-route53-targets/lib/api-gateway-domain-name.ts @@ -1,5 +1,6 @@ import * as apig from '../../aws-apigateway'; import * as route53 from '../../aws-route53'; +import { ValidationError } from '../../core/lib/errors'; /** * Defines an API Gateway domain name as the alias target. @@ -28,7 +29,7 @@ export class ApiGatewayDomain implements route53.IAliasRecordTarget { export class ApiGateway extends ApiGatewayDomain { constructor(api: apig.RestApiBase) { if (!api.domainName) { - throw new Error('API does not define a default domain name'); + throw new ValidationError('API does not define a default domain name', api); } super(api.domainName); diff --git a/packages/aws-cdk-lib/aws-route53-targets/lib/bucket-website-target.ts b/packages/aws-cdk-lib/aws-route53-targets/lib/bucket-website-target.ts index fef4aad569cc4..017b4bf59a876 100644 --- a/packages/aws-cdk-lib/aws-route53-targets/lib/bucket-website-target.ts +++ b/packages/aws-cdk-lib/aws-route53-targets/lib/bucket-website-target.ts @@ -2,6 +2,7 @@ import { IAliasRecordTargetProps } from './shared'; import * as route53 from '../../aws-route53'; import * as s3 from '../../aws-s3'; import { Stack, Token } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; import { RegionInfo } from '../../region-info'; /** @@ -10,21 +11,21 @@ import { RegionInfo } from '../../region-info'; export class BucketWebsiteTarget implements route53.IAliasRecordTarget { constructor(private readonly bucket: s3.IBucket, private readonly props?: IAliasRecordTargetProps) {} - public bind(_record: route53.IRecordSet, _zone?: route53.IHostedZone): route53.AliasRecordTargetConfig { + public bind(record: route53.IRecordSet, _zone?: route53.IHostedZone): route53.AliasRecordTargetConfig { const { region } = Stack.of(this.bucket.stack); if (Token.isUnresolved(region)) { - throw new Error([ + throw new ValidationError([ 'Cannot use an S3 record alias in region-agnostic stacks.', 'You must specify a specific region when you define the stack', '(see https://docs.aws.amazon.com/cdk/latest/guide/environments.html)', - ].join(' ')); + ].join(' '), record); } const { s3StaticWebsiteHostedZoneId: hostedZoneId, s3StaticWebsiteEndpoint: dnsName } = RegionInfo.get(region); if (!hostedZoneId || !dnsName) { - throw new Error(`Bucket website target is not supported for the "${region}" region`); + throw new ValidationError(`Bucket website target is not supported for the "${region}" region`, record); 
} return { hostedZoneId, dnsName, evaluateTargetHealth: this.props?.evaluateTargetHealth }; diff --git a/packages/aws-cdk-lib/aws-route53-targets/lib/elastic-beanstalk-environment-target.ts b/packages/aws-cdk-lib/aws-route53-targets/lib/elastic-beanstalk-environment-target.ts index e35ea7b69e8ec..55e671865e711 100644 --- a/packages/aws-cdk-lib/aws-route53-targets/lib/elastic-beanstalk-environment-target.ts +++ b/packages/aws-cdk-lib/aws-route53-targets/lib/elastic-beanstalk-environment-target.ts @@ -1,6 +1,7 @@ import { IAliasRecordTargetProps } from './shared'; import * as route53 from '../../aws-route53'; import * as cdk from '../../core'; +import { ValidationError } from '../../core/lib/errors'; import { RegionInfo } from '../../region-info'; /** @@ -13,9 +14,9 @@ import { RegionInfo } from '../../region-info'; export class ElasticBeanstalkEnvironmentEndpointTarget implements route53.IAliasRecordTarget { constructor( private readonly environmentEndpoint: string, private readonly props?: IAliasRecordTargetProps) {} - public bind(_record: route53.IRecordSet, _zone?: route53.IHostedZone): route53.AliasRecordTargetConfig { + public bind(record: route53.IRecordSet, _zone?: route53.IHostedZone): route53.AliasRecordTargetConfig { if (cdk.Token.isUnresolved(this.environmentEndpoint)) { - throw new Error('Cannot use an EBS alias as `environmentEndpoint`. You must find your EBS environment endpoint via the AWS console. See the Elastic Beanstalk developer guide: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html'); + throw new ValidationError('Cannot use an EBS alias as `environmentEndpoint`. You must find your EBS environment endpoint via the AWS console. See the Elastic Beanstalk developer guide: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html', record); } const dnsName = this.environmentEndpoint; @@ -25,7 +26,7 @@ export class ElasticBeanstalkEnvironmentEndpointTarget implements route53.IAlias const { ebsEnvEndpointHostedZoneId: hostedZoneId } = RegionInfo.get(region); if (!hostedZoneId || !dnsName) { - throw new Error(`Elastic Beanstalk environment target is not supported for the "${region}" region.`); + throw new ValidationError(`Elastic Beanstalk environment target is not supported for the "${region}" region.`, record); } return { diff --git a/packages/aws-cdk-lib/aws-route53-targets/lib/route53-record.ts b/packages/aws-cdk-lib/aws-route53-targets/lib/route53-record.ts index 1235a04ee0d61..0c796f8562468 100644 --- a/packages/aws-cdk-lib/aws-route53-targets/lib/route53-record.ts +++ b/packages/aws-cdk-lib/aws-route53-targets/lib/route53-record.ts @@ -1,4 +1,5 @@ import * as route53 from '../../aws-route53'; +import { ValidationError } from '../../core/lib/errors'; /** * Use another Route 53 record as an alias record target @@ -7,9 +8,9 @@ export class Route53RecordTarget implements route53.IAliasRecordTarget { constructor(private readonly record: route53.IRecordSet) { } - public bind(_record: route53.IRecordSet, zone?: route53.IHostedZone): route53.AliasRecordTargetConfig { + public bind(record: route53.IRecordSet, zone?: route53.IHostedZone): route53.AliasRecordTargetConfig { if (!zone) { // zone introduced as optional to avoid a breaking change - throw new Error('Cannot bind to record without a zone'); + throw new ValidationError('Cannot bind to record without a zone', record); } return { dnsName: this.record.domainName, diff --git a/packages/aws-cdk-lib/aws-route53/lib/geo-location.ts b/packages/aws-cdk-lib/aws-route53/lib/geo-location.ts 
index 8fee035dc8d45..adfcce1bae807 100644 --- a/packages/aws-cdk-lib/aws-route53/lib/geo-location.ts +++ b/packages/aws-cdk-lib/aws-route53/lib/geo-location.ts @@ -1,3 +1,5 @@ +import { UnscopedValidationError } from '../../core/lib/errors'; + /** * Routing based on geographical location. */ @@ -49,21 +51,21 @@ export class GeoLocation { private static validateCountry(country: string) { if (!GeoLocation.COUNTRY_REGEX.test(country)) { // eslint-disable-next-line max-len - throw new Error(`Invalid country format for country: ${country}, country should be two-letter and uppercase country ISO 3166-1-alpha-2 code`); + throw new UnscopedValidationError(`Invalid country format for country: ${country}, country should be two-letter and uppercase country ISO 3166-1-alpha-2 code`); } } private static validateCountryForSubdivision(country: string) { if (!GeoLocation.COUNTRY_FOR_SUBDIVISION_REGEX.test(country)) { // eslint-disable-next-line max-len - throw new Error(`Invalid country for subdivisions geolocation: ${country}, only UA (Ukraine) and US (United states) are supported`); + throw new UnscopedValidationError(`Invalid country for subdivisions geolocation: ${country}, only UA (Ukraine) and US (United states) are supported`); } } private static validateSubDivision(subDivision: string) { if (!GeoLocation.SUBDIVISION_REGEX.test(subDivision)) { // eslint-disable-next-line max-len - throw new Error(`Invalid subdivision format for subdivision: ${subDivision}, subdivision should be alphanumeric and between 1 and 3 characters`); + throw new UnscopedValidationError(`Invalid subdivision format for subdivision: ${subDivision}, subdivision should be alphanumeric and between 1 and 3 characters`); } } diff --git a/packages/aws-cdk-lib/aws-route53/lib/hosted-zone.ts b/packages/aws-cdk-lib/aws-route53/lib/hosted-zone.ts index 8005be5656dc8..9722fffd07da4 100644 --- a/packages/aws-cdk-lib/aws-route53/lib/hosted-zone.ts +++ b/packages/aws-cdk-lib/aws-route53/lib/hosted-zone.ts @@ -10,6 +10,7 @@ import * as iam from '../../aws-iam'; import * as kms from '../../aws-kms'; import * as cxschema from '../../cloud-assembly-schema'; import { ContextProvider, Duration, Lazy, Resource, Stack } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; /** * Common properties to create a Route 53 hosted zone @@ -105,7 +106,7 @@ export class HostedZone extends Resource implements IHostedZone { class Import extends Resource implements IHostedZone { public readonly hostedZoneId = hostedZoneId; public get zoneName(): string { - throw new Error('Cannot reference `zoneName` when using `HostedZone.fromHostedZoneId()`. A construct consuming this hosted zone may be trying to reference its `zoneName`. If this is the case, use `fromHostedZoneAttributes()` or `fromLookup()` instead.'); + throw new ValidationError('Cannot reference `zoneName` when using `HostedZone.fromHostedZoneId()`. A construct consuming this hosted zone may be trying to reference its `zoneName`. 
If this is the case, use `fromHostedZoneAttributes()` or `fromLookup()` instead.', this); } public get hostedZoneArn(): string { return makeHostedZoneArn(this, this.hostedZoneId); @@ -152,7 +153,7 @@ export class HostedZone extends Resource implements IHostedZone { */ public static fromLookup(scope: Construct, id: string, query: HostedZoneProviderProps): IHostedZone { if (!query.domainName) { - throw new Error('Cannot use undefined value for attribute `domainName`'); + throw new ValidationError('Cannot use undefined value for attribute `domainName`', scope); } const DEFAULT_HOSTED_ZONE: HostedZoneContextResponse = { @@ -242,7 +243,7 @@ export class HostedZone extends Resource implements IHostedZone { */ public enableDnssec(options: ZoneSigningOptions): IKeySigningKey { if (this.keySigningKey) { - throw new Error('DNSSEC is already enabled for this hosted zone'); + throw new ValidationError('DNSSEC is already enabled for this hosted zone', this); } this.keySigningKey = new KeySigningKey(this, 'KeySigningKey', { hostedZone: this, @@ -323,7 +324,7 @@ export class PublicHostedZone extends HostedZone implements IPublicHostedZone { public static fromPublicHostedZoneId(scope: Construct, id: string, publicHostedZoneId: string): IPublicHostedZone { class Import extends Resource implements IPublicHostedZone { public readonly hostedZoneId = publicHostedZoneId; - public get zoneName(): string { throw new Error('Cannot reference `zoneName` when using `PublicHostedZone.fromPublicHostedZoneId()`. A construct consuming this hosted zone may be trying to reference its `zoneName`. If this is the case, use `fromPublicHostedZoneAttributes()` instead'); } + public get zoneName(): string { throw new ValidationError('Cannot reference `zoneName` when using `PublicHostedZone.fromPublicHostedZoneId()`. A construct consuming this hosted zone may be trying to reference its `zoneName`. If this is the case, use `fromPublicHostedZoneAttributes()` instead', this); } public get hostedZoneArn(): string { return makeHostedZoneArn(this, this.hostedZoneId); } @@ -404,7 +405,7 @@ export class PublicHostedZone extends HostedZone implements IPublicHostedZone { } public addVpc(_vpc: ec2.IVpc) { - throw new Error('Cannot associate public hosted zones with a VPC'); + throw new ValidationError('Cannot associate public hosted zones with a VPC', this); } /** @@ -483,7 +484,7 @@ export class PrivateHostedZone extends HostedZone implements IPrivateHostedZone public static fromPrivateHostedZoneId(scope: Construct, id: string, privateHostedZoneId: string): IPrivateHostedZone { class Import extends Resource implements IPrivateHostedZone { public readonly hostedZoneId = privateHostedZoneId; - public get zoneName(): string { throw new Error('Cannot reference `zoneName` when using `PrivateHostedZone.fromPrivateHostedZoneId()`. A construct consuming this hosted zone may be trying to reference its `zoneName`'); } + public get zoneName(): string { throw new ValidationError('Cannot reference `zoneName` when using `PrivateHostedZone.fromPrivateHostedZoneId()`. 
A construct consuming this hosted zone may be trying to reference its `zoneName`', this); } public get hostedZoneArn(): string { return makeHostedZoneArn(this, this.hostedZoneId); } diff --git a/packages/aws-cdk-lib/aws-route53/lib/record-set.ts b/packages/aws-cdk-lib/aws-route53/lib/record-set.ts index 7c90833a95f6f..7780fb207a771 100644 --- a/packages/aws-cdk-lib/aws-route53/lib/record-set.ts +++ b/packages/aws-cdk-lib/aws-route53/lib/record-set.ts @@ -7,6 +7,7 @@ import { CfnRecordSet } from './route53.generated'; import { determineFullyQualifiedDomainName } from './util'; import * as iam from '../../aws-iam'; import { CustomResource, Duration, IResource, Names, RemovalPolicy, Resource, Token } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; import { CrossAccountZoneDelegationProvider } from '../../custom-resource-handlers/dist/aws-route53/cross-account-zone-delegation-provider.generated'; import { DeleteExistingRecordSetProvider } from '../../custom-resource-handlers/dist/aws-route53/delete-existing-record-set-provider.generated'; @@ -340,16 +341,16 @@ export class RecordSet extends Resource implements IRecordSet { super(scope, id); if (props.weight && !Token.isUnresolved(props.weight) && (props.weight < 0 || props.weight > 255)) { - throw new Error(`weight must be between 0 and 255 inclusive, got: ${props.weight}`); + throw new ValidationError(`weight must be between 0 and 255 inclusive, got: ${props.weight}`, this); } if (props.setIdentifier && (props.setIdentifier.length < 1 || props.setIdentifier.length > 128)) { - throw new Error(`setIdentifier must be between 1 and 128 characters long, got: ${props.setIdentifier.length}`); + throw new ValidationError(`setIdentifier must be between 1 and 128 characters long, got: ${props.setIdentifier.length}`, this); } if (props.setIdentifier && props.weight === undefined && !props.geoLocation && !props.region && !props.multiValueAnswer) { - throw new Error('setIdentifier can only be specified for non-simple routing policies'); + throw new ValidationError('setIdentifier can only be specified for non-simple routing policies', this); } if (props.multiValueAnswer && props.target.aliasTarget) { - throw new Error('multiValueAnswer cannot be specified for alias record'); + throw new ValidationError('multiValueAnswer cannot be specified for alias record', this); } const nonSimpleRoutingPolicies = [ @@ -359,7 +360,7 @@ export class RecordSet extends Resource implements IRecordSet { props.multiValueAnswer, ].filter((variable) => variable !== undefined).length; if (nonSimpleRoutingPolicies > 1) { - throw new Error('Only one of region, weight, multiValueAnswer or geoLocation can be defined'); + throw new ValidationError('Only one of region, weight, multiValueAnswer or geoLocation can be defined', this); } this.geoLocation = props.geoLocation; @@ -546,9 +547,9 @@ class ARecordAsAliasTarget implements IAliasRecordTarget { constructor(private readonly aRrecordAttrs: ARecordAttrs) { } - public bind(_record: IRecordSet, _zone?: IHostedZone | undefined): AliasRecordTargetConfig { - if (!_zone) { - throw new Error('Cannot bind to record without a zone'); + public bind(record: IRecordSet, zone?: IHostedZone | undefined): AliasRecordTargetConfig { + if (!zone) { + throw new ValidationError('Cannot bind to record without a zone', record); } return { dnsName: this.aRrecordAttrs.targetDNS, diff --git a/packages/aws-cdk-lib/aws-route53/lib/vpc-endpoint-service-domain-name.ts 
b/packages/aws-cdk-lib/aws-route53/lib/vpc-endpoint-service-domain-name.ts index b3854f1570d99..4b54b5a383150 100644 --- a/packages/aws-cdk-lib/aws-route53/lib/vpc-endpoint-service-domain-name.ts +++ b/packages/aws-cdk-lib/aws-route53/lib/vpc-endpoint-service-domain-name.ts @@ -1,6 +1,7 @@ import { Construct } from 'constructs'; import { IVpcEndpointService } from '../../aws-ec2'; import { Fn, Names, Stack } from '../../core'; +import { ValidationError } from '../../core/lib/errors'; import { md5hash } from '../../core/lib/helpers-internal'; import { AwsCustomResource, AwsCustomResourcePolicy, PhysicalResourceId } from '../../custom-resources'; import { IPublicHostedZone, TxtRecord } from '../lib'; @@ -80,8 +81,7 @@ export class VpcEndpointServiceDomainName extends Construct { const serviceUniqueId = Names.nodeUniqueId(props.endpointService.node); if (serviceUniqueId in VpcEndpointServiceDomainName.endpointServicesMap) { const endpoint = VpcEndpointServiceDomainName.endpointServicesMap[serviceUniqueId]; - throw new Error( - `Cannot create a VpcEndpointServiceDomainName for service ${serviceUniqueId}, another VpcEndpointServiceDomainName (${endpoint}) is already associated with it`); + throw new ValidationError(`Cannot create a VpcEndpointServiceDomainName for service ${serviceUniqueId}, another VpcEndpointServiceDomainName (${endpoint}) is already associated with it`, this); } }
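This patch applies one consistent error-handling convention for the #32569 effort: code running inside a construct throws `ValidationError` and passes the construct (usually `this`, or the record/scope handed to `bind()`) as the second argument, while static validators and free functions with no construct in reach throw `UnscopedValidationError`. Below is a minimal sketch of that convention only; the construct, props, and helper are hypothetical, and the `../../core/lib/errors` import path simply mirrors the relative imports in the diffs above (the real path depends on where a file sits inside `aws-cdk-lib`).

```ts
import { Construct } from 'constructs';
// Import path mirrors the relative imports used in the diffs above; adjust per module location.
import { ValidationError, UnscopedValidationError } from '../../core/lib/errors';

// Free function or static validator: no construct in scope, so fall back to the unscoped variant.
function validateWeight(weight: number): void {
  if (weight < 0 || weight > 255) {
    throw new UnscopedValidationError(`weight must be between 0 and 255 inclusive, got: ${weight}`);
  }
}

// Hypothetical construct: validation failures are scoped to `this`, so the error can be
// attributed to the offending construct instead of surfacing as a bare, untyped Error.
export interface ExampleRecordProps {
  readonly weight?: number;
  readonly setIdentifier?: string;
}

export class ExampleRecord extends Construct {
  constructor(scope: Construct, id: string, props: ExampleRecordProps = {}) {
    super(scope, id);

    if (props.setIdentifier && props.weight === undefined) {
      throw new ValidationError('setIdentifier can only be specified for non-simple routing policies', this);
    }
    if (props.weight !== undefined) {
      validateWeight(props.weight);
    }
  }
}
```

In the patch itself the error messages are left unchanged; only the error type (and, where a construct is available, the scope argument) differs, which is why the change is validated against the existing test suite.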