feat(servicecatalog): Add Product Stack Asset Support #22143
Changes from all commits
86b14f6
e245852
37d12c3
0b84503
@@ -1,4 +1,8 @@
import { BlockPublicAccess, BucketEncryption, IBucket } from '@aws-cdk/aws-s3';
import * as cdk from '@aws-cdk/core';
import { ProductStack } from '../product-stack';
import { ProductStackAssetBucket } from '../product-stack-asset-bucket';
import { hashValues } from './util';

/**
 * Deployment environment for an AWS Service Catalog product stack.

@@ -7,6 +11,12 @@ import * as cdk from '@aws-cdk/core';
 */
export class ProductStackSynthesizer extends cdk.StackSynthesizer {
  private stack?: cdk.Stack;
  private assetBucket?: IBucket;

  constructor(assetBucket?: IBucket) {
    super();
    this.assetBucket = assetBucket;
  }

  public bind(stack: cdk.Stack): void {
    if (this.stack !== undefined) {

@@ -15,8 +25,31 @@ export class ProductStackSynthesizer extends cdk.StackSynthesizer {
    this.stack = stack;
  }

  public addFileAsset(_asset: cdk.FileAssetSource): cdk.FileAssetLocation {
    throw new Error('Service Catalog Product Stacks cannot use Assets');
  public addFileAsset(asset: cdk.FileAssetSource): cdk.FileAssetLocation {
    if (!this.stack) {
      throw new Error('You must call bindStack() first');
Review comment: this message will reach the user. Do you mean to tell your user to call …?
Reply: This is the error we throw in our …
    }

    if (!this.assetBucket) {
      const parentStack = (this.stack as ProductStack)._getParentStack();
      this.assetBucket = new ProductStackAssetBucket(parentStack, `ProductStackAssetBucket${hashValues(this.stack.stackName)}`, {
        bucketName: (this.stack as ProductStack)._generateBucketName(),
        blockPublicAccess: BlockPublicAccess.BLOCK_ALL,
        encryption: BucketEncryption.KMS,
        removalPolicy: cdk.RemovalPolicy.RETAIN,
      });
    }

    (this.stack as ProductStack)._setAssetBucket(this.assetBucket);
    (this.assetBucket as ProductStackAssetBucket)._addAsset(asset);

    const bucketName = this.assetBucket.bucketName;
    const s3Filename = asset.fileName?.split('.')[1] + '.zip';
    const objectKey = `${s3Filename}`;
    const s3ObjectUrl = `s3://${bucketName}/${objectKey}`;
    const httpUrl = `https://s3.${bucketName}/${objectKey}`;

    return { bucketName, objectKey, httpUrl, s3ObjectUrl, s3Url: httpUrl };
  }

  public addDockerImageAsset(_asset: cdk.DockerImageAssetSource): cdk.DockerImageAssetLocation {
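For context on the object key computed in addFileAsset above: the key is derived from the hash segment of the staged asset's file name. A minimal illustration with a made-up, shortened hash (not taken from this PR):

```ts
// Staged file assets in the cloud assembly are named `asset.<source-hash>.zip`.
const fileName = 'asset.3be8ad230b47f2.zip';        // hypothetical, shortened hash
const objectKey = fileName.split('.')[1] + '.zip';  // -> '3be8ad230b47f2.zip'
// The asset is then addressed as s3://<asset-bucket-name>/3be8ad230b47f2.zip
```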
@@ -0,0 +1,59 @@
import { Bucket, BucketProps } from '@aws-cdk/aws-s3';
import { BucketDeployment, ISource, Source } from '@aws-cdk/aws-s3-deployment';
import * as cdk from '@aws-cdk/core';
import { Construct, IConstruct } from 'constructs';

/**
 * Product stack asset bucket props.
 */
export interface ProductStackAssetBucketProps extends BucketProps {
}

/**
 * A Service Catalog product stack asset bucket, which is similar in form to an Amazon S3 bucket.
 * You can store multiple product stack assets and collectively deploy them to S3.
 */
export class ProductStackAssetBucket extends Bucket {
  private readonly assets: ISource[];

  constructor(scope: Construct, id: string, props: ProductStackAssetBucketProps = {}) {
    super(scope, id, props);

    if (props.bucketName == undefined) {
      throw new Error('BucketName must be defined for assetBucket');
    }

    this.assets = [];

    cdk.Aspects.of(this).add({
Review comment: what is the reasoning behind using Aspects here?
Reply: It can be done without Aspects I believe, but Aspects fits the use case and allows us to cleanly implement it, as well as abstract the additional code into the new ProductStackAssetBucket construct.
      visit(c: IConstruct) {
        if (c instanceof ProductStackAssetBucket) {
          c.deployAssets();
        };
      },
    });
  }

  /**
   * Assets are prepared for bulk deployment to S3.
   * @internal
   */
  public _addAsset(asset: cdk.FileAssetSource): void {
    const assetPath = './cdk.out/' + asset.fileName;
    this.assets.push(Source.asset(assetPath));
  }

  /**
   * Deploy all assets to S3.
   */
  private deployAssets() {
    if (this.assets.length > 0) {
      new BucketDeployment(this, 'AssetsBucketDeployment', {
        sources: this.assets,
        destinationBucket: this,
        extract: false,
        prune: false,
      });
    }
  }
}
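Before the tests below, a usage sketch of the new construct, modeled on the integration test that follows; the bucket name, identifiers, and asset path are illustrative, not prescribed by this PR:

```ts
import * as path from 'path';
import * as s3_assets from '@aws-cdk/aws-s3-assets';
import * as cdk from '@aws-cdk/core';
import * as servicecatalog from '@aws-cdk/aws-servicecatalog';

const app = new cdk.App();
const stack = new cdk.Stack(app, 'ProductStack');

// Bucket that collects the file assets referenced by product stacks.
const assetBucket = new servicecatalog.ProductStackAssetBucket(stack, 'AssetBucket', {
  bucketName: 'my-product-stack-assets-123456789012-us-east-1', // bucketName is required here
});

// Product stack whose assets are redirected into the bucket above via the `assetBucket` prop.
class AssetProductStack extends servicecatalog.ProductStack {
  constructor(scope: cdk.Stack, id: string) {
    super(scope, id, { assetBucket });
    new s3_assets.Asset(this, 'TemplateAsset', {
      path: path.join(__dirname, 'my-asset.zip'), // hypothetical local file
    });
  }
}
```

The product itself would then reference such a stack through `CloudFormationTemplate.fromProductStack(...)`, exactly as the integration test below does.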
@@ -1,8 +1,9 @@
import * as path from 'path';
Review comment: This integration test only tests half of the scenario. It confirms that we can publish to the bucket.
Reply: The same can be said about any of the other existing integration tests. There is validation in Service Catalog that checks the template before it can be created, but it can't check everything. We can't provision the product in CDK code, and even then, trying to provision it would probably fall in the scope of a provision-product construct.
Review comment: This is different though than existing functionality. In this PR we are adding functionality outside of Service Catalog. This PR is completely useless if the consumer cannot access the assets in the bucket, and we have no test to assert that.
Reply: Hey Cory, I refactored ProductStackAssetBucket to extend s3.Bucket as suggested, as well as implemented the other comments.
import * as s3_assets from '@aws-cdk/aws-s3-assets';
import * as sns from '@aws-cdk/aws-sns';
import * as cdk from '@aws-cdk/core';
import * as servicecatalog from '../lib';
import { ProductStackHistory } from '../lib';
import { ProductStackAssetBucket, ProductStackHistory, ProductStackProps } from '../lib';

const app = new cdk.App();
const stack = new cdk.Stack(app, 'integ-servicecatalog-product');

@@ -15,12 +16,28 @@ class TestProductStack extends servicecatalog.ProductStack {
  }
}

class TestAssetProductStack extends servicecatalog.ProductStack {
  constructor(scope: any, id: string, props?: ProductStackProps) {
    super(scope, id, props);

    new s3_assets.Asset(this, 'testAsset', {
      path: path.join(__dirname, 'products.template.zip'),
    });
  }
}

const productStackHistory = new ProductStackHistory(stack, 'ProductStackHistory', {
  productStack: new TestProductStack(stack, 'SNSTopicProduct3'),
  currentVersionName: 'v1',
  currentVersionLocked: true,
});

const testAssetBucket = new ProductStackAssetBucket(stack, 'TestAssetBucket', {
  bucketName: 'product-stack-asset-bucket-12345678-test-region',
  removalPolicy: cdk.RemovalPolicy.DESTROY,
  autoDeleteObjects: true,
});

const product = new servicecatalog.CloudFormationProduct(stack, 'TestProduct', {
  productName: 'testProduct',
  owner: 'testOwner',

@@ -42,6 +59,12 @@ const product = new servicecatalog.CloudFormationProduct(stack, 'TestProduct', {
    {
      cloudFormationTemplate: servicecatalog.CloudFormationTemplate.fromProductStack(new TestProductStack(stack, 'SNSTopicProduct2')),
    },
    {
      validateTemplate: false,
      cloudFormationTemplate: servicecatalog.CloudFormationTemplate.fromProductStack(new TestAssetProductStack(stack, 'S3AssetProduct', {
        assetBucket: testAssetBucket,
      })),
    },
    productStackHistory.currentVersion(),
  ],
});
@@ -0,0 +1,67 @@
import { Template } from '@aws-cdk/assertions';
import * as cdk from '@aws-cdk/core';
import { FileAssetSource } from '@aws-cdk/core';
import { ProductStackAssetBucket } from '../lib';

describe('ProductStackAssetBucket', () => {
  let app: cdk.App;
  let stack: cdk.Stack;

  beforeEach(() => {
    app = new cdk.App();
    stack = new cdk.Stack(app, 'Stack', {
      env: { account: '12345678', region: 'test-region' },
    });
  });

  test('default ProductStackAssetBucket creation', () => {
    // WHEN
    new ProductStackAssetBucket(stack, 'MyProductStackAssetBucket', {
      bucketName: 'test-asset-bucket',
    });

    // THEN
    Template.fromStack(stack).hasResourceProperties('AWS::S3::Bucket', {
      BucketName: 'test-asset-bucket',
    });
  }),

  test('default ProductStackAssetBucket creation missing bucketname', () => {
    // WHEN
    expect(() => {
      new ProductStackAssetBucket(stack, 'MyProductStackAssetBucket');
    }).toThrow('BucketName must be defined for assetBucket');

    // THEN
    Template.fromStack(stack).hasResourceProperties('AWS::S3::Bucket', {});
  }),

  test('ProductStackAssetBucket without assets avoids bucket deployment', () => {
    // WHEN
    new ProductStackAssetBucket(stack, 'MyProductStackAssetBucket', {
      bucketName: 'test-asset-bucket',
    });

    // THEN
    Template.fromStack(stack).resourceCountIs('Custom::CDKBucketDeployment', 0);
  }),

  test('ProductStackAssetBucket with assets creates bucket deployment', () => {
    // GIVEN
    const assetBucket = new ProductStackAssetBucket(stack, 'MyProductStackAssetBucket', {
      bucketName: 'test-asset-bucket',
    });

    const asset = {
      packaging: 'zip',
      sourceHash: '3be8ad230b47f23554e7098c40e6e4f58ffc7c0cdddbf0da8c8cc105d6d25f2d',
      fileName: '../test/cdk.out/asset.3be8ad230b47f23554e7098c40e6e4f58ffc7c0cdddbf0da8c8cc105d6d25f2d.zip',
    } as FileAssetSource;

    // WHEN
    assetBucket._addAsset(asset);

    // THEN
    Template.fromStack(stack).resourceCountIs('Custom::CDKBucketDeployment', 1);
  });
});
@@ -0,0 +1 @@
export declare function handler(event: AWSLambda.CloudFormationCustomResourceEvent): Promise<void>;
@@ -0,0 +1,82 @@ | ||
// eslint-disable-next-line import/no-extraneous-dependencies | ||
import { S3 } from 'aws-sdk'; | ||
|
||
const AUTO_DELETE_OBJECTS_TAG = 'aws-cdk:auto-delete-objects'; | ||
|
||
const s3 = new S3(); | ||
|
||
export async function handler(event: AWSLambda.CloudFormationCustomResourceEvent) { | ||
switch (event.RequestType) { | ||
case 'Create': | ||
return; | ||
case 'Update': | ||
return onUpdate(event); | ||
case 'Delete': | ||
return onDelete(event.ResourceProperties?.BucketName); | ||
} | ||
} | ||
|
||
async function onUpdate(event: AWSLambda.CloudFormationCustomResourceEvent) { | ||
const updateEvent = event as AWSLambda.CloudFormationCustomResourceUpdateEvent; | ||
const oldBucketName = updateEvent.OldResourceProperties?.BucketName; | ||
const newBucketName = updateEvent.ResourceProperties?.BucketName; | ||
const bucketNameHasChanged = newBucketName != null && oldBucketName != null && newBucketName !== oldBucketName; | ||
|
||
/* If the name of the bucket has changed, CloudFormation will try to delete the bucket | ||
and create a new one with the new name. So we have to delete the contents of the | ||
bucket so that this operation does not fail. */ | ||
if (bucketNameHasChanged) { | ||
return onDelete(oldBucketName); | ||
} | ||
} | ||
|
||
/** | ||
* Recursively delete all items in the bucket | ||
* | ||
* @param bucketName the bucket name | ||
*/ | ||
async function emptyBucket(bucketName: string) { | ||
const listedObjects = await s3.listObjectVersions({ Bucket: bucketName }).promise(); | ||
const contents = [...listedObjects.Versions ?? [], ...listedObjects.DeleteMarkers ?? []]; | ||
if (contents.length === 0) { | ||
return; | ||
} | ||
|
||
const records = contents.map((record: any) => ({ Key: record.Key, VersionId: record.VersionId })); | ||
await s3.deleteObjects({ Bucket: bucketName, Delete: { Objects: records } }).promise(); | ||
|
||
if (listedObjects?.IsTruncated) { | ||
await emptyBucket(bucketName); | ||
} | ||
} | ||
|
||
async function onDelete(bucketName?: string) { | ||
if (!bucketName) { | ||
throw new Error('No BucketName was provided.'); | ||
} | ||
if (!await isBucketTaggedForDeletion(bucketName)) { | ||
process.stdout.write(`Bucket does not have '${AUTO_DELETE_OBJECTS_TAG}' tag, skipping cleaning.\n`); | ||
return; | ||
} | ||
try { | ||
await emptyBucket(bucketName); | ||
} catch (e) { | ||
if (e.code !== 'NoSuchBucket') { | ||
throw e; | ||
} | ||
// Bucket doesn't exist. Ignoring | ||
} | ||
} | ||
|
||
/** | ||
* The bucket will only be tagged for deletion if it's being deleted in the same | ||
* deployment as this Custom Resource. | ||
* | ||
* If the Custom Resource is ever deleted before the bucket, it must be because | ||
* `autoDeleteObjects` has been switched to false, in which case the tag would have | ||
* been removed before we get to this Delete event. | ||
*/ | ||
async function isBucketTaggedForDeletion(bucketName: string) { | ||
const response = await s3.getBucketTagging({ Bucket: bucketName }).promise(); | ||
return response.TagSet.some(tag => tag.Key === AUTO_DELETE_OBJECTS_TAG && tag.Value === 'true'); | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,309 @@ | ||
import contextlib | ||
import json | ||
import logging | ||
import os | ||
import shutil | ||
import subprocess | ||
import tempfile | ||
from urllib.request import Request, urlopen | ||
from uuid import uuid4 | ||
from zipfile import ZipFile | ||
|
||
import boto3 | ||
|
||
logger = logging.getLogger() | ||
logger.setLevel(logging.INFO) | ||
|
||
cloudfront = boto3.client('cloudfront') | ||
s3 = boto3.client('s3') | ||
|
||
CFN_SUCCESS = "SUCCESS" | ||
CFN_FAILED = "FAILED" | ||
ENV_KEY_MOUNT_PATH = "MOUNT_PATH" | ||
ENV_KEY_SKIP_CLEANUP = "SKIP_CLEANUP" | ||
|
||
CUSTOM_RESOURCE_OWNER_TAG = "aws-cdk:cr-owned" | ||
|
||
def handler(event, context): | ||
|
||
def cfn_error(message=None): | ||
logger.error("| cfn_error: %s" % message) | ||
cfn_send(event, context, CFN_FAILED, reason=message) | ||
|
||
try: | ||
# We are not logging ResponseURL as this is a pre-signed S3 URL, and could be used to tamper | ||
# with the response CloudFormation sees from this Custom Resource execution. | ||
logger.info({ key:value for (key, value) in event.items() if key != 'ResponseURL'}) | ||
|
||
# cloudformation request type (create/update/delete) | ||
request_type = event['RequestType'] | ||
|
||
# extract resource properties | ||
props = event['ResourceProperties'] | ||
old_props = event.get('OldResourceProperties', {}) | ||
physical_id = event.get('PhysicalResourceId', None) | ||
|
||
try: | ||
source_bucket_names = props['SourceBucketNames'] | ||
source_object_keys = props['SourceObjectKeys'] | ||
source_markers = props.get('SourceMarkers', None) | ||
dest_bucket_name = props['DestinationBucketName'] | ||
dest_bucket_prefix = props.get('DestinationBucketKeyPrefix', '') | ||
extract = props.get('Extract', 'true') == 'true' | ||
retain_on_delete = props.get('RetainOnDelete', "true") == "true" | ||
distribution_id = props.get('DistributionId', '') | ||
user_metadata = props.get('UserMetadata', {}) | ||
system_metadata = props.get('SystemMetadata', {}) | ||
prune = props.get('Prune', 'true').lower() == 'true' | ||
exclude = props.get('Exclude', []) | ||
include = props.get('Include', []) | ||
|
||
# backwards compatibility - if "SourceMarkers" is not specified, | ||
# assume all sources have an empty marker map | ||
if source_markers is None: | ||
source_markers = [{} for i in range(len(source_bucket_names))] | ||
|
||
default_distribution_path = dest_bucket_prefix | ||
if not default_distribution_path.endswith("/"): | ||
default_distribution_path += "/" | ||
if not default_distribution_path.startswith("/"): | ||
default_distribution_path = "/" + default_distribution_path | ||
default_distribution_path += "*" | ||
|
||
distribution_paths = props.get('DistributionPaths', [default_distribution_path]) | ||
except KeyError as e: | ||
cfn_error("missing request resource property %s. props: %s" % (str(e), props)) | ||
return | ||
|
||
# treat "/" as if no prefix was specified | ||
if dest_bucket_prefix == "/": | ||
dest_bucket_prefix = "" | ||
|
||
s3_source_zips = list(map(lambda name, key: "s3://%s/%s" % (name, key), source_bucket_names, source_object_keys)) | ||
s3_dest = "s3://%s/%s" % (dest_bucket_name, dest_bucket_prefix) | ||
old_s3_dest = "s3://%s/%s" % (old_props.get("DestinationBucketName", ""), old_props.get("DestinationBucketKeyPrefix", "")) | ||
|
||
|
||
# obviously this is not a valid destination; it means no old destination was configured | ||
if old_s3_dest == "s3:///": | ||
old_s3_dest = None | ||
|
||
logger.info("| s3_dest: %s" % s3_dest) | ||
logger.info("| old_s3_dest: %s" % old_s3_dest) | ||
|
||
# if we are creating a new resource, allocate a physical id for it | ||
# otherwise, we expect physical id to be relayed by cloudformation | ||
if request_type == "Create": | ||
physical_id = "aws.cdk.s3deployment.%s" % str(uuid4()) | ||
else: | ||
if not physical_id: | ||
cfn_error("invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type) | ||
return | ||
|
||
# delete or create/update (only if "retain_on_delete" is false) | ||
if request_type == "Delete" and not retain_on_delete: | ||
if not bucket_owned(dest_bucket_name, dest_bucket_prefix): | ||
aws_command("s3", "rm", s3_dest, "--recursive") | ||
|
||
# if we are updating without retention and the destination changed, delete first | ||
if request_type == "Update" and not retain_on_delete and old_s3_dest != s3_dest: | ||
if not old_s3_dest: | ||
logger.warn("cannot delete old resource without old resource properties") | ||
return | ||
|
||
aws_command("s3", "rm", old_s3_dest, "--recursive") | ||
|
||
if request_type == "Update" or request_type == "Create": | ||
s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, exclude, include, source_markers, extract) | ||
|
||
if distribution_id: | ||
cloudfront_invalidate(distribution_id, distribution_paths) | ||
|
||
cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id, responseData={ | ||
# Passing through the ARN sequences dependencies on the deployment | ||
'DestinationBucketArn': props.get('DestinationBucketArn'), | ||
'SourceObjectKeys': props.get('SourceObjectKeys'), | ||
}) | ||
except KeyError as e: | ||
cfn_error("invalid request. Missing key %s" % str(e)) | ||
except Exception as e: | ||
logger.exception(e) | ||
cfn_error(str(e)) | ||
|
||
#--------------------------------------------------------------------------------------------------- | ||
# populate all files from s3_source_zips to a destination bucket | ||
def s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, exclude, include, source_markers, extract): | ||
# list lengths are equal | ||
if len(s3_source_zips) != len(source_markers): | ||
raise Exception("'source_markers' and 's3_source_zips' must be the same length") | ||
|
||
# create a temporary working directory in /tmp or if enabled an attached efs volume | ||
if ENV_KEY_MOUNT_PATH in os.environ: | ||
workdir = os.getenv(ENV_KEY_MOUNT_PATH) + "/" + str(uuid4()) | ||
os.mkdir(workdir) | ||
else: | ||
workdir = tempfile.mkdtemp() | ||
|
||
logger.info("| workdir: %s" % workdir) | ||
|
||
# create a directory into which we extract the contents of the zip file | ||
contents_dir=os.path.join(workdir, 'contents') | ||
os.mkdir(contents_dir) | ||
|
||
try: | ||
# download the archive from the source and extract to "contents" | ||
for i in range(len(s3_source_zips)): | ||
s3_source_zip = s3_source_zips[i] | ||
markers = source_markers[i] | ||
|
||
if extract: | ||
archive=os.path.join(workdir, str(uuid4())) | ||
logger.info("archive: %s" % archive) | ||
aws_command("s3", "cp", s3_source_zip, archive) | ||
logger.info("| extracting archive to: %s\n" % contents_dir) | ||
logger.info("| markers: %s" % markers) | ||
extract_and_replace_markers(archive, contents_dir, markers) | ||
else: | ||
logger.info("| copying archive to: %s\n" % contents_dir) | ||
aws_command("s3", "cp", s3_source_zip, contents_dir) | ||
|
||
# sync from "contents" to destination | ||
|
||
s3_command = ["s3", "sync"] | ||
|
||
if prune: | ||
s3_command.append("--delete") | ||
|
||
if exclude: | ||
for filter in exclude: | ||
s3_command.extend(["--exclude", filter]) | ||
|
||
if include: | ||
for filter in include: | ||
s3_command.extend(["--include", filter]) | ||
|
||
s3_command.extend([contents_dir, s3_dest]) | ||
s3_command.extend(create_metadata_args(user_metadata, system_metadata)) | ||
aws_command(*s3_command) | ||
finally: | ||
if not os.getenv(ENV_KEY_SKIP_CLEANUP): | ||
shutil.rmtree(workdir) | ||
|
||
#--------------------------------------------------------------------------------------------------- | ||
# invalidate files in the CloudFront distribution edge caches | ||
def cloudfront_invalidate(distribution_id, distribution_paths): | ||
invalidation_resp = cloudfront.create_invalidation( | ||
DistributionId=distribution_id, | ||
InvalidationBatch={ | ||
'Paths': { | ||
'Quantity': len(distribution_paths), | ||
'Items': distribution_paths | ||
}, | ||
'CallerReference': str(uuid4()), | ||
}) | ||
# by default, will wait up to 10 minutes | ||
cloudfront.get_waiter('invalidation_completed').wait( | ||
DistributionId=distribution_id, | ||
Id=invalidation_resp['Invalidation']['Id']) | ||
|
||
#--------------------------------------------------------------------------------------------------- | ||
# set metadata | ||
def create_metadata_args(raw_user_metadata, raw_system_metadata): | ||
if len(raw_user_metadata) == 0 and len(raw_system_metadata) == 0: | ||
return [] | ||
|
||
format_system_metadata_key = lambda k: k.lower() | ||
format_user_metadata_key = lambda k: k.lower() | ||
|
||
system_metadata = { format_system_metadata_key(k): v for k, v in raw_system_metadata.items() } | ||
user_metadata = { format_user_metadata_key(k): v for k, v in raw_user_metadata.items() } | ||
|
||
flatten = lambda l: [item for sublist in l for item in sublist] | ||
system_args = flatten([[f"--{k}", v] for k, v in system_metadata.items()]) | ||
user_args = ["--metadata", json.dumps(user_metadata, separators=(',', ':'))] if len(user_metadata) > 0 else [] | ||
|
||
return system_args + user_args + ["--metadata-directive", "REPLACE"] | ||
|
||
#--------------------------------------------------------------------------------------------------- | ||
# executes an "aws" cli command | ||
def aws_command(*args): | ||
aws="/opt/awscli/aws" # from AwsCliLayer | ||
logger.info("| aws %s" % ' '.join(args)) | ||
subprocess.check_call([aws] + list(args)) | ||
|
||
#--------------------------------------------------------------------------------------------------- | ||
# sends a response to cloudformation | ||
def cfn_send(event, context, responseStatus, responseData={}, physicalResourceId=None, noEcho=False, reason=None): | ||
|
||
responseUrl = event['ResponseURL'] | ||
logger.info(responseUrl) | ||
|
||
responseBody = {} | ||
responseBody['Status'] = responseStatus | ||
responseBody['Reason'] = reason or ('See the details in CloudWatch Log Stream: ' + context.log_stream_name) | ||
responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name | ||
responseBody['StackId'] = event['StackId'] | ||
responseBody['RequestId'] = event['RequestId'] | ||
responseBody['LogicalResourceId'] = event['LogicalResourceId'] | ||
responseBody['NoEcho'] = noEcho | ||
responseBody['Data'] = responseData | ||
|
||
body = json.dumps(responseBody) | ||
logger.info("| response body:\n" + body) | ||
|
||
headers = { | ||
'content-type' : '', | ||
'content-length' : str(len(body)) | ||
} | ||
|
||
try: | ||
request = Request(responseUrl, method='PUT', data=bytes(body.encode('utf-8')), headers=headers) | ||
with contextlib.closing(urlopen(request)) as response: | ||
logger.info("| status code: " + response.reason) | ||
except Exception as e: | ||
logger.error("| unable to send response to CloudFormation") | ||
logger.exception(e) | ||
|
||
|
||
#--------------------------------------------------------------------------------------------------- | ||
# check if bucket is owned by a custom resource | ||
# if it is then we don't want to delete content | ||
def bucket_owned(bucketName, keyPrefix): | ||
tag = CUSTOM_RESOURCE_OWNER_TAG | ||
if keyPrefix != "": | ||
tag = tag + ':' + keyPrefix | ||
try: | ||
request = s3.get_bucket_tagging( | ||
Bucket=bucketName, | ||
) | ||
return any((x["Key"].startswith(tag)) for x in request["TagSet"]) | ||
except Exception as e: | ||
logger.info("| error getting tags from bucket") | ||
logger.exception(e) | ||
return False | ||
|
||
# extract archive and replace markers in output files | ||
def extract_and_replace_markers(archive, contents_dir, markers): | ||
with ZipFile(archive, "r") as zip: | ||
zip.extractall(contents_dir) | ||
|
||
# replace markers for this source | ||
for file in zip.namelist(): | ||
file_path = os.path.join(contents_dir, file) | ||
if os.path.isdir(file_path): continue | ||
replace_markers(file_path, markers) | ||
|
||
def replace_markers(filename, markers): | ||
# convert the dict of string markers to binary markers | ||
replace_tokens = dict([(k.encode('utf-8'), v.encode('utf-8')) for k, v in markers.items()]) | ||
|
||
outfile = filename + '.new' | ||
with open(filename, 'rb') as fi, open(outfile, 'wb') as fo: | ||
for line in fi: | ||
for token in replace_tokens: | ||
line = line.replace(token, replace_tokens[token]) | ||
fo.write(line) | ||
|
||
# # delete the original file and rename the new one to the original | ||
os.remove(filename) | ||
os.rename(outfile, filename) |
@@ -1 +1 @@
{"version":"20.0.0"}
{"version":"21.0.0"}
@@ -1,5 +1,5 @@
{
  "version": "20.0.0",
  "version": "21.0.0",
  "testCases": {
    "integ.product": {
      "stacks": [
@@ -0,0 +1 @@
{}
Review comment: How are you granting access on the other side? The IAM role that gets objects from these buckets needs to be granted access to the bucket in its principal policy.
Reply: Addressed in my other comment, but there is not much more we can do. We made it as easy as possible, but we don't have access to the accounts the product is being shared with; it is up to the admin to configure those spoke accounts.
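To make the admin's task concrete, a hedged sketch (not part of this PR) of how a hub-account admin could grant a spoke account read access to the asset bucket; the account ID and role name are placeholders, and `assetBucket` stands for an existing `ProductStackAssetBucket` such as `testAssetBucket` in the integration test above:

```ts
import * as iam from '@aws-cdk/aws-iam';

// Hypothetical: allow a specific launch role in the spoke account to read the assets.
assetBucket.grantRead(new iam.ArnPrincipal('arn:aws:iam::111122223333:role/SCLaunchRole'));

// Or, more broadly, allow the whole spoke account.
assetBucket.grantRead(new iam.AccountPrincipal('111122223333'));
```

Since the synthesizer defaults to a KMS-encrypted bucket, the key policy also has to allow the consuming principals to decrypt.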