diff --git a/backend/deployment_triggers/__init__.py b/backend/deployment_triggers/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/backend/dbmigrations_handler.py b/backend/deployment_triggers/dbmigrations_handler.py
similarity index 100%
rename from backend/dbmigrations_handler.py
rename to backend/deployment_triggers/dbmigrations_handler.py
diff --git a/backend/deployment_triggers/dbsnapshots_handler.py b/backend/deployment_triggers/dbsnapshots_handler.py
new file mode 100644
index 000000000..5fea7d809
--- /dev/null
+++ b/backend/deployment_triggers/dbsnapshots_handler.py
@@ -0,0 +1,66 @@
+"""
+The handler of this module is called once upon every deployment.
+"""
+
+import logging
+import os
+import datetime
+import boto3
+import time
+from alembic.script import ScriptDirectory
+from alembic.migration import MigrationContext
+from alembic.config import Config
+from dataall.base.db.connection import ENVNAME, get_engine
+
+logger = logging.getLogger()
+logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO'))
+
+
+def handler(event, context) -> None:
+    """
+    This function is called once upon every deployment.
+    It checks whether there are alembic migration scripts pending execution.
+    If there are, it creates a snapshot of the database before they run.
+    The migration scripts themselves are executed afterwards by the DbMigrations trigger.
+    """
+    alembic_cfg = Config('alembic.ini')
+    alembic_cfg.set_main_option('script_location', './migrations')
+
+    # Get head version
+    script = ScriptDirectory.from_config(alembic_cfg)
+    head_rev = script.get_current_head()
+
+    # Get current version from database
+    engine = get_engine(ENVNAME)
+    with engine.engine.connect() as connection:
+        migration_context = MigrationContext.configure(connection)
+        current_rev = migration_context.get_current_revision()
+
+    if head_rev != current_rev:
+        snapshot_id = f'{os.environ.get("resource_prefix", "dataall")}-migration-{head_rev}-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
+        cluster_id = engine.dbconfig.host.split('.')[0]
+        logger.info(
+            f'Creating RDS snapshot for cluster {cluster_id}, head revision {head_rev} is ahead of {current_rev}...'
+        )
+        try:
+            rds_client = boto3.client('rds', region_name=os.getenv('AWS_REGION'))
+            # Edge case in which the cluster is performing backup and/or maintenance operations.
+            # If this wait times out, the CI/CD pipeline fails and needs to be retried.
+            while (
+                cluster_status := rds_client.describe_db_clusters(DBClusterIdentifier=cluster_id)['DBClusters'][0][
+                    'Status'
+                ]
+            ) != 'available':
+                logger.info(f'Waiting for the cluster to become available, {cluster_status=}')
+                time.sleep(30)
+
+            rds_client.create_db_cluster_snapshot(
+                DBClusterSnapshotIdentifier=snapshot_id,
+                DBClusterIdentifier=cluster_id,
+                Tags=[
+                    {'Key': 'Application', 'Value': 'dataall'},
+                ],
+            )
+        except Exception as e:
+            logger.exception(f'Failed to create RDS snapshot: {e}')
+            raise Exception(f'Failed to create RDS snapshot: {e}')
diff --git a/backend/saveperms_handler.py b/backend/deployment_triggers/saveperms_handler.py
similarity index 100%
rename from backend/saveperms_handler.py
rename to backend/deployment_triggers/saveperms_handler.py
diff --git a/deploy/stacks/backend_stack.py b/deploy/stacks/backend_stack.py
index 42206bc48..7b0a96709 100644
--- a/deploy/stacks/backend_stack.py
+++ b/deploy/stacks/backend_stack.py
@@ -310,17 +310,47 @@ def __init__(
             **kwargs,
         )
 
+        db_snapshots = TriggerFunctionStack(
+            self,
+            'DbSnapshots',
+            handler='deployment_triggers.dbsnapshots_handler.handler',
+            envname=envname,
+            resource_prefix=resource_prefix,
+            vpc=vpc,
+            vpce_connection=vpce_connection,
+            image_tag=image_tag,
+            ecr_repository=repo,
+            execute_after=[aurora_stack.cluster],
+            connectables=[aurora_stack.cluster],
+            additional_policy_statements=[
+                iam.PolicyStatement(
+                    effect=iam.Effect.ALLOW,
+                    actions=['rds:AddTagsToResource', 'rds:CreateDBClusterSnapshot'],
+                    resources=[
+                        f'arn:aws:rds:*:{self.account}:cluster-snapshot:{resource_prefix}*',
+                        f'arn:aws:rds:*:{self.account}:cluster:{resource_prefix}*',
+                    ],
+                ),
+                iam.PolicyStatement(
+                    effect=iam.Effect.ALLOW,
+                    actions=['rds:DescribeDBClusters'],
+                    resources=['*'],
+                ),
+            ],
+            **kwargs,
+        )
+
         db_migrations = TriggerFunctionStack(
             self,
             'DbMigrations',
-            handler='dbmigrations_handler.handler',
+            handler='deployment_triggers.dbmigrations_handler.handler',
             envname=envname,
             resource_prefix=resource_prefix,
             vpc=vpc,
             vpce_connection=vpce_connection,
             image_tag=image_tag,
             ecr_repository=repo,
-            execute_after=[aurora_stack.cluster],
+            execute_after=[db_snapshots.trigger_function],
             connectables=[aurora_stack.cluster],
             **kwargs,
         )
@@ -328,7 +358,7 @@ def __init__(
         TriggerFunctionStack(
             self,
             'SavePerms',
-            handler='saveperms_handler.handler',
+            handler='deployment_triggers.saveperms_handler.handler',
             envname=envname,
             resource_prefix=resource_prefix,
             vpc=vpc,
diff --git a/deploy/stacks/trigger_function_stack.py b/deploy/stacks/trigger_function_stack.py
index 70924a474..c7bbb6539 100644
--- a/deploy/stacks/trigger_function_stack.py
+++ b/deploy/stacks/trigger_function_stack.py
@@ -27,6 +27,7 @@ def __init__(
         vpce_connection: ec2.IConnectable = None,
         connectables: List[ec2.IConnectable] = [],
         execute_after: List[Construct] = [],
+        additional_policy_statements: List[iam.PolicyStatement] = [],
         **kwargs,
     ):
         super().__init__(scope, id, **kwargs)
@@ -35,16 +36,16 @@
         image_tag = self.node.try_get_context('image_tag')
         image_tag = f'lambdas-{image_tag}'
 
-        env = {'envname': envname, 'LOG_LEVEL': 'INFO'}
+        env = {'envname': envname, 'resource_prefix': resource_prefix, 'LOG_LEVEL': 'INFO'}
 
         function_sgs = self.create_lambda_sgs(envname, handler, resource_prefix, vpc)
-
+        statements = self.get_policy_statements(resource_prefix) + (additional_policy_statements or [])
         self.trigger_function = TriggerFunction(
             self,
             f'TriggerFunction-{handler}',
             function_name=f'{resource_prefix}-{envname}-{handler.replace(".", "_")}',
             description=f'dataall {handler} trigger function',
-            initial_policy=self.get_policy_statements(resource_prefix),
+            initial_policy=statements,
             code=_lambda.Code.from_ecr_image(repository=ecr_repository, tag=image_tag, cmd=[handler]),
             vpc=vpc,
             security_groups=[function_sgs],
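
Reviewer note, not part of the patch: the snapshot handler has two clear branches (skip when the database is already at the alembic head, create a cluster snapshot when the head is ahead), which makes it easy to cover with a small unit test that mocks every collaborator so no AWS or database access happens. The sketch below is illustrative only and assumes the module is importable as deployment_triggers.dbsnapshots_handler with its dependencies (alembic, dataall) installed; the file name, test names, helper _patched_collaborators and the fake cluster host are all hypothetical.

# test_dbsnapshots_handler.py -- hypothetical sketch, not part of this patch
from unittest.mock import DEFAULT, MagicMock, patch

HANDLER_MODULE = 'deployment_triggers.dbsnapshots_handler'


def _patched_collaborators():
    # Patch every external collaborator of the handler (alembic, the engine
    # factory and boto3). As a context manager this yields a dict of mocks.
    return patch.multiple(
        HANDLER_MODULE,
        boto3=DEFAULT,
        Config=DEFAULT,
        ScriptDirectory=DEFAULT,
        MigrationContext=DEFAULT,
        get_engine=DEFAULT,
    )


def test_snapshot_created_when_head_is_ahead():
    from deployment_triggers.dbsnapshots_handler import handler

    with _patched_collaborators() as mocks:
        # alembic head is ahead of the revision stored in the database
        mocks['ScriptDirectory'].from_config.return_value.get_current_head.return_value = 'abc123'
        mocks['MigrationContext'].configure.return_value.get_current_revision.return_value = 'def456'

        # fake engine whose host yields a predictable cluster identifier
        engine = MagicMock()
        engine.dbconfig.host = 'dataall-cluster.cluster-xyz.eu-west-1.rds.amazonaws.com'
        mocks['get_engine'].return_value = engine

        # the RDS cluster is reported as immediately available
        rds = mocks['boto3'].client.return_value
        rds.describe_db_clusters.return_value = {'DBClusters': [{'Status': 'available'}]}

        handler(event={}, context=None)

        rds.create_db_cluster_snapshot.assert_called_once()
        kwargs = rds.create_db_cluster_snapshot.call_args.kwargs
        assert kwargs['DBClusterIdentifier'] == 'dataall-cluster'
        assert 'abc123' in kwargs['DBClusterSnapshotIdentifier']


def test_no_snapshot_when_database_is_up_to_date():
    from deployment_triggers.dbsnapshots_handler import handler

    with _patched_collaborators() as mocks:
        # head revision equals the database revision, so no snapshot is taken
        mocks['ScriptDirectory'].from_config.return_value.get_current_head.return_value = 'abc123'
        mocks['MigrationContext'].configure.return_value.get_current_revision.return_value = 'abc123'
        mocks['get_engine'].return_value = MagicMock()

        handler(event={}, context=None)

        mocks['boto3'].client.assert_not_called()

Such a test would also document the trigger ordering introduced in backend_stack.py: the snapshot function runs first, and DbMigrations only executes after it via execute_after.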