diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b0cd053e..4a51fab3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -29,3 +29,9 @@ repos:
     hooks:
       - id: markdownlint
         entry: markdownlint --ignore .github/*.md
+
+  - repo: https://github.com/detailyang/pre-commit-shell
+    rev: 1.0.5
+    hooks:
+      - id: shell-lint
+        args: [-x]
diff --git a/Dockerfile b/Dockerfile
index dedd9436..48fd7934 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,11 +5,6 @@ ENV PYTHONDONTWRITEBYTECODE 1
 # we do not want pipenv to create a virtualenv inside the container
 ENV PIPENV_SITE_PACKAGES 1
 
-ARG locustfile
-ENV LOCUSTFILE=$locustfile
-
-RUN echo $LOCUSTFILE
-
 WORKDIR /app
 
 RUN pip install pipenv
diff --git a/locustfiles/all.py b/locustfiles/all.py
new file mode 100644
index 00000000..40589637
--- /dev/null
+++ b/locustfiles/all.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+""" Locust test for all APIs """
+from locust import HttpUser, between, events
+
+from utils.hosts import MilMoveHostMixin, MilMoveDomain, clean_milmove_host_users
+from utils.parsers import GHCAPIParser, PrimeAPIParser, SupportAPIParser
+from tasks import PrimeTasks, SupportTasks, ServicesCounselorTasks, TOOTasks
+
+# from flask import make_response, redirect
+from gevent.pool import Group
+
+
+# init these classes just once because we don't need to parse the API over and over:
+prime_api = PrimeAPIParser()
+support_api = SupportAPIParser()
+ghc_api = GHCAPIParser()
+
+
+class PrimeUser(MilMoveHostMixin, HttpUser):
+    """
+    Tests the Prime API.
+ """ + + # These attributes are used in MilMoveHostMixin to set up the proper hostname for any MilMove environment: + local_port = "9443" + domain = MilMoveDomain.PRIME # the base domain for the host + is_api = True # if True, uses the api base domain in deployed environments + + # This attribute is used for generating fake requests when hitting the Prime API: + parser = prime_api + + # These are locust HttpUser attributes that help define and shape the load test: + wait_time = between(0.25, 9) # the time period to wait in between tasks (in seconds, accepts decimals and 0) + tasks = {PrimeTasks: 1} # the set of tasks to be executed and their relative weight + + +class SupportUser(MilMoveHostMixin, HttpUser): + """ + Tests the Support API. + """ + + local_port = "9443" + domain = MilMoveDomain.PRIME + is_api = True + + parser = support_api + + wait_time = between(0.25, 9) + tasks = {SupportTasks: 1} + + +class ServicesCounselorUser(MilMoveHostMixin, HttpUser): + """ + Tests the MilMove Office app with the Services Counselor role. + """ + + local_protocol = "http" + local_port = "8080" + domain = MilMoveDomain.OFFICE + + # This attribute is used for generating fake requests when hitting the GHC API: + parser = ghc_api + + wait_time = between(0.25, 9) + tasks = {ServicesCounselorTasks: 1} + + +class TOOUser(MilMoveHostMixin, HttpUser): + """ + Tests the MilMove Office app with the TOO role. + """ + + local_protocol = "http" + local_port = "8080" + domain = MilMoveDomain.OFFICE + + # This attribute is used for generating fake requests when hitting the GHC API: + parser = ghc_api + + wait_time = between(0.25, 9) + tasks = {TOOTasks: 1} + + +@events.test_stop.add_listener +def on_test_stop(**kwargs): + """ + Clean up steps to run when the load test stops. Removes any cert files that may have been created during the setup. 
+ """ + clean_milmove_host_users(locust_env=kwargs["environment"]) + + +@events.init_command_line_parser.add_listener +def on_locust_command(parser, **_kwargs): + parser.add_argument( + "--prime-user-weight", env_var="PRIME_USER_WEIGHT", type=int, default=1, help="Weight for Prime user" + ) + parser.add_argument( + "--services-counselor-user-weight", + env_var="SERVICES_COUNSELOR_USER_WEIGHT", + type=int, + default=1, + help="Weight for Services Counselor user", + ) + parser.add_argument( + "--support-user-weight", env_var="SUPPORT_USER_WEIGHT", type=int, default=1, help="Weight for Support user" + ) + parser.add_argument("--too-user-weight", env_var="TOO_USER_WEIGHT", type=int, default=1, help="Weight for TOO user") + + +@events.test_start.add_listener +def on_test_start(environment, **_kwargs): + prime_class = PrimeUser + services_counselor_class = ServicesCounselorUser + support_class = SupportUser + too_class = TOOUser + + prime_class.weight = environment.parsed_options.prime_user_weight + services_counselor_class.weight = environment.parsed_options.services_counselor_user_weight + support_class.weight = environment.parsed_options.support_user_weight + too_class.weight = environment.parsed_options.too_user_weight + + environment.user_classes = [ + prime_class, + services_counselor_class, + support_class, + too_class, + ] + environment._remove_user_classes_with_weight_zero() + + # if the user classes change between runs (because user classes + # with 0 weights have been removed), we have to do some manual / + # hacky cleanups + setattr(environment.runner, "target_user_classes_count", {}) + setattr(environment.runner, "target_user_count", 0) + setattr(environment.runner, "_users_dispatcher", None) + setattr(environment.runner, "user_greenlets", Group()) diff --git a/nix/default.nix b/nix/default.nix index 8c399d2e..ee5ef0bf 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -46,6 +46,14 @@ in buildEnv { rev = "8112bb92f9df718eaa077ec77109eecc60240a72"; }) 
{}).pre-commit + (import (builtins.fetchGit { + # Descriptive name to make the store path easier to identify + name = "shellcheck-0.7.2"; + url = "https://github.com/NixOS/nixpkgs/"; + ref = "refs/heads/nixpkgs-unstable"; + rev = "8e1eab9eae4278c9bb1dcae426848a581943db5a"; + }) {}).shellcheck + (import (builtins.fetchGit { # Descriptive name to make the store path easier to identify name = "aws-vault-6.3.1"; diff --git a/scripts/aws-session-port-forward.py b/scripts/aws-session-port-forward.py index 3ff69202..6464db8b 100755 --- a/scripts/aws-session-port-forward.py +++ b/scripts/aws-session-port-forward.py @@ -48,9 +48,9 @@ print("Cannot find containers for taskArn: {}", task_arn, file=sys.stderr) sys.exit(1) -if len(task["containers"]) != 1: - print("More than one container for taskArn: {}", task_arn, file=sys.stderr) - sys.exit(1) +# if len(task["containers"]) != 1: +# print("More than one container for taskArn: {}", task_arn, file=sys.stderr) +# sys.exit(1) container = task["containers"][0] diff --git a/scripts/codebuild b/scripts/codebuild index 5d4a85fd..daa5275e 100755 --- a/scripts/codebuild +++ b/scripts/codebuild @@ -6,30 +6,108 @@ set -x echo "${DOCKER_PASSWORD}" | \ docker login --username "${DOCKER_USERNAME}" --password-stdin -#LOCUSTFILE can be set in the build overrides of the CodeBuild config -docker build -t "${ECR_REPO}:exp" --build-arg locustfile="${LOCUSTFILE}" . +tag=${CODEBUILD_RESOLVED_SOURCE_VERSION:-exp} +image="${ECR_REPO}:${tag}" -aws ecr get-login-password --region $AWS_REGION \ +docker build -t "${image}" . 
+
+aws ecr get-login-password --region "${AWS_REGION}" \
   | docker login --username AWS --password-stdin "${ECR_REPO}"
 
-docker push "${ECR_REPO}:exp"
+docker push "${ECR_REPO}:${tag}"
 
 #This will return an inactive task definition if a new one has been published
 #In that scenario you can specify the new revision manually in update-service
 current_task_definition=$(aws ecs describe-services \
   --cluster loadtesting \
-  --service exp-web | \
+  --service exp-web)
+
+task_definition_name=$(echo "${current_task_definition}" | \
   jq '.services[0].taskDefinition' | \
   cut -d/ -f 2 | \
   tr -d '"')
 
-#This has not been replacing the current service so you may need to stop the
-#previous deployment until we can figure out why --force-new-deployment is
-#required for the same task definition revision.
+taskDefinitionKeys=(containerDefinitions
+  family
+  taskRoleArn
+  executionRoleArn
+  networkMode
+  volumes
+  placementConstraints
+  cpu
+  memory)
+
+joinedKeys=$(printf ",%s" "${taskDefinitionKeys[@]}")
+# remove leading comma
+joinedKeys=${joinedKeys:1}
+
+jq_filter=".taskDefinition | {${joinedKeys}}"
+
+raw_task_definition=$(aws ecs describe-task-definition \
+  --task-definition "${task_definition_name}" | \
+  jq "${jq_filter}")
+
+# LOCUSTFILE can be set in the build overrides of the CodeBuild config
+: "${LOCUSTFILE:=/app/locustfiles/prime.py}"
+
+# See https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_size
+cpu="1024"
+memory="8192"
+
+container_def_len=$(echo "${raw_task_definition}" |\
+  jq '.containerDefinitions | length')
+
+if [[ "1" == "${container_def_len}" ]]; then
+  # we need to add the aws otel collector sidecar so we can get
+  # performance metrics about how locust is running
+  container_name=$(echo "${raw_task_definition}" | \
+    jq -r '.containerDefinitions[0].name')
+  jq_log_config='.containerDefinitions[0].logConfiguration'
+  orig_log_config=$(echo "${raw_task_definition}" | \
+    jq "${jq_log_config}")
+
+  jq_stream_prefix='.options."awslogs-stream-prefix"'
+  orig_stream_prefix=$(echo "${orig_log_config}" | \
+    jq -r "${jq_stream_prefix}")
+  log_config=$(echo "${orig_log_config}" | \
+    jq "${jq_stream_prefix} = \"otel-${orig_stream_prefix}\""
+  )
+  aws_otel_collector="\
+{ \
+  \"name\": \"otel-${container_name}\", \
+  \"image\": \"amazon/aws-otel-collector:v0.14.0\", \
+  \"essential\": true, \
+  \"command\": [ \
+    \"--config=/etc/ecs/container-insights/otel-task-metrics-config.yaml\" \
+  ], \
+  \"logConfiguration\": $log_config \
+}"
+
+  raw_task_definition=$(
+    echo "${raw_task_definition}" | \
+    jq ".containerDefinitions[.containerDefinitions | length] |= . + ${aws_otel_collector}"
+  )
+fi
+
+# update with new settings (e.g. image)
+new_task_definition=$(
+  echo "${raw_task_definition}" | \
+  jq ".cpu = \"${cpu}\"" | \
+  jq ".memory = \"${memory}\"" | \
+  jq ".containerDefinitions[0].image = \"${image}\"" | \
+  jq ".containerDefinitions[0].entryPoint = [\"locust\"]" | \
+  jq ".containerDefinitions[0].command = [\"-f\", \"${LOCUSTFILE}\", \"--host\", \"dp3\", \"--web-port\", \"4000\"]" \
+  )
+new_task_definition_name=$(aws ecs register-task-definition --cli-input-json "${new_task_definition}" | \
+  jq '.taskDefinition.taskDefinitionArn' | \
+  cut -d/ -f 2 | \
+  tr -d '"')
+
+# use force-new-deployment in case we are re-running the build and the
+# image isn't changing
 aws ecs update-service \
   --cluster loadtesting \
   --service exp-web \
-  --task-definition "${current_task_definition}" \
+  --task-definition "${new_task_definition_name}" \
   --desired-count 1 \
   --force-new-deployment