diff --git a/.gitea/workflows/test-external-stack.yml b/.gitea/workflows/test-external-stack.yml
new file mode 100644
index 00000000..1d6794c5
--- /dev/null
+++ b/.gitea/workflows/test-external-stack.yml
@@ -0,0 +1,58 @@
+name: External Stack Test
+
+on:
+  push:
+    branches: '*'
+    paths:
+      - '!**'
+      - '.gitea/workflows/triggers/test-external-stack'
+      - '.gitea/workflows/test-external-stack.yml'
+      - 'tests/external-stack/run-test.sh'
+  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
+    - cron: '8 19 * * *'
+
+jobs:
+  test:
+    name: "Run external stack test suite"
+    runs-on: ubuntu-latest
+    steps:
+      - name: "Clone project repository"
+        uses: actions/checkout@v3
+      # At present the stock setup-python action fails on Linux/aarch64
+      # Conditional steps below work around this by using deadsnakes for that case only
+      - name: "Install Python for ARM on Linux"
+        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
+        uses: deadsnakes/action@v3.0.1
+        with:
+          python-version: '3.8'
+      - name: "Install Python cases other than ARM on Linux"
+        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.8'
+      - name: "Print Python version"
+        run: python3 --version
+      - name: "Install shiv"
+        run: pip install shiv
+      - name: "Generate build version file"
+        run: ./scripts/create_build_tag_file.sh
+      - name: "Build local shiv package"
+        run: ./scripts/build_shiv_package.sh
+      - name: "Run external stack tests"
+        run: ./tests/external-stack/run-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
diff --git a/.gitea/workflows/triggers/test-external-stack b/.gitea/workflows/triggers/test-external-stack
new file mode 100644
index 00000000..0a20e3e2
--- /dev/null
+++ b/.gitea/workflows/triggers/test-external-stack
@@ -0,0 +1 @@
+Change this file to trigger running the external-stack CI job
diff --git a/stack_orchestrator/build/build_containers.py b/stack_orchestrator/build/build_containers.py
index 71debf09..2b78306b 100644
--- a/stack_orchestrator/build/build_containers.py
+++ b/stack_orchestrator/build/build_containers.py
@@ -71,7 +71,7 @@ def process_container(build_context: BuildContext) -> bool:
 
     # Check if this is in an external stack
     if stack_is_external(build_context.stack):
-        container_parent_dir = Path(build_context.stack).joinpath("container-build")
+        container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
         temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
         temp_build_script_filename = temp_build_dir.joinpath("build.sh")
        # Now check if the container exists in the external stack.
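
Reviewer note on the build_containers.py change: an external stack definition lives at <repo-root>/stacks/<stack-name> (the layout the test script below relies on), so shared directories such as container-build/ sit two levels above the stack directory, at the repo root. A minimal sketch of the path arithmetic, assuming that layout (the example path is hypothetical):

from pathlib import Path

# Hypothetical external stack location, mirroring the layout used by run-test.sh:
# <CERC_REPO_BASE_DIR>/test-external-stack/stacks/test-external-stack
stack = Path.home() / "cerc" / "test-external-stack" / "stacks" / "test-external-stack"

old_dir = stack.joinpath("container-build")                # .../stacks/test-external-stack/container-build (wrong)
new_dir = stack.parent.parent.joinpath("container-build")  # .../test-external-stack/container-build (repo root)
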
diff --git a/stack_orchestrator/data/container-build/cerc-test-container/build.sh b/stack_orchestrator/data/container-build/cerc-test-container/build.sh
index ee56576a..fdc86a90 100755
--- a/stack_orchestrator/data/container-build/cerc-test-container/build.sh
+++ b/stack_orchestrator/data/container-build/cerc-test-container/build.sh
@@ -2,4 +2,4 @@
 # Build cerc/test-container
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
\ No newline at end of file
+docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index 29afcf13..db1611f9 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -27,6 +27,7 @@ from stack_orchestrator import constants
 from stack_orchestrator.opts import opts
 from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
+from stack_orchestrator.util import resolve_compose_file
 from stack_orchestrator.deploy.deployer import Deployer, DeployerException
 from stack_orchestrator.deploy.deployer_factory import getDeployer
 from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
@@ -324,7 +325,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
             pod_path = pod["path"]
             if include_exclude_check(pod_name, include, exclude):
                 if pod_repository is None or pod_repository == "internal":
-                    compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+                    if deployment:
+                        compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+                    else:
+                        compose_file_name = resolve_compose_file(stack, pod_name)
                 else:
                     if deployment:
                         compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
@@ -336,6 +340,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
                     if pod_post_start_command is not None:
                         post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
                 else:
+                    # TODO: fix this code for external stack with scripts
                     pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
                     compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
                     pod_pre_start_command = pod.get("pre_start_command")
diff --git a/stack_orchestrator/deploy/deploy_util.py b/stack_orchestrator/deploy/deploy_util.py
index 8b812d3a..9ee09619 100644
--- a/stack_orchestrator/deploy/deploy_util.py
+++ b/stack_orchestrator/deploy/deploy_util.py
@@ -16,7 +16,7 @@
 import os
 from typing import List, Any
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
-from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list
+from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
 from stack_orchestrator.opts import opts
@@ -27,7 +27,7 @@ def _container_image_from_service(stack: str, service: str):
     pods = get_pod_list(parsed_stack)
     yaml = get_yaml()
     for pod in pods:
-        pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
+        pod_file_path = resolve_compose_file(stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "services" in parsed_pod_file:
             services = parsed_pod_file["services"]
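
Net effect in deploy_util.py: _container_image_from_service now reads a service's image from whichever compose file resolve_compose_file (added in util.py further down) locates, preferring the external stack's copy. A rough sketch of that lookup, with hypothetical pod and service names:

# Sketch only; "test" as pod and service name is hypothetical here
pod_file_path = resolve_compose_file(stack, "test")
parsed_pod_file = get_yaml().load(open(pod_file_path, "r"))
image = parsed_pod_file["services"]["test"]["image"]  # e.g. cerc/test-container:local
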
diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py
index 8da93f7a..94d7f772 100644
--- a/stack_orchestrator/deploy/deployment_create.py
+++ b/stack_orchestrator/deploy/deployment_create.py
@@ -43,7 +43,7 @@ def _get_ports(stack):
     pods = get_pod_list(parsed_stack)
     yaml = get_yaml()
     for pod in pods:
-        pod_file_path = get_pod_file_path(parsed_stack, pod)
+        pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "services" in parsed_pod_file:
             for svc_name, svc in parsed_pod_file["services"].items():
@@ -79,7 +79,7 @@ def find_vol_usage(parsed_pod_file, vol):
         return ret
 
     for pod in pods:
-        pod_file_path = get_pod_file_path(parsed_stack, pod)
+        pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "volumes" in parsed_pod_file:
             volumes = parsed_pod_file["volumes"]
@@ -483,7 +483,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
     data_dir = Path(__file__).absolute().parent.parent.joinpath("data")
     yaml = get_yaml()
     for pod in pods:
-        pod_file_path = get_pod_file_path(parsed_stack, pod)
+        pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
         destination_pod_dir = destination_pods_dir.joinpath(pod)
diff --git a/stack_orchestrator/main.py b/stack_orchestrator/main.py
index c0a49689..06fe4ec7 100644
--- a/stack_orchestrator/main.py
+++ b/stack_orchestrator/main.py
@@ -17,6 +17,7 @@
 from stack_orchestrator.command_types import CommandOptions
 from stack_orchestrator.repos import setup_repositories
+from stack_orchestrator.repos import fetch_stack
 from stack_orchestrator.build import build_containers, fetch_containers
 from stack_orchestrator.build import build_npms
 from stack_orchestrator.build import build_webapp
@@ -50,6 +51,7 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
     ctx.obj = command_options
 
 
+cli.add_command(fetch_stack.command, "fetch-stack")
 cli.add_command(setup_repositories.command, "setup-repositories")
 cli.add_command(build_containers.command, "build-containers")
 cli.add_command(fetch_containers.command, "fetch-containers")
diff --git a/stack_orchestrator/repos/fetch_stack.py b/stack_orchestrator/repos/fetch_stack.py
new file mode 100644
index 00000000..9566e48f
--- /dev/null
+++ b/stack_orchestrator/repos/fetch_stack.py
@@ -0,0 +1,45 @@
+# Copyright © 2022, 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# env vars:
+# CERC_REPO_BASE_DIR defaults to ~/cerc
+
+
+import click
+import os
+
+from decouple import config
+from git import exc
+
+from stack_orchestrator.opts import opts
+from stack_orchestrator.repos.setup_repositories import process_repo
+from stack_orchestrator.util import error_exit
+
+
+@click.command()
+@click.argument('stack-locator')
+@click.option('--git-ssh', is_flag=True, default=False)
+@click.option('--check-only', is_flag=True, default=False)
+@click.option('--pull', is_flag=True, default=False)
+@click.pass_context
+def command(ctx, stack_locator, git_ssh, check_only, pull):
+    '''optionally resolve then git clone a repository containing one or more stack definitions'''
+    dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+    if not opts.o.quiet:
+        print(f"Dev Root is: {dev_root_path}")
+    try:
+        process_repo(pull, check_only, git_ssh, dev_root_path, None, stack_locator)
+    except exc.GitCommandError as error:
+        error_exit(f"\n******* git command returned error exit status:\n{error}")
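
Usage sketch for the new command, matching its invocation in run-test.sh below; the ~/cerc clone root assumes the CERC_REPO_BASE_DIR default:

laconic-so fetch-stack git.vdb.to/cerc-io/test-external-stack
# The fetched stack is then addressed by filesystem path:
laconic-so --stack ~/cerc/test-external-stack/stacks/test-external-stack build-containers
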
diff --git a/stack_orchestrator/repos/setup_repositories.py b/stack_orchestrator/repos/setup_repositories.py
index a137d645..4014e183 100644
--- a/stack_orchestrator/repos/setup_repositories.py
+++ b/stack_orchestrator/repos/setup_repositories.py
@@ -26,6 +26,7 @@ from pathlib import Path
 import yaml
 from stack_orchestrator.constants import stack_file_name
+from stack_orchestrator.opts import opts
 from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit, warn_exit
@@ -87,8 +88,8 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
 
 # TODO: fix the messy arg list here
-def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
-    if verbose:
+def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
+    if opts.o.verbose:
         print(f"Processing repo: {fully_qualified_repo}")
     repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
     git_ssh_prefix = f"git@{repo_host}:"
@@ -100,7 +101,7 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
     (current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(
         full_filesystem_repo_path
     ) if is_present else (None, None)
-    if not quiet:
+    if not opts.o.quiet:
         present_text = f"already exists active {'branch' if is_branch else 'tag'}: {current_repo_branch_or_tag}" if is_present \
             else 'Needs to be fetched'
         print(f"Checking: {full_filesystem_repo_path}: {present_text}")
@@ -111,25 +112,25 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
             sys.exit(1)
     else:
         if pull:
-            if verbose:
+            if opts.o.verbose:
                 print(f"Running git pull for {full_filesystem_repo_path}")
             if not check_only:
                 if is_branch:
                     git_repo = git.Repo(full_filesystem_repo_path)
                     origin = git_repo.remotes.origin
-                    origin.pull(progress=None if quiet else GitProgress())
+                    origin.pull(progress=None if opts.o.quiet else GitProgress())
                 else:
                     print("skipping pull because this repo checked out a tag")
            else:
                 print("(git pull skipped)")
     if not is_present:
         # Clone
-        if verbose:
+        if opts.o.verbose:
             print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
-        if not dry_run:
+        if not opts.o.dry_run:
             git.Repo.clone_from(full_github_repo_path, full_filesystem_repo_path,
-                                progress=None if quiet else GitProgress())
+                                progress=None if opts.o.quiet else GitProgress())
         else:
             print("(git clone skipped)")
         # Checkout the requested branch, if one was specified
@@ -150,13 +151,13 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
             current_repo_branch_or_tag and (
                 current_repo_branch_or_tag != branch_to_checkout)
         ):
-            if not quiet:
+            if not opts.o.quiet:
                 print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
             git_repo = git.Repo(full_filesystem_repo_path)
             # git checkout works for both branches and tags
             git_repo.git.checkout(branch_to_checkout)
         else:
-            if verbose:
+            if opts.o.verbose:
                 print(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")
@@ -182,36 +183,18 @@ def parse_branches(branches_string):
 @click.option('--check-only', is_flag=True, default=False)
 @click.option('--pull', is_flag=True, default=False)
 @click.option("--branches", help="override branches for repositories")
-@click.option('--branches-file', help="checkout branches specified in this file")
 @click.pass_context
-def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches_file):
+def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
     '''git clone the set of repositories required to build the complete system from source'''
-    quiet = ctx.obj.quiet
-    verbose = ctx.obj.verbose
-    dry_run = ctx.obj.dry_run
-    stack = ctx.obj.stack
+    quiet = opts.o.quiet
+    verbose = opts.o.verbose
+    stack = opts.o.stack
     branches_array = []
 
-    # TODO: branches file needs to be re-worked in the context of stacks
-    if branches_file:
-        if branches:
-            print("Error: can't specify both --branches and --branches-file")
-            sys.exit(1)
-        else:
-            if verbose:
-                print(f"loading branches from: {branches_file}")
-            with open(branches_file) as branches_file_open:
-                branches_array = branches_file_open.read().splitlines()
-
-    print(f"branches: {branches}")
     if branches:
-        if branches_file:
-            print("Error: can't specify both --branches and --branches-file")
-            sys.exit(1)
-        else:
-            branches_array = parse_branches(branches)
+        branches_array = parse_branches(branches)
 
     if branches_array and verbose:
         print(f"Branches are: {branches_array}")
@@ -271,7 +254,6 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
 
     for repo in repos:
         try:
-            process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, repo)
+            process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, repo)
         except git.exc.GitCommandError as error:
-            print(f"\n******* git command returned error exit status:\n{error}")
-            sys.exit(1)
+            error_exit(f"\n******* git command returned error exit status:\n{error}")
diff --git a/stack_orchestrator/util.py b/stack_orchestrator/util.py
index d03753c3..c2422f72 100644
--- a/stack_orchestrator/util.py
+++ b/stack_orchestrator/util.py
@@ -94,10 +94,24 @@ def get_plugin_code_paths(stack) -> List[Path]:
     return list(result)
 
 
-def get_pod_file_path(parsed_stack, pod_name: str):
+# Find a compose file, looking first in any external stack
+# and if not found there, internally
+def resolve_compose_file(stack, pod_name: str):
+    if stack_is_external(stack):
+        # First try looking in the external stack for the compose file
+        compose_base = Path(stack).parent.parent.joinpath("compose")
+        proposed_file = compose_base.joinpath(f"docker-compose-{pod_name}.yml")
+        if proposed_file.exists():
+            return proposed_file
+        # If we don't find it fall through to the internal case
+    compose_base = get_internal_compose_file_dir()
+    return compose_base.joinpath(f"docker-compose-{pod_name}.yml")
+
+
+def get_pod_file_path(stack, parsed_stack, pod_name: str):
     pods = parsed_stack["pods"]
     if type(pods[0]) is str:
-        result = os.path.join(get_compose_file_dir(), f"docker-compose-{pod_name}.yml")
+        result = resolve_compose_file(stack, pod_name)
     else:
         for pod in pods:
             if pod["name"] == pod_name:
@@ -131,7 +145,7 @@ def pod_has_scripts(parsed_stack, pod_name: str):
     return result
 
 
-def get_compose_file_dir():
+def get_internal_compose_file_dir():
     # TODO: refactor to use common code with deploy command
     # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
     data_dir = Path(__file__).absolute().parent.joinpath("data")
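
Taken together, the lookup order resolve_compose_file implements is: <external-stack-repo>/compose/docker-compose-<pod>.yml first, then the built-in stack_orchestrator/data/compose directory. A sketch under the test stack's layout (the pod name is hypothetical):

import os
from stack_orchestrator.util import resolve_compose_file

stack = os.path.expanduser("~/cerc/test-external-stack/stacks/test-external-stack")
# Checks ~/cerc/test-external-stack/compose/docker-compose-test.yml first and,
# if that file is absent, falls back to the internal data/compose directory.
compose_path = resolve_compose_file(stack, "test")
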
diff --git a/tests/external-stack/run-test.sh b/tests/external-stack/run-test.sh
new file mode 100755
index 00000000..9ec2cfc6
--- /dev/null
+++ b/tests/external-stack/run-test.sh
@@ -0,0 +1,185 @@
+#!/usr/bin/env bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+    set -x
+fi
+# Dump environment variables for debugging
+echo "Environment variables:"
+env
+
+if [ "$1" == "from-path" ]; then
+    TEST_TARGET_SO="laconic-so"
+else
+    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+fi
+
+delete_cluster_exit () {
+    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+    exit 1
+}
+
+# Test basic stack-orchestrator deploy
+echo "Running stack-orchestrator external stack deploy test"
+# Set a non-default repo dir
+export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
+echo "Testing this package: $TEST_TARGET_SO"
+echo "Test version command"
+reported_version_string=$( $TEST_TARGET_SO version )
+echo "Version reported is: ${reported_version_string}"
+echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+rm -rf $CERC_REPO_BASE_DIR
+mkdir -p $CERC_REPO_BASE_DIR
+# Clone the external test stack
+$TEST_TARGET_SO fetch-stack git.vdb.to/cerc-io/test-external-stack
+stack_name="$CERC_REPO_BASE_DIR/test-external-stack/stacks/test-external-stack"
+TEST_TARGET_SO_STACK="$TEST_TARGET_SO --stack ${stack_name}"
+# Test bringing the test container up and down
+# with and without volume removal
+$TEST_TARGET_SO_STACK setup-repositories
+$TEST_TARGET_SO_STACK build-containers
+# Test deploy command execution
+$TEST_TARGET_SO_STACK deploy setup $CERC_REPO_BASE_DIR
+# Check that we now have the expected output directory
+container_output_dir=$CERC_REPO_BASE_DIR/container-output-dir
+if [ ! -d "$container_output_dir" ]; then
+    echo "deploy setup test: output directory not present"
+    echo "deploy setup test: FAILED"
+    exit 1
+fi
+if [ ! -f "$container_output_dir/output-file" ]; then
+    echo "deploy setup test: output file not present"
+    echo "deploy setup test: FAILED"
+    exit 1
+fi
+output_file_content=$(<$container_output_dir/output-file)
+if [ ! "$output_file_content" == "output-data" ]; then
+    echo "deploy setup test: output file contents not correct"
+    echo "deploy setup test: FAILED"
+    exit 1
+fi
+# Check that we now have the expected output file
+$TEST_TARGET_SO_STACK deploy up
+# Test deploy port command
+deploy_port_output=$( $TEST_TARGET_SO_STACK deploy port test 80 )
+if [[ "$deploy_port_output" =~ ^0.0.0.0:[1-9][0-9]* ]]; then
+    echo "Deploy port test: passed"
+else
+    echo "Deploy port test: FAILED"
+    exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down
+# The next time we bring the container up the volume will be old (from the previous run above)
+$TEST_TARGET_SO_STACK deploy up
+log_output_1=$( $TEST_TARGET_SO_STACK deploy logs )
+if [[ "$log_output_1" == *"filesystem is old"* ]]; then
+    echo "Retain volumes test: passed"
+else
+    echo "Retain volumes test: FAILED"
+    exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down --delete-volumes
+# Now when we bring the container up the volume will be new again
+$TEST_TARGET_SO_STACK deploy up
+log_output_2=$( $TEST_TARGET_SO_STACK deploy logs )
+if [[ "$log_output_2" == *"filesystem is fresh"* ]]; then
+    echo "Delete volumes test: passed"
+else
+    echo "Delete volumes test: FAILED"
+    exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down --delete-volumes
+# Basic test of creating a deployment
+test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
+test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
+$TEST_TARGET_SO_STACK deploy init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED,CERC_TEST_PARAM_3=FAST
+# Check the file now exists
+if [ ! -f "$test_deployment_spec" ]; then
+    echo "deploy init test: spec file not present"
+    echo "deploy init test: FAILED"
+    exit 1
+fi
+echo "deploy init test: passed"
+$TEST_TARGET_SO_STACK deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
+# Check the deployment dir exists
+if [ ! -d "$test_deployment_dir" ]; then
+    echo "deploy create test: deployment directory not present"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+echo "deploy create test: passed"
+# Check the file written by the create command in the stack now exists
+if [ ! -f "$test_deployment_dir/create-file" ]; then
+    echo "deploy create test: create output file not present"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+# And has the right content
+create_file_content=$(<$test_deployment_dir/create-file)
+if [ ! "$create_file_content" == "create-command-output-data" ]; then
+    echo "deploy create test: create output file contents not correct"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+
+# Add a config file to be picked up by the ConfigMap before starting.
+echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
+
+echo "deploy create output file test: passed"
+# Try to start the deployment
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+# Check logs command works
+log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
+    echo "deployment logs test: passed"
+else
+    echo "deployment logs test: FAILED"
+    exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_1 was passed correctly
+if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
+    echo "deployment config test: passed"
+else
+    echo "deployment config test: FAILED"
+    exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
+if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then
+    echo "deployment compose config test: passed"
+else
+    echo "deployment compose config test: FAILED"
+    exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_3 was passed correctly
+if [[ "$log_output_3" == *"Test-param-3: FAST"* ]]; then
+    echo "deployment config test: passed"
+else
+    echo "deployment config test: FAILED"
+    exit 1
+fi
+
+# Check that the ConfigMap is mounted and contains the expected content.
+log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
+    echo "deployment ConfigMap test: passed"
+else
+    echo "deployment ConfigMap test: FAILED"
+    delete_cluster_exit
+fi
+
+# Stop then start again and check the volume was preserved
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
+# Sleep a bit just in case
+# sleep for longer to check if that's why the subsequent create cluster fails
+sleep 20
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_5" == *"filesystem is old"* ]]; then
+    echo "Retain volumes test: passed"
+else
+    echo "Retain volumes test: FAILED"
+    delete_cluster_exit
+fi
+
+# Stop and clean up
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+echo "Test passed"
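
The suite also runs locally; per the argument handling at the top of the script, passing from-path selects a laconic-so already on PATH instead of the newest shiv package under ./package:

# CI path: uses the newest ./package/laconic-so* built by ./scripts/build_shiv_package.sh
./tests/external-stack/run-test.sh
# Local path: uses laconic-so from PATH
./tests/external-stack/run-test.sh from-path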