diff --git a/src/aerie_cli/commands/scheduling.py b/src/aerie_cli/commands/scheduling.py
index f0437d64..2119edd8 100644
--- a/src/aerie_cli/commands/scheduling.py
+++ b/src/aerie_cli/commands/scheduling.py
@@ -75,10 +75,11 @@ def delete_all_goals_for_plan(
         logging.info("No goals to delete.")
         return
 
-    logging.info("Deleting goals for Plan ID {plan}: ".format(plan=plan_id), nl=False)
+    to_print = "Deleting goals for Plan ID {plan}: ".format(plan=plan_id)
     goal_ids = []
     for goal in clear_goals:
         goal_ids.append(goal["goal"]["id"])
-        logging.info(str(goal["goal"]["id"]) + " ", nl=False)
-
+        to_print += str(goal["goal"]["id"]) + " "
+    logging.info(to_print)
+
     client.delete_scheduling_goals(goal_ids)
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index e0f9953a..a53b32df 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -3,6 +3,7 @@
 # The scope is global either way, but this allows for easier debugging, type hints, and autofill
 import os
 import sys
+import logging
 
 from aerie_cli.aerie_client import AerieClient
 from aerie_cli.aerie_host import AerieHost, AerieHostConfiguration
@@ -17,6 +18,9 @@
     start_session_from_configuration,
 )
 
+GLOBAL_LOGGER = logging.getLogger(__name__)
+GLOBAL_LOGGER.propagate = True  # records must propagate to the root logger for pytest's caplog to capture them
+
 # in case src_path is not from aeri-cli src and from site-packages
 src_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../src")
 sys.path.insert(0, src_path)
diff --git a/tests/integration_tests/test_configurations.py b/tests/integration_tests/test_configurations.py
index e4e2430c..d931b554 100644
--- a/tests/integration_tests/test_configurations.py
+++ b/tests/integration_tests/test_configurations.py
@@ -4,6 +4,7 @@
 from aerie_cli.__main__ import app
 
 import pytest
+import logging
 
 from .conftest import\
     HASURA_ADMIN_SECRET,\
@@ -26,7 +27,8 @@
 CONFIGURATION_NAME = "localhost"
 configuration_id = -1
 
-def test_configurations_clean():
+def test_configurations_clean(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["configurations", "clean"],
@@ -34,12 +36,12 @@
         catch_exceptions=False,
     )
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
     assert len(PersistentConfigurationManager.get_configurations()) == 0,\
         f"CONFIGURATIONS NOT CLEARED! CONFIGURATIONS: {PersistentConfigurationManager.get_configurations()}\n"\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
     no_session_error = False
@@ -71,7 +73,8 @@
             pytest.exit("CONFIGURATION SHOULD NOT EXIST. Failed when using active configuration\n", returncode=pytest.ExitCode.TESTS_FAILED)
 
 
-def test_configurations_create():
+def test_configurations_create(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["configurations", "create"],
@@ -84,7 +87,7 @@
     )
 
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
     global configuration_id
@@ -94,7 +97,8 @@
             configuration_id = i
     assert configuration_id != -1, "CONFIGURATION NOT LOADED, is it's name localhost?"
 
-def test_activate():
+def test_activate(caplog):
+    caplog.set_level(logging.INFO)
     before_refresh = len(PersistentConfigurationManager.get_configurations())
     assert before_refresh > 0
     PersistentConfigurationManager.read_configurations()
@@ -108,18 +112,18 @@
     )
 
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
     assert PersistentSessionManager.get_active_session().configuration_name == "localhost"
 
-def test_deactivate():
+def test_deactivate(caplog):
+    caplog.set_level(logging.INFO)
     before_refresh = len(PersistentConfigurationManager.get_configurations())
     assert before_refresh > 0
     PersistentConfigurationManager.read_configurations()
     assert len(PersistentConfigurationManager.get_configurations()) == before_refresh
     assert PersistentSessionManager.get_active_session().configuration_name == "localhost"
-
     result = runner.invoke(
         app,
         ["deactivate"],
@@ -128,11 +132,11 @@
     )
 
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert (
         f"Deactivated session: {CONFIGURATION_NAME}"
-        in result.stdout
+        in caplog.text
     )
 
     try:
@@ -156,7 +160,8 @@
             f"CONFIGURATION SHOULD NOT BE ACTIVE. Active config: {active_config}",
             returncode=pytest.ExitCode.TESTS_FAILED)
 
-def test_configurations_delete():
+def test_configurations_delete(caplog):
+    caplog.set_level(logging.INFO)
     before_refresh = len(PersistentConfigurationManager.get_configurations())
     assert before_refresh > 0
     PersistentConfigurationManager.read_configurations()
@@ -177,10 +182,11 @@
     )
 
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
-def test_configurations_load():
+def test_configurations_load(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["configurations", "load"],
@@ -189,11 +195,11 @@
     )
 
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert (
         "Added configurations"
-        in result.stdout
+        in caplog.text
     )
 
 # def test_configurations_update():
@@ -214,10 +220,11 @@
 #     )
 
 #     assert result.exit_code == 0,\
-#         f"{result.stdout}"\
+#         f"{caplog.text}"\
 #         f"{result.stderr}"
 
-def test_configurations_list():
+def test_configurations_list(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["configurations", "list"],
@@ -225,15 +232,16 @@
     )
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert (
         "Aerie Host Configurations"
-        in result.stdout
+        in result.stdout # we use Console.print for this, not log
     )
 
 # We're activating at the end to ensure that localhost is still active
 # for other integration tests.
-def test_last_activate():
+def test_last_activate(caplog):
+    caplog.set_level(logging.INFO)
     before_refresh = len(PersistentConfigurationManager.get_configurations())
     assert before_refresh > 0
     PersistentConfigurationManager.read_configurations()
@@ -249,6 +257,6 @@
     )
 
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert PersistentSessionManager.get_active_session().configuration_name == "localhost"
diff --git a/tests/integration_tests/test_constraints.py b/tests/integration_tests/test_constraints.py
index 1b04ce1d..b21bb322 100644
--- a/tests/integration_tests/test_constraints.py
+++ b/tests/integration_tests/test_constraints.py
@@ -8,6 +8,7 @@
 import os
 import pytest
 import arrow
+import logging
 
 runner = CliRunner(mix_stderr = False)
 
@@ -50,50 +51,54 @@ def set_up_environment(request):
     plan_id = client.create_activity_plan(model_id, plan_to_create)
     client.simulate_plan(plan_id)
 
-def test_constraint_upload():
+def test_constraint_upload(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(app, ["constraints", "upload"],
                            input="Test" + "\n" + CONSTRAINT_PATH + "\n" + str(model_id) + "\n",
                            catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
-    assert "Created constraint" in result.stdout
+    assert "Created constraint" in caplog.text
     global constraint_id
-    for line in result.stdout.splitlines():
+    for line in caplog.text.splitlines():
         if not "Created constraint: " in line:
             continue
         # get constraint id from the end of the line
         constraint_id = int(line.split(": ")[1])
     assert constraint_id != -1, "Could not find constraint ID, constraint upload may have failed"\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
-def test_constraint_update():
+def test_constraint_update(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(app, ["constraints", "update"],
                            input=str(constraint_id) + "\n" + CONSTRAINT_PATH + "\n",
                            catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
-    assert "Updated constraint" in result.stdout
+    assert "Updated constraint" in caplog.text
 
-def test_constraint_violations():
+def test_constraint_violations(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(app, ["constraints", "violations"],
                            input=str(plan_id) + "\n",
                            catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     # Check that a constraint violation is returned with the open bracket and curly brace
     # (The integration test constraint should report a violation)
-    assert "Constraint violations: [{" in result.stdout
+    assert "Constraint violations: [{" in caplog.text
 
-def test_constraint_delete():
+def test_constraint_delete(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(app, ["constraints", "delete"],
                            input=str(constraint_id) + "\n",
                            catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
-    assert f"Successfully deleted constraint {str(constraint_id)}" in result.stdout
+    assert f"Successfully deleted constraint {str(constraint_id)}" in caplog.text
diff --git a/tests/integration_tests/test_expansion.py b/tests/integration_tests/test_expansion.py
index c8d68b27..17b7dc39 100644
--- a/tests/integration_tests/test_expansion.py
+++ b/tests/integration_tests/test_expansion.py
@@ -1,6 +1,7 @@
 import os
 import pytest
 import arrow
+import logging
 
 from typer.testing import CliRunner
 from pathlib import Path
@@ -76,49 +77,53 @@ def set_up_environment(request):
 # Uses plan and simulation dataset
 #######################
 
-def test_expansion_sequence_create():
+def test_expansion_sequence_create(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["expansion", "sequences", "create"],
         input=str(sim_id) + "\n" + str(expansion_sequence_id) + "\n" + str(2) + "\n",
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert "Successfully created sequence" in result.stdout
 
-def test_expansion_sequence_list():
+def test_expansion_sequence_list(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["expansion", "sequences", "list"],
         input="2" + "\n" + str(sim_id) + "\n",
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert "All sequences for Simulation Dataset" in result.stdout
 
-def test_expansion_sequence_download():
+def test_expansion_sequence_download(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["expansion", "sequences", "download"],
         input=str(sim_id) + "\n" + str(expansion_sequence_id) + "\n" + DOWNLOADED_FILE_NAME + "\n",
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
     path_to_sequence = Path(DOWNLOADED_FILE_NAME)
     assert path_to_sequence.exists()
     path_to_sequence.unlink()
 
-def test_expansion_sequence_delete():
+def test_expansion_sequence_delete(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["expansion", "sequences", "delete"],
         input=str(sim_id) + "\n" + str(expansion_sequence_id) + "\n",
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert "Successfully deleted sequence" in result.stdout
 
@@ -127,7 +132,8 @@
 # Uses model, command dictionary, and activity types
 #######################
 
-def test_expansion_set_create():
+def test_expansion_set_create(caplog):
+    caplog.set_level(logging.INFO)
     client.create_expansion_rule(
         expansion_logic="""
         export default function MyExpansion(props: {
@@ -147,7 +153,7 @@
         input=str(model_id) + "\n" + str(command_dictionary_id) + "\n" + "BakeBananaBread" + "\n",
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     global expansion_set_id
     for line in result.stdout.splitlines():
@@ -156,27 +162,29 @@
         # get expansion id from the end of the line
         expansion_set_id = int(line.split(": ")[1])
     assert expansion_set_id != -1, "Could not find expansion run ID, expansion create may have failed"\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
-def test_expansion_set_get():
+def test_expansion_set_get(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["expansion", "sets", "get"],
         input=str(expansion_set_id) + "\n",
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert "Expansion Set" in result.stdout and "Contents" in result.stdout
 
-def test_expansion_set_list():
+def test_expansion_set_list(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["expansion", "sets", "list"],
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
assert "Expansion Sets" in result.stdout @@ -184,25 +192,27 @@ def test_expansion_set_list(): # TEST EXPANSION RUNS # Uses plan and simulation dataset ####################### -def test_expansion_run_create(): +def test_expansion_run_create(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "runs", "create"], input=str(sim_id) + "\n" + str(expansion_set_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Expansion Run ID: " in result.stdout -def test_expansion_runs_list(): +def test_expansion_runs_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["expansion", "runs", "list"], input="2" + "\n" + str(sim_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Expansion Runs" in result.stdout @@ -210,13 +220,14 @@ def test_expansion_runs_list(): # DELETE MODELS ####################### -def test_model_delete(): +def test_model_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "delete"], input=str(model_id), catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"ID: {model_id} has been removed" in result.stdout + assert f"ID: {model_id} has been removed" in caplog.text diff --git a/tests/integration_tests/test_metadata.py b/tests/integration_tests/test_metadata.py index a49e73cb..1465456e 100644 --- a/tests/integration_tests/test_metadata.py +++ b/tests/integration_tests/test_metadata.py @@ -3,6 +3,7 @@ from aerie_cli.__main__ import app import os +import logging runner = CliRunner(mix_stderr = False) @@ -13,36 +14,40 @@ METADATA_SCHEMAS_PATH = os.path.join(FILES_PATH, "metadata_schemas") METADATA_SCHEMA_PATH = os.path.join(METADATA_SCHEMAS_PATH, "metadata_schema.json") -def test_metadata_list(): +def test_metadata_list(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "list"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert "Metadata Schemas" in result.stdout -def test_metadata_clean(): +def test_metadata_clean(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "clean"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "All metadata schemas have been deleted" in result.stdout + assert "All metadata schemas have been deleted" in caplog.text -def test_metadata_upload(): +def test_metadata_upload(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "upload"], input=METADATA_SCHEMA_PATH, catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "2 new schema have been added" in result.stdout + assert "2 new schema have been added" in caplog.text -def test_metadata_delete(): +def test_metadata_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke(app, ["metadata", "delete"], input="STRING_EXAMPLE", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "Schema `STRING_EXAMPLE` has been removed" in result.stdout + assert "Schema `STRING_EXAMPLE` has been removed" in caplog.text diff --git a/tests/integration_tests/test_models.py b/tests/integration_tests/test_models.py 
index 36171ace..6cc09b93 100644
--- a/tests/integration_tests/test_models.py
+++ b/tests/integration_tests/test_models.py
@@ -1,4 +1,5 @@
 import os
+import logging
 
 from typer.testing import CliRunner
 
@@ -15,21 +16,23 @@
 # Model Variables
 model_id = -1
 
-def test_model_clean():
+def test_model_clean(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["models", "clean"],
         catch_exceptions=False,
     )
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert (
         f"All mission models have been deleted"
-        in result.stdout
+        in caplog.text
     )
 
-def test_model_upload():
+def test_model_upload(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["models", "upload", "--time-tag-version"],
@@ -49,30 +52,32 @@
     model_id = latest_model.id
 
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert (
         f"Created new mission model: {MODEL_NAME} with Model ID: {model_id}"
-        in result.stdout
+        in caplog.text
     )
 
-def test_model_list():
+def test_model_list(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["models", "list"],
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
     assert "Current Mission Models" in result.stdout
 
-def test_model_delete():
+def test_model_delete(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["models", "delete"],
         input=str(model_id),
         catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
-    assert f"ID: {model_id} has been removed" in result.stdout
+    assert f"ID: {model_id} has been removed" in caplog.text
diff --git a/tests/integration_tests/test_plans.py b/tests/integration_tests/test_plans.py
index 16585426..88372df7 100644
--- a/tests/integration_tests/test_plans.py
+++ b/tests/integration_tests/test_plans.py
@@ -1,5 +1,6 @@
 import os
 import pytest
+import logging
 
 from typer.testing import CliRunner
 from pathlib import Path
@@ -54,7 +55,8 @@ def cli_plan_simulate():
 # Uses model
 #######################
 
-def test_plan_upload():
+def test_plan_upload(caplog):
+    caplog.set_level(logging.INFO)
     # Clean out plans first
     plans.clean()
 
@@ -66,7 +68,7 @@
         catch_exceptions=False,
     )
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
     # Get uploaded plan id
@@ -75,9 +77,10 @@
     global plan_id
     plan_id = latest_plan.id
 
-    assert f"Created plan ID: {plan_id}" in result.stdout
+    assert f"Created plan ID: {plan_id}" in caplog.text
 
-def test_plan_duplicate():
+def test_plan_duplicate(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(
         app,
         ["plans", "duplicate"],
@@ -85,7 +88,7 @@
         catch_exceptions=False,
     )
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
     # Get duplicated plan id
@@ -93,14 +96,15 @@
     latest_plan = resp[-1]
     duplicated_plan_id = latest_plan.id
 
-    assert f"Duplicate activity plan created with ID: {duplicated_plan_id}" in result.stdout
+    assert f"Duplicate activity plan created with ID: {duplicated_plan_id}" in caplog.text
 
-def test_plan_list():
+def test_plan_list(caplog):
+    caplog.set_level(logging.INFO)
     result = runner.invoke(app, ["plans", "list"],
                            catch_exceptions=False,)
     assert result.exit_code == 0,\
-        f"{result.stdout}"\
+        f"{caplog.text}"\
         f"{result.stderr}"
 
Plans" in result.stdout @@ -111,7 +115,8 @@ def test_plan_list(): ####################### -def test_list_empty_plan_collaborators(): +def test_list_empty_plan_collaborators(caplog): + caplog.set_level(logging.INFO) """ Should be no plan collaborators to start """ @@ -121,12 +126,13 @@ def test_list_empty_plan_collaborators(): input=str(plan_id) + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout}" f"{result.stderr}" + assert result.exit_code == 0, f"{caplog.text}" f"{result.stderr}" - assert "No collaborators" in result.stdout + assert "No collaborators" in caplog.text -def test_add_collaborators(): +def test_add_collaborators(caplog): + caplog.set_level(logging.INFO) """ Add all users as collaborators and check the final list """ @@ -139,15 +145,16 @@ def test_add_collaborators(): input=str(plan_id) + "\n" + username + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout} {result.stderr}" + assert result.exit_code == 0, f"{caplog.text} {result.stderr}" - assert "Success" in result.stdout + assert "Success" in caplog.text # Check full list of collaborators assert ADDITIONAL_USERS == client.list_plan_collaborators(plan_id) -def test_list_plan_collaborators(): +def test_list_plan_collaborators(caplog): + caplog.set_level(logging.INFO) """ Check that the `plans collaborators list` command lists all collaborators """ @@ -157,13 +164,14 @@ def test_list_plan_collaborators(): input=str(plan_id) + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout} {result.stderr}" + assert result.exit_code == 0, f"{caplog.text} {result.stderr}" for username in ADDITIONAL_USERS: - assert username in result.stdout + assert username in caplog.text -def test_delete_collaborators(): +def test_delete_collaborators(caplog): + caplog.set_level(logging.INFO) """ Delete a collaborator and verify the result """ @@ -174,9 +182,9 @@ def test_delete_collaborators(): input=str(plan_id) + "\n" + "1" + "\n", catch_exceptions=False, ) - assert result.exit_code == 0, f"{result.stdout} {result.stderr}" + assert result.exit_code == 0, f"{caplog.text} {result.stderr}" - assert "Success" in result.stdout + assert "Success" in caplog.text ####################### @@ -184,17 +192,19 @@ def test_delete_collaborators(): # Uses plan ####################### -def test_plan_simulate(): +def test_plan_simulate(caplog): + caplog.set_level(logging.INFO) result = cli_plan_simulate() sim_ids = client.get_simulation_dataset_ids_by_plan_id(plan_id) global sim_id sim_id = sim_ids[-1] assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Simulation completed" in result.stdout + assert f"Simulation completed" in caplog.text -def test_plan_download(): +def test_plan_download(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "download"], @@ -205,11 +215,12 @@ def test_plan_download(): assert path_to_plan.exists() path_to_plan.unlink() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote activity plan" in result.stdout + assert f"Wrote activity plan" in caplog.text -def test_plan_download_expanded_args(): +def test_plan_download_expanded_args(caplog): + caplog.set_level(logging.INFO) """ Download a plan, exercising the --full-args option to get effective activity arguments """ @@ -223,11 +234,12 @@ def test_plan_download_expanded_args(): assert path_to_plan.exists() path_to_plan.unlink() assert result.exit_code == 0,\ - 
f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote activity plan" in result.stdout + assert f"Wrote activity plan" in caplog.text -def test_plan_download_resources(): +def test_plan_download_resources(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "download-resources"], @@ -238,11 +250,12 @@ def test_plan_download_resources(): assert path_to_resources.exists() path_to_resources.unlink() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote resource timelines" in result.stdout + assert f"Wrote resource timelines" in caplog.text -def test_plan_download_simulation(): +def test_plan_download_simulation(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "download-simulation"], @@ -253,11 +266,12 @@ def test_plan_download_simulation(): assert path_to_resources.exists() path_to_resources.unlink() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Wrote activity plan" in result.stdout + assert f"Wrote activity plan" in caplog.text -def test_plan_create_config(): +def test_plan_create_config(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "create-config"], @@ -265,20 +279,22 @@ def test_plan_create_config(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Configuration Arguments for Plan ID: {plan_id}" in result.stdout - assert "initialPlantCount: 2" in result.stdout - assert "initialProducer: nobody" in result.stdout + assert f"Configuration Arguments for Plan ID: {plan_id}" in caplog.text + assert "initialPlantCount: 2" in caplog.text + assert "initialProducer: nobody" in caplog.text -def test_simulate_after_create_config(): +def test_simulate_after_create_config(caplog): + caplog.set_level(logging.INFO) result = cli_plan_simulate() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Simulation completed" in result.stdout + assert f"Simulation completed" in caplog.text -def test_plan_update_config(): +def test_plan_update_config(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "update-config"], @@ -286,59 +302,63 @@ def test_plan_update_config(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Configuration Arguments for Plan ID: {plan_id}" in result.stdout - assert "initialPlantCount: 3" in result.stdout - assert "initialProducer: somebody" in result.stdout + assert f"Configuration Arguments for Plan ID: {plan_id}" in caplog.text + assert "initialPlantCount: 3" in caplog.text + assert "initialProducer: somebody" in caplog.text -def test_simulate_after_update_config(): +def test_simulate_after_update_config(caplog): + caplog.set_level(logging.INFO) result = cli_plan_simulate() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Simulation completed" in result.stdout + assert f"Simulation completed" in caplog.text ####################### # DELETE PLANS ####################### -def test_plan_delete(): +def test_plan_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "delete"], input=str(plan_id) + "\n", catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"ID: {plan_id} has 
been removed." in result.stdout + assert f"ID: {plan_id} has been removed." in caplog.text -def test_plan_clean(): +def test_plan_clean(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["plans", "clean"], catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" assert ( f"All activity plans have been deleted" - in result.stdout + in caplog.text ) ####################### # DELETE MODELS ####################### -def test_model_delete(): +def test_model_delete(caplog): + caplog.set_level(logging.INFO) result = runner.invoke( app, ["models", "delete"], input=str(model_id), catch_exceptions=False,) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"ID: {model_id} has been removed" in result.stdout + assert f"ID: {model_id} has been removed" in caplog.text diff --git a/tests/integration_tests/test_scheduling.py b/tests/integration_tests/test_scheduling.py index bd237ee0..39c2b08c 100644 --- a/tests/integration_tests/test_scheduling.py +++ b/tests/integration_tests/test_scheduling.py @@ -1,6 +1,7 @@ import os import pytest import arrow +import logging from typer.testing import CliRunner from pathlib import Path @@ -77,22 +78,24 @@ def cli_schedule_upload(): os.remove(schedule_file_path) return result -def test_schedule_upload(): +def test_schedule_upload(caplog): + caplog.set_level(logging.INFO) result = cli_schedule_upload() assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert "Assigned goals in priority order" in result.stdout + assert "Assigned goals in priority order" in caplog.text global goal_id - for line in result.stdout.splitlines(): + for line in caplog.text.splitlines(): if not "Assigned goals in priority order" in line: continue # get expansion id from the end of the line goal_id = int(line.split("ID ")[1][:-1]) assert goal_id != -1, "Could not find goal ID, goal upload may have failed"\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" -def test_schedule_delete(): +def test_schedule_delete(caplog): + caplog.set_level(logging.INFO) assert goal_id != -1, "Goal id was not set" result = runner.invoke( @@ -102,11 +105,12 @@ def test_schedule_delete(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Successfully deleted Goal" in result.stdout + assert f"Successfully deleted Goal" in caplog.text -def test_schedule_delete_all(): +def test_schedule_delete_all(caplog): + caplog.set_level(logging.INFO) # Upload a goal to delete cli_schedule_upload() @@ -118,6 +122,6 @@ def test_schedule_delete_all(): catch_exceptions=False, ) assert result.exit_code == 0,\ - f"{result.stdout}"\ + f"{caplog.text}"\ f"{result.stderr}" - assert f"Deleting goals for Plan ID {plan_id}" in result.stdout + assert f"Deleting goals for Plan ID {plan_id}" in caplog.text