infra: Improve testing usability #117

Merged 5 commits on Feb 4, 2025

2 changes: 1 addition & 1 deletion .github/workflows/ci.yaml
@@ -38,7 +38,7 @@ jobs:
env:
PYTEST_ADDOPTS: "--durations=0"
run: |
hatch test --cover
hatch test -m "" --parallel --cover # override default exclusion of slow tests with -m ""
Collaborator: ah super nice that hatch already comes with pytest-xdist and coverage


docs:
runs-on: ubuntu-latest
13 changes: 11 additions & 2 deletions pyproject.toml
@@ -48,6 +48,16 @@ Changelog = "https://github.com/C2SM/Sirocco/blob/main/CHANGELOG.md"
# Configuration for [pytest](https://docs.pytest.org)
addopts = "--pdbcls=IPython.terminal.debugger:TerminalPdb"
norecursedirs = "tests/cases"
markers = [
"slow: slow integration tests which are not recommended to run locally for normal development"
]
filterwarnings = [
"error",
"ignore::UserWarning",
'ignore:datetime.datetime.utcfromtimestamp\(\) is deprecated:DeprecationWarning', # from aio_pika via duration
"ignore:There is no current event loop:DeprecationWarning", # from plumpy via aiida testing tools
"ignore:Object of type <DbNode> not in session:sqlalchemy.exc.SAWarning", # sqlalchemy via aiida testing tools
]

[tool.coverage.run]
# Configuration of [coverage.py](https://coverage.readthedocs.io)
@@ -82,8 +92,7 @@ installer = "uv"
extra-dependencies = [
"ipdb"
]
default-args = []
extra-args = ["--doctest-modules"]
extra-args = ["--doctest-modules", '-m not slow']

[[tool.hatch.envs.hatch-test.matrix]]
python = ["3.12"]
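
The new `slow` marker works together with the `-m not slow` default in `extra-args`: slow integration tests are deselected in local `hatch test` runs, and CI re-enables them by clearing the marker expression with `-m ""`. A minimal sketch of how a test opts into the marker (the test body below is hypothetical; only the `slow` marker name comes from this PR):

import time

import pytest


@pytest.mark.slow  # deselected by the default "-m not slow" in extra-args
def test_expensive_end_to_end():
    # Hypothetical placeholder for a long-running integration check.
    time.sleep(5)
    assert True
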
2 changes: 1 addition & 1 deletion src/sirocco/core/workflow.py
@@ -116,7 +116,7 @@ def from_config_file(cls: type[Self], config_path: str) -> Self:
return cls.from_config_workflow(ConfigWorkflow.from_config_file(config_path))

@classmethod
def from_config_workflow(cls: type[Self], config_workflow: ConfigWorkflow) -> Workflow:
def from_config_workflow(cls: type[Self], config_workflow: ConfigWorkflow) -> Self:
return cls(
name=config_workflow.name,
config_rootdir=config_workflow.rootdir,
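
Annotating the alternate constructor with `Self` instead of the concrete `Workflow` class keeps the return type accurate for subclasses, since `cls(...)` produces an instance of whatever class the classmethod is invoked on. A standalone sketch of the pattern (class and attribute names are illustrative, not Sirocco's):

from typing import Self


class Base:
    def __init__(self, name: str) -> None:
        self.name = name

    @classmethod
    def from_name(cls: type[Self], name: str) -> Self:
        # For a subclass Child, Child.from_name(...) is typed as Child, not Base.
        return cls(name)
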
56 changes: 56 additions & 0 deletions tests/conftest.py
@@ -1,7 +1,11 @@
import pathlib
import typing

import pytest

from sirocco import pretty_print
from sirocco.core import _tasks as core_tasks
from sirocco.core import workflow
from sirocco.parsing import _yaml_data_models as models

pytest_plugins = ["aiida.tools.pytest_fixtures"]
@@ -20,3 +24,55 @@ def minimal_config() -> models.ConfigWorkflow:
),
parameters={},
)


# configs that are tested for parsing
ALL_CONFIG_CASES = ["small", "parameters", "large"]


@pytest.fixture(params=ALL_CONFIG_CASES)
def config_case(request) -> typing.Iterator[str]:
return request.param


@pytest.fixture
def pprinter() -> typing.Iterator[pretty_print.PrettyPrinter]:
return pretty_print.PrettyPrinter()


def generate_config_paths(test_case: str):
return {
"yml": pathlib.Path(f"tests/cases/{test_case}/config/config.yml"),
"txt": pathlib.Path(f"tests/cases/{test_case}/data/config.txt"),
"svg": pathlib.Path(f"tests/cases/{test_case}/svg/config.svg"),
}


@pytest.fixture
def config_paths(config_case) -> typing.Iterator[dict[str, pathlib.Path]]:
return generate_config_paths(config_case)


def pytest_addoption(parser):
parser.addoption("--reserialize", action="store_true", default=False)


def serialize_worklfow(config_paths: dict[str, pathlib.Path], workflow: workflow.Workflow) -> None:
config_paths["txt"].write_text(pretty_print.PrettyPrinter().format(workflow))


def serialize_nml(config_paths: dict[str, pathlib.Path], workflow: workflow.Workflow) -> None:
nml_refdir = config_paths["txt"].parent / "ICON_namelists"
for task in workflow.tasks:
if isinstance(task, core_tasks.icon_task.IconTask):
task.create_workflow_namelists(folder=nml_refdir)


def pytest_configure(config):
if config.getoption("reserialize"):
print("Regenerating serialized references") # noqa: T201 # this is actual UX, not a debug print
for config_case in ALL_CONFIG_CASES:
config_paths = generate_config_paths(config_case)
wf = workflow.Workflow.from_config_file(str(config_paths["yml"]))
serialize_worklfow(config_paths=config_paths, workflow=wf)
serialize_nml(config_paths=config_paths, workflow=wf)
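
The shared `config_case`/`config_paths` fixtures centralize the per-case paths; a test module can narrow the cases it runs by re-declaring `config_case` with `pytest.mark.parametrize`, which takes precedence over the fixture's own params (the pattern used in the updated tests below). A hedged sketch with a hypothetical test:

import pytest


@pytest.mark.parametrize("config_case", ["small"])  # overrides the fixture params
def test_case_paths_resolve(config_case, config_paths):  # noqa: ARG001
    # config_paths is derived from the selected case via generate_config_paths.
    assert config_paths["yml"].name == "config.yml"
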
59 changes: 9 additions & 50 deletions tests/test_wc_workflow.py
@@ -5,7 +5,6 @@
from sirocco.core import Workflow
from sirocco.core._tasks.icon_task import IconTask
from sirocco.parsing._yaml_data_models import ConfigShellTask, ShellCliArgument
from sirocco.pretty_print import PrettyPrinter
from sirocco.vizgraph import VizGraph
from sirocco.workgraph import AiidaWorkGraph

@@ -26,28 +25,6 @@ def test_parsing_cli_parameters():
]


@pytest.fixture
def pprinter():
return PrettyPrinter()


def generate_config_paths(test_case: str):
return {
"yml": Path(f"tests/cases/{test_case}/config/config.yml"),
"txt": Path(f"tests/cases/{test_case}/data/config.txt"),
"svg": Path(f"tests/cases/{test_case}/svg/config.svg"),
}


# configs that are tested for parsing
all_uses_cases = ["small", "parameters", "large"]


@pytest.fixture(params=all_uses_cases)
def config_paths(request):
return generate_config_paths(request.param)


def test_parse_config_file(config_paths, pprinter):
reference_str = config_paths["txt"].read_text()
test_str = pprinter.format(Workflow.from_config_file(config_paths["yml"]))
@@ -59,24 +36,20 @@ def test_parse_config_file(config_paths, pprinter):
), f"Workflow graph doesn't match serialized data. New graph string dumped to {new_path}."


@pytest.mark.skip(reason="don't run it each time, uncomment to regenerate serilaized data")
def test_serialize_workflow(config_paths, pprinter):
config_paths["txt"].write_text(pprinter.format(Workflow.from_config_file(config_paths["yml"])))


def test_vizgraph(config_paths):
VizGraph.from_config_file(config_paths["yml"]).draw(file_path=config_paths["svg"])


# configs that are tested for running workgraph
@pytest.mark.slow
@pytest.mark.parametrize(
"config_path",
"config_case",
[
"tests/cases/small/config/config.yml",
"tests/cases/parameters/config/config.yml",
"small",
"parameters",
],
)
def test_run_workgraph(config_path, aiida_computer):
def test_run_workgraph(config_case, config_paths, aiida_computer): # noqa: ARG001 # config_case is overridden
"""Tests end-to-end the parsing from file up to running the workgraph.

Automatically uses the aiida_profile fixture to create a new profile. Note to debug the test with your profile
@@ -85,18 +58,18 @@ def test_run_workgraph(config_path, aiida_computer):
# some configs reference computer "localhost" which we need to create beforehand
aiida_computer("localhost").store()

core_workflow = Workflow.from_config_file(config_path)
core_workflow = Workflow.from_config_file(str(config_paths["yml"]))
aiida_workflow = AiidaWorkGraph(core_workflow)
out = aiida_workflow.run()
assert out.get("execution_count", None).value == 1


# configs containing task using icon plugin
@pytest.mark.parametrize(
"config_paths",
[generate_config_paths("large")],
"config_case",
["large"],
)
def test_nml_mod(config_paths, tmp_path):
def test_nml_mod(config_case, config_paths, tmp_path): # noqa: ARG001 # config_case is overridden
nml_refdir = config_paths["txt"].parent / "ICON_namelists"
wf = Workflow.from_config_file(config_paths["yml"])
# Create core namelists
@@ -111,17 +84,3 @@ def test_nml_mod(config_paths, tmp_path):
new_path = nml.with_suffix(".new")
new_path.write_text(test_nml)
assert ref_nml == test_nml, f"Namelist {nml.name} differs between ref and test"


@pytest.mark.skip(reason="don't run it each time, uncomment to regenerate serilaized data")
# configs containing task using icon plugin
@pytest.mark.parametrize(
"config_paths",
[generate_config_paths("large")],
)
def test_serialize_nml(config_paths):
nml_refdir = config_paths["txt"].parent / "ICON_namelists"
wf = Workflow.from_config_file(config_paths["yml"])
for task in wf.tasks:
if isinstance(task, IconTask):
task.create_workflow_namelists(folder=nml_refdir)
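
With the skip-marked regeneration tests removed, refreshing the serialized references now goes through the `--reserialize` option handled in `pytest_configure` in conftest.py. A sketch of triggering it programmatically; `--collect-only` is an assumption here to stop after collection instead of also running the suite, and the call must be made from the repository root so the relative tests/cases paths resolve:

import pytest

# Equivalent to `pytest --reserialize --collect-only -q` on the command line:
# pytest_configure regenerates tests/cases/*/data, and no tests are executed.
pytest.main(["--reserialize", "--collect-only", "-q"])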