diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5aa4a20fb..881edc5a5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -41,8 +41,8 @@ LICENSE @glotaran/pyglotaran_creators # cli /glotaran/cli/ @jsnel @glotaran/admins -# examples -/glotaran/examples/ @jsnel @glotaran/maintainers +# examples # Removed in PR #866 +# /glotaran/examples/ @jsnel @glotaran/maintainers # io /glotaran/io/ @jsnel @glotaran/maintainers diff --git a/changelog.md b/changelog.md index 82e56d60b..485517332 100644 --- a/changelog.md +++ b/changelog.md @@ -5,6 +5,7 @@ ### ✨ Features - ✨ Add simple decay megacomplexes (#860) +- ✨ Feature: Generators (#866) ### 👌 Minor Improvements: diff --git a/docs/source/notebooks/plugin_system/plugin_howto_write_a_io_plugin.ipynb b/docs/source/notebooks/plugin_system/plugin_howto_write_a_io_plugin.ipynb index e2ada8252..9ddcfaa6e 100644 --- a/docs/source/notebooks/plugin_system/plugin_howto_write_a_io_plugin.ipynb +++ b/docs/source/notebooks/plugin_system/plugin_howto_write_a_io_plugin.ipynb @@ -167,9 +167,9 @@ "metadata": {}, "outputs": [], "source": [ - "from glotaran.examples.sequential import dataset\n", "from glotaran.io import load_dataset\n", - "from glotaran.io import save_dataset" + "from glotaran.io import save_dataset\n", + "from glotaran.testing.simulated_data.sequential_spectral_decay import DATASET as dataset" ] }, { diff --git a/docs/source/notebooks/quickstart/quickstart.ipynb b/docs/source/notebooks/quickstart/quickstart.ipynb index 19bdb439b..22a1fc5f6 100644 --- a/docs/source/notebooks/quickstart/quickstart.ipynb +++ b/docs/source/notebooks/quickstart/quickstart.ipynb @@ -54,7 +54,7 @@ "metadata": {}, "outputs": [], "source": [ - "from glotaran.examples.sequential import dataset\n", + "from glotaran.testing.simulated_data.sequential_spectral_decay import DATASET as dataset\n", "\n", "dataset" ] diff --git a/glotaran/__init__.py b/glotaran/__init__.py index 178c8dd66..cc721f940 100644 --- a/glotaran/__init__.py +++ b/glotaran/__init__.py @@ -1,4 +1,5 @@ """Glotaran package __init__.py""" +from glotaran.deprecation.deprecation_utils import deprecate_submodule from glotaran.deprecation.modules.glotaran_root import read_model_from_yaml from glotaran.deprecation.modules.glotaran_root import read_model_from_yaml_file from glotaran.deprecation.modules.glotaran_root import read_parameters_from_csv_file @@ -10,6 +11,13 @@ __version__ = "0.6.0.dev0" +examples = deprecate_submodule( + deprecated_module_name="glotaran.examples", + new_module_name="glotaran.testing.simulated_data", + to_be_removed_in_version="0.8.0", + module_load_overwrite="glotaran.deprecation.modules.examples", +) + def __getattr__(attribute_name: str): from glotaran.deprecation.deprecation_utils import deprecate_module_attribute diff --git a/glotaran/builtin/io/folder/test/test_folder_plugin.py b/glotaran/builtin/io/folder/test/test_folder_plugin.py index 710281d17..50ba60fd4 100644 --- a/glotaran/builtin/io/folder/test/test_folder_plugin.py +++ b/glotaran/builtin/io/folder/test/test_folder_plugin.py @@ -1,23 +1,27 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING +from typing import Literal import pytest +from glotaran.analysis.optimize import optimize from glotaran.io import save_result -from glotaran.project.test.test_result import dummy_result # noqa: F401 +from glotaran.project.result import Result +from glotaran.testing.simulated_data.sequential_spectral_decay import SCHEME -if TYPE_CHECKING: - from typing import Literal - from 
glotaran.project.result import Result +@pytest.fixture(scope="session") +def dummy_result(): + """Dummy result for testing.""" + print(SCHEME.data["dataset_1"]) + yield optimize(SCHEME, raise_exception=True) @pytest.mark.parametrize("format_name", ("folder", "legacy")) def test_save_result_folder( tmp_path: Path, - dummy_result: Result, # noqa: F811 + dummy_result: Result, format_name: Literal["folder", "legacy"], ): """Check all files exist.""" @@ -34,9 +38,7 @@ def test_save_result_folder( "initial_parameters.csv", "optimized_parameters.csv", "parameter_history.csv", - "dataset1.nc", - "dataset2.nc", - "dataset3.nc", + "dataset_1.nc", ] for wanted in wanted_files: assert (result_dir / wanted).exists() @@ -46,7 +48,7 @@ def test_save_result_folder( @pytest.mark.parametrize("format_name", ("folder", "legacy")) def test_save_result_folder_error_path_is_file( tmp_path: Path, - dummy_result: Result, # noqa: F811 + dummy_result: Result, format_name: Literal["folder", "legacy"], ): """Raise error if result_path is a file without extension and overwrite is true.""" diff --git a/glotaran/builtin/io/yml/test/test_save_model.py b/glotaran/builtin/io/yml/test/test_save_model.py index 444141ae3..54dee7e9e 100644 --- a/glotaran/builtin/io/yml/test/test_save_model.py +++ b/glotaran/builtin/io/yml/test/test_save_model.py @@ -2,57 +2,44 @@ from typing import TYPE_CHECKING -from glotaran.examples.sequential import model from glotaran.io import load_model from glotaran.io import save_model +from glotaran.testing.simulated_data.sequential_spectral_decay import MODEL if TYPE_CHECKING: from pathlib import Path -want = """\ -default_megacomplex: decay +want = """default_megacomplex: decay-sequential dataset_groups: default: residual_function: variable_projection link_clp: null -k_matrix: - k1: - matrix: - (s2, s1): kinetic.1 - (s3, s2): kinetic.2 - (s3, s3): kinetic.3 -initial_concentration: - j1: - compartments: - - s1 - - s2 - - s3 - parameters: - - j.1 - - j.0 - - j.0 - exclude_from_normalize: [] irf: - irf1: + gaussian_irf: type: gaussian center: irf.center width: irf.width normalize: true backsweep: false megacomplex: - m1: - type: decay + megacomplex_sequential_decay: + type: decay-sequential + compartments: + - species_1 + - species_2 + - species_3 + rates: + - rates.species_1 + - rates.species_2 + - rates.species_3 dimension: time - k_matrix: - - k1 dataset: - dataset1: + dataset_1: group: default megacomplex: - - m1 - initial_concentration: j1 - irf: irf1 + - megacomplex_sequential_decay + irf: gaussian_irf """ @@ -62,7 +49,7 @@ def test_save_model( """Check all files exist.""" model_path = tmp_path / "testmodel.yml" - save_model(file_name=model_path, format_name="yml", model=model) + save_model(file_name=model_path, format_name="yml", model=MODEL) assert model_path.is_file() assert model_path.read_text() == want diff --git a/glotaran/builtin/io/yml/test/test_save_result.py b/glotaran/builtin/io/yml/test/test_save_result.py index a6d3e653b..d7946462e 100644 --- a/glotaran/builtin/io/yml/test/test_save_result.py +++ b/glotaran/builtin/io/yml/test/test_save_result.py @@ -1,21 +1,29 @@ from __future__ import annotations +from dataclasses import replace from pathlib import Path from textwrap import dedent -from typing import TYPE_CHECKING + +import pytest from glotaran import __version__ +from glotaran.analysis.optimize import optimize from glotaran.io import save_result -from glotaran.project.test.test_result import dummy_result # noqa: F401 +from glotaran.project.result import Result +from 
glotaran.testing.simulated_data.sequential_spectral_decay import SCHEME -if TYPE_CHECKING: - from glotaran.project.result import Result +@pytest.fixture(scope="session") +def dummy_result(): + """Dummy result for testing.""" + scheme = replace(SCHEME, maximum_number_function_evaluations=1) + print(scheme.data["dataset_1"]) + yield optimize(scheme, raise_exception=True) def test_save_result_yml( tmp_path: Path, - dummy_result: Result, # noqa: F811 + dummy_result: Result, ): """Check all files exist.""" expected = dedent( @@ -25,16 +33,17 @@ def test_save_result_yml( termination_reason: The maximum number of function evaluations is exceeded. glotaran_version: {__version__} free_parameter_labels: - - '1' - - '2' + - rates.species_1 + - rates.species_2 + - rates.species_3 + - irf.center + - irf.width scheme: scheme.yml initial_parameters: initial_parameters.csv optimized_parameters: optimized_parameters.csv parameter_history: parameter_history.csv data: - dataset1: dataset1.nc - dataset2: dataset2.nc - dataset3: dataset3.nc + dataset_1: dataset_1.nc """ ) @@ -46,8 +55,7 @@ def test_save_result_yml( assert (result_dir / "result.yml").exists() assert (result_dir / "initial_parameters.csv").exists() assert (result_dir / "optimized_parameters.csv").exists() - assert (result_dir / "dataset1.nc").exists() - assert (result_dir / "dataset2.nc").exists() - assert (result_dir / "dataset3.nc").exists() + assert (result_dir / "dataset_1.nc").exists() + # We can't check equality due to numerical fluctuations assert expected in (result_dir / "result.yml").read_text() diff --git a/glotaran/builtin/io/yml/test/test_save_scheme.py b/glotaran/builtin/io/yml/test/test_save_scheme.py index 7535c8943..21ca1f4a6 100644 --- a/glotaran/builtin/io/yml/test/test_save_scheme.py +++ b/glotaran/builtin/io/yml/test/test_save_scheme.py @@ -4,15 +4,15 @@ import xarray as xr -from glotaran.examples.sequential import dataset -from glotaran.examples.sequential import model -from glotaran.examples.sequential import parameter from glotaran.io import load_scheme from glotaran.io import save_dataset from glotaran.io import save_model from glotaran.io import save_parameters from glotaran.io import save_scheme from glotaran.project import Scheme +from glotaran.testing.simulated_data.sequential_spectral_decay import DATASET +from glotaran.testing.simulated_data.sequential_spectral_decay import MODEL +from glotaran.testing.simulated_data.sequential_spectral_decay import PARAMETER if TYPE_CHECKING: from pathlib import Path @@ -35,13 +35,13 @@ def test_save_scheme(tmp_path: Path): - save_model(model, tmp_path / "m.yml") - save_parameters(parameter, tmp_path / "p.csv") - save_dataset(dataset, tmp_path / "d.nc") + save_model(MODEL, tmp_path / "m.yml") + save_parameters(PARAMETER, tmp_path / "p.csv") + save_dataset(DATASET, tmp_path / "d.nc") scheme = Scheme( - model, - parameter, - {"dataset_1": dataset}, + MODEL, + PARAMETER, + {"dataset_1": DATASET}, ) scheme_path = tmp_path / "testscheme.yml" save_scheme(file_name=scheme_path, format_name="yml", scheme=scheme) diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index 167f98309..f35c42864 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -4,6 +4,7 @@ from typing import TYPE_CHECKING from ruamel.yaml import YAML +from ruamel.yaml.compat import StringIO from glotaran.deprecation.modules.builtin_io_yml import model_spec_deprecations from glotaran.deprecation.modules.builtin_io_yml import scheme_spec_deprecations @@ -83,7 +84,7 @@ 
def save_model(self, model: Model, file_name: str):
             if isinstance(prop, dict) and any(isinstance(k, tuple) for k in prop):
                 keys = [f"({k[0]}, {k[1]})" for k in prop]
                 item[prop_name] = {f"{k}": v for k, v in zip(keys, prop.values())}
-        _write_dict(file_name, model_dict)
+        write_dict(model_dict, file_name=file_name)
 
     def load_parameters(self, file_name: str) -> ParameterGroup:
         """Create a ParameterGroup instance from the specs defined in a file.
@@ -111,7 +112,7 @@ def load_scheme(self, file_name: str) -> Scheme:
 
     def save_scheme(self, scheme: Scheme, file_name: str):
         scheme_dict = asdict(scheme, folder=Path(file_name).parent)
-        _write_dict(file_name, scheme_dict)
+        write_dict(scheme_dict, file_name=file_name)
 
     def load_result(self, result_path: str) -> Result:
         """Create a :class:`Result` instance from the specs defined in a file.
@@ -141,7 +142,7 @@ def save_result(self, result: Result, result_path: str):
         """
         save_result(result, Path(result_path).parent.as_posix(), format_name="folder")
         result_dict = asdict(result, folder=Path(result_path).parent)
-        _write_dict(result_path, result_dict)
+        write_dict(result_dict, file_name=result_path)
 
     def _load_yml(self, file_name: str) -> dict[str, Any]:
         yaml = YAML()
@@ -153,12 +154,18 @@ def _load_yml(self, file_name: str) -> dict[str, Any]:
     return spec
 
 
-def _write_dict(file_name: str, data: Mapping[str, Any]):
+def write_dict(data: Mapping[str, Any], file_name: str | None = None) -> str | None:
+    """Write ``data`` as yaml to ``file_name`` or return it as a yaml string if it is None."""
     yaml = YAML()
     yaml.representer.add_representer(type(None), _yaml_none_representer)
     yaml.indent(mapping=2, sequence=2, offset=2)
-    with open(file_name, "w") as f:
-        yaml.dump(data, f)
+    if file_name is not None:
+        with open(file_name, "w") as f:
+            yaml.dump(data, f)
+    else:
+        stream = StringIO()
+        yaml.dump(data, stream)
+        return stream.getvalue()
 
 
 def _yaml_none_representer(representer: BaseRepresenter, data: Mapping[str, Any]) -> ScalarNode:
diff --git a/glotaran/deprecation/deprecation_utils.py b/glotaran/deprecation/deprecation_utils.py
index 7967d5383..2c0d49deb 100644
--- a/glotaran/deprecation/deprecation_utils.py
+++ b/glotaran/deprecation/deprecation_utils.py
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import os
+import re
 import sys
 from functools import wraps
 from importlib import import_module
@@ -590,6 +591,7 @@ def deprecate_module_attribute(
     deprecated_qual_name: str,
     new_qual_name: str,
     to_be_removed_in_version: str,
+    module_load_overwrite: str = "",
 ) -> Any:
     """Import and return an attribute from the new location.
 
@@ -607,6 +609,11 @@
         ``glotaran.parameter.ParameterGroup``
     to_be_removed_in_version : str
         Version the support for this usage will be removed.
+    module_load_overwrite : str
+        Overwrite the location the functionality will be loaded from,
+        by default ''.
+        This allows preserving functionality without polluting the new
+        module with code kept only for backwards compatibility.
 
     Returns
     -------
@@ -649,14 +656,20 @@ def __getattr__(attribute_name: str):
 
     .. # noqa: DAR402
     """
-    module_name = ".".join(new_qual_name.split(".")[:-1])
-    attribute_name = new_qual_name.split(".")[-1]
+    if not module_load_overwrite:
+        module_name = ".".join(new_qual_name.split(".")[:-1])
+        attribute_name = new_qual_name.split(".")[-1]
+        check_qual_names = (False, True)
+    else:
+        module_name = ".".join(module_load_overwrite.split(".")[:-1])
+        attribute_name = module_load_overwrite.split(".")[-1]
+        check_qual_names = (False, False)
 
     warn_deprecated(
-        deprecated_qual_name_usage=deprecated_qual_name,
-        new_qual_name_usage=new_qual_name,
+        deprecated_qual_name_usage=re.sub(r"\.__path__$", "", deprecated_qual_name),
+        new_qual_name_usage=re.sub(r"\.__path__$", "", new_qual_name),
         to_be_removed_in_version=to_be_removed_in_version,
-        check_qual_names=(False, True),
+        check_qual_names=check_qual_names,
         stacklevel=4,
         importable_indices=(1, 1),
     )
@@ -668,6 +681,7 @@ def deprecate_submodule(
     deprecated_module_name: str,
     new_module_name: str,
     to_be_removed_in_version: str,
+    module_load_overwrite: str = "",
 ) -> ModuleType:
     r"""Create a module at runtime which retrieves attributes from new module.
 
@@ -687,6 +701,11 @@
         ``'glotaran.project.result'``
     to_be_removed_in_version : str
         Version the support for this usage will be removed.
+    module_load_overwrite : str
+        Overwrite the location the deprecated functionality is loaded from,
+        by default ''.
+        This allows preserving functionality without polluting the new
+        module with code kept only for backwards compatibility.
 
     Returns
     -------
@@ -723,7 +742,11 @@
 
     .. # noqa: DAR402
     """
-    new_module = import_module(new_module_name)
+    if module_load_overwrite == "":
+        new_module = import_module(new_module_name)
+    else:
+        new_module = import_module(module_load_overwrite)
+
     deprecated_module = ModuleType(
         deprecated_module_name,
         f"Deprecated use {new_module_name!r} instead.\n\n{new_module.__doc__}",
@@ -739,6 +762,8 @@ def warn_getattr(attribute_name: str):
                 deprecated_qual_name=f"{deprecated_module_name}.{attribute_name}",
                 new_qual_name=f"{new_module_name}.{attribute_name}",
                 to_be_removed_in_version=to_be_removed_in_version,
+                module_load_overwrite=module_load_overwrite
+                and f"{module_load_overwrite}.{attribute_name}",
             )
 
     raise AttributeError(f"module {deprecated_module_name} has no attribute {attribute_name}")
diff --git a/glotaran/deprecation/modules/examples/__init__.py b/glotaran/deprecation/modules/examples/__init__.py
new file mode 100644
index 000000000..83f217886
--- /dev/null
+++ b/glotaran/deprecation/modules/examples/__init__.py
@@ -0,0 +1 @@
+"""Deprecation package for 'glotaran.examples'."""
diff --git a/glotaran/deprecation/modules/examples/sequential.py b/glotaran/deprecation/modules/examples/sequential.py
new file mode 100644
index 000000000..80507747b
--- /dev/null
+++ b/glotaran/deprecation/modules/examples/sequential.py
@@ -0,0 +1,36 @@
+"""Deprecated functionality export for 'glotaran.examples.sequential'."""
+
+sequential_deprecation_mapping = {
+    "sim_model": "SIMULATION_MODEL",
+    "dataset": "DATASET",
+    "model": "MODEL",
+    "scheme": "SCHEME",
+}
+shared_deprecation_mapping = {
+    "wanted_parameter": "SIMULATION_PARAMETER",
+    "parameter": "PARAMETER",
+    "_time": "TIME_AXIS",
+    "_spectral": "SPECTRAL_AXIS",
+}
+
+
+def __getattr__(attribute_name: str):
+    from glotaran.deprecation.deprecation_utils import deprecate_module_attribute
+
+    for deprecated, new in shared_deprecation_mapping.items():
+        if attribute_name == deprecated:
+            return deprecate_module_attribute(
+                deprecated_qual_name=f"glotaran.examples.sequential.{deprecated}",
+                new_qual_name=f"glotaran.testing.simulated_data.shared_decay.{new}",
+                to_be_removed_in_version="0.8.0",
+            )
+
+    for deprecated, new in sequential_deprecation_mapping.items():
+        if attribute_name == deprecated:
+            return deprecate_module_attribute(
+                deprecated_qual_name=f"glotaran.examples.sequential.{deprecated}",
+                new_qual_name=f"glotaran.testing.simulated_data.sequential_spectral_decay.{new}",
+                to_be_removed_in_version="0.8.0",
+            )
+
+    raise AttributeError(f"module {__name__} has no attribute {attribute_name}")
diff --git a/glotaran/deprecation/modules/test/test_changed_imports.py b/glotaran/deprecation/modules/test/test_changed_imports.py
index 2c7673bd7..44c5fd221 100644
--- a/glotaran/deprecation/modules/test/test_changed_imports.py
+++ b/glotaran/deprecation/modules/test/test_changed_imports.py
@@ -130,3 +130,17 @@ def test_io_read_data_file(recwarn: WarningsRecorder):
     result = changed_import_test_warn(recwarn, "glotaran.io", attribute_name="read_data_file")
 
     assert result.__code__ == load_dataset.__code__
+
+
+@pytest.mark.parametrize(
+    "attribute_name", ("sim_model", "dataset", "model", "scheme", "wanted_parameter", "parameter")
+)
+def test_examples_sequential(recwarn: WarningsRecorder, attribute_name: str):
+    """glotaran.examples.sequential exported attributes"""
+    from glotaran.examples import sequential  # noqa: F401
+
+    recwarn.clear()
+
+    changed_import_test_warn(
+        recwarn, "glotaran.examples.sequential", attribute_name=attribute_name
+    )
diff --git a/glotaran/deprecation/modules/test/test_model_model.py b/glotaran/deprecation/modules/test/test_model_model.py
index ce5cd5333..70c7ffdc6 100644
--- a/glotaran/deprecation/modules/test/test_model_model.py
+++ b/glotaran/deprecation/modules/test/test_model_model.py
@@ -1,28 +1,13 @@
 """Tests for deprecated methods in ``glotaran.model.model``."""
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
-
 import pytest
 
 from glotaran.deprecation.deprecation_utils import GlotaranDeprectedApiError
-from glotaran.testing.model_generators import SimpleModelGenerator
-
-if TYPE_CHECKING:
-    from glotaran.model import Model
-
-
-@pytest.fixture(scope="module")
-def dummy_model() -> Model:
-    """Minimal model instance for testing."""
-    generator = SimpleModelGenerator(
-        rates=[300e-3],
-        k_matrix="parallel",
-    )
-    return generator.model
+from glotaran.testing.simulated_data.parallel_spectral_decay import MODEL as dummy_model
 
 
-def test_model_model_dimension(dummy_model: Model):
+def test_model_model_dimension():
     """Raise ``GlotaranApiDeprecationWarning``."""
     expected = (
         "Usage of 'Model.model_dimension' was deprecated, "
@@ -38,7 +23,7 @@ def test_model_model_dimension(dummy_model: Model):
     assert str(excinfo.value) == expected
 
 
-def test_model_global_dimension(dummy_model: Model):
+def test_model_global_dimension():
     """Raise ``GlotaranApiDeprecationWarning``."""
     expected = (
         "Usage of 'Model.global_dimension' was deprecated, "
diff --git a/glotaran/deprecation/modules/test/test_parameter_parameter_group.py b/glotaran/deprecation/modules/test/test_parameter_parameter_group.py
index e13777338..994d4676c 100644
--- a/glotaran/deprecation/modules/test/test_parameter_parameter_group.py
+++ b/glotaran/deprecation/modules/test/test_parameter_parameter_group.py
@@ -3,23 +3,21 @@
 from textwrap import dedent
 
 from glotaran.deprecation.modules.test import deprecation_warning_on_call_test_helper
-from glotaran.examples.sequential import parameter
+from 
glotaran.testing.simulated_data.sequential_spectral_decay import PARAMETER def test_parameter_group_to_csv_no_stderr(tmp_path: Path): """``ParameterGroup.to_csv`` raises deprecation warning and saves file.""" parameter_path = tmp_path / "test_parameter.csv" deprecation_warning_on_call_test_helper( - parameter.to_csv, args=[parameter_path.as_posix()], raise_exception=True + PARAMETER.to_csv, args=[parameter_path.as_posix()], raise_exception=True ) expected = dedent( """\ label,value,expression,minimum,maximum,non-negative,vary,standard-error - j.1,1.0,None,-inf,inf,False,False,None - j.0,0.0,None,-inf,inf,False,False,None - kinetic.1,0.5,None,-inf,inf,False,True,None - kinetic.2,0.3,None,-inf,inf,False,True,None - kinetic.3,0.1,None,-inf,inf,False,True,None + rates.species_1,0.5,None,-inf,inf,False,True,None + rates.species_2,0.3,None,-inf,inf,False,True,None + rates.species_3,0.1,None,-inf,inf,False,True,None irf.center,0.3,None,-inf,inf,False,True,None irf.width,0.1,None,-inf,inf,False,True,None """ diff --git a/glotaran/deprecation/modules/test/test_project_result.py b/glotaran/deprecation/modules/test/test_project_result.py index 0e1e34d9c..ddc19a233 100644 --- a/glotaran/deprecation/modules/test/test_project_result.py +++ b/glotaran/deprecation/modules/test/test_project_result.py @@ -1,29 +1,32 @@ """Test deprecated functionality in 'glotaran.project.result'.""" from __future__ import annotations -from typing import TYPE_CHECKING - import pytest +from glotaran.analysis.optimize import optimize from glotaran.deprecation.modules.test import deprecation_warning_on_call_test_helper -from glotaran.project.test.test_result import dummy_result # noqa: F401 +from glotaran.project.result import Result +from glotaran.testing.simulated_data.sequential_spectral_decay import SCHEME -if TYPE_CHECKING: - from glotaran.project.result import Result +@pytest.fixture(scope="session") +def dummy_result(): + """Dummy result for testing.""" + print(SCHEME.data["dataset_1"]) + yield optimize(SCHEME, raise_exception=True) -def test_Result_get_dataset_method(dummy_result: Result): # noqa: F811 +def test_result_get_dataset_method(dummy_result: Result): """Result.get_dataset(dataset_label) gives correct dataset.""" _, result = deprecation_warning_on_call_test_helper( - dummy_result.get_dataset, args=["dataset1"], raise_exception=True + dummy_result.get_dataset, args=["dataset_1"], raise_exception=True ) - assert result == dummy_result.data["dataset1"] + assert result == dummy_result.data["dataset_1"] -def test_Result_get_dataset_method_error(dummy_result: Result): # noqa: F811 +def test_result_get_dataset_method_error(dummy_result: Result): """Result.get_dataset(dataset_label) error on wrong key.""" with pytest.raises(ValueError, match="Unknown dataset 'foo'"): diff --git a/glotaran/deprecation/modules/test/test_project_scheme.py b/glotaran/deprecation/modules/test/test_project_scheme.py index 17a50ada5..9ae30fedf 100644 --- a/glotaran/deprecation/modules/test/test_project_scheme.py +++ b/glotaran/deprecation/modules/test/test_project_scheme.py @@ -1,7 +1,6 @@ """Test deprecated functionality in 'glotaran.project.schmeme'.""" from __future__ import annotations -from functools import lru_cache from typing import TYPE_CHECKING import pytest @@ -9,27 +8,14 @@ from glotaran.deprecation.modules.test import deprecation_warning_on_call_test_helper from glotaran.project.scheme import Scheme -from glotaran.testing.model_generators import SimpleModelGenerator +from glotaran.testing.simulated_data.parallel_spectral_decay 
import DATASET +from glotaran.testing.simulated_data.parallel_spectral_decay import MODEL +from glotaran.testing.simulated_data.parallel_spectral_decay import PARAMETER if TYPE_CHECKING: from pathlib import Path -@lru_cache(maxsize=1) -def create_test_args(): - """Objects to initialize a ``Scheme`` for testing.""" - generator = SimpleModelGenerator( - rates=[501e-3, 202e-4, 105e-5], - irf={"center": 1.3, "width": 7.8}, - k_matrix="sequential", - ) - model, parameters = generator.model_and_parameters - dataset = xr.DataArray([[1, 2, 3]], coords=[("e", [1]), ("c", [1, 2, 3])]).to_dataset( - name="data" - ) - return model, parameters, dataset - - def test_scheme_from_yaml_file_method(tmp_path: Path): """Create Scheme from file.""" scheme_path = tmp_path / "scheme.yml" @@ -72,7 +58,7 @@ def test_scheme_from_yaml_file_method(tmp_path: Path): def test_scheme_group_tolerance(): """Argument ``group_tolerance`` raises deprecation and maps to ``clp_link_tolerance``.""" - model, parameters, dataset = create_test_args() + model, parameters, dataset = MODEL, PARAMETER, DATASET warnings, result = deprecation_warning_on_call_test_helper( Scheme, @@ -92,7 +78,7 @@ def test_scheme_group_tolerance(): ) def test_scheme_group(group: bool): """Argument ``group`` raises deprecation and maps to ``dataset_groups.default.link_clp``.""" - model, parameters, dataset = create_test_args() + model, parameters, dataset = MODEL, PARAMETER, DATASET warnings, result = deprecation_warning_on_call_test_helper( Scheme, @@ -114,7 +100,7 @@ def test_scheme_non_negative_least_squares(non_negative_least_squares: bool, exp """Argument ``non_negative_least_squares`` raises deprecation and maps to ``dataset_groups.default.residual_function``. """ - model, parameters, dataset = create_test_args() + model, parameters, dataset = MODEL, PARAMETER, DATASET warnings, result = deprecation_warning_on_call_test_helper( Scheme, diff --git a/glotaran/deprecation/test/dummy_package/__init__.py b/glotaran/deprecation/test/dummy_package/__init__.py index 6637d8959..4fb55ef45 100644 --- a/glotaran/deprecation/test/dummy_package/__init__.py +++ b/glotaran/deprecation/test/dummy_package/__init__.py @@ -7,3 +7,9 @@ new_module_name="glotaran.deprecation.deprecation_utils", to_be_removed_in_version="0.6.0", ) +overwritten_module = deprecate_submodule( + deprecated_module_name="glotaran.deprecation.test.dummy_package.overwritten_module", + new_module_name="glotaran.does_not._need_to_exists", + to_be_removed_in_version="0.6.0", + module_load_overwrite="glotaran.deprecation.deprecation_utils", +) diff --git a/glotaran/deprecation/test/dummy_package/deprecated_module_attribute.py b/glotaran/deprecation/test/dummy_package/deprecated_module_attribute.py index 089f5d4c2..83fbca62f 100644 --- a/glotaran/deprecation/test/dummy_package/deprecated_module_attribute.py +++ b/glotaran/deprecation/test/dummy_package/deprecated_module_attribute.py @@ -13,4 +13,12 @@ def __getattr__(attribute_name: str): to_be_removed_in_version="0.6.0", ) + if attribute_name == "foo_bar": + return deprecate_module_attribute( + deprecated_qual_name=("glotaran.deprecation.test.dummy_package.foo_bar"), + new_qual_name="glotaran.does_not._need_to_exists", + to_be_removed_in_version="0.6.0", + module_load_overwrite="glotaran.deprecation.deprecation_utils.parse_version", + ) + raise AttributeError(f"module {__name__} has no attribute {attribute_name}") diff --git a/glotaran/deprecation/test/test_deprecation_utils.py b/glotaran/deprecation/test/test_deprecation_utils.py index 
33ecb01c0..64b977dbe 100644 --- a/glotaran/deprecation/test/test_deprecation_utils.py +++ b/glotaran/deprecation/test/test_deprecation_utils.py @@ -503,6 +503,19 @@ def test_deprecate_module_attribute(): assert Path(record[0].filename) == Path(__file__) +@pytest.mark.usefixtures("glotaran_0_3_0") +def test_deprecate_module_attribute_overwrite(): + """Qualname was only used for the warning""" + + with pytest.warns(GlotaranApiDeprecationWarning) as record: + + from glotaran.deprecation.test.dummy_package.deprecated_module_attribute import foo_bar + + assert foo_bar.__code__ == parse_version.__code__ + assert Path(record[0].filename) == Path(__file__) + assert "glotaran.does_not._need_to_exists" in str(record[0].message) + + @pytest.mark.usefixtures("glotaran_0_3_0") def test_deprecate_submodule(recwarn: WarningsRecorder): """Raise warning when Attribute of fake module is used""" @@ -531,6 +544,20 @@ def test_deprecate_submodule_from_import(recwarn: WarningsRecorder): assert Path(recwarn[0].filename) == Path(__file__) +@pytest.mark.usefixtures("glotaran_0_3_0") +def test_deprecate_submodule_from_import_overwrite(recwarn: WarningsRecorder): + """Qualname was only used for the warning""" + + from glotaran.deprecation.test.dummy_package.overwritten_module import ( # noqa: F401 + parse_version, + ) + + assert len(recwarn) == 1 + assert recwarn[0].category == GlotaranApiDeprecationWarning + assert Path(recwarn[0].filename) == Path(__file__) + assert "glotaran.does_not._need_to_exists" in str(recwarn[0].message) + + @pytest.mark.usefixtures("glotaran_0_3_0") def test_deprecate_submodule_import_error(recwarn: WarningsRecorder): """Raise warning when Attribute of fake module is imported""" diff --git a/glotaran/examples/__init__.py b/glotaran/examples/__init__.py deleted file mode 100644 index 157c7326a..000000000 --- a/glotaran/examples/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from glotaran.examples import sequential diff --git a/glotaran/examples/sequential.py b/glotaran/examples/sequential.py deleted file mode 100644 index cf7275b68..000000000 --- a/glotaran/examples/sequential.py +++ /dev/null @@ -1,152 +0,0 @@ -import numpy as np - -from glotaran.analysis.simulation import simulate -from glotaran.builtin.megacomplexes.decay import DecayMegacomplex -from glotaran.builtin.megacomplexes.spectral import SpectralMegacomplex -from glotaran.model import Model -from glotaran.parameter import ParameterGroup -from glotaran.project import Scheme - -sim_model = Model.from_dict( - { - "initial_concentration": { - "j1": { - "compartments": ["s1", "s2", "s3"], - "parameters": ["j.1", "j.0", "j.0"], - }, - }, - "k_matrix": { - "k1": { - "matrix": { - ("s2", "s1"): "kinetic.1", - ("s3", "s2"): "kinetic.2", - ("s3", "s3"): "kinetic.3", - } - } - }, - "megacomplex": { - "m1": { - "type": "decay", - "k_matrix": ["k1"], - }, - "m2": { - "type": "spectral", - "shape": { - "s1": "sh1", - "s2": "sh2", - "s3": "sh3", - }, - }, - }, - "shape": { - "sh1": { - "type": "gaussian", - "amplitude": "shapes.amps.1", - "location": "shapes.locs.1", - "width": "shapes.width.1", - }, - "sh2": { - "type": "gaussian", - "amplitude": "shapes.amps.2", - "location": "shapes.locs.2", - "width": "shapes.width.2", - }, - "sh3": { - "type": "gaussian", - "amplitude": "shapes.amps.3", - "location": "shapes.locs.3", - "width": "shapes.width.3", - }, - }, - "irf": { - "irf1": {"type": "gaussian", "center": "irf.center", "width": "irf.width"}, - }, - "dataset": { - "dataset1": { - "initial_concentration": "j1", - "megacomplex": ["m1"], - 
"global_megacomplex": ["m2"], - "irf": "irf1", - } - }, - }, - megacomplex_types={"decay": DecayMegacomplex, "spectral": SpectralMegacomplex}, -) - -wanted_parameter = ParameterGroup.from_dict( - { - "j": [ - ["1", 1, {"non-negative": False, "vary": False}], - ["0", 0, {"non-negative": False, "vary": False}], - ], - "kinetic": [ - ["1", 0.5], - ["2", 0.3], - ["3", 0.1], - ], - "shapes": {"amps": [30, 20, 40], "locs": [620, 630, 650], "width": [40, 20, 60]}, - "irf": [["center", 0.3], ["width", 0.1]], - } -) - -parameter = ParameterGroup.from_dict( - { - "j": [ - ["1", 1, {"vary": False, "non-negative": False}], - ["0", 0, {"vary": False, "non-negative": False}], - ], - "kinetic": [ - ["1", 0.5], - ["2", 0.3], - ["3", 0.1], - ], - "irf": [["center", 0.3], ["width", 0.1]], - } -) - -_time = np.arange(-1, 20, 0.01) -_spectral = np.arange(600, 700, 1.4) - -dataset = simulate( - sim_model, - "dataset1", - wanted_parameter, - {"time": _time, "spectral": _spectral}, - noise=True, - noise_std_dev=1e-2, -) - -model = Model.from_dict( - { - "initial_concentration": { - "j1": {"compartments": ["s1", "s2", "s3"], "parameters": ["j.1", "j.0", "j.0"]}, - }, - "k_matrix": { - "k1": { - "matrix": { - ("s2", "s1"): "kinetic.1", - ("s3", "s2"): "kinetic.2", - ("s3", "s3"): "kinetic.3", - } - } - }, - "megacomplex": { - "m1": { - "type": "decay", - "k_matrix": ["k1"], - } - }, - "irf": { - "irf1": {"type": "gaussian", "center": "irf.center", "width": "irf.width"}, - }, - "dataset": { - "dataset1": { - "initial_concentration": "j1", - "megacomplex": ["m1"], - "irf": "irf1", - } - }, - }, - megacomplex_types={"decay": DecayMegacomplex}, -) -scheme = Scheme(model=model, parameters=parameter, data={"dataset1": dataset}) diff --git a/glotaran/examples/test/test_example.py b/glotaran/examples/test/test_example.py deleted file mode 100644 index 7bd6e74a4..000000000 --- a/glotaran/examples/test/test_example.py +++ /dev/null @@ -1,7 +0,0 @@ -import xarray as xr - -from glotaran.examples.sequential import dataset - - -def test_dataset(): - assert isinstance(dataset, xr.Dataset) diff --git a/glotaran/model/test/test_model.py b/glotaran/model/test/test_model.py index 6ea35fd95..3ae010e5c 100644 --- a/glotaran/model/test/test_model.py +++ b/glotaran/model/test/test_model.py @@ -1,3 +1,4 @@ +from copy import copy from math import inf from math import nan from textwrap import dedent @@ -23,7 +24,7 @@ from glotaran.model.weight import Weight from glotaran.parameter import Parameter from glotaran.parameter import ParameterGroup -from glotaran.testing.model_generators import SimpleModelGenerator +from glotaran.testing.simulated_data.parallel_spectral_decay import MODEL @model_item( @@ -491,16 +492,11 @@ def test_only_constraint(): def test_model_markdown(): """Full markdown string is as expected.""" - model = SimpleModelGenerator( - rates=[501e-3, 202e-4, 105e-5, {"non-negative": True}], - irf={"center": 1.3, "width": 7.8}, - k_matrix="sequential", - ) expected = dedent( """\ # Model - _Megacomplex Types_: decay + _Megacomplex Types_: decay-parallel ## Dataset Groups @@ -509,69 +505,50 @@ def test_model_markdown(): * *residual_function*: variable_projection * *link_clp*: None - ## K Matrix - - * **k1**: - * *Label*: k1 - * *Matrix*: - * ('s2', 's1'): rates.1(5.01e-01) - * ('s3', 's2'): rates.2(2.02e-02) - * ('s3', 's3'): rates.3(1.05e-03) - - - ## Initial Concentration - - * **j1**: - * *Label*: j1 - * *Compartments*: - * s1 - * s2 - * s3 - * *Parameters*: - * inputs.1(1.00e+00, fixed) - * inputs.0(0.00e+00, fixed) - * 
inputs.0(0.00e+00, fixed)
-          * *Exclude From Normalize*:
-
 
         ## Irf
 
-        * **irf1** (multi-gaussian):
-          * *Label*: irf1
-          * *Type*: multi-gaussian
-          * *Center*:
-            * irf.center(1.30e+00)
-          * *Width*:
-            * irf.width(7.80e+00)
+        * **gaussian_irf** (gaussian):
+          * *Label*: gaussian_irf
+          * *Type*: gaussian
+          * *Center*: irf.center(nan)
+          * *Width*: irf.width(nan)
           * *Normalize*: True
           * *Backsweep*: False
 
         ## Megacomplex
 
-        * **mc1** (None):
-          * *Label*: mc1
+        * **megacomplex_parallel_decay** (decay-parallel):
+          * *Label*: megacomplex_parallel_decay
+          * *Type*: decay-parallel
+          * *Compartments*:
+            * species_1
+            * species_2
+            * species_3
+          * *Rates*:
+            * rates.species_1(nan)
+            * rates.species_2(nan)
+            * rates.species_3(nan)
           * *Dimension*: time
-          * *K Matrix*:
-            * k1
 
         ## Dataset
 
-        * **dataset1**:
-          * *Label*: dataset1
+        * **dataset_1**:
+          * *Label*: dataset_1
           * *Group*: default
           * *Megacomplex*:
-            * mc1
-          * *Initial Concentration*: j1
-          * *Irf*: irf1
+            * megacomplex_parallel_decay
+          * *Irf*: gaussian_irf
 
         """
     )
+    model = copy(MODEL)
+    model.dataset_group_models["default"].link_clp = None
     # Preprocessing to remove trailing whitespace after '* *Matrix*:'
-    result = "\n".join([line.rstrip(" ") for line in str(model.markdown()).split("\n")])
+    result = "\n".join([line.rstrip(" ") for line in str(MODEL.markdown()).split("\n")])
     print(result)
 
     assert result == expected
diff --git a/glotaran/project/generators/__init__.py b/glotaran/project/generators/__init__.py
new file mode 100644
index 000000000..4b130bdd7
--- /dev/null
+++ b/glotaran/project/generators/__init__.py
@@ -0,0 +1,4 @@
+"""The glotaran generator package."""
+
+from glotaran.project.generators.generator import generate_model
+from glotaran.project.generators.generator import generate_model_yml
diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py
new file mode 100644
index 000000000..f36d91735
--- /dev/null
+++ b/glotaran/project/generators/generator.py
@@ -0,0 +1,287 @@
+"""The glotaran generator module."""
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import TypedDict
+from typing import cast
+
+from glotaran.builtin.io.yml.yml import write_dict
+from glotaran.model import Model
+
+
+def _generate_decay_model(
+    *, nr_compartments: int, irf: bool, spectral: bool, decay_type: str
+) -> dict[str, Any]:
+    """Generate a decay model dictionary.
+
+    Parameters
+    ----------
+    nr_compartments : int
+        The number of compartments.
+    irf : bool
+        Whether to add a gaussian irf.
+    spectral : bool
+        Whether to add a spectral model.
+    decay_type : str
+        The type of the decay.
+
+    Returns
+    -------
+    dict[str, Any] :
+        The generated model dictionary.
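+
+    Examples
+    --------
+    A minimal sketch of the returned dictionary (illustrative only, derived
+    from the construction below) for ``nr_compartments=1``, ``irf=False``,
+    ``spectral=False`` and ``decay_type="parallel"``::
+
+        {
+            "megacomplex": {
+                "megacomplex_parallel_decay": {
+                    "type": "decay-parallel",
+                    "compartments": ["species_1"],
+                    "rates": ["rates.species_1"],
+                },
+            },
+            "dataset": {"dataset_1": {"megacomplex": ["megacomplex_parallel_decay"]}},
+        }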
+    """
+    compartments = [f"species_{i+1}" for i in range(nr_compartments)]
+    rates = [f"rates.species_{i+1}" for i in range(nr_compartments)]
+    model = {
+        "megacomplex": {
+            f"megacomplex_{decay_type}_decay": {
+                "type": f"decay-{decay_type}",
+                "compartments": compartments,
+                "rates": rates,
+            },
+        },
+        "dataset": {"dataset_1": {"megacomplex": [f"megacomplex_{decay_type}_decay"]}},
+    }
+    if spectral:
+        model["megacomplex"]["megacomplex_spectral"] = {  # type:ignore[index]
+            "type": "spectral",
+            "shape": {
+                compartment: f"shape_species_{i+1}" for i, compartment in enumerate(compartments)
+            },
+        }
+        model["shape"] = {
+            f"shape_species_{i+1}": {
+                "type": "gaussian",
+                "amplitude": f"shapes.species_{i+1}.amplitude",
+                "location": f"shapes.species_{i+1}.location",
+                "width": f"shapes.species_{i+1}.width",
+            }
+            for i in range(nr_compartments)
+        }
+        model["dataset"]["dataset_1"]["global_megacomplex"] = [  # type:ignore[index]
+            "megacomplex_spectral"
+        ]
+    if irf:
+        model["dataset"]["dataset_1"]["irf"] = "gaussian_irf"  # type:ignore[index]
+        model["irf"] = {
+            "gaussian_irf": {"type": "gaussian", "center": "irf.center", "width": "irf.width"},
+        }
+    return model
+
+
+def generate_parallel_decay_model(
+    *, nr_compartments: int = 1, irf: bool = False
+) -> dict[str, Any]:
+    """Generate a parallel decay model dictionary.
+
+    Parameters
+    ----------
+    nr_compartments : int
+        The number of compartments.
+    irf : bool
+        Whether to add a gaussian irf.
+
+    Returns
+    -------
+    dict[str, Any] :
+        The generated model dictionary.
+    """
+    return _generate_decay_model(
+        nr_compartments=nr_compartments, irf=irf, spectral=False, decay_type="parallel"
+    )
+
+
+def generate_parallel_spectral_decay_model(
+    *, nr_compartments: int = 1, irf: bool = False
+) -> dict[str, Any]:
+    """Generate a parallel spectral decay model dictionary.
+
+    Parameters
+    ----------
+    nr_compartments : int
+        The number of compartments.
+    irf : bool
+        Whether to add a gaussian irf.
+
+    Returns
+    -------
+    dict[str, Any] :
+        The generated model dictionary.
+    """
+    return _generate_decay_model(
+        nr_compartments=nr_compartments, irf=irf, spectral=True, decay_type="parallel"
+    )
+
+
+def generate_sequential_decay_model(
+    *, nr_compartments: int = 1, irf: bool = False
+) -> dict[str, Any]:
+    """Generate a sequential decay model dictionary.
+
+    Parameters
+    ----------
+    nr_compartments : int
+        The number of compartments.
+    irf : bool
+        Whether to add a gaussian irf.
+
+    Returns
+    -------
+    dict[str, Any] :
+        The generated model dictionary.
+    """
+    return _generate_decay_model(
+        nr_compartments=nr_compartments, irf=irf, spectral=False, decay_type="sequential"
+    )
+
+
+def generate_sequential_spectral_decay_model(
+    *, nr_compartments: int = 1, irf: bool = False
+) -> dict[str, Any]:
+    """Generate a sequential spectral decay model dictionary.
+
+    Parameters
+    ----------
+    nr_compartments : int
+        The number of compartments.
+    irf : bool
+        Whether to add a gaussian irf.
+
+    Returns
+    -------
+    dict[str, Any] :
+        The generated model dictionary.
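+
+    Examples
+    --------
+    This generator is registered as ``"spectral_decay_sequential"`` and is
+    typically invoked through ``generate_model`` (illustrative sketch)::
+
+        model = generate_model(
+            generator_name="spectral_decay_sequential",
+            generator_arguments={"nr_compartments": 3, "irf": True},
+        )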
+    """
+    return _generate_decay_model(
+        nr_compartments=nr_compartments, irf=irf, spectral=True, decay_type="sequential"
+    )
+
+
+generators: dict[str, Callable] = {
+    "decay_parallel": generate_parallel_decay_model,
+    "spectral_decay_parallel": generate_parallel_spectral_decay_model,
+    "decay_sequential": generate_sequential_decay_model,
+    "spectral_decay_sequential": generate_sequential_spectral_decay_model,
+}
+
+available_generators: list[str] = list(generators.keys())
+
+
+class GeneratorArguments(TypedDict, total=False):
+    """Arguments used by ``generate_model`` and ``generate_model_yml``.
+
+    Parameters
+    ----------
+    nr_compartments : int
+        The number of compartments.
+    irf : bool
+        Whether to add a gaussian irf.
+
+    See Also
+    --------
+    generate_model
+    generate_model_yml
+    """
+
+    nr_compartments: int
+    irf: bool
+
+
+def generate_model(*, generator_name: str, generator_arguments: GeneratorArguments) -> Model:
+    """Generate a model.
+
+    Parameters
+    ----------
+    generator_name : str
+        The generator to use.
+    generator_arguments : GeneratorArguments
+        Arguments for the generator.
+
+    Returns
+    -------
+    Model
+        The generated model.
+
+    See Also
+    --------
+    generate_parallel_decay_model
+    generate_parallel_spectral_decay_model
+    generate_sequential_decay_model
+    generate_sequential_spectral_decay_model
+
+    Raises
+    ------
+    ValueError
+        Raised when an unknown generator is specified.
+    """
+    if generator_name not in generators:
+        raise ValueError(
+            f"Unknown model generator '{generator_name}'. "
+            f"Known generators are: {list(generators.keys())}"
+        )
+    model = generators[generator_name](**generator_arguments)
+    return Model.from_dict(model)
+
+
+def generate_model_yml(*, generator_name: str, generator_arguments: GeneratorArguments) -> str:
+    """Generate a model as yml string.
+
+    Parameters
+    ----------
+    generator_name : str
+        The generator to use.
+    generator_arguments : GeneratorArguments
+        Arguments for the generator.
+
+    Returns
+    -------
+    str
+        The generated model yml string.
+
+    See Also
+    --------
+    generate_parallel_decay_model
+    generate_parallel_spectral_decay_model
+    generate_sequential_decay_model
+    generate_sequential_spectral_decay_model
+
+    Raises
+    ------
+    ValueError
+        Raised when an unknown generator is specified.
+    """
+    if generator_name not in generators:
+        raise ValueError(
+            f"Unknown model generator '{generator_name}'. "
+            f"Known generators are: {list(generators.keys())}"
+        )
+    model = generators[generator_name](**generator_arguments)
+    return cast(str, write_dict(model))
diff --git a/glotaran/project/generators/test/test_generate_decay_model.py b/glotaran/project/generators/test/test_generate_decay_model.py
new file mode 100644
index 000000000..a301d0a52
--- /dev/null
+++ b/glotaran/project/generators/test/test_generate_decay_model.py
@@ -0,0 +1,71 @@
+import pytest
+
+from glotaran.project.generators.generator import generate_model
+
+
+@pytest.mark.parametrize("megacomplex_type", ["parallel", "sequential"])
+@pytest.mark.parametrize("irf", [True, False])
+@pytest.mark.parametrize("spectral", [True, False])
+def test_generate_decay_model(megacomplex_type: str, irf: bool, spectral: bool):
+    nr_compartments = 5
+    expected_compartments = [f"species_{i+1}" for i in range(nr_compartments)]
+    model_type = f"spectral_decay_{megacomplex_type}" if spectral else f"decay_{megacomplex_type}"
+    model = generate_model(
+        generator_name=model_type,
+        generator_arguments={
+            "nr_compartments": nr_compartments,
+            "irf": irf,
+        },
+    )
+    print(model)
+
+    assert (
+        f"megacomplex_{megacomplex_type}_decay" in model.megacomplex  # type:ignore[attr-defined]
+    )
+    megacomplex = model.megacomplex[  # type:ignore[attr-defined]
+        f"megacomplex_{megacomplex_type}_decay"
+    ]
+    assert megacomplex.type == f"decay-{megacomplex_type}"
+    assert megacomplex.compartments == expected_compartments
+    assert [r.full_label for r in megacomplex.rates] == [
+        f"rates.species_{i+1}" for i in range(nr_compartments)
+    ]
+
+    assert "dataset_1" in model.dataset  # type:ignore[attr-defined]
+    dataset = model.dataset["dataset_1"]  # type:ignore[attr-defined]
+    assert dataset.megacomplex == [f"megacomplex_{megacomplex_type}_decay"]
+
+    if spectral:
+        assert "megacomplex_spectral" in model.megacomplex  # type:ignore[attr-defined]
+        megacomplex = model.megacomplex["megacomplex_spectral"]  # type:ignore[attr-defined]
+        assert expected_compartments == list(megacomplex.shape.keys())
+        expected_shapes = [f"shape_species_{i+1}" for i in range(nr_compartments)]
+        assert expected_shapes == list(megacomplex.shape.values())
+
+        for i, shape in enumerate(expected_shapes):
+            assert shape in model.shape  # type:ignore[attr-defined]
+            assert model.shape[shape].type == "gaussian"  # type:ignore[attr-defined]
+            assert (
+                model.shape[shape].amplitude.full_label  # type:ignore[attr-defined]
+                == f"shapes.species_{i+1}.amplitude"
+            )
+            assert (
+                model.shape[shape].location.full_label  # type:ignore[attr-defined]
+                == f"shapes.species_{i+1}.location"
+            )
+            assert (
+                model.shape[shape].width.full_label  # type:ignore[attr-defined]
+                == f"shapes.species_{i+1}.width"
+            )
+        assert dataset.global_megacomplex == ["megacomplex_spectral"]
+
+    if irf:
+        assert dataset.irf == "gaussian_irf"
+        assert "gaussian_irf" in model.irf  # type:ignore[attr-defined]
+        assert (
+            model.irf["gaussian_irf"].center.full_label  # type:ignore[attr-defined]
+            == "irf.center"
+        )
+        assert (
+            model.irf["gaussian_irf"].width.full_label == "irf.width"  # type:ignore[attr-defined]
+        )
diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py
index 60c27fd37..7f9f5f80e 100644
--- a/glotaran/project/test/test_result.py
+++ b/glotaran/project/test/test_result.py
@@ -4,36 +4,15 @@
 from IPython.core.formatters import format_display_data
 
 from glotaran.analysis.optimize import optimize
-from glotaran.analysis.simulation import simulate
-from glotaran.analysis.test.models import 
ThreeDatasetDecay as suite -from glotaran.project import Scheme from glotaran.project.result import Result +from glotaran.testing.simulated_data.sequential_spectral_decay import SCHEME @pytest.fixture(scope="session") def dummy_result(): """Dummy result for testing.""" - - wanted_parameters = suite.wanted_parameters - data = {} - for i in range(3): - global_axis = getattr(suite, "global_axis" if i == 0 else f"global_axis{i+1}") - model_axis = getattr(suite, "model_axis" if i == 0 else f"model_axis{i+1}") - - data[f"dataset{i+1}"] = simulate( - suite.sim_model, - f"dataset{i+1}", - wanted_parameters, - {"global": global_axis, "model": model_axis}, - ) - scheme = Scheme( - model=suite.model, - parameters=suite.initial_parameters, - data=data, - maximum_number_function_evaluations=1, - ) - - yield optimize(scheme) + print(SCHEME.data["dataset_1"]) + yield optimize(SCHEME, raise_exception=True) def test_result_ipython_rendering(dummy_result: Result): diff --git a/glotaran/testing/model_generators.py b/glotaran/testing/model_generators.py deleted file mode 100644 index eeb9d638a..000000000 --- a/glotaran/testing/model_generators.py +++ /dev/null @@ -1,296 +0,0 @@ -"""Model generators used to generate simple models from a set of inputs.""" - -from __future__ import annotations - -from dataclasses import dataclass -from dataclasses import field -from typing import TYPE_CHECKING -from typing import Literal - -from glotaran.model import Model -from glotaran.parameter.parameter_group import ParameterGroup - -if TYPE_CHECKING: - from glotaran.utils.ipython import MarkdownStr - - -def _split_iterable_in_non_dict_and_dict_items( - input_list: list[float, dict[str, bool | float]], -) -> tuple[list[float], list[dict[str, bool | float]]]: - """Split an iterable (list) into non-dict and dict items. - - Parameters - ---------- - input_list : list[float, dict[str, bool | float]] - A list of values of type `float` and a dict with parameter options, e.g. - `[1, 2, 3, {"vary": False, "non-negative": True}]` - - Returns - ------- - tuple[list[float], list[dict[str, bool | float]]] - Split a list into non-dict (`values`) and dict items (`defaults`), - return a tuple (`values`, `defaults`) - """ - values: list = [val for val in input_list if not isinstance(val, dict)] - defaults: list = [val for val in input_list if isinstance(val, dict)] - return values, defaults - - -@dataclass -class SimpleModelGenerator: - """A minimal boilerplate model and parameters generator. 
- - Generates a model (together with the parameters specification) based on - parameter input values assigned to the generator's attributes - """ - - rates: list[float] = field(default_factory=list) - """A list of values representing decay rates""" - k_matrix: Literal["parallel", "sequential"] | dict[tuple[str, str], str] = "parallel" - """"A `dict` with a k_matrix specification or `Literal["parallel", "sequential"]`""" - compartments: list[str] | None = None - """A list of compartment names""" - irf: dict[str, float] = field(default_factory=dict) - """A dict of items specifying an irf""" - initial_concentration: list[float] = field(default_factory=list) - """A list values representing the initial concentration""" - dispersion_coefficients: list[float] = field(default_factory=list) - """A list of values representing the dispersion coefficients""" - dispersion_center: float | None = None - """A value representing the dispersion center""" - default_megacomplex: str = "decay" - """The default_megacomplex identifier""" - # TODO: add support for a spectral model: - # shapes: list[float] = field(default_factory=list, init=False) - - @property - def valid(self) -> bool: - """Check if the generator state is valid. - - Returns - ------- - bool - Generator state obtained by calling the generated model's - `valid` function with the generated parameters as input. - """ - try: - return self.model.valid(parameters=self.parameters) - except ValueError: - return False - - def validate(self) -> str: - """Call `validate` on the generated model and return its output. - - Returns - ------- - str - A string listing problems in the generated model and parameters if any. - """ - return self.model.validate(parameters=self.parameters) - - @property - def model(self) -> Model: - """Return the generated model. - - Returns - ------- - Model - The generated model of type :class:`glotaran.model.Model`. - """ - return Model.from_dict(self.model_dict) - - @property - def model_dict(self) -> dict: - """Return a dict representation of the generated model. - - Returns - ------- - dict - A dict representation of the generated model. - """ - return self._model_dict() - - @property - def parameters(self) -> ParameterGroup: - """Return the generated parameters of type :class:`glotaran.parameter.ParameterGroup`. - - Returns - ------- - ParameterGroup - The generated parameters of type of type :class:`glotaran.parameter.ParameterGroup`. - """ - return ParameterGroup.from_dict(self.parameters_dict) - - @property - def parameters_dict(self) -> dict: - """Return a dict representation of the generated parameters. - - Returns - ------- - dict - A dict representing the generated parameters. - """ - return self._parameters_dict() - - @property - def model_and_parameters(self) -> tuple[Model, ParameterGroup]: - """Return generated model and parameters. - - Returns - ------- - tuple[Model, ParameterGroup] - A model of type :class:`glotaran.model.Model` and - and parameters of type :class:`glotaran.parameter.ParameterGroup`. - """ - return self.model, self.parameters - - @property - def _rates(self) -> tuple[list[float], list[dict[str, bool | float]]]: - """Validate input to rates, return a tuple of rates and parameter defaults. - - Returns - ------- - tuple[list[float], list[dict[str, bool | float]]] - A tuple of a list of rates and a dict containing parameter defaults - - Raises - ------ - ValueError - Raised if rates is not a list of at least one number. 
- """ - if not isinstance(self.rates, list): - raise ValueError(f"generator.rates: must be a `list`, got: {self.rates}") - if len(self.rates) == 0: - raise ValueError("generator.rates: must be a `list` with 1 or more rates") - if not isinstance(self.rates[0], (int, float)): - raise ValueError(f"generator.rates: 1st element must be numeric, got: {self.rates[0]}") - return _split_iterable_in_non_dict_and_dict_items(self.rates) - - def _parameters_dict_items(self) -> dict: - """Return a dict with items used in constructing the parameters. - - Returns - ------- - dict - A dict with items used in constructing a parameters dict. - """ - rates, rates_defaults = self._rates - items = {"rates": rates} - if rates_defaults: - items["rates_defaults"] = rates_defaults[0] - items["irf"] = [[key, value] for key, value in self.irf.items()] - if self.initial_concentration: - items["inputs"] = self.initial_concentration - elif self.k_matrix == "parallel": - items["inputs"] = [ - ["1", 1], - {"vary": False}, - ] - elif self.k_matrix == "sequential": - items["inputs"] = [ - ["1", 1], - ["0", 0], - {"vary": False}, - ] - return items - - def _model_dict_items(self) -> dict: - """Return a dict with items used in constructing the model. - - Returns - ------- - dict - A dict with items used in constructing a model dict. - """ - rates, _ = self._rates - nr = len(rates) - indices = list(range(1, 1 + nr)) - items = {"default_megacomplex": self.default_megacomplex} - if self.irf: - items["irf"] = { - "type": "multi-gaussian", - "center": ["irf.center"], - "width": ["irf.width"], - } - if isinstance(self.k_matrix, dict): - items["k_matrix"] = self.k_matrix - items["input_parameters"] = [f"inputs.{i}" for i in indices] - items["compartments"] = [f"s{i}" for i in indices] - # TODO: get unique compartments from user defined k_matrix - if self.k_matrix == "parallel": - items["input_parameters"] = ["inputs.1"] * nr - items["k_matrix"] = {(f"s{i}", f"s{i}"): f"rates.{i}" for i in indices} - elif self.k_matrix == "sequential": - items["input_parameters"] = ["inputs.1"] + ["inputs.0"] * (nr - 1) - items["k_matrix"] = { - (f"s{i if i==nr else i+1}", f"s{i}"): f"rates.{i}" for i in indices - } - - if self.k_matrix in ("parallel", "sequential"): - items["compartments"] = [f"s{i}" for i in indices] - return items - - def _parameters_dict(self) -> dict: - """Return a parameters dict. - - Returns - ------- - dict - A dict that can be passed to the `ParameterGroup` `from_dict` method. - """ - items = self._parameters_dict_items() - rates = items["rates"] - if "rates_defaults" in items: - rates += [items["rates_defaults"]] - result = {"rates": rates} - if items["irf"]: - result["irf"] = items["irf"] - result["inputs"] = items["inputs"] - return result - - def _model_dict(self) -> dict: - """Return a model dict. - - Returns - ------- - dict - A dict that can be passed to the `Model` `from_dict` method. 
- """ - items = self._model_dict_items() - result = {"default_megacomplex": items["default_megacomplex"]} - result.update( - { - "initial_concentration": { - "j1": { - "compartments": items["compartments"], - "parameters": items["input_parameters"], - }, - }, - "megacomplex": { - "mc1": {"k_matrix": ["k1"]}, - }, - "k_matrix": {"k1": {"matrix": items["k_matrix"]}}, - "dataset": { - "dataset1": { - "initial_concentration": "j1", - "megacomplex": ["mc1"], - }, - }, - } - ) - if "irf" in items: - result["dataset"]["dataset1"].update({"irf": "irf1"}) - result["irf"] = { - "irf1": items["irf"], - } - return result - - def markdown(self) -> MarkdownStr: - """Return a markdown string representation of the generated model and parameters. - - Returns - ------- - MarkdownStr - A markdown string - """ - return self.model.markdown(parameters=self.parameters) diff --git a/glotaran/testing/simulated_data/__init__.py b/glotaran/testing/simulated_data/__init__.py new file mode 100644 index 000000000..cd5566321 --- /dev/null +++ b/glotaran/testing/simulated_data/__init__.py @@ -0,0 +1 @@ +"""Package containing simulated data for testing and quick demos.""" diff --git a/glotaran/testing/simulated_data/parallel_spectral_decay.py b/glotaran/testing/simulated_data/parallel_spectral_decay.py new file mode 100644 index 000000000..fb0874a06 --- /dev/null +++ b/glotaran/testing/simulated_data/parallel_spectral_decay.py @@ -0,0 +1,33 @@ +"""A simple parallel decay for testing purposes.""" + +from glotaran.analysis.simulation import simulate +from glotaran.io import load_model +from glotaran.project import Scheme +from glotaran.project.generators import generate_model_yml +from glotaran.testing.simulated_data.shared_decay import PARAMETER +from glotaran.testing.simulated_data.shared_decay import SIMULATION_COORDINATES +from glotaran.testing.simulated_data.shared_decay import SIMULATION_PARAMETER +from glotaran.testing.simulated_data.shared_decay import * # noqa F403 + +SIMULATION_MODEL_YML = generate_model_yml( + generator_name="spectral_decay_parallel", + generator_arguments={"nr_compartments": 3, "irf": True}, +) +SIMULATION_MODEL = load_model(SIMULATION_MODEL_YML, format_name="yml_str") + +MODEL_YML = generate_model_yml( + generator_name="decay_parallel", + generator_arguments={"nr_compartments": 3, "irf": True}, +) +MODEL = load_model(MODEL_YML, format_name="yml_str") + +DATASET = simulate( + SIMULATION_MODEL, + "dataset_1", + SIMULATION_PARAMETER, + SIMULATION_COORDINATES, + noise=True, + noise_std_dev=1e-2, +) + +SCHEME = Scheme(model=MODEL, parameters=PARAMETER, data={"dataset_1": DATASET}) diff --git a/glotaran/testing/simulated_data/sequential_spectral_decay.py b/glotaran/testing/simulated_data/sequential_spectral_decay.py new file mode 100644 index 000000000..0a75ba917 --- /dev/null +++ b/glotaran/testing/simulated_data/sequential_spectral_decay.py @@ -0,0 +1,34 @@ +"""A simple sequential decay for testing purposes.""" + +from glotaran.analysis.simulation import simulate +from glotaran.io import load_model +from glotaran.project import Scheme +from glotaran.project.generators import generate_model_yml +from glotaran.testing.simulated_data.shared_decay import PARAMETER +from glotaran.testing.simulated_data.shared_decay import SIMULATION_COORDINATES +from glotaran.testing.simulated_data.shared_decay import SIMULATION_PARAMETER +from glotaran.testing.simulated_data.shared_decay import * # noqa F403 + +SIMULATION_MODEL_YML = generate_model_yml( + generator_name="spectral_decay_sequential", + 
generator_arguments={"nr_compartments": 3, "irf": True}, # type:ignore[arg-type] +) +SIMULATION_MODEL = load_model(SIMULATION_MODEL_YML, format_name="yml_str") + +MODEL_YML = generate_model_yml( + generator_name="decay_sequential", + generator_arguments={"nr_compartments": 3, "irf": True}, # type:ignore[arg-type] +) +MODEL = load_model(MODEL_YML, format_name="yml_str") + + +DATASET = simulate( + SIMULATION_MODEL, + "dataset_1", + SIMULATION_PARAMETER, + SIMULATION_COORDINATES, + noise=True, + noise_std_dev=1e-2, +) + +SCHEME = Scheme(model=MODEL, parameters=PARAMETER, data={"dataset_1": DATASET}) diff --git a/glotaran/testing/simulated_data/shared_decay.py b/glotaran/testing/simulated_data/shared_decay.py new file mode 100644 index 000000000..89e28ae94 --- /dev/null +++ b/glotaran/testing/simulated_data/shared_decay.py @@ -0,0 +1,46 @@ +"""Shared variables for simulated decays.""" +import numpy as np + +from glotaran.io import load_parameters + +SIMULATION_PARAMETER_YML = """ +rates: + - [species_1, 0.5] + - [species_2, 0.3] + - [species_3, 0.1] + +irf: + - [center, 0.3] + - [width, 0.1] + +shapes: + species_1: + - [amplitude, 30] + - [location, 620] + - [width, 40] + species_2: + - [amplitude, 20] + - [location, 630] + - [width, 20] + species_3: + - [amplitude, 60] + - [location, 650] + - [width, 60] +""" +SIMULATION_PARAMETER = load_parameters(SIMULATION_PARAMETER_YML, format_name="yml_str") + +PARAMETER_YML = """ +rates: + - [species_1, 0.5] + - [species_2, 0.3] + - [species_3, 0.1] + +irf: + - [center, 0.3] + - [width, 0.1] +""" +PARAMETER = load_parameters(PARAMETER_YML, format_name="yml_str") + +TIME_AXIS = np.arange(-1, 20, 0.01) +SPECTRAL_AXIS = np.arange(600, 700, 1.4) +SIMULATION_COORDINATES = {"time": TIME_AXIS, "spectral": SPECTRAL_AXIS} diff --git a/glotaran/testing/test/test_example.py b/glotaran/testing/test/test_example.py new file mode 100644 index 000000000..4d472e77d --- /dev/null +++ b/glotaran/testing/test/test_example.py @@ -0,0 +1,9 @@ +import xarray as xr + +from glotaran.testing.simulated_data.parallel_spectral_decay import DATASET as parallel_dataset +from glotaran.testing.simulated_data.sequential_spectral_decay import DATASET as sequential_dataset + + +def test_dataset(): + assert isinstance(parallel_dataset, xr.Dataset) + assert isinstance(sequential_dataset, xr.Dataset) diff --git a/glotaran/testing/test/test_model_generators.py b/glotaran/testing/test/test_model_generators.py deleted file mode 100644 index 8ecafd859..000000000 --- a/glotaran/testing/test/test_model_generators.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import annotations - -from copy import deepcopy - -import pytest -from rich import pretty -from rich import print # pylint: disable=W0622 - -from glotaran.model import Model -from glotaran.parameter import ParameterGroup -from glotaran.testing.model_generators import SimpleModelGenerator - -pretty.install() - - -REF_PARAMETER_DICT = { - "rates": [ - ["1", 501e-3], - ["2", 202e-4], - ["3", 105e-5], - {"non-negative": True}, - ], - "irf": [["center", 1.3], ["width", 7.8]], - "inputs": [ - ["1", 1], - ["0", 0], - {"vary": False}, - ], -} - -REF_MODEL_DICT = { - "default_megacomplex": "decay", - "initial_concentration": { - "j1": { - "compartments": ["s1", "s2", "s3"], - "parameters": ["inputs.1", "inputs.0", "inputs.0"], - }, - }, - "megacomplex": { - "mc1": {"k_matrix": ["k1"]}, - }, - "k_matrix": { - "k1": { - "matrix": { - ("s2", "s1"): "rates.1", - ("s3", "s2"): "rates.2", - ("s3", "s3"): "rates.3", - } - } - }, - "irf": { - "irf1": { 
- "type": "multi-gaussian", - "center": ["irf.center"], - "width": ["irf.width"], - }, - }, - "dataset": { - "dataset1": { - "initial_concentration": "j1", - "irf": "irf1", - "megacomplex": ["mc1"], - }, - }, -} - - -def simple_diff_between_string(string1, string2): - return "".join(c2 for c1, c2 in zip(string1, string2) if c1 != c2) - - -def test_three_component_sequential_model(): - ref_model = Model.from_dict(deepcopy(REF_MODEL_DICT)) - ref_parameters = ParameterGroup.from_dict(deepcopy(REF_PARAMETER_DICT)) - generator = SimpleModelGenerator( - rates=[501e-3, 202e-4, 105e-5, {"non-negative": True}], - irf={"center": 1.3, "width": 7.8}, - k_matrix="sequential", - ) - for key, _ in REF_PARAMETER_DICT.items(): - assert key in generator.parameters_dict - # TODO: check contents - - model, parameters = generator.model_and_parameters - assert str(ref_model) == str(model), print( - simple_diff_between_string(str(model), str(ref_model)) - ) - assert str(ref_parameters) == str(parameters), print( - simple_diff_between_string(str(parameters), str(ref_parameters)) - ) - - -def test_only_rates_no_irf(): - generator = SimpleModelGenerator(rates=[0.1, 0.02, 0.003]) - assert "irf" not in generator.model_dict.keys() - - -def test_no_rates(): - generator = SimpleModelGenerator() - assert generator.valid is False - - -def test_one_rate(): - generator = SimpleModelGenerator([1]) - assert generator.valid is True - assert "is valid" in generator.validate() - - -def test_rates_not_a_list(): - generator = SimpleModelGenerator(1) - assert generator.valid is False - with pytest.raises(ValueError): - print(generator.validate()) - - -def test_set_rates_delayed(): - generator = SimpleModelGenerator() - generator.rates = [1, 2, 3] - assert generator.valid is True