From 0e8f11386771ff6c87744b8f48ed4fa2e3648728 Mon Sep 17 00:00:00 2001 From: Joern Weissenborn Date: Sun, 8 Aug 2021 23:59:30 +0200 Subject: [PATCH 01/32] Added basic project class for experimenting --- glotaran/project/generators/__init__.py | 0 glotaran/project/generators/generator.py | 31 +++++++++ glotaran/project/project.py | 80 ++++++++++++++++++++++++ 3 files changed, 111 insertions(+) create mode 100644 glotaran/project/generators/__init__.py create mode 100644 glotaran/project/generators/generator.py create mode 100644 glotaran/project/project.py diff --git a/glotaran/project/generators/__init__.py b/glotaran/project/generators/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py new file mode 100644 index 000000000..d03d2340d --- /dev/null +++ b/glotaran/project/generators/generator.py @@ -0,0 +1,31 @@ +from __future__ import annotations + + +def generate_parallel_model(nr_species: int = 1): + species = [f"species_{i}" for i in range(nr_species)] + initial_concentration_parameters = [f"intital_concentration{i}" for i in range(nr_species)] + k_matrix = {f"(species_{i}, species_{i})": f"decay_species_{i}" for i in range(nr_species)} + return { + "initial_concentration": { + "initial_concentration_dataset_1": { + "compartments": species, + "parameters": initial_concentration_parameters, + }, + }, + "k_matrix": {"k_matrix_parallel": {"matrix": k_matrix}}, + "megacomplex": { + "megacomplex_parallel_decay": { + "type": "decay", + "k_matrix": ["k_matrix_parallel"], + }, + }, + "dataset": { + "dataset_1": { + "initial_concentration": "initial_concentration_dataset_1", + "megacomplex": ["megacomplex_parallel_decay"], + } + }, + } + + +generators = {"decay_parallel": generate_parallel_model} diff --git a/glotaran/project/project.py b/glotaran/project/project.py new file mode 100644 index 000000000..47455dbb6 --- /dev/null +++ b/glotaran/project/project.py @@ -0,0 +1,80 @@ +from __future__ import annotations + +from dataclasses import dataclass +from os import getcwd +from os import mkdir +from pathlib import Path +from typing import List +from typing import Literal + +from yaml import dump +from yaml import load + +from glotaran import __version__ as gta_version +from glotaran.io import load_model +from glotaran.project.generators.generator import generators + +TEMPLATE = """version: {gta_version} + +name: {name} +""" + + +@dataclass +class Project: + """A project represents a projectfolder on disk which contains a project file. + + A projectfile is a file in `yml` format with name `project.gta` + + """ + + folder: str | Path + name: str + version: str + + def __post_init__(self): + if isinstance(self.folder, str): + self.folder = Path(self.folder).parent + pass + + @classmethod + def create(cls, name: str | None = None): + project_folder = Path(getcwd()) + name = name if name else project_folder.name + project_file = project_folder / "project.gta" + with open(project_file, "w") as f: + f.write(TEMPLATE.format(gta_version=gta_version, name=name)) + + with open(project_file) as f: + project_dict = load(f) + project_dict["folder"] = project_folder + print("ass", project_file) + return cls(**project_dict) + + @property + def model_dir(self) -> Path: + return self.folder / "models/" + + def create_model_dir_if_not_exist(self): + if not self.model_dir.exists(): + mkdir(self.model_dir) + + def models(self): + if not self.model_dir.exists(): + return {} + # print(model_file) + return {model_file.name: load_model(model_file) for model_file in self.model_dir.iterdir()} + + def has_models(self): + return len(self.models()) != 0 + + def create_model(self, model_type: Literal[list[generators.keys()]] = "decay_parallel"): + self.create_model_dir_if_not_exist() + model = generators[model_type] + with open(self.model_dir / "p_model.yml", "w") as f: + print(model()) + f.write(dump(model())) + + def run(self): + if not self.models: + raise ValueError(f"No models defined for project {self.name}") From 3b68cc6c9800fdeb023b6fe8847c1bd7ca34517e Mon Sep 17 00:00:00 2001 From: Joern Weissenborn Date: Mon, 9 Aug 2021 01:23:12 +0200 Subject: [PATCH 02/32] Basic project idea --- glotaran/model/item.py | 12 ++++++++++ glotaran/model/model.py | 30 +++++++++++++++++------- glotaran/project/generators/generator.py | 4 ++-- glotaran/project/project.py | 9 ++++--- 4 files changed, 42 insertions(+), 13 deletions(-) diff --git a/glotaran/model/item.py b/glotaran/model/item.py index f3133090f..66139b893 100644 --- a/glotaran/model/item.py +++ b/glotaran/model/item.py @@ -124,6 +124,9 @@ def decorator(cls): fill = _create_fill_func(cls) setattr(cls, "fill", fill) + get_parameters = _create_get_parameters(cls) + setattr(cls, "get_parameters", get_parameters) + mprint = _create_mprint_func(cls) setattr(cls, "mprint", mprint) @@ -304,6 +307,15 @@ def fill(self, model: Model, parameters: ParameterGroup) -> cls: return fill +def _create_get_parameters(cls): + @wrap_func_as_method(cls) + def get_parameters(self) -> list[str]: + """Returns all parameter full labels of the item.""" + return [p for p in self._glotaran_properties if p._is_parameter_value] + + return get_parameters + + def _create_get_state_func(cls): @wrap_func_as_method(cls) def get_state(self) -> cls: diff --git a/glotaran/model/model.py b/glotaran/model/model.py index 8d56c509c..c2809917f 100644 --- a/glotaran/model/model.py +++ b/glotaran/model/model.py @@ -96,7 +96,7 @@ def from_dict( # iterate over items for name, items in list(model_dict_local.items()): - if name not in model._model_items: + if name not in model.model_items: warn(f"Unknown model item type '{name}'.") continue @@ -112,7 +112,7 @@ def from_dict( def _add_dict_items(self, name: str, items: dict): for label, item in items.items(): - item_cls = self._model_items[name] + item_cls = self.model_items[name] is_typed = hasattr(item_cls, "_glotaran_model_item_typed") if is_typed: if "type" not in item and item_cls.get_default_type() is None: @@ -130,7 +130,7 @@ def _add_dict_items(self, name: str, items: dict): def _add_list_items(self, name: str, items: list): for item in items: - item_cls = self._model_items[name] + item_cls = self.model_items[name] is_typed = hasattr(item_cls, "_glotaran_model_item_typed") if is_typed: if "type" not in item: @@ -169,14 +169,14 @@ def _add_megacomplex_type(self, megacomplex_type: type[Megacomplex]): self._add_dataset_property(name, prop) def _add_model_item(self, name: str, item: type): - if name in self._model_items: - if self._model_items[name] != item: + if name in self.model_items: + if self.model_items[name] != item: raise ModelError( f"Cannot add item of type {name}. Model item '{name}' was already defined" "as a different type." ) return - self._model_items[name] = item + self.model_items[name] = item if getattr(item, "_glotaran_has_label"): setattr(self, f"{name}", {}) @@ -263,7 +263,7 @@ def problem_list(self, parameters: ParameterGroup = None) -> list[str]: """ problems = [] - for name in self._model_items: + for name in self.model_items: items = getattr(self, name) if isinstance(items, list): for item in items: @@ -308,6 +308,20 @@ def valid(self, parameters: ParameterGroup = None) -> bool: """ return len(self.problem_list(parameters)) == 0 + @property + def parameters(self) -> list[str]: + r = [] + for item_name in self.model_items: + print(item_name) + items = getattr(self, item_name) + if isinstance(items, list): + for item in items: + print(item.get_parameters()) + else: + for item in items.values(): + print(item.get_parameters()) + return r + def markdown( self, parameters: ParameterGroup = None, @@ -338,7 +352,7 @@ def markdown( string += ", ".join(self._megacomplex_types) string += "\n\n" - for name in self._model_items: + for name in self.model_items: items = getattr(self, name) if not items: continue diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py index d03d2340d..7c7b326ad 100644 --- a/glotaran/project/generators/generator.py +++ b/glotaran/project/generators/generator.py @@ -3,8 +3,8 @@ def generate_parallel_model(nr_species: int = 1): species = [f"species_{i}" for i in range(nr_species)] - initial_concentration_parameters = [f"intital_concentration{i}" for i in range(nr_species)] - k_matrix = {f"(species_{i}, species_{i})": f"decay_species_{i}" for i in range(nr_species)} + initial_concentration_parameters = [f"intital_concentration.{i}" for i in range(nr_species)] + k_matrix = {f"(species_{i}, species_{i})": f"decay_species.{i}" for i in range(nr_species)} return { "initial_concentration": { "initial_concentration_dataset_1": { diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 47455dbb6..5d2726973 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -4,7 +4,6 @@ from os import getcwd from os import mkdir from pathlib import Path -from typing import List from typing import Literal from yaml import dump @@ -63,12 +62,16 @@ def models(self): if not self.model_dir.exists(): return {} # print(model_file) - return {model_file.name: load_model(model_file) for model_file in self.model_dir.iterdir()} + return { + model_file.name: load_model(model_file) + for model_file in self.model_dir.iterdir() + if "yml" in model_file + } def has_models(self): return len(self.models()) != 0 - def create_model(self, model_type: Literal[list[generators.keys()]] = "decay_parallel"): + def create_model(self, model_type: Literal[generators.keys()] = "decay_parallel"): self.create_model_dir_if_not_exist() model = generators[model_type] with open(self.model_dir / "p_model.yml", "w") as f: From 90d4fe3740cc478ad7523b1ceadcaffd287bd4d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Thu, 12 Aug 2021 14:46:00 +0200 Subject: [PATCH 03/32] Added test for project create and open --- glotaran/project/project.py | 34 ++++++++++++++++++------ glotaran/project/test/test_project.py | 37 +++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 8 deletions(-) create mode 100644 glotaran/project/test/test_project.py diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 5d2726973..09256989c 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -18,6 +18,8 @@ name: {name} """ +PROJECT_FILE_NAME = "project.gta" + @dataclass class Project: @@ -27,27 +29,44 @@ class Project: """ - folder: str | Path + file: str | Path name: str version: str + folder: str | Path = None + def __post_init__(self): + if isinstance(self.file, str): + self.file = Path(self.file) + if self.folder is None: + self.folder = self.file.parent if isinstance(self.folder, str): - self.folder = Path(self.folder).parent + self.folder = Path(self.folder) pass @classmethod - def create(cls, name: str | None = None): - project_folder = Path(getcwd()) + def create(cls, name: str | None = None, project_folder: str | None = None): + if project_folder is None: + project_folder = getcwd() + project_folder = Path(project_folder) name = name if name else project_folder.name - project_file = project_folder / "project.gta" + project_file = project_folder / PROJECT_FILE_NAME with open(project_file, "w") as f: f.write(TEMPLATE.format(gta_version=gta_version, name=name)) with open(project_file) as f: project_dict = load(f) - project_dict["folder"] = project_folder - print("ass", project_file) + project_dict["file"] = project_folder + return cls(**project_dict) + + @classmethod + def open(cls, project_folder: str): + project_file = Path(project_folder) + if not project_file.match(PROJECT_FILE_NAME): + project_file = project_file / PROJECT_FILE_NAME + with open(project_file) as f: + project_dict = load(f) + project_dict["file"] = project_file return cls(**project_dict) @property @@ -75,7 +94,6 @@ def create_model(self, model_type: Literal[generators.keys()] = "decay_parallel" self.create_model_dir_if_not_exist() model = generators[model_type] with open(self.model_dir / "p_model.yml", "w") as f: - print(model()) f.write(dump(model())) def run(self): diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py new file mode 100644 index 000000000..49291a9a7 --- /dev/null +++ b/glotaran/project/test/test_project.py @@ -0,0 +1,37 @@ +from pathlib import Path + +import pytest + +from glotaran import __version__ as gta_version +from glotaran.project.project import TEMPLATE +from glotaran.project.project import Project + + +@pytest.fixture(scope="module") +def project_folder(tmpdir_factory): + return str(tmpdir_factory.mktemp("test_project")) + + +def test_create(project_folder): + print(project_folder) # noqa T001 + Project.create("testproject", project_folder) + project_file = Path(project_folder) / "project.gta" + assert project_file.exists() + assert project_file.read_text(encoding="utf-8") == TEMPLATE.format( + gta_version=gta_version, name="testproject" + ) + + +def test_open(project_folder): + print(project_folder) # noqa T001 + project_from_folder = Project.open(project_folder) + + project_file = Path(project_folder) / "project.gta" + project_from_file = Project.open(project_file) + + assert project_from_folder == project_from_file + + project = project_from_file + + assert project.name == "testproject" + assert project.version == gta_version From 76f49ad0b6ea057c4e92f72eaf6c3be9d87a73d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Thu, 12 Aug 2021 15:10:10 +0200 Subject: [PATCH 04/32] Added test fparallel model generator --- .../test/test_gen_parallel_model.py | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 glotaran/project/generators/test/test_gen_parallel_model.py diff --git a/glotaran/project/generators/test/test_gen_parallel_model.py b/glotaran/project/generators/test/test_gen_parallel_model.py new file mode 100644 index 000000000..877900d9b --- /dev/null +++ b/glotaran/project/generators/test/test_gen_parallel_model.py @@ -0,0 +1,33 @@ +from yaml import dump + +from glotaran.io import load_model +from glotaran.project.generators.generator import generate_parallel_model + + +def test_generate_parallel_model(tmpdir_factory): + nr_species = 5 + model_yaml = dump(generate_parallel_model(nr_species)) + print(model_yaml) # noqa T001 + model_file = tmpdir_factory.mktemp("gen_par_model") / "model.yml" + model_file.write_text(model_yaml, encoding="utf-8") + + model = load_model(model_file) + + assert model.valid() + + assert "initial_concentration_dataset_1" in model.initial_concentration + initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] + assert initial_concentration.compartments == [f"species_{i}" for i in range(nr_species)] + for i in range(nr_species): + assert initial_concentration.parameters[i].full_label == f"intital_concentration.{i}" + + assert "k_matrix_parallel" in model.k_matrix + k_matrix = model.k_matrix["k_matrix_parallel"] + for i, (k, v) in enumerate(k_matrix.matrix.items()): + assert k == (f"species_{i}", f"species_{i}") + assert v.full_label == f"decay_species.{i}" + + assert "dataset_1" in model.dataset + dataset = model.dataset["dataset_1"] + assert dataset.initial_concentration == "initial_concentration_dataset_1" + assert dataset.megacomplex == ["megacomplex_parallel_decay"] From bb743cf11d1b5418b706bdc908dddebda0701d46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Thu, 12 Aug 2021 15:40:43 +0200 Subject: [PATCH 05/32] Added test for project model generation --- glotaran/project/generators/generator.py | 2 +- glotaran/project/project.py | 34 +++++++++++++++++------- glotaran/project/test/test_project.py | 24 ++++++++++++++--- 3 files changed, 46 insertions(+), 14 deletions(-) diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py index 7c7b326ad..c848f2a95 100644 --- a/glotaran/project/generators/generator.py +++ b/glotaran/project/generators/generator.py @@ -28,4 +28,4 @@ def generate_parallel_model(nr_species: int = 1): } -generators = {"decay_parallel": generate_parallel_model} +generators = {"decay-parallel": generate_parallel_model} diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 09256989c..c752b434b 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -4,6 +4,7 @@ from os import getcwd from os import mkdir from pathlib import Path +from typing import Any from typing import Literal from yaml import dump @@ -11,6 +12,7 @@ from glotaran import __version__ as gta_version from glotaran.io import load_model +from glotaran.model import Model from glotaran.project.generators.generator import generators TEMPLATE = """version: {gta_version} @@ -77,24 +79,38 @@ def create_model_dir_if_not_exist(self): if not self.model_dir.exists(): mkdir(self.model_dir) + @property + def has_models(self) -> bool: + return len(self.models) != 0 + + @property def models(self): if not self.model_dir.exists(): return {} - # print(model_file) return { model_file.name: load_model(model_file) for model_file in self.model_dir.iterdir() - if "yml" in model_file + if model_file.suffix == ".yml" or model_file.suffix == "yaml" } - def has_models(self): - return len(self.models()) != 0 - - def create_model(self, model_type: Literal[generators.keys()] = "decay_parallel"): + def generate_model( + self, name: str, generator: Literal[generators.keys()], generator_arguments: dict[str, Any] + ): + if generator not in generators: + raise ValueError( + f"Unknown model generator '{generator}'. " + f"Known generators are: {list(generators.keys())}" + ) self.create_model_dir_if_not_exist() - model = generators[model_type] - with open(self.model_dir / "p_model.yml", "w") as f: - f.write(dump(model())) + model = generators[generator](**generator_arguments) + with open(self.model_dir / f"{name}.yml", "w") as f: + f.write(dump(model)) + + def load_model(self, name: str) -> Model: + model_path = self.model_dir / f"{name}.yml" + if not model_path.exists(): + raise ValueError(f"Model file for model '{name}' does not exist.") + return load_model(model_path) def run(self): if not self.models: diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py index 49291a9a7..0b3474bf1 100644 --- a/glotaran/project/test/test_project.py +++ b/glotaran/project/test/test_project.py @@ -12,21 +12,24 @@ def project_folder(tmpdir_factory): return str(tmpdir_factory.mktemp("test_project")) -def test_create(project_folder): +@pytest.fixture(scope="module") +def project_file(project_folder): + return Path(project_folder) / "project.gta" + + +def test_create(project_folder, project_file): print(project_folder) # noqa T001 Project.create("testproject", project_folder) - project_file = Path(project_folder) / "project.gta" assert project_file.exists() assert project_file.read_text(encoding="utf-8") == TEMPLATE.format( gta_version=gta_version, name="testproject" ) -def test_open(project_folder): +def test_open(project_folder, project_file): print(project_folder) # noqa T001 project_from_folder = Project.open(project_folder) - project_file = Path(project_folder) / "project.gta" project_from_file = Project.open(project_file) assert project_from_folder == project_from_file @@ -35,3 +38,16 @@ def test_open(project_folder): assert project.name == "testproject" assert project.version == gta_version + + +def test_generate_model(project_file): + project = Project.open(project_file) + + assert not project.has_models + + project.generate_model("test_model", "decay-parallel", {"nr_species": 5}) + + assert project.has_models + + model = project.load_model("test_model") + assert "megacomplex_parallel_decay" in model.megacomplex From e9ca6d3511a581a03ed118bae922e3ee306040c5 Mon Sep 17 00:00:00 2001 From: Joris Snellenburg Date: Fri, 13 Aug 2021 21:06:33 +0200 Subject: [PATCH 06/32] Added project parameter generation generation --- glotaran/model/item.py | 7 +- glotaran/model/model.py | 17 ++- glotaran/model/property.py | 16 ++- glotaran/project/generators/generator.py | 2 +- .../test/test_gen_parallel_model.py | 2 +- glotaran/project/project.py | 101 +++++++++++++++++- glotaran/project/test/test_project.py | 31 +++++- 7 files changed, 154 insertions(+), 22 deletions(-) diff --git a/glotaran/model/item.py b/glotaran/model/item.py index 66139b893..ce0abb059 100644 --- a/glotaran/model/item.py +++ b/glotaran/model/item.py @@ -311,7 +311,12 @@ def _create_get_parameters(cls): @wrap_func_as_method(cls) def get_parameters(self) -> list[str]: """Returns all parameter full labels of the item.""" - return [p for p in self._glotaran_properties if p._is_parameter_value] + parameters = [] + for name in self._glotaran_properties: + value = getattr(self, name) + prop = getattr(self.__class__, name) + parameters += prop.get_parameters(value) + return parameters return get_parameters diff --git a/glotaran/model/model.py b/glotaran/model/model.py index c2809917f..a382419e2 100644 --- a/glotaran/model/model.py +++ b/glotaran/model/model.py @@ -308,19 +308,14 @@ def valid(self, parameters: ParameterGroup = None) -> bool: """ return len(self.problem_list(parameters)) == 0 - @property - def parameters(self) -> list[str]: - r = [] + def get_parameters(self) -> list[str]: + parameters = [] for item_name in self.model_items: - print(item_name) items = getattr(self, item_name) - if isinstance(items, list): - for item in items: - print(item.get_parameters()) - else: - for item in items.values(): - print(item.get_parameters()) - return r + item_iterator = items if isinstance(items, list) else items.values() + for item in item_iterator: + parameters += item.get_parameters() + return parameters def markdown( self, diff --git a/glotaran/model/property.py b/glotaran/model/property.py index 41c6d4c0f..a49f700c8 100644 --- a/glotaran/model/property.py +++ b/glotaran/model/property.py @@ -1,4 +1,5 @@ """The model property class.""" +from __future__ import annotations import typing @@ -45,10 +46,10 @@ def allow_none(self) -> bool: return self._allow_none @property - def property_type(self) -> typing.Type: + def property_type(self) -> type: return self._type - def validate(self, value, model, parameters=None) -> typing.List[str]: + def validate(self, value, model, parameters=None) -> list[str]: if value is None and self.allow_none: return [] @@ -134,3 +135,14 @@ def _determine_if_parameter(self, type): self._is_parameter = ( self._is_parameter_value or self._is_parameter_list or self._is_parameter_dict ) + + def get_parameters(self, value: typing.Any) -> list[str]: + if value is None: + return [] + elif self._is_parameter_value: + return [value.full_label] + elif self._is_parameter_list: + return [v.full_label for v in value] + elif self._is_parameter_dict: + return [v.full_label for v in value.values()] + return [] diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py index c848f2a95..3c10bf2fb 100644 --- a/glotaran/project/generators/generator.py +++ b/glotaran/project/generators/generator.py @@ -3,7 +3,7 @@ def generate_parallel_model(nr_species: int = 1): species = [f"species_{i}" for i in range(nr_species)] - initial_concentration_parameters = [f"intital_concentration.{i}" for i in range(nr_species)] + initial_concentration_parameters = [f"intitial_concentration.{i}" for i in range(nr_species)] k_matrix = {f"(species_{i}, species_{i})": f"decay_species.{i}" for i in range(nr_species)} return { "initial_concentration": { diff --git a/glotaran/project/generators/test/test_gen_parallel_model.py b/glotaran/project/generators/test/test_gen_parallel_model.py index 877900d9b..f88d6cee4 100644 --- a/glotaran/project/generators/test/test_gen_parallel_model.py +++ b/glotaran/project/generators/test/test_gen_parallel_model.py @@ -19,7 +19,7 @@ def test_generate_parallel_model(tmpdir_factory): initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] assert initial_concentration.compartments == [f"species_{i}" for i in range(nr_species)] for i in range(nr_species): - assert initial_concentration.parameters[i].full_label == f"intital_concentration.{i}" + assert initial_concentration.parameters[i].full_label == f"intitial_concentration.{i}" assert "k_matrix_parallel" in model.k_matrix k_matrix = model.k_matrix["k_matrix_parallel"] diff --git a/glotaran/project/project.py b/glotaran/project/project.py index c752b434b..7d6c472b5 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -12,7 +12,10 @@ from glotaran import __version__ as gta_version from glotaran.io import load_model +from glotaran.io import load_parameters from glotaran.model import Model +from glotaran.model import ModelError +from glotaran.parameter.parameter import Keys from glotaran.project.generators.generator import generators TEMPLATE = """version: {gta_version} @@ -93,6 +96,40 @@ def models(self): if model_file.suffix == ".yml" or model_file.suffix == "yaml" } + def load_model(self, name: str) -> Model: + model_path = self.model_dir / f"{name}.yml" + if not model_path.exists(): + raise ValueError(f"Model file for model '{name}' does not exist.") + return load_model(model_path) + + @property + def parameters_dir(self) -> Path: + return self.folder / "parameters/" + + def create_parameters_dir_if_not_exist(self): + if not self.parameters_dir.exists(): + mkdir(self.parameters_dir) + + @property + def has_parameters(self) -> bool: + return len(self.parameters) != 0 + + @property + def parameters(self): + if not self.parameters_dir.exists(): + return {} + return { + parameters_file.name: load_parameters(parameters_file) + for parameters_file in self.parameters_dir.iterdir() + if parameters_file.suffix == ".yml" or parameters_file.suffix == "yaml" + } + + def load_parameters(self, name: str) -> Model: + parameters_path = self.parameters_dir / f"{name}.yml" + if not parameters_path.exists(): + raise ValueError(f"Parameters file for parameters '{name}' does not exist.") + return load_parameters(parameters_path) + def generate_model( self, name: str, generator: Literal[generators.keys()], generator_arguments: dict[str, Any] ): @@ -106,11 +143,65 @@ def generate_model( with open(self.model_dir / f"{name}.yml", "w") as f: f.write(dump(model)) - def load_model(self, name: str) -> Model: - model_path = self.model_dir / f"{name}.yml" - if not model_path.exists(): - raise ValueError(f"Model file for model '{name}' does not exist.") - return load_model(model_path) + def generate_parameters( + self, model_name: str, name: str | None = None, fmt: Literal[["yml", "yaml"]] = "yml" + ): + self.create_parameters_dir_if_not_exist() + model = self.load_model(model_name) + parameters = {} + for parameter in model.get_parameters(): + groups = parameter.split(".") + label = groups.pop() + if len(groups) == 0: + if isinstance(parameters, dict) and len(parameters) != 0: + raise ModelError( + "The root parameter group cannot contain both groups and parameters." + ) + elif isinstance(parameters, dict): + parameters = [] + parameters.append( + [ + label, + 0.0, + { + Keys.EXPR: "None", + Keys.MAX: "None", + Keys.MIN: "None", + Keys.NON_NEG: "false", + Keys.VARY: "true", + }, + ] + ) + else: + if isinstance(parameters, list): + raise ModelError( + "The root parameter group cannot contain both groups and parameters." + ) + this_group = groups.pop() + group = parameters + for name in groups: + if name not in group: + group[name] = {} + group = group[name] + if this_group not in group: + group[this_group] = [] + group[this_group].append( + [ + label, + 0.0, + { + Keys.EXPR: None, + Keys.MAX: "inf", + Keys.MIN: "-inf", + Keys.NON_NEG: "false", + Keys.VARY: "true", + }, + ] + ) + parameter_yml = dump(parameters) + name = name if name is not None else model_name + "_parameters" + with open(self.parameters_dir / f"{name}.{fmt}", "w") as f: + f.write(parameter_yml) def run(self): if not self.models: diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py index 0b3474bf1..2843a2394 100644 --- a/glotaran/project/test/test_project.py +++ b/glotaran/project/test/test_project.py @@ -40,14 +40,43 @@ def test_open(project_folder, project_file): assert project.version == gta_version -def test_generate_model(project_file): +def test_generate_model(project_folder, project_file): project = Project.open(project_file) assert not project.has_models project.generate_model("test_model", "decay-parallel", {"nr_species": 5}) + model_folder = Path(project_folder) / "models" + assert model_folder.exists() + + model_file = model_folder / "test_model.yml" + assert model_file.exists() + assert project.has_models model = project.load_model("test_model") assert "megacomplex_parallel_decay" in model.megacomplex + + +def test_generate_parameters(project_folder, project_file): + project = Project.open(project_file) + + assert project.has_models + assert not project.has_parameters + + project.generate_parameters("test_model") + + parameter_folder = Path(project_folder) / "parameters" + assert parameter_folder.exists() + + parameter_file = parameter_folder / "test_model_parameters.yml" + assert parameter_file.exists() + + assert project.has_parameters + + model = project.load_model("test_model") + parameters = project.load_parameters("test_model_parameters") + + for parameter in model.get_parameters(): + assert parameters.has(parameter) From 0f863d4d435d97d65872885095fafcdbcef40a79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Fri, 13 Aug 2021 13:12:13 +0200 Subject: [PATCH 07/32] Added csv to parameter generation --- glotaran/project/project.py | 26 +++++++++++++++++++------- glotaran/project/test/test_project.py | 18 +++++++++++------- 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 7d6c472b5..9bc5138d7 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -15,6 +15,7 @@ from glotaran.io import load_parameters from glotaran.model import Model from glotaran.model import ModelError +from glotaran.parameter import ParameterGroup from glotaran.parameter.parameter import Keys from glotaran.project.generators.generator import generators @@ -121,12 +122,14 @@ def parameters(self): return { parameters_file.name: load_parameters(parameters_file) for parameters_file in self.parameters_dir.iterdir() - if parameters_file.suffix == ".yml" or parameters_file.suffix == "yaml" + if parameters_file.suffix in [".yml", ".yaml", ".csv"] } def load_parameters(self, name: str) -> Model: - parameters_path = self.parameters_dir / f"{name}.yml" - if not parameters_path.exists(): + + try: + parameters_path = next(p for p in self.parameters_dir.iterdir() if name in p.name) + except StopIteration: raise ValueError(f"Parameters file for parameters '{name}' does not exist.") return load_parameters(parameters_path) @@ -144,7 +147,10 @@ def generate_model( f.write(dump(model)) def generate_parameters( - self, model_name: str, name: str | None = None, fmt: Literal[["yml", "yaml"]] = "yml" + self, + model_name: str, + name: str | None = None, + fmt: Literal[["yml", "yaml", "csv"]] = "csv", ): self.create_parameters_dir_if_not_exist() model = self.load_model(model_name) @@ -198,10 +204,16 @@ def generate_parameters( }, ] ) - parameter_yml = dump(parameters) + name = name if name is not None else model_name + "_parameters" - with open(self.parameters_dir / f"{name}.{fmt}", "w") as f: - f.write(parameter_yml) + parameter_file = self.parameters_dir / f"{name}.{fmt}" + if fmt in ["yml", "yaml"]: + parameter_yml = dump(parameters) + with open(parameter_file, "w") as f: + f.write(parameter_yml) + elif fmt == "csv": + parameter_group = ParameterGroup.from_dict(parameters) + parameter_group.to_csv(parameter_file) def run(self): if not self.models: diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py index 2843a2394..2d494c7e5 100644 --- a/glotaran/project/test/test_project.py +++ b/glotaran/project/test/test_project.py @@ -1,3 +1,4 @@ +import os from pathlib import Path import pytest @@ -38,13 +39,13 @@ def test_open(project_folder, project_file): assert project.name == "testproject" assert project.version == gta_version + assert not project.has_parameters + assert not project.has_models def test_generate_model(project_folder, project_file): project = Project.open(project_file) - assert not project.has_models - project.generate_model("test_model", "decay-parallel", {"nr_species": 5}) model_folder = Path(project_folder) / "models" @@ -59,24 +60,27 @@ def test_generate_model(project_folder, project_file): assert "megacomplex_parallel_decay" in model.megacomplex -def test_generate_parameters(project_folder, project_file): +@pytest.mark.parametrize("name", ["test_parameter", None]) +@pytest.mark.parametrize("fmt", ["yml", "yaml", "csv"]) +def test_generate_parameters(project_folder, project_file, name, fmt): project = Project.open(project_file) assert project.has_models - assert not project.has_parameters - project.generate_parameters("test_model") + project.generate_parameters("test_model", name=name, fmt=fmt) parameter_folder = Path(project_folder) / "parameters" assert parameter_folder.exists() - parameter_file = parameter_folder / "test_model_parameters.yml" + parameter_file_name = f"{'test_model_parameters' if name is None else name}.{fmt}" + parameter_file = parameter_folder / parameter_file_name assert parameter_file.exists() assert project.has_parameters model = project.load_model("test_model") - parameters = project.load_parameters("test_model_parameters") + parameters = project.load_parameters("test_model_parameters" if name is None else name) for parameter in model.get_parameters(): assert parameters.has(parameter) + os.remove(parameter_file) From a6c1910ee3e7a4622d557b6b98bd7fa61af0074e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Fri, 13 Aug 2021 13:49:48 +0200 Subject: [PATCH 08/32] Tweaked result saving as yaml --- glotaran/analysis/optimize.py | 18 ++++++++++-------- glotaran/analysis/problem.py | 1 - glotaran/builtin/io/yml/yml.py | 6 +++++- glotaran/project/result.py | 27 +++++++++++++-------------- glotaran/project/test/test_result.py | 10 +++++++++- 5 files changed, 37 insertions(+), 25 deletions(-) diff --git a/glotaran/analysis/optimize.py b/glotaran/analysis/optimize.py index a07169f17..4af7ce6f7 100644 --- a/glotaran/analysis/optimize.py +++ b/glotaran/analysis/optimize.py @@ -6,6 +6,7 @@ from scipy.optimize import OptimizeResult from scipy.optimize import least_squares +from glotaran import __version__ as glotaran_version from glotaran.analysis.problem import Problem from glotaran.analysis.problem_grouped import GroupedProblem from glotaran.analysis.problem_ungrouped import UngroupedProblem @@ -123,23 +124,24 @@ def _create_result( return Result( additional_penalty=problem.additional_penalty, + chi_square=chi_square, cost=problem.cost, + covariance_matrix=covariance_matrix, data=data, + degrees_of_freedom=degrees_of_freedom, free_parameter_labels=free_parameter_labels, - number_of_function_evaluations=number_of_function_evaluation, + glotaran_version=glotaran_version, initial_parameters=problem.scheme.parameters, - optimized_parameters=parameters, - scheme=problem.scheme, - success=success, - termination_reason=termination_reason, - chi_square=chi_square, - covariance_matrix=covariance_matrix, - degrees_of_freedom=degrees_of_freedom, jacobian=jacobian, number_of_data_points=number_of_data_points, + number_of_function_evaluations=number_of_function_evaluation, number_of_jacobian_evaluations=number_of_jacobian_evaluation, number_of_variables=number_of_variables, optimality=optimality, + optimized_parameters=parameters, reduced_chi_square=reduced_chi_square, root_mean_square_error=root_mean_square_error, + scheme=problem.scheme, + success=success, + termination_reason=termination_reason, ) diff --git a/glotaran/analysis/problem.py b/glotaran/analysis/problem.py index 4d10d4c31..3edf55277 100644 --- a/glotaran/analysis/problem.py +++ b/glotaran/analysis/problem.py @@ -78,7 +78,6 @@ def __init__(self, scheme: Scheme): self._residual_function = ( residual_nnls if scheme.non_negative_least_squares else residual_variable_projection ) - self._parameters = None self._dataset_models = None self._overwrite_index_dependent = self.model.need_index_dependent() diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index 540c59824..becf2fd60 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -193,7 +193,11 @@ def save_result(self, result: Result, result_path: str): result_scheme.data[label] = dataset_path result_file_path = os.path.join(result_path, "result.yml") - _write_dict(result_file_path, dataclasses.asdict(result)) + result_dict = dataclasses.asdict(result) + if result_dict["jacobian"] is not None: + result_dict["jacobian"] = result_dict["jacobian"].tolist() + result_dict["covariance_matrix"] = result_dict["covariance_matrix"].tolist() + _write_dict(result_file_path, result_dict) result_scheme.result_path = result_file_path self.save_scheme(scheme=result_scheme, file_name=scheme_path) diff --git a/glotaran/project/result.py b/glotaran/project/result.py index 1b01d55c6..c3ee783e1 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -2,6 +2,7 @@ from __future__ import annotations from dataclasses import dataclass +from dataclasses import replace import numpy as np import xarray as xr @@ -44,18 +45,21 @@ class Result: termination_reason: str """The reason (message when) the optimizer terminated""" + glotaran_version: str + """The glotaran version used to create the result.""" + # The below can be none in case of unsuccessful optimization chi_square: float | None = None r"""The chi-square of the optimization. :math:`\chi^2 = \sum_i^N [{Residual}_i]^2`.""" - covariance_matrix: ArrayLike | None = None + covariance_matrix: ArrayLike | list | None = None """Covariance matrix. The rows and columns are corresponding to :attr:`free_parameter_labels`.""" degrees_of_freedom: int | None = None """Degrees of freedom in optimization :math:`N - N_{vars}`.""" - jacobian: ArrayLike | None = None + jacobian: ArrayLike | list | None = None """Modified Jacobian matrix at the solution See also: :func:`scipy.optimize.least_squares` @@ -79,6 +83,11 @@ class Result: :math:`rms = \sqrt{\chi^2_{red}}` """ + def __post_init__(self): + if isinstance(self.jacobian, list): + self.jacobian = np.array(self.jacobian) + self.covariance_matrix = np.array(self.covariance_matrix) + @property def model(self) -> Model: return self.scheme.model @@ -99,18 +108,8 @@ def get_scheme(self) -> Scheme: if "weight" in dataset: data[label]["weight"] = dataset.weight - return Scheme( - model=self.model, - parameters=self.optimized_parameters, - data=data, - group_tolerance=self.scheme.group_tolerance, - non_negative_least_squares=self.scheme.non_negative_least_squares, - maximum_number_function_evaluations=self.scheme.maximum_number_function_evaluations, - ftol=self.scheme.ftol, - gtol=self.scheme.gtol, - xtol=self.scheme.xtol, - optimization_method=self.scheme.optimization_method, - ) + new_scheme = replace(self.scheme, parameters=self.optimized_parameters) + return new_scheme def markdown(self, with_model: bool = True, base_heading_level: int = 1) -> MarkdownStr: """Formats the model as a markdown text. diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py index 60c27fd37..ab79f70a2 100644 --- a/glotaran/project/test/test_result.py +++ b/glotaran/project/test/test_result.py @@ -30,12 +30,20 @@ def dummy_result(): model=suite.model, parameters=suite.initial_parameters, data=data, - maximum_number_function_evaluations=1, + maximum_number_function_evaluations=9, ) yield optimize(scheme) +def test_get_scheme(dummy_result: Result): + scheme = dummy_result.get_scheme() + assert all(scheme.parameters.to_dataframe() != dummy_result.scheme.parameters.to_dataframe()) + assert all( + scheme.parameters.to_dataframe() == dummy_result.optimized_parameters.to_dataframe() + ) + + def test_result_ipython_rendering(dummy_result: Result): """Autorendering in ipython""" From 816eff7e7674e45ffbb872deae0ab27d40582576 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Fri, 13 Aug 2021 14:33:35 +0200 Subject: [PATCH 09/32] Added dataset import to project --- glotaran/project/project.py | 70 +++++++++++++++++++++------ glotaran/project/test/test_project.py | 30 +++++++++++- glotaran/project/test/test_result.py | 13 +++-- 3 files changed, 95 insertions(+), 18 deletions(-) diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 9bc5138d7..4c2a09109 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -7,10 +7,12 @@ from typing import Any from typing import Literal +import xarray as xr from yaml import dump from yaml import load from glotaran import __version__ as gta_version +from glotaran.io import load_dataset from glotaran.io import load_model from glotaran.io import load_parameters from glotaran.model import Model @@ -75,6 +77,47 @@ def open(cls, project_folder: str): project_dict["file"] = project_file return cls(**project_dict) + @property + def data_dir(self) -> Path: + return self.folder / "data/" + + def create_data_dir_if_not_exist(self): + if not self.data_dir.exists(): + mkdir(self.data_dir) + + @property + def has_data(self) -> bool: + return len(self.data) != 0 + + @property + def data(self): + if not self.data_dir.exists(): + return {} + return { + data_file.name: load_dataset(data_file) + for data_file in self.data_dir.iterdir() + if data_file.suffix == ".nc" + } + + def load_data(self, name: str) -> xr.DataSet: + try: + data_path = next(p for p in self.data_dir.iterdir() if name in p.name) + except StopIteration: + raise ValueError(f"Model file for model '{name}' does not exist.") + return load_dataset(data_path) + + def import_data(self, path: str | Path, name: str | None = None) -> xr.DataSet: + + if not isinstance(path, Path): + path = Path(path) + + name = name or path.with_suffix("").name + data_path = self.data_dir / f"{name}.nc" + + self.create_data_dir_if_not_exist() + dataset = load_dataset(path) + dataset.to_netcdf(data_path) + @property def model_dir(self) -> Path: return self.folder / "models/" @@ -103,6 +146,19 @@ def load_model(self, name: str) -> Model: raise ValueError(f"Model file for model '{name}' does not exist.") return load_model(model_path) + def generate_model( + self, name: str, generator: Literal[generators.keys()], generator_arguments: dict[str, Any] + ): + if generator not in generators: + raise ValueError( + f"Unknown model generator '{generator}'. " + f"Known generators are: {list(generators.keys())}" + ) + self.create_model_dir_if_not_exist() + model = generators[generator](**generator_arguments) + with open(self.model_dir / f"{name}.yml", "w") as f: + f.write(dump(model)) + @property def parameters_dir(self) -> Path: return self.folder / "parameters/" @@ -126,26 +182,12 @@ def parameters(self): } def load_parameters(self, name: str) -> Model: - try: parameters_path = next(p for p in self.parameters_dir.iterdir() if name in p.name) except StopIteration: raise ValueError(f"Parameters file for parameters '{name}' does not exist.") return load_parameters(parameters_path) - def generate_model( - self, name: str, generator: Literal[generators.keys()], generator_arguments: dict[str, Any] - ): - if generator not in generators: - raise ValueError( - f"Unknown model generator '{generator}'. " - f"Known generators are: {list(generators.keys())}" - ) - self.create_model_dir_if_not_exist() - model = generators[generator](**generator_arguments) - with open(self.model_dir / f"{name}.yml", "w") as f: - f.write(dump(model)) - def generate_parameters( self, model_name: str, diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py index 2d494c7e5..f4d315037 100644 --- a/glotaran/project/test/test_project.py +++ b/glotaran/project/test/test_project.py @@ -6,6 +6,7 @@ from glotaran import __version__ as gta_version from glotaran.project.project import TEMPLATE from glotaran.project.project import Project +from glotaran.project.test.test_result import dummy_data # noqa F401 @pytest.fixture(scope="module") @@ -18,6 +19,13 @@ def project_file(project_folder): return Path(project_folder) / "project.gta" +@pytest.fixture(scope="module") +def dummy_data_path(tmpdir_factory, dummy_data): # noqa F811 + path = Path(tmpdir_factory.mktemp("test_project")) / "dummydata.nc" + dummy_data["dataset1"].to_netcdf(path) + return path + + def test_create(project_folder, project_file): print(project_folder) # noqa T001 Project.create("testproject", project_folder) @@ -39,8 +47,9 @@ def test_open(project_folder, project_file): assert project.name == "testproject" assert project.version == gta_version - assert not project.has_parameters assert not project.has_models + assert not project.has_data + assert not project.has_parameters def test_generate_model(project_folder, project_file): @@ -84,3 +93,22 @@ def test_generate_parameters(project_folder, project_file, name, fmt): for parameter in model.get_parameters(): assert parameters.has(parameter) os.remove(parameter_file) + + +@pytest.mark.parametrize("name", ["test_data", None]) +def test_import_data(project_folder, project_file, dummy_data, dummy_data_path, name): # noqa F811 + project = Project.open(project_file) + + project.import_data(dummy_data_path, name=name) + + data_folder = Path(project_folder) / "data" + assert data_folder.exists() + + data_file_name = f"{'dummydata' if name is None else name}.nc" + data_file = data_folder / data_file_name + assert data_file.exists() + + assert project.has_data + + data = project.load_data("dummydata" if name is None else name) + assert data == dummy_data["dataset1"] diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py index ab79f70a2..a91806c2e 100644 --- a/glotaran/project/test/test_result.py +++ b/glotaran/project/test/test_result.py @@ -11,8 +11,8 @@ @pytest.fixture(scope="session") -def dummy_result(): - """Dummy result for testing.""" +def dummy_data(): + """Dummy data for testing.""" wanted_parameters = suite.wanted_parameters data = {} @@ -26,10 +26,17 @@ def dummy_result(): wanted_parameters, {"global": global_axis, "model": model_axis}, ) + yield data + + +@pytest.fixture(scope="session") +def dummy_result(dummy_data): + """Dummy result for testing.""" + scheme = Scheme( model=suite.model, parameters=suite.initial_parameters, - data=data, + data=dummy_data, maximum_number_function_evaluations=9, ) From 5c8312f0a0d2295480c9606e3032c67ad7c0ecbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Fri, 13 Aug 2021 14:51:54 +0200 Subject: [PATCH 10/32] Added sequential model generator --- glotaran/project/generators/generator.py | 44 +++++++++++- .../test/test_gen_parallel_model.py | 33 --------- .../test/test_genenerate_decay_model.py | 72 +++++++++++++++++++ 3 files changed, 113 insertions(+), 36 deletions(-) delete mode 100644 glotaran/project/generators/test/test_gen_parallel_model.py create mode 100644 glotaran/project/generators/test/test_genenerate_decay_model.py diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py index 3c10bf2fb..0d8ca71cb 100644 --- a/glotaran/project/generators/generator.py +++ b/glotaran/project/generators/generator.py @@ -2,9 +2,13 @@ def generate_parallel_model(nr_species: int = 1): - species = [f"species_{i}" for i in range(nr_species)] - initial_concentration_parameters = [f"intitial_concentration.{i}" for i in range(nr_species)] - k_matrix = {f"(species_{i}, species_{i})": f"decay_species.{i}" for i in range(nr_species)} + species = [f"species_{i+1}" for i in range(nr_species)] + initial_concentration_parameters = [ + f"intitial_concentration.species_{i+1}" for i in range(nr_species) + ] + k_matrix = { + f"(species_{i+1}, species_{i+1})": f"decay.species_{i+1}" for i in range(nr_species) + } return { "initial_concentration": { "initial_concentration_dataset_1": { @@ -28,4 +32,38 @@ def generate_parallel_model(nr_species: int = 1): } +def generate_sequential_model(nr_species: int = 1): + species = [f"species_{i}" for i in range(nr_species)] + initial_concentration_parameters = [ + f"intitial_concentration.species_{i}" for i in range(nr_species) + ] + k_matrix = { + f"(species_{i+2}, species_{i+1})": f"decay.species_{i+1}" for i in range(nr_species - 1) + } + k_matrix[f"(species_{nr_species}, species_{nr_species})"] = f"decay.species_{nr_species}" + + return { + "initial_concentration": { + "initial_concentration_dataset_1": { + "compartments": species, + "parameters": initial_concentration_parameters, + }, + }, + "k_matrix": {"k_matrix_sequential": {"matrix": k_matrix}}, + "megacomplex": { + "megacomplex_parallel_decay": { + "type": "decay", + "k_matrix": ["k_matrix_sequential"], + }, + }, + "dataset": { + "dataset_1": { + "initial_concentration": "initial_concentration_dataset_1", + "megacomplex": ["megacomplex_parallel_decay"], + } + }, + } + + generators = {"decay-parallel": generate_parallel_model} +generators = {"decay-sequential": generate_parallel_model} diff --git a/glotaran/project/generators/test/test_gen_parallel_model.py b/glotaran/project/generators/test/test_gen_parallel_model.py deleted file mode 100644 index f88d6cee4..000000000 --- a/glotaran/project/generators/test/test_gen_parallel_model.py +++ /dev/null @@ -1,33 +0,0 @@ -from yaml import dump - -from glotaran.io import load_model -from glotaran.project.generators.generator import generate_parallel_model - - -def test_generate_parallel_model(tmpdir_factory): - nr_species = 5 - model_yaml = dump(generate_parallel_model(nr_species)) - print(model_yaml) # noqa T001 - model_file = tmpdir_factory.mktemp("gen_par_model") / "model.yml" - model_file.write_text(model_yaml, encoding="utf-8") - - model = load_model(model_file) - - assert model.valid() - - assert "initial_concentration_dataset_1" in model.initial_concentration - initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] - assert initial_concentration.compartments == [f"species_{i}" for i in range(nr_species)] - for i in range(nr_species): - assert initial_concentration.parameters[i].full_label == f"intitial_concentration.{i}" - - assert "k_matrix_parallel" in model.k_matrix - k_matrix = model.k_matrix["k_matrix_parallel"] - for i, (k, v) in enumerate(k_matrix.matrix.items()): - assert k == (f"species_{i}", f"species_{i}") - assert v.full_label == f"decay_species.{i}" - - assert "dataset_1" in model.dataset - dataset = model.dataset["dataset_1"] - assert dataset.initial_concentration == "initial_concentration_dataset_1" - assert dataset.megacomplex == ["megacomplex_parallel_decay"] diff --git a/glotaran/project/generators/test/test_genenerate_decay_model.py b/glotaran/project/generators/test/test_genenerate_decay_model.py new file mode 100644 index 000000000..e9024e71c --- /dev/null +++ b/glotaran/project/generators/test/test_genenerate_decay_model.py @@ -0,0 +1,72 @@ +from yaml import dump + +from glotaran.io import load_model +from glotaran.project.generators.generator import generate_parallel_model +from glotaran.project.generators.generator import generate_sequential_model + + +def test_generate_parallel_model(tmpdir_factory): + nr_species = 5 + model_yaml = dump(generate_parallel_model(nr_species)) + print(model_yaml) # noqa T001 + model_file = tmpdir_factory.mktemp("gen_par_model") / "model.yml" + model_file.write_text(model_yaml, encoding="utf-8") + + model = load_model(model_file) + + assert model.valid() + + assert "initial_concentration_dataset_1" in model.initial_concentration + initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] + assert initial_concentration.compartments == [f"species_{i+1}" for i in range(nr_species)] + for i in range(nr_species): + assert ( + initial_concentration.parameters[i].full_label + == f"intitial_concentration.species_{i+1}" + ) + + assert "k_matrix_parallel" in model.k_matrix + k_matrix = model.k_matrix["k_matrix_parallel"] + for i, (k, v) in enumerate(k_matrix.matrix.items()): + assert k == (f"species_{i+1}", f"species_{i+1}") + assert v.full_label == f"decay.species_{i+1}" + + assert "dataset_1" in model.dataset + dataset = model.dataset["dataset_1"] + assert dataset.initial_concentration == "initial_concentration_dataset_1" + assert dataset.megacomplex == ["megacomplex_parallel_decay"] + + +def test_generate_decay_model(tmpdir_factory): + nr_species = 5 + model_yaml = dump(generate_sequential_model(nr_species)) + print(model_yaml) # noqa T001 + model_file = tmpdir_factory.mktemp("gen_seq_model") / "model.yml" + model_file.write_text(model_yaml, encoding="utf-8") + + model = load_model(model_file) + + print(model.validate()) # noqa T001 + assert model.valid() + + assert "initial_concentration_dataset_1" in model.initial_concentration + initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] + assert initial_concentration.compartments == [f"species_{i}" for i in range(nr_species)] + for i in range(nr_species): + assert ( + initial_concentration.parameters[i].full_label == f"intitial_concentration.species_{i}" + ) + + assert "k_matrix_sequential" in model.k_matrix + k_matrix = model.k_matrix["k_matrix_sequential"] + for i, (k, v) in enumerate(k_matrix.matrix.items()): + if i < len(k_matrix.matrix) - 1: + assert k == (f"species_{i+2}", f"species_{i+1}") + else: + assert k == (f"species_{i+1}", f"species_{i+1}") + assert v.full_label == f"decay.species_{i+1}" + + assert "dataset_1" in model.dataset + dataset = model.dataset["dataset_1"] + assert dataset.initial_concentration == "initial_concentration_dataset_1" + assert dataset.megacomplex == ["megacomplex_parallel_decay"] From b92fff4f6fa4d6753d80d97c0f13f01a56c8f0ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Fri, 13 Aug 2021 15:31:02 +0200 Subject: [PATCH 11/32] Example model is now generated --- glotaran/examples/sequential.py | 70 ++++++------------- glotaran/project/generators/generator.py | 28 ++++++-- .../test/test_genenerate_decay_model.py | 30 ++++---- glotaran/project/project.py | 10 +-- 4 files changed, 66 insertions(+), 72 deletions(-) diff --git a/glotaran/examples/sequential.py b/glotaran/examples/sequential.py index d0f0f635e..bbc85711b 100644 --- a/glotaran/examples/sequential.py +++ b/glotaran/examples/sequential.py @@ -3,8 +3,11 @@ from glotaran.analysis.simulation import simulate from glotaran.builtin.megacomplexes.decay import DecayMegacomplex from glotaran.builtin.megacomplexes.spectral import SpectralMegacomplex +from glotaran.io import load_model +from glotaran.io import load_parameters from glotaran.model import Model from glotaran.parameter import ParameterGroup +from glotaran.project.generators.generator import generate_sequential_model sim_model = Model.from_dict( { @@ -88,21 +91,6 @@ } ) -parameter = ParameterGroup.from_dict( - { - "j": [ - ["1", 1, {"vary": False, "non-negative": False}], - ["0", 0, {"vary": False, "non-negative": False}], - ], - "kinetic": [ - ["1", 0.5], - ["2", 0.3], - ["3", 0.1], - ], - "irf": [["center", 0.3], ["width", 0.1]], - } -) - _time = np.arange(-1, 20, 0.01) _spectral = np.arange(600, 700, 1.4) @@ -115,36 +103,22 @@ noise_std_dev=1e-2, ) -model = Model.from_dict( - { - "initial_concentration": { - "j1": {"compartments": ["s1", "s2", "s3"], "parameters": ["j.1", "j.0", "j.0"]}, - }, - "k_matrix": { - "k1": { - "matrix": { - ("s2", "s1"): "kinetic.1", - ("s3", "s2"): "kinetic.2", - ("s3", "s3"): "kinetic.3", - } - } - }, - "megacomplex": { - "m1": { - "type": "decay", - "k_matrix": ["k1"], - } - }, - "irf": { - "irf1": {"type": "gaussian", "center": "irf.center", "width": "irf.width"}, - }, - "dataset": { - "dataset1": { - "initial_concentration": "j1", - "megacomplex": ["m1"], - "irf": "irf1", - } - }, - }, - megacomplex_types={"decay": DecayMegacomplex}, -) +parameter_yml = """ +initial_concentration: + - ["1", 1] + - ["0", 0] + - {"vary": False, "non-negative": False} + +kinetic: + - [species_1, 0.5] + - [species_2, 0.3] + - [species_3, 0.1] + +irf: + - [center, 0.3] + - [width, 0.1] +""" +parameter = load_parameters(parameter_yml, format_name="yml_str") + +model_yml = generate_sequential_model(nr_species=3, irf=True) +model = load_model(model_yml, format_name="yml_str") diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py index 0d8ca71cb..17164d697 100644 --- a/glotaran/project/generators/generator.py +++ b/glotaran/project/generators/generator.py @@ -1,5 +1,9 @@ from __future__ import annotations +from typing import Any + +from yaml import dump + def generate_parallel_model(nr_species: int = 1): species = [f"species_{i+1}" for i in range(nr_species)] @@ -32,17 +36,17 @@ def generate_parallel_model(nr_species: int = 1): } -def generate_sequential_model(nr_species: int = 1): +def generate_sequential_model(nr_species: int = 1, irf: bool = False) -> dict: species = [f"species_{i}" for i in range(nr_species)] - initial_concentration_parameters = [ - f"intitial_concentration.species_{i}" for i in range(nr_species) + initial_concentration_parameters = ["initial_concentration.1"] + [ + "initial_concentration.0" for i in range(1, nr_species) ] k_matrix = { f"(species_{i+2}, species_{i+1})": f"decay.species_{i+1}" for i in range(nr_species - 1) } k_matrix[f"(species_{nr_species}, species_{nr_species})"] = f"decay.species_{nr_species}" - return { + model = { "initial_concentration": { "initial_concentration_dataset_1": { "compartments": species, @@ -60,10 +64,26 @@ def generate_sequential_model(nr_species: int = 1): "dataset_1": { "initial_concentration": "initial_concentration_dataset_1", "megacomplex": ["megacomplex_parallel_decay"], + "irf": "gaussian_irf" if irf else None, } }, } + if irf: + model["irf"] = { + "gaussian_irf": {"type": "gaussian", "center": "irf.center", "width": "irf.width"}, + } + return model generators = {"decay-parallel": generate_parallel_model} generators = {"decay-sequential": generate_parallel_model} + + +def generate_model_yml(generator: str, generator_arguments: dict[str, Any]) -> str: + if generator not in generators: + raise ValueError( + f"Unknown model generator '{generator}'. " + f"Known generators are: {list(generators.keys())}" + ) + model = generators[generator](**generator_arguments) + return dump(model) diff --git a/glotaran/project/generators/test/test_genenerate_decay_model.py b/glotaran/project/generators/test/test_genenerate_decay_model.py index e9024e71c..d46ef2f4b 100644 --- a/glotaran/project/generators/test/test_genenerate_decay_model.py +++ b/glotaran/project/generators/test/test_genenerate_decay_model.py @@ -1,3 +1,4 @@ +import pytest from yaml import dump from glotaran.io import load_model @@ -5,14 +6,12 @@ from glotaran.project.generators.generator import generate_sequential_model -def test_generate_parallel_model(tmpdir_factory): +def test_generate_parallel_model(): nr_species = 5 model_yaml = dump(generate_parallel_model(nr_species)) print(model_yaml) # noqa T001 - model_file = tmpdir_factory.mktemp("gen_par_model") / "model.yml" - model_file.write_text(model_yaml, encoding="utf-8") - model = load_model(model_file) + model = load_model(model_yaml, format_name="yml_str") assert model.valid() @@ -37,14 +36,13 @@ def test_generate_parallel_model(tmpdir_factory): assert dataset.megacomplex == ["megacomplex_parallel_decay"] -def test_generate_decay_model(tmpdir_factory): +@pytest.mark.parametrize("irf", [True, False]) +def test_generate_decay_model(irf): nr_species = 5 - model_yaml = dump(generate_sequential_model(nr_species)) + model_yaml = dump(generate_sequential_model(nr_species, irf=irf)) print(model_yaml) # noqa T001 - model_file = tmpdir_factory.mktemp("gen_seq_model") / "model.yml" - model_file.write_text(model_yaml, encoding="utf-8") - model = load_model(model_file) + model = load_model(model_yaml, format_name="yml_str") print(model.validate()) # noqa T001 assert model.valid() @@ -52,10 +50,9 @@ def test_generate_decay_model(tmpdir_factory): assert "initial_concentration_dataset_1" in model.initial_concentration initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] assert initial_concentration.compartments == [f"species_{i}" for i in range(nr_species)] - for i in range(nr_species): - assert ( - initial_concentration.parameters[i].full_label == f"intitial_concentration.species_{i}" - ) + assert initial_concentration.parameters[0].full_label == "initial_concentration.1" + for i in range(1, nr_species): + assert initial_concentration.parameters[i].full_label == "initial_concentration.0" assert "k_matrix_sequential" in model.k_matrix k_matrix = model.k_matrix["k_matrix_sequential"] @@ -70,3 +67,10 @@ def test_generate_decay_model(tmpdir_factory): dataset = model.dataset["dataset_1"] assert dataset.initial_concentration == "initial_concentration_dataset_1" assert dataset.megacomplex == ["megacomplex_parallel_decay"] + + if irf: + assert dataset.irf == "gaussian_irf" + assert "gaussian_irf" in model.irf + irf = model.irf["gaussian_irf"] + assert irf.center.full_label == "irf.center" + assert irf.width.full_label == "irf.width" diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 4c2a09109..fb9333f2b 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -19,6 +19,7 @@ from glotaran.model import ModelError from glotaran.parameter import ParameterGroup from glotaran.parameter.parameter import Keys +from glotaran.project.generators.generator import generate_model_yml from glotaran.project.generators.generator import generators TEMPLATE = """version: {gta_version} @@ -149,15 +150,10 @@ def load_model(self, name: str) -> Model: def generate_model( self, name: str, generator: Literal[generators.keys()], generator_arguments: dict[str, Any] ): - if generator not in generators: - raise ValueError( - f"Unknown model generator '{generator}'. " - f"Known generators are: {list(generators.keys())}" - ) self.create_model_dir_if_not_exist() - model = generators[generator](**generator_arguments) + model = generate_model_yml(generator, generator_arguments) with open(self.model_dir / f"{name}.yml", "w") as f: - f.write(dump(model)) + f.write(model) @property def parameters_dir(self) -> Path: From 647e6b5ce4e5d4682d9ae66553cdff22c73b6062 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Fri, 13 Aug 2021 17:20:34 +0200 Subject: [PATCH 12/32] Added scheme generation --- glotaran/analysis/simulation.py | 2 +- glotaran/builtin/io/yml/yml.py | 4 +- glotaran/examples/sequential.py | 4 +- glotaran/project/generators/__init__.py | 0 glotaran/project/generators/generator.py | 89 -------------- .../test/test_genenerate_decay_model.py | 76 ------------ glotaran/project/project.py | 87 ++++++++++++- glotaran/project/test/test_project.py | 114 ------------------ 8 files changed, 87 insertions(+), 289 deletions(-) delete mode 100644 glotaran/project/generators/__init__.py delete mode 100644 glotaran/project/generators/generator.py delete mode 100644 glotaran/project/generators/test/test_genenerate_decay_model.py delete mode 100644 glotaran/project/test/test_project.py diff --git a/glotaran/analysis/simulation.py b/glotaran/analysis/simulation.py index 22ebe0a38..1f6334458 100644 --- a/glotaran/analysis/simulation.py +++ b/glotaran/analysis/simulation.py @@ -23,7 +23,7 @@ def simulate( noise: bool = False, noise_std_dev: float = 1.0, noise_seed: int | None = None, -): +) -> xr.Dataset: """Simulates a model. Parameters diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index becf2fd60..97251326b 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -125,8 +125,8 @@ def load_scheme(self, file_name: str) -> Scheme: raise ValueError(f"Error loading dataset '{label}': {e}") optimization_method = scheme.get("optimization_method", "TrustRegionReflection") - nnls = scheme.get("non-negative-least-squares", False) - nfev = scheme.get("maximum-number-function-evaluations", None) + nnls = scheme.get("non_negative_least_squares", False) + nfev = scheme.get("maximum_number_function_evaluations", None) ftol = scheme.get("ftol", 1e-8) gtol = scheme.get("gtol", 1e-8) xtol = scheme.get("xtol", 1e-8) diff --git a/glotaran/examples/sequential.py b/glotaran/examples/sequential.py index bbc85711b..6cb4fdd03 100644 --- a/glotaran/examples/sequential.py +++ b/glotaran/examples/sequential.py @@ -7,7 +7,7 @@ from glotaran.io import load_parameters from glotaran.model import Model from glotaran.parameter import ParameterGroup -from glotaran.project.generators.generator import generate_sequential_model +from glotaran.project.generators.generator import generate_model_yml sim_model = Model.from_dict( { @@ -120,5 +120,5 @@ """ parameter = load_parameters(parameter_yml, format_name="yml_str") -model_yml = generate_sequential_model(nr_species=3, irf=True) +model_yml = generate_model_yml("decay-sequential", nr_species=3, irf=True) model = load_model(model_yml, format_name="yml_str") diff --git a/glotaran/project/generators/__init__.py b/glotaran/project/generators/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/glotaran/project/generators/generator.py b/glotaran/project/generators/generator.py deleted file mode 100644 index 17164d697..000000000 --- a/glotaran/project/generators/generator.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from yaml import dump - - -def generate_parallel_model(nr_species: int = 1): - species = [f"species_{i+1}" for i in range(nr_species)] - initial_concentration_parameters = [ - f"intitial_concentration.species_{i+1}" for i in range(nr_species) - ] - k_matrix = { - f"(species_{i+1}, species_{i+1})": f"decay.species_{i+1}" for i in range(nr_species) - } - return { - "initial_concentration": { - "initial_concentration_dataset_1": { - "compartments": species, - "parameters": initial_concentration_parameters, - }, - }, - "k_matrix": {"k_matrix_parallel": {"matrix": k_matrix}}, - "megacomplex": { - "megacomplex_parallel_decay": { - "type": "decay", - "k_matrix": ["k_matrix_parallel"], - }, - }, - "dataset": { - "dataset_1": { - "initial_concentration": "initial_concentration_dataset_1", - "megacomplex": ["megacomplex_parallel_decay"], - } - }, - } - - -def generate_sequential_model(nr_species: int = 1, irf: bool = False) -> dict: - species = [f"species_{i}" for i in range(nr_species)] - initial_concentration_parameters = ["initial_concentration.1"] + [ - "initial_concentration.0" for i in range(1, nr_species) - ] - k_matrix = { - f"(species_{i+2}, species_{i+1})": f"decay.species_{i+1}" for i in range(nr_species - 1) - } - k_matrix[f"(species_{nr_species}, species_{nr_species})"] = f"decay.species_{nr_species}" - - model = { - "initial_concentration": { - "initial_concentration_dataset_1": { - "compartments": species, - "parameters": initial_concentration_parameters, - }, - }, - "k_matrix": {"k_matrix_sequential": {"matrix": k_matrix}}, - "megacomplex": { - "megacomplex_parallel_decay": { - "type": "decay", - "k_matrix": ["k_matrix_sequential"], - }, - }, - "dataset": { - "dataset_1": { - "initial_concentration": "initial_concentration_dataset_1", - "megacomplex": ["megacomplex_parallel_decay"], - "irf": "gaussian_irf" if irf else None, - } - }, - } - if irf: - model["irf"] = { - "gaussian_irf": {"type": "gaussian", "center": "irf.center", "width": "irf.width"}, - } - return model - - -generators = {"decay-parallel": generate_parallel_model} -generators = {"decay-sequential": generate_parallel_model} - - -def generate_model_yml(generator: str, generator_arguments: dict[str, Any]) -> str: - if generator not in generators: - raise ValueError( - f"Unknown model generator '{generator}'. " - f"Known generators are: {list(generators.keys())}" - ) - model = generators[generator](**generator_arguments) - return dump(model) diff --git a/glotaran/project/generators/test/test_genenerate_decay_model.py b/glotaran/project/generators/test/test_genenerate_decay_model.py deleted file mode 100644 index d46ef2f4b..000000000 --- a/glotaran/project/generators/test/test_genenerate_decay_model.py +++ /dev/null @@ -1,76 +0,0 @@ -import pytest -from yaml import dump - -from glotaran.io import load_model -from glotaran.project.generators.generator import generate_parallel_model -from glotaran.project.generators.generator import generate_sequential_model - - -def test_generate_parallel_model(): - nr_species = 5 - model_yaml = dump(generate_parallel_model(nr_species)) - print(model_yaml) # noqa T001 - - model = load_model(model_yaml, format_name="yml_str") - - assert model.valid() - - assert "initial_concentration_dataset_1" in model.initial_concentration - initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] - assert initial_concentration.compartments == [f"species_{i+1}" for i in range(nr_species)] - for i in range(nr_species): - assert ( - initial_concentration.parameters[i].full_label - == f"intitial_concentration.species_{i+1}" - ) - - assert "k_matrix_parallel" in model.k_matrix - k_matrix = model.k_matrix["k_matrix_parallel"] - for i, (k, v) in enumerate(k_matrix.matrix.items()): - assert k == (f"species_{i+1}", f"species_{i+1}") - assert v.full_label == f"decay.species_{i+1}" - - assert "dataset_1" in model.dataset - dataset = model.dataset["dataset_1"] - assert dataset.initial_concentration == "initial_concentration_dataset_1" - assert dataset.megacomplex == ["megacomplex_parallel_decay"] - - -@pytest.mark.parametrize("irf", [True, False]) -def test_generate_decay_model(irf): - nr_species = 5 - model_yaml = dump(generate_sequential_model(nr_species, irf=irf)) - print(model_yaml) # noqa T001 - - model = load_model(model_yaml, format_name="yml_str") - - print(model.validate()) # noqa T001 - assert model.valid() - - assert "initial_concentration_dataset_1" in model.initial_concentration - initial_concentration = model.initial_concentration["initial_concentration_dataset_1"] - assert initial_concentration.compartments == [f"species_{i}" for i in range(nr_species)] - assert initial_concentration.parameters[0].full_label == "initial_concentration.1" - for i in range(1, nr_species): - assert initial_concentration.parameters[i].full_label == "initial_concentration.0" - - assert "k_matrix_sequential" in model.k_matrix - k_matrix = model.k_matrix["k_matrix_sequential"] - for i, (k, v) in enumerate(k_matrix.matrix.items()): - if i < len(k_matrix.matrix) - 1: - assert k == (f"species_{i+2}", f"species_{i+1}") - else: - assert k == (f"species_{i+1}", f"species_{i+1}") - assert v.full_label == f"decay.species_{i+1}" - - assert "dataset_1" in model.dataset - dataset = model.dataset["dataset_1"] - assert dataset.initial_concentration == "initial_concentration_dataset_1" - assert dataset.megacomplex == ["megacomplex_parallel_decay"] - - if irf: - assert dataset.irf == "gaussian_irf" - assert "gaussian_irf" in model.irf - irf = model.irf["gaussian_irf"] - assert irf.center.full_label == "irf.center" - assert irf.width.full_label == "irf.width" diff --git a/glotaran/project/project.py b/glotaran/project/project.py index fb9333f2b..fb3354047 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -15,12 +15,15 @@ from glotaran.io import load_dataset from glotaran.io import load_model from glotaran.io import load_parameters +from glotaran.io import load_scheme +from glotaran.io import save_scheme from glotaran.model import Model from glotaran.model import ModelError from glotaran.parameter import ParameterGroup from glotaran.parameter.parameter import Keys from glotaran.project.generators.generator import generate_model_yml from glotaran.project.generators.generator import generators +from glotaran.project.scheme import Scheme TEMPLATE = """version: {gta_version} @@ -95,7 +98,7 @@ def data(self): if not self.data_dir.exists(): return {} return { - data_file.name: load_dataset(data_file) + data_file.with_suffix("").name: data_file for data_file in self.data_dir.iterdir() if data_file.suffix == ".nc" } @@ -136,9 +139,9 @@ def models(self): if not self.model_dir.exists(): return {} return { - model_file.name: load_model(model_file) + model_file.with_suffix("").name: model_file for model_file in self.model_dir.iterdir() - if model_file.suffix == ".yml" or model_file.suffix == "yaml" + if model_file.suffix in [".yml", ".yaml"] } def load_model(self, name: str) -> Model: @@ -151,10 +154,84 @@ def generate_model( self, name: str, generator: Literal[generators.keys()], generator_arguments: dict[str, Any] ): self.create_model_dir_if_not_exist() - model = generate_model_yml(generator, generator_arguments) + model = generate_model_yml(generator, **generator_arguments) with open(self.model_dir / f"{name}.yml", "w") as f: f.write(model) + @property + def scheme_dir(self) -> Path: + return self.folder / "schemes/" + + def create_scheme_dir_if_not_exist(self): + if not self.scheme_dir.exists(): + mkdir(self.scheme_dir) + + @property + def has_schemes(self) -> bool: + return len(self.schemes) != 0 + + @property + def schemes(self): + if not self.scheme_dir.exists(): + return {} + return { + scheme_file.with_suffix("").name: scheme_file + for scheme_file in self.scheme_dir.iterdir() + if scheme_file.suffix in [".yml", ".yaml"] + } + + def load_scheme(self, name: str) -> Scheme: + scheme_path = self.scheme_dir / f"{name}.yml" + if not scheme_path.exists(): + raise ValueError(f"Scheme file for scheme '{name}' does not exist.") + return load_scheme(scheme_path) + + def create_scheme( + self, + model: str, + parameter: str, + name: str | None = None, + nfev: int = None, + nnls: bool = False, + ): + + self.create_scheme_dir_if_not_exist() + if name is None: + n = 1 + name = "scheme-1" + scheme_path = self.scheme_dir / f"{name}.yml" + while scheme_path.exists(): + n += 1 + scheme_path = self.scheme_dir / f"scheme-{n}.yml" + else: + scheme_path = self.scheme_dir / f"{name}.yml" + + models = self.models + if model not in models: + raise ValueError(f"Unknown model '{model}'") + model = str(models[model]) + + parameters = self.parameters + if parameter not in parameters: + raise ValueError(f"Unknown parameter '{parameter}'") + parameter = str(parameters[parameter]) + + data = self.data + datasets = {} + for dataset in load_model(model).dataset: + if dataset not in data: + raise ValueError(f"Data missing for dataset '{dataset}'") + datasets[dataset] = str(data[dataset]) + + scheme = Scheme( + model, + parameter, + datasets, + non_negative_least_squares=nnls, + maximum_number_function_evaluations=nfev, + ) + save_scheme(scheme, scheme_path) + @property def parameters_dir(self) -> Path: return self.folder / "parameters/" @@ -172,7 +249,7 @@ def parameters(self): if not self.parameters_dir.exists(): return {} return { - parameters_file.name: load_parameters(parameters_file) + parameters_file.with_suffix("").name: parameters_file for parameters_file in self.parameters_dir.iterdir() if parameters_file.suffix in [".yml", ".yaml", ".csv"] } diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py deleted file mode 100644 index f4d315037..000000000 --- a/glotaran/project/test/test_project.py +++ /dev/null @@ -1,114 +0,0 @@ -import os -from pathlib import Path - -import pytest - -from glotaran import __version__ as gta_version -from glotaran.project.project import TEMPLATE -from glotaran.project.project import Project -from glotaran.project.test.test_result import dummy_data # noqa F401 - - -@pytest.fixture(scope="module") -def project_folder(tmpdir_factory): - return str(tmpdir_factory.mktemp("test_project")) - - -@pytest.fixture(scope="module") -def project_file(project_folder): - return Path(project_folder) / "project.gta" - - -@pytest.fixture(scope="module") -def dummy_data_path(tmpdir_factory, dummy_data): # noqa F811 - path = Path(tmpdir_factory.mktemp("test_project")) / "dummydata.nc" - dummy_data["dataset1"].to_netcdf(path) - return path - - -def test_create(project_folder, project_file): - print(project_folder) # noqa T001 - Project.create("testproject", project_folder) - assert project_file.exists() - assert project_file.read_text(encoding="utf-8") == TEMPLATE.format( - gta_version=gta_version, name="testproject" - ) - - -def test_open(project_folder, project_file): - print(project_folder) # noqa T001 - project_from_folder = Project.open(project_folder) - - project_from_file = Project.open(project_file) - - assert project_from_folder == project_from_file - - project = project_from_file - - assert project.name == "testproject" - assert project.version == gta_version - assert not project.has_models - assert not project.has_data - assert not project.has_parameters - - -def test_generate_model(project_folder, project_file): - project = Project.open(project_file) - - project.generate_model("test_model", "decay-parallel", {"nr_species": 5}) - - model_folder = Path(project_folder) / "models" - assert model_folder.exists() - - model_file = model_folder / "test_model.yml" - assert model_file.exists() - - assert project.has_models - - model = project.load_model("test_model") - assert "megacomplex_parallel_decay" in model.megacomplex - - -@pytest.mark.parametrize("name", ["test_parameter", None]) -@pytest.mark.parametrize("fmt", ["yml", "yaml", "csv"]) -def test_generate_parameters(project_folder, project_file, name, fmt): - project = Project.open(project_file) - - assert project.has_models - - project.generate_parameters("test_model", name=name, fmt=fmt) - - parameter_folder = Path(project_folder) / "parameters" - assert parameter_folder.exists() - - parameter_file_name = f"{'test_model_parameters' if name is None else name}.{fmt}" - parameter_file = parameter_folder / parameter_file_name - assert parameter_file.exists() - - assert project.has_parameters - - model = project.load_model("test_model") - parameters = project.load_parameters("test_model_parameters" if name is None else name) - - for parameter in model.get_parameters(): - assert parameters.has(parameter) - os.remove(parameter_file) - - -@pytest.mark.parametrize("name", ["test_data", None]) -def test_import_data(project_folder, project_file, dummy_data, dummy_data_path, name): # noqa F811 - project = Project.open(project_file) - - project.import_data(dummy_data_path, name=name) - - data_folder = Path(project_folder) / "data" - assert data_folder.exists() - - data_file_name = f"{'dummydata' if name is None else name}.nc" - data_file = data_folder / data_file_name - assert data_file.exists() - - assert project.has_data - - data = project.load_data("dummydata" if name is None else name) - assert data == dummy_data["dataset1"] From f2234e5615a3cdc2c7e9568779828971061b9fae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 11:54:47 +0200 Subject: [PATCH 13/32] Changed dataset fixture in project test --- glotaran/project/test/test_project.py | 169 ++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 glotaran/project/test/test_project.py diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py new file mode 100644 index 000000000..b71c78f29 --- /dev/null +++ b/glotaran/project/test/test_project.py @@ -0,0 +1,169 @@ +import os +from pathlib import Path + +import pytest + +from glotaran import __version__ as gta_version +from glotaran.examples.sequential import dataset as example_dataset +from glotaran.examples.sequential import model_yml +from glotaran.examples.sequential import parameter as example_parameter +from glotaran.project.project import TEMPLATE +from glotaran.project.project import Project + + +@pytest.fixture(scope="module") +def project_folder(tmpdir_factory): + return str(tmpdir_factory.mktemp("test_project")) + + +@pytest.fixture(scope="module") +def project_file(project_folder): + return Path(project_folder) / "project.gta" + + +@pytest.fixture(scope="module") +def test_data(tmpdir_factory): + path = Path(tmpdir_factory.mktemp("test_project")) / "dataset_1.nc" + example_dataset.to_netcdf(path) + return path + + +def test_create(project_folder, project_file): + print(project_folder) # noqa T001 + Project.create("testproject", project_folder) + assert project_file.exists() + assert project_file.read_text(encoding="utf-8") == TEMPLATE.format( + gta_version=gta_version, name="testproject" + ) + + +def test_open(project_folder, project_file): + print(project_folder) # noqa T001 + project_from_folder = Project.open(project_folder) + + project_from_file = Project.open(project_file) + + assert project_from_folder == project_from_file + + project = project_from_file + + assert project.name == "testproject" + assert project.version == gta_version + assert not project.has_models + assert not project.has_data + assert not project.has_parameters + + +def test_generate_model(project_folder, project_file): + project = Project.open(project_file) + + project.generate_model("test_model", "decay-parallel", {"nr_species": 5}) + + model_folder = Path(project_folder) / "models" + assert model_folder.exists() + + model_file = model_folder / "test_model.yml" + assert model_file.exists() + + assert project.has_models + + model = project.load_model("test_model") + assert "megacomplex_parallel_decay" in model.megacomplex + + +@pytest.mark.parametrize("name", ["test_parameter", None]) +@pytest.mark.parametrize("fmt", ["yml", "yaml", "csv"]) +def test_generate_parameters(project_folder, project_file, name, fmt): + project = Project.open(project_file) + + assert project.has_models + + project.generate_parameters("test_model", name=name, fmt=fmt) + + parameter_folder = Path(project_folder) / "parameters" + assert parameter_folder.exists() + + parameter_file_name = f"{'test_model_parameters' if name is None else name}.{fmt}" + parameter_file = parameter_folder / parameter_file_name + assert parameter_file.exists() + + assert project.has_parameters + + model = project.load_model("test_model") + parameters = project.load_parameters("test_model_parameters" if name is None else name) + + for parameter in model.get_parameters(): + assert parameters.has(parameter) + os.remove(parameter_file) + + +@pytest.mark.parametrize("name", ["test_data", None]) +def test_import_data(project_folder, project_file, test_data, name): + project = Project.open(project_file) + + project.import_data(test_data, name=name) + + data_folder = Path(project_folder) / "data" + assert data_folder.exists() + + data_file_name = f"{'dataset_1' if name is None else name}.nc" + data_file = data_folder / data_file_name + assert data_file.exists() + + assert project.has_data + + data = project.load_data("dataset_1" if name is None else name) + assert data == example_dataset + + +@pytest.mark.parametrize("name", ["test_scheme", None]) +def test_create_scheme(project_folder, project_file, name): + project = Project.open(project_file) + + project.generate_parameters("test_model", name="test_parameters") + project.create_scheme( + model="test_model", parameter="test_parameters", name=name, nfev=1, nnls=True + ) + + scheme_folder = Path(project_folder) / "schemes" + assert scheme_folder.exists() + + scheme_file_name = name or "scheme-1" + scheme_file_name += ".yml" + scheme_file = scheme_folder / scheme_file_name + assert scheme_file.exists() + + assert project.has_schemes + + scheme = project.load_scheme(name or "scheme-1") + assert "dataset_1" in scheme.data + assert "dataset_1" in scheme.model.dataset + assert scheme.non_negative_least_squares + assert scheme.maximum_number_function_evaluations == 1 + + +def test_run_optimization(project_folder, project_file): + project = Project.open(project_file) + + model_file = Path(project_folder) / "models" / "sequential.yml" + with open(model_file, "w") as f: + f.write(model_yml) + + project.create_parameters_dir_if_not_exist() + parameter_folder = Path(project_folder) / "parameters" + assert parameter_folder.exists() + parameters_file = parameter_folder / "sequential.csv" + example_parameter.to_csv(parameters_file) + + data_folder = Path(project_folder) / "data" + assert data_folder.exists() + data_file = data_folder / "dataset_1.nc" + os.remove(data_file) + example_dataset.to_netcdf(data_file) + + project.create_scheme(model="sequential", parameter="sequential", name="sequential", nfev=1) + + assert project.has_models + assert project.has_parameters + assert project.has_data + assert project.has_schemes From e9f1ae4d8befdb28cf2774381f510f74ae108667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 11:55:37 +0200 Subject: [PATCH 14/32] Fix scheme test --- glotaran/project/test/test_scheme.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/glotaran/project/test/test_scheme.py b/glotaran/project/test/test_scheme.py index 121fb999c..2cc826039 100644 --- a/glotaran/project/test/test_scheme.py +++ b/glotaran/project/test/test_scheme.py @@ -35,8 +35,8 @@ def mock_scheme(tmpdir): scheme = f""" model: {model_path} parameters: {parameter_path} - non-negative-least-squares: True - maximum-number-function-evaluations: 42 + non_negative_least_squares: True + maximum_number_function_evaluations: 42 data: dataset1: {dataset_path} From cef2221ce1f266fc6d5e993dc370dc5f3e545b16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 12:03:59 +0200 Subject: [PATCH 15/32] Added basic project run --- glotaran/examples/sequential.py | 2 +- glotaran/project/project.py | 11 ++++++++--- glotaran/project/test/test_project.py | 2 ++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/glotaran/examples/sequential.py b/glotaran/examples/sequential.py index 6cb4fdd03..0cba88b3c 100644 --- a/glotaran/examples/sequential.py +++ b/glotaran/examples/sequential.py @@ -109,7 +109,7 @@ - ["0", 0] - {"vary": False, "non-negative": False} -kinetic: +decay: - [species_1, 0.5] - [species_2, 0.3] - [species_3, 0.1] diff --git a/glotaran/project/project.py b/glotaran/project/project.py index fb3354047..e2dc2d615 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -12,6 +12,7 @@ from yaml import load from glotaran import __version__ as gta_version +from glotaran.analysis.optimize import optimize from glotaran.io import load_dataset from glotaran.io import load_model from glotaran.io import load_parameters @@ -330,6 +331,10 @@ def generate_parameters( parameter_group = ParameterGroup.from_dict(parameters) parameter_group.to_csv(parameter_file) - def run(self): - if not self.models: - raise ValueError(f"No models defined for project {self.name}") + def run(self, scheme_name: str): + schemes = self.schemes + if scheme_name not in schemes: + raise ValueError(f"Unknown scheme {scheme_name}.") + scheme = self.load_scheme(scheme_name) + + optimize(scheme) diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py index b71c78f29..03d9975c1 100644 --- a/glotaran/project/test/test_project.py +++ b/glotaran/project/test/test_project.py @@ -167,3 +167,5 @@ def test_run_optimization(project_folder, project_file): assert project.has_parameters assert project.has_data assert project.has_schemes + + project.run("sequential") From eea1a74540be2c3ea178591ecdc453afe563cfdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 12:35:55 +0200 Subject: [PATCH 16/32] Made save_result only serializ the Result object --- glotaran/builtin/io/folder/__init__.py | 1 - glotaran/builtin/io/folder/folder_plugin.py | 74 ------------------- .../io/folder/test/test_folder_plugin.py | 54 -------------- .../builtin/io/yml/test/test_save_result.py | 15 +--- glotaran/builtin/io/yml/yml.py | 72 ++++++++---------- glotaran/io/interface.py | 12 +-- .../plugin_system/project_io_registration.py | 20 +++-- .../test/test_project_io_registration.py | 4 +- glotaran/project/result.py | 49 ++++++++++++ glotaran/project/test/test_result.py | 12 +-- 10 files changed, 102 insertions(+), 211 deletions(-) delete mode 100644 glotaran/builtin/io/folder/__init__.py delete mode 100644 glotaran/builtin/io/folder/folder_plugin.py delete mode 100644 glotaran/builtin/io/folder/test/test_folder_plugin.py diff --git a/glotaran/builtin/io/folder/__init__.py b/glotaran/builtin/io/folder/__init__.py deleted file mode 100644 index 41b2c688f..000000000 --- a/glotaran/builtin/io/folder/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Plugin to dump pyglotaran object as files in a folder.""" diff --git a/glotaran/builtin/io/folder/folder_plugin.py b/glotaran/builtin/io/folder/folder_plugin.py deleted file mode 100644 index 743056851..000000000 --- a/glotaran/builtin/io/folder/folder_plugin.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Implementation of the folder Io plugin. - -The current implementation is an exact copy of how ``Result.save(path)`` -worked in glotaran 0.3.x and meant as an compatibility function. -""" - -from __future__ import annotations - -import os -from typing import TYPE_CHECKING - -from glotaran.io.interface import ProjectIoInterface -from glotaran.plugin_system.project_io_registration import register_project_io - -if TYPE_CHECKING: - from glotaran.project import Result - - -@register_project_io(["folder", "legacy"]) -class FolderProjectIo(ProjectIoInterface): - """Project Io plugin to save result data to a folder. - - There won't be a serialization of the Result object, but simply - a markdown summary output and the important data saved to files. - """ - - def save_result(self, result: Result, result_path: str) -> list[str]: - """Save the result to a given folder. - - Returns a list with paths of all saved items. - The following files are saved: - * `result.md`: The result with the model formatted as markdown text. - * `optimized_parameters.csv`: The optimized parameter as csv file. - * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. - - Parameters - ---------- - result : Result - Result instance to be saved. - result_path : str - The path to the folder in which to save the result. - - Returns - ------- - list[str] - List of file paths which were created. - - Raises - ------ - ValueError - If ``result_path`` is a file. - """ - if not os.path.exists(result_path): - os.makedirs(result_path) - if not os.path.isdir(result_path): - raise ValueError(f"The path '{result_path}' is not a directory.") - - paths = [] - - md_path = os.path.join(result_path, "result.md") - with open(md_path, "w") as f: - f.write(str(result.markdown())) - paths.append(md_path) - - csv_path = os.path.join(result_path, "optimized_parameters.csv") - result.optimized_parameters.to_csv(csv_path) - paths.append(csv_path) - - for label, data in result.data.items(): - nc_path = os.path.join(result_path, f"{label}.nc") - data.to_netcdf(nc_path, engine="netcdf4") - paths.append(nc_path) - - return paths diff --git a/glotaran/builtin/io/folder/test/test_folder_plugin.py b/glotaran/builtin/io/folder/test/test_folder_plugin.py deleted file mode 100644 index 282178f2a..000000000 --- a/glotaran/builtin/io/folder/test/test_folder_plugin.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import annotations - -from pathlib import Path -from typing import TYPE_CHECKING - -import pytest - -from glotaran.io import save_result -from glotaran.project.test.test_result import dummy_result # noqa: F401 - -if TYPE_CHECKING: - from typing import Literal - - from py.path import local as TmpDir - - from glotaran.project.result import Result - - -@pytest.mark.parametrize("format_name", ("folder", "legacy")) -def test_save_result_folder( - tmpdir: TmpDir, - dummy_result: Result, # noqa: F811 - format_name: Literal["folder", "legacy"], -): - """Check all files exist.""" - - result_dir = Path(tmpdir / "testresult") - save_result(result_path=str(result_dir), format_name=format_name, result=dummy_result) - - assert (result_dir / "result.md").exists() - assert (result_dir / "optimized_parameters.csv").exists() - assert (result_dir / "dataset1.nc").exists() - assert (result_dir / "dataset2.nc").exists() - assert (result_dir / "dataset3.nc").exists() - - -@pytest.mark.parametrize("format_name", ("folder", "legacy")) -def test_save_result_folder_error_path_is_file( - tmpdir: TmpDir, - dummy_result: Result, # noqa: F811 - format_name: Literal["folder", "legacy"], -): - """Raise error if result_path is a file without extension and overwrite is true.""" - - result_dir = Path(tmpdir / "testresult") - result_dir.touch() - - with pytest.raises(ValueError, match="The path '.+?' is not a directory."): - save_result( - result_path=str(result_dir), - format_name=format_name, - result=dummy_result, - allow_overwrite=True, - ) diff --git a/glotaran/builtin/io/yml/test/test_save_result.py b/glotaran/builtin/io/yml/test/test_save_result.py index 1d42ffed2..7fc7e27b0 100644 --- a/glotaran/builtin/io/yml/test/test_save_result.py +++ b/glotaran/builtin/io/yml/test/test_save_result.py @@ -18,14 +18,7 @@ def test_save_result_yml( ): """Check all files exist.""" - result_dir = Path(tmpdir / "testresult") - save_result(result_path=result_dir, format_name="yml", result=dummy_result) - - assert (result_dir / "result.md").exists() - assert (result_dir / "scheme.yml").exists() - assert (result_dir / "result.yml").exists() - assert (result_dir / "initial_parameters.csv").exists() - assert (result_dir / "optimized_parameters.csv").exists() - assert (result_dir / "dataset1.nc").exists() - assert (result_dir / "dataset2.nc").exists() - assert (result_dir / "dataset3.nc").exists() + result_path = Path(tmpdir / "testresult.yml") + save_result(file_name=result_path, format_name="yml", result=dummy_result) + + assert result_path.exists() diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index 97251326b..e7f8a83fe 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -1,7 +1,6 @@ from __future__ import annotations import dataclasses -import os import pathlib from typing import TYPE_CHECKING @@ -13,8 +12,6 @@ from glotaran.io import load_model from glotaran.io import load_parameters from glotaran.io import register_project_io -from glotaran.io import save_dataset -from glotaran.io import save_parameters from glotaran.model import Model from glotaran.parameter import ParameterGroup from glotaran.project import SavingOptions @@ -151,56 +148,45 @@ def load_scheme(self, file_name: str) -> Scheme: def save_scheme(self, scheme: Scheme, file_name: str): _write_dict(file_name, dataclasses.asdict(scheme)) - def save_result(self, result: Result, result_path: str): + def save_result(self, result: Result, file_name: str): options = result.scheme.saving - if os.path.exists(result_path): - raise FileExistsError(f"The path '{result_path}' is already existing.") + result_file_path = pathlib.Path(file_name) + if result_file_path.exists(): + raise FileExistsError(f"The path '{file_name}' is already existing.") - os.makedirs(result_path) - - if options.report: - md_path = os.path.join(result_path, "result.md") - with open(md_path, "w") as f: - f.write(str(result.markdown())) - - scheme_path = os.path.join(result_path, "scheme.yml") - result_scheme = dataclasses.replace(result.scheme) - result_scheme.model = result_scheme.model.markdown() - result = dataclasses.replace(result) - result.scheme = scheme_path + scheme_path = result_file_path.with_name("scheme.yml") parameters_format = options.parameter_format - - initial_parameters_path = os.path.join( - result_path, f"initial_parameters.{parameters_format}" + initial_parameters_path = result_file_path.with_name( + f"initial_parameters.{parameters_format}" ) - save_parameters(result.initial_parameters, initial_parameters_path, parameters_format) - result.initial_parameters = initial_parameters_path - result_scheme.parameters = initial_parameters_path - - optimized_parameters_path = os.path.join( - result_path, f"optimized_parameters.{parameters_format}" + optimized_parameters_path = result_file_path.with_name( + f"optimized_parameters.{parameters_format}" ) - save_parameters(result.optimized_parameters, optimized_parameters_path, parameters_format) - result.optimized_parameters = optimized_parameters_path dataset_format = options.data_format - for label, dataset in result.data.items(): - dataset_path = os.path.join(result_path, f"{label}.{dataset_format}") - save_dataset(dataset, dataset_path, dataset_format, saving_options=options) - result.data[label] = dataset_path - result_scheme.data[label] = dataset_path - - result_file_path = os.path.join(result_path, "result.yml") - result_dict = dataclasses.asdict(result) - if result_dict["jacobian"] is not None: - result_dict["jacobian"] = result_dict["jacobian"].tolist() - result_dict["covariance_matrix"] = result_dict["covariance_matrix"].tolist() - _write_dict(result_file_path, result_dict) - result_scheme.result_path = result_file_path + data_paths = { + label: result_file_path.with_name(f"{label}.{dataset_format}") for label in result.data + } - self.save_scheme(scheme=result_scheme, file_name=scheme_path) + jacobian = result.jacobian.tolist() if result.jacobian is not None else None + covariance_matrix = ( + result.covariance_matrix.tolist() if result.covariance_matrix is not None else None + ) + + result_dict = dataclasses.asdict( + dataclasses.replace( + result, + scheme=scheme_path, + initial_parameters=initial_parameters_path, + optimized_parameters=optimized_parameters_path, + data=data_paths, + jacobian=jacobian, + covariance_matrix=covariance_matrix, + ) + ) + _write_dict(result_file_path, result_dict) def _write_dict(file_name: str, d: dict): diff --git a/glotaran/io/interface.py b/glotaran/io/interface.py index 1ea85627e..9035e9f15 100644 --- a/glotaran/io/interface.py +++ b/glotaran/io/interface.py @@ -199,13 +199,13 @@ def save_scheme(self, scheme: Scheme, file_name: str): """ raise NotImplementedError(f"Cannot save scheme with format {self.format!r}") - def load_result(self, result_path: str) -> Result: + def load_result(self, file_name: str) -> Result: """Create a Result instance from the specs defined in a file (**NOT IMPLEMENTED**). Parameters ---------- - result_path : str - Path containing the result data. + file_name : str + File containing the result specs. Returns ------- @@ -218,15 +218,15 @@ def load_result(self, result_path: str) -> Result: """ raise NotImplementedError(f"Cannot read result with format {self.format!r}") - def save_result(self, result: Result, result_path: str): + def save_result(self, result: Result, file_name: str): """Save a Result instance to a spec file (**NOT IMPLEMENTED**). Parameters ---------- result : Result Result instance to save to specs file. - result_path : str - Path to write the result data to. + file_name : str + File to write the result specs to. .. # noqa: DAR101 diff --git a/glotaran/plugin_system/project_io_registration.py b/glotaran/plugin_system/project_io_registration.py index b2cb10110..30f194a0b 100644 --- a/glotaran/plugin_system/project_io_registration.py +++ b/glotaran/plugin_system/project_io_registration.py @@ -364,14 +364,12 @@ def save_scheme( @not_implemented_to_value_error -def load_result( - result_path: str | PathLike[str], format_name: str = None, **kwargs: Any -) -> Result: +def load_result(file_name: str | PathLike[str], format_name: str = None, **kwargs: Any) -> Result: """Create a :class:`Result` instance from the specs defined in a file. Parameters ---------- - result_path : str | PathLike[str] + file_name : str | PathLike[str] Path containing the result data. format_name : str Format the result is in, if not provided and it is a file @@ -385,14 +383,14 @@ def load_result( Result :class:`Result` instance created from the saved format. """ - io = get_project_io(format_name or inferr_file_format(result_path)) - return io.load_result(str(result_path), **kwargs) # type: ignore[call-arg] + io = get_project_io(format_name or inferr_file_format(file_name)) + return io.load_result(str(file_name), **kwargs) # type: ignore[call-arg] @not_implemented_to_value_error def save_result( result: Result, - result_path: str | PathLike[str], + file_name: str | PathLike[str], format_name: str = None, *, allow_overwrite: bool = False, @@ -404,7 +402,7 @@ def save_result( ---------- result : Result :class:`Result` instance to write. - result_path : str | PathLike[str] + file_name : str | PathLike[str] Path to write the result data to. format_name : str Format the result should be saved in, if not provided and it is a file @@ -415,12 +413,12 @@ def save_result( Additional keyword arguments passes to the ``save_result`` implementation of the project io plugin. """ - protect_from_overwrite(result_path, allow_overwrite=allow_overwrite) + protect_from_overwrite(file_name, allow_overwrite=allow_overwrite) io = get_project_io( - format_name or inferr_file_format(result_path, needs_to_exist=False, allow_folder=True) + format_name or inferr_file_format(file_name, needs_to_exist=False, allow_folder=True) ) io.save_result( # type: ignore[call-arg] - result_path=str(result_path), + file_name=str(file_name), result=result, **kwargs, ) diff --git a/glotaran/plugin_system/test/test_project_io_registration.py b/glotaran/plugin_system/test/test_project_io_registration.py index 30796b52a..03de607ef 100644 --- a/glotaran/plugin_system/test/test_project_io_registration.py +++ b/glotaran/plugin_system/test/test_project_io_registration.py @@ -102,8 +102,8 @@ def save_scheme( # type:ignore[override] } ) - def load_result(self, result_path: str | PathLike[str], **kwargs: Any) -> Result: - return {"file_name": result_path, **kwargs} # type:ignore[return-value] + def load_result(self, file_name: str | PathLike[str], **kwargs: Any) -> Result: + return {"file_name": file_name, **kwargs} # type:ignore[return-value] def save_result( # type:ignore[override] self, diff --git a/glotaran/project/result.py b/glotaran/project/result.py index c3ee783e1..2ba3e4497 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -235,3 +235,52 @@ def get_dataset(self, dataset_label: str) -> xr.Dataset: return self.data[dataset_label] except KeyError: raise ValueError(f"Unknown dataset '{dataset_label}'") + + # def save_result(self, result: Result, result_path: str) -> list[str]: + # """Save the result to a given folder. + # + # Returns a list with paths of all saved items. + # The following files are saved: + # * `result.md`: The result with the model formatted as markdown text. + # * `optimized_parameters.csv`: The optimized parameter as csv file. + # * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. + # + # Parameters + # ---------- + # result : Result + # Result instance to be saved. + # result_path : str + # The path to the folder in which to save the result. + # + # Returns + # ------- + # list[str] + # List of file paths which were created. + # + # Raises + # ------ + # ValueError + # If ``result_path`` is a file. + # """ + # if not os.path.exists(result_path): + # os.makedirs(result_path) + # if not os.path.isdir(result_path): + # raise ValueError(f"The path '{result_path}' is not a directory.") + # + # paths = [] + # + # md_path = os.path.join(result_path, "result.md") + # with open(md_path, "w") as f: + # f.write(str(result.markdown())) + # paths.append(md_path) + # + # csv_path = os.path.join(result_path, "optimized_parameters.csv") + # result.optimized_parameters.to_csv(csv_path) + # paths.append(csv_path) + # + # for label, data in result.data.items(): + # nc_path = os.path.join(result_path, f"{label}.nc") + # data.to_netcdf(nc_path, engine="netcdf4") + # paths.append(nc_path) + # + # return paths diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py index a91806c2e..25cb0ae5b 100644 --- a/glotaran/project/test/test_result.py +++ b/glotaran/project/test/test_result.py @@ -11,8 +11,8 @@ @pytest.fixture(scope="session") -def dummy_data(): - """Dummy data for testing.""" +def dummy_result(): + """Dummy result for testing.""" wanted_parameters = suite.wanted_parameters data = {} @@ -26,17 +26,11 @@ def dummy_data(): wanted_parameters, {"global": global_axis, "model": model_axis}, ) - yield data - - -@pytest.fixture(scope="session") -def dummy_result(dummy_data): - """Dummy result for testing.""" scheme = Scheme( model=suite.model, parameters=suite.initial_parameters, - data=dummy_data, + data=data, maximum_number_function_evaluations=9, ) From e892fa6ee31df335caf406d94210bd4b88dd0c50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 13:28:14 +0200 Subject: [PATCH 17/32] Rewrote Result.save --- glotaran/project/result.py | 145 +++++++++++---------------- glotaran/project/scheme.py | 2 +- glotaran/project/test/test_result.py | 4 + setup.cfg | 1 - 4 files changed, 66 insertions(+), 86 deletions(-) diff --git a/glotaran/project/result.py b/glotaran/project/result.py index 2ba3e4497..7bfd5d88e 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -3,6 +3,7 @@ from dataclasses import dataclass from dataclasses import replace +from pathlib import Path import numpy as np import xarray as xr @@ -10,10 +11,13 @@ from tabulate import tabulate from glotaran.deprecation import deprecate +from glotaran.io import save_dataset +from glotaran.io import save_model from glotaran.io import save_result from glotaran.model import Model from glotaran.parameter import ParameterGroup from glotaran.project.scheme import Scheme +from glotaran.project.scheme import default_data_filters from glotaran.utils.ipython import MarkdownStr @@ -175,42 +179,6 @@ def _repr_markdown_(self) -> str: def __str__(self): return str(self.markdown(with_model=False)) - @deprecate( - deprecated_qual_name_usage="glotaran.project.result.Result.save(result_path)", - new_qual_name_usage=( - "glotaran.io.save_result(" - "result=result, result_path=result_path, " - 'format_name="legacy", allow_overwrite=True' - ")" - ), - to_be_removed_in_version="0.6.0", - importable_indices=(2, 1), - ) - def save(self, path: str) -> list[str]: - """Saves the result to given folder. - - Warning - ------- - Deprecated use ``save_result(result_path=result_path, result=result, - format_name="legacy", allow_overwrite=True)`` instead. - - - Returns a list with paths of all saved items. - The following files are saved: - - * `result.md`: The result with the model formatted as markdown text. - - * `optimized_parameters.csv`: The optimized parameter as csv file. - - * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. - - Parameters - ---------- - path : - The path to the folder in which to save the result. - """ - save_result(result_path=path, result=self, format_name="legacy", allow_overwrite=True) - @deprecate( deprecated_qual_name_usage="glotaran.project.result.Result.get_dataset(dataset_label)", new_qual_name_usage=("glotaran.project.result.Result.data[dataset_label]"), @@ -236,51 +204,60 @@ def get_dataset(self, dataset_label: str) -> xr.Dataset: except KeyError: raise ValueError(f"Unknown dataset '{dataset_label}'") - # def save_result(self, result: Result, result_path: str) -> list[str]: - # """Save the result to a given folder. - # - # Returns a list with paths of all saved items. - # The following files are saved: - # * `result.md`: The result with the model formatted as markdown text. - # * `optimized_parameters.csv`: The optimized parameter as csv file. - # * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. - # - # Parameters - # ---------- - # result : Result - # Result instance to be saved. - # result_path : str - # The path to the folder in which to save the result. - # - # Returns - # ------- - # list[str] - # List of file paths which were created. - # - # Raises - # ------ - # ValueError - # If ``result_path`` is a file. - # """ - # if not os.path.exists(result_path): - # os.makedirs(result_path) - # if not os.path.isdir(result_path): - # raise ValueError(f"The path '{result_path}' is not a directory.") - # - # paths = [] - # - # md_path = os.path.join(result_path, "result.md") - # with open(md_path, "w") as f: - # f.write(str(result.markdown())) - # paths.append(md_path) - # - # csv_path = os.path.join(result_path, "optimized_parameters.csv") - # result.optimized_parameters.to_csv(csv_path) - # paths.append(csv_path) - # - # for label, data in result.data.items(): - # nc_path = os.path.join(result_path, f"{label}.nc") - # data.to_netcdf(nc_path, engine="netcdf4") - # paths.append(nc_path) - # - # return paths + def save(self, result_path: str | Path, overwrite: bool = False): + """Save the result to a given folder. + + Returns a list with paths of all saved items. + The following files are saved: + * `result.md`: The result with the model formatted as markdown text. + * `optimized_parameters.csv`: The optimized parameter as csv file. + * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. + + Parameters + ---------- + result : Result + Result instance to be saved. + result_path : str | Path + The path to the folder in which to save the result. + + Raises + ------ + ValueError + If ``result_path`` is a file. + FileExistsError + If ``result_path`` exists and ``overwrite`` is ``False``. + """ + result_path = Path(result_path) if isinstance(result_path, str) else result_path + if result_path.exists() and not overwrite: + raise FileExistsError(f"The path '{result_path}' exists.") + else: + result_path.mkdir() + if not result_path.is_dir(): + raise ValueError(f"The path '{result_path}' is not a directory.") + + result_file_path = result_path / "gloataran_result.yml" + save_result(self, result_file_path) + + model_path = result_path / "model.yml" + save_model(self.scheme.model, model_path) + + initial_parameters_path = result_path / "initial_parameters.csv" + self.initial_parameters.to_csv(initial_parameters_path) + + optimized_parameters_path = result_path / "optimized_parameters.csv" + self.optimized_parameters.to_csv(optimized_parameters_path) + + save_level = self.scheme.saving.level + data_filter = self.scheme.saving.data_filter or default_data_filters[save_level] + datasets = {} + for label, dataset in self.data.items(): + dataset_path = result_path / f"{label}.nc" + datasets[label] = dataset_path + if data_filter is not None: + dataset = dataset[data_filter] + save_dataset(dataset, dataset_path) + + if self.scheme.saving.report: + report_path = result_path / "result.md" + with open(report_path, "w") as f: + f.write(str(self.markdown())) diff --git a/glotaran/project/scheme.py b/glotaran/project/scheme.py index 3376a5dc5..17e0da2b0 100644 --- a/glotaran/project/scheme.py +++ b/glotaran/project/scheme.py @@ -24,7 +24,7 @@ class SavingOptions: level: Literal["minimal", "full"] = "full" data_filter: list[str] | None = None - data_format: str = "nc" + data_format: Literal["nc"] = "nc" parameter_format: str = "csv" report: bool = True diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py index 25cb0ae5b..a92e4e9c4 100644 --- a/glotaran/project/test/test_result.py +++ b/glotaran/project/test/test_result.py @@ -57,3 +57,7 @@ def test_result_ipython_rendering(dummy_result: Result): assert "text/markdown" in rendered_markdown_return assert rendered_markdown_return["text/markdown"].startswith("| Optimization Result") + + +def test_save_result(tmp_path, dummy_result: Result): + dummy_result.save(tmp_path / "test_result") diff --git a/setup.cfg b/setup.cfg index 022bd304e..5f5d1cebc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -68,7 +68,6 @@ glotaran.plugins.megacomplexes = glotaran.plugins.project_io = yml = glotaran.builtin.io.yml.yml csv = glotaran.builtin.io.csv.csv - folder = glotaran.builtin.io.folder.folder_plugin [aliases] test = pytest From 4781ed421c5a7ad139ce340d7a70164498304861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 13:30:11 +0200 Subject: [PATCH 18/32] Added project modul to linters --- .pre-commit-config.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2478c3ee0..d8a3a1172 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -77,8 +77,8 @@ repos: rev: 6.1.1 hooks: - id: pydocstyle - files: "^glotaran/(plugin_system|utils|deprecation|testing)" - exclude: "docs|tests?/" + files: "^glotaran/(plugin_system|utils|deprecation|project)" + exclude: "docs|tests?" # this is needed due to the following issue: # https://github.com/PyCQA/pydocstyle/issues/368 args: [--ignore-decorators=wrap_func_as_method] @@ -87,14 +87,14 @@ repos: rev: v1.8.0 hooks: - id: darglint - files: "^glotaran/(plugin_system|utils|deprecation|testing)" - exclude: "docs|tests?/" + files: "^glotaran/(plugin_system|utils|deprecation|project)" + exclude: "docs|tests?" - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.910 hooks: - id: mypy - files: "^glotaran/(plugin_system|utils|deprecation|testing)" + files: "^glotaran/(plugin_system|utils|deprecation|project)" exclude: "docs" additional_dependencies: [types-all] From 538af32919fbab6127b1fc18d856e4bacab04d3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 14:32:38 +0200 Subject: [PATCH 19/32] Added Model.as_dict --- glotaran/model/dataset_model.py | 2 +- glotaran/model/item.py | 15 ++++++++ glotaran/model/model.py | 22 ++++++++++- glotaran/model/property.py | 11 ++++++ glotaran/model/test/test_model.py | 63 ++++++++++++++++++++++++++++--- 5 files changed, 106 insertions(+), 7 deletions(-) diff --git a/glotaran/model/dataset_model.py b/glotaran/model/dataset_model.py index 443de05ae..7b4087b42 100644 --- a/glotaran/model/dataset_model.py +++ b/glotaran/model/dataset_model.py @@ -152,7 +152,7 @@ def overwrite_index_dependent(self, index_dependent: bool): def has_global_model(self) -> bool: """Indicates if the dataset model can model the global dimension.""" - return len(self.global_megacomplex) != 0 + return self.global_megacomplex is not None and len(self.global_megacomplex) != 0 def set_coordinates(self, coords: dict[str, np.ndarray]): """Sets the dataset model's coordinates.""" diff --git a/glotaran/model/item.py b/glotaran/model/item.py index ce0abb059..5a6ecef62 100644 --- a/glotaran/model/item.py +++ b/glotaran/model/item.py @@ -115,6 +115,9 @@ def decorator(cls): validate = _create_validation_func(cls) setattr(cls, "validate", validate) + as_dict = _create_as_dict_func(cls) + setattr(cls, "as_dict", as_dict) + get_state = _create_get_state_func(cls) setattr(cls, "__getstate__", get_state) @@ -321,6 +324,18 @@ def get_parameters(self) -> list[str]: return get_parameters +def _create_as_dict_func(cls): + @wrap_func_as_method(cls) + def as_dict(self) -> dict: + return { + name: getattr(self.__class__, name).as_dict_value(getattr(self, name)) + for name in self._glotaran_properties + if name != "label" and getattr(self, name) is not None + } + + return as_dict + + def _create_get_state_func(cls): @wrap_func_as_method(cls) def get_state(self) -> cls: diff --git a/glotaran/model/model.py b/glotaran/model/model.py index a382419e2..0280a7bc2 100644 --- a/glotaran/model/model.py +++ b/glotaran/model/model.py @@ -31,7 +31,7 @@ default_dataset_properties = { "megacomplex": List[str], "megacomplex_scale": {"type": List[Parameter], "allow_none": True}, - "global_megacomplex": {"type": List[str], "default": []}, + "global_megacomplex": {"type": List[str], "allow_none": True}, "global_megacomplex_scale": {"type": List[Parameter], "default": None, "allow_none": True}, "scale": {"type": Parameter, "default": None, "allow_none": True}, } @@ -87,6 +87,11 @@ def from_dict( megacomplex_types[default_megacomplex_type] = get_megacomplex(default_megacomplex_type) model_dict.pop("default-megacomplex", None) + model_dict = copy.deepcopy(model_dict) + if "default_megacomplex" in model_dict: + default_megacomplex_type = model_dict["default_megacomplex"] + del model_dict["default_megacomplex"] + model = cls( megacomplex_types=megacomplex_types, default_megacomplex_type=default_megacomplex_type ) @@ -214,6 +219,21 @@ def _add_dataset_type(self): dataset_model_type = create_dataset_model_type(self._dataset_properties) self._add_model_item("dataset", dataset_model_type) + def as_dict(self) -> dict: + model_dict = {} + model_dict["default_megacomplex"] = self.default_megacomplex + + for name in self._model_items: + items = getattr(self, name) + if len(items) == 0: + continue + if isinstance(items, list): + model_dict[name] = [item.as_dict() for item in items] + else: + model_dict[name] = {label: item.as_dict() for label, item in items.items()} + + return model_dict + @property def default_megacomplex(self) -> str: """The default megacomplex used by this model.""" diff --git a/glotaran/model/property.py b/glotaran/model/property.py index a49f700c8..98b0f25c3 100644 --- a/glotaran/model/property.py +++ b/glotaran/model/property.py @@ -49,6 +49,17 @@ def allow_none(self) -> bool: def property_type(self) -> type: return self._type + def as_dict_value(self, value): + if value is None: + return None + elif self._is_parameter_value: + return value.full_label + elif self._is_parameter_list: + return [v.full_label for v in value] + elif self._is_parameter_dict: + return {k: v.full_label for k, v in value.items()} + return value + def validate(self, value, model, parameters=None) -> list[str]: if value is None and self.allow_none: diff --git a/glotaran/model/test/test_model.py b/glotaran/model/test/test_model.py index b8a446b1d..7a7deca02 100644 --- a/glotaran/model/test/test_model.py +++ b/glotaran/model/test/test_model.py @@ -37,6 +37,18 @@ class MockItem: pass +@model_item( + properties={ + "param": Parameter, + "param_list": List[Parameter], + "param_dict": {"type": Dict[Tuple[str, str], Parameter]}, + "number": int, + }, +) +class MockItemSimple: + pass + + @model_item(has_label=False) class MockItemNoLabel: pass @@ -79,11 +91,16 @@ class MockMegacomplex6(Megacomplex): pass +@megacomplex(dimension="model", model_items={"test_item_simple": MockItemSimple}) +class MockMegacomplex7(Megacomplex): + pass + + @pytest.fixture -def test_model(): - model_dict = { +def test_model_dict(): + model = { "megacomplex": { - "m1": {"test_item1": "t2"}, + "m1": {"test_item1": "t2", "dimension": "model"}, "m2": {"type": "type5", "dimension": "model2"}, }, "weights": [ @@ -127,9 +144,14 @@ def test_model(): }, }, } - model_dict["test_item_dataset"] = model_dict["test_item1"] + model["test_item_dataset"] = model["test_item1"] + return model + + +@pytest.fixture +def test_model(test_model_dict): return Model.from_dict( - model_dict, + test_model_dict, megacomplex_types={ "type1": MockMegacomplex1, "type5": MockMegacomplex5, @@ -354,6 +376,37 @@ def test_fill(test_model: Model, parameter: ParameterGroup): assert t.complex == {} +def test_model_as_dict(): + model_dict = { + "default_megacomplex": "type7", + "megacomplex": { + "m1": {"test_item_simple": "t2", "dimension": "model"}, + }, + "test_item_simple": { + "t1": { + "param": "foo", + "param_list": ["bar", "baz"], + "param_dict": {("s1", "s2"): "baz"}, + "number": 21, + }, + }, + "dataset": { + "dataset1": { + "megacomplex": ["m1"], + "scale": "scale_1", + }, + }, + } + model = Model.from_dict( + model_dict, + megacomplex_types={ + "type7": MockMegacomplex7, + }, + ) + as_model_dict = model.as_dict() + assert as_model_dict == model_dict + + def test_model_markdown_base_heading_level(test_model: Model): """base_heading_level applies to all sections.""" assert test_model.markdown().startswith("# Model") From b99c73d8d175ea3f67493f721023dcd4d0bda3f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 14:53:56 +0200 Subject: [PATCH 20/32] Added save_model yml implementation --- .../builtin/io/yml/test/test_save_model.py | 64 +++++++++++++++++++ glotaran/builtin/io/yml/yml.py | 13 ++++ 2 files changed, 77 insertions(+) create mode 100644 glotaran/builtin/io/yml/test/test_save_model.py diff --git a/glotaran/builtin/io/yml/test/test_save_model.py b/glotaran/builtin/io/yml/test/test_save_model.py new file mode 100644 index 000000000..32148b9c0 --- /dev/null +++ b/glotaran/builtin/io/yml/test/test_save_model.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from glotaran.examples.sequential import model +from glotaran.io import load_model +from glotaran.io import save_model + +if TYPE_CHECKING: + from py.path import local as TmpDir + + +want = """dataset: + dataset_1: + initial_concentration: initial_concentration_dataset_1 + irf: gaussian_irf + megacomplex: + - megacomplex_parallel_decay +default_megacomplex: decay +initial_concentration: + initial_concentration_dataset_1: + compartments: + - species_1 + - species_2 + - species_3 + exclude_from_normalize: [] + parameters: + - initial_concentration.1 + - initial_concentration.0 + - initial_concentration.0 +irf: + gaussian_irf: + backsweep: false + center: irf.center + normalize: true + type: gaussian + width: irf.width +k_matrix: + k_matrix_sequential: + matrix: + species_2: species_1 + species_3: species_3 +megacomplex: + megacomplex_parallel_decay: + dimension: time + k_matrix: + - k_matrix_sequential + type: decay +""" + + +def test_save_model( + tmpdir: TmpDir, +): + """Check all files exist.""" + + model_path = tmpdir / "testmodel.yml" + save_model(file_name=model_path, format_name="yml", model=model) + + assert model_path.exists() + with open(model_path) as f: + got = f.read() + assert got == want + assert load_model(model_path).valid() diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index e7f8a83fe..d627a1c6e 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -148,6 +148,19 @@ def load_scheme(self, file_name: str) -> Scheme: def save_scheme(self, scheme: Scheme, file_name: str): _write_dict(file_name, dataclasses.asdict(scheme)) + def save_model(self, model: Model, file_name: str): + model_dict = model.as_dict() + # We replace tuples with strings + for name, items in model_dict.items(): + if not isinstance(items, (list, dict)): + continue + item_iterator = items if isinstance(items, list) else items.values() + for item in item_iterator: + for prop_name, prop in item.items(): + if isinstance(prop, dict) and any(isinstance(k, tuple) for k in prop): + item[prop_name] = {str(k): v for k, v in prop} + _write_dict(file_name, model_dict) + def save_result(self, result: Result, file_name: str): options = result.scheme.saving From d9f82a0e86d230e3d1c978b376cf5b3f669298b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 15:58:36 +0200 Subject: [PATCH 21/32] Added test to save result --- glotaran/project/result.py | 56 +++++++++++++++++++++----- glotaran/project/scheme.py | 60 +++++++++++++++++++++++----- glotaran/project/test/test_result.py | 41 ++++++++++++++++++- 3 files changed, 133 insertions(+), 24 deletions(-) diff --git a/glotaran/project/result.py b/glotaran/project/result.py index 7bfd5d88e..388d6e2e1 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -23,7 +23,7 @@ @dataclass class Result: - """The result of a global analysis""" + """The result of a global analysis.""" additional_penalty: np.ndarray | None """A vector with the value for each additional penalty, or None""" @@ -88,12 +88,21 @@ class Result: """ def __post_init__(self): + """Overwrite of ``__post_init__``.""" if isinstance(self.jacobian, list): self.jacobian = np.array(self.jacobian) self.covariance_matrix = np.array(self.covariance_matrix) @property def model(self) -> Model: + """Return the model used to fit result. + + Returns + ------- + Model + The model instance. + + """ return self.scheme.model def get_scheme(self) -> Scheme: @@ -116,14 +125,20 @@ def get_scheme(self) -> Scheme: return new_scheme def markdown(self, with_model: bool = True, base_heading_level: int = 1) -> MarkdownStr: - """Formats the model as a markdown text. + """Format the model as a markdown text. Parameters ---------- - with_model : + with_model : bool If `True`, the model will be printed with initial and optimized parameters filled in. - """ + base_heading_level : int + The level of the base heading. + Returns + ------- + MarkdownStr + The scheme as markdown string. + """ general_table_rows = [ ["Number of residual evaluation", self.number_of_function_evaluations], ["Number of variables", self.number_of_variables], @@ -173,10 +188,19 @@ def markdown(self, with_model: bool = True, base_heading_level: int = 1) -> Mark return MarkdownStr(result_table) def _repr_markdown_(self) -> str: - """Special method used by ``ipython`` to render markdown.""" + """Return a markdown representation str. + + Special method used by ``ipython`` to render markdown. + + Returns + ------- + str + The scheme as markdown string. + """ return str(self.markdown(base_heading_level=3)) def __str__(self): + """Overwrite of ``__str__``.""" return str(self.markdown(with_model=False)) @deprecate( @@ -186,7 +210,7 @@ def __str__(self): importable_indices=(2, 2), ) def get_dataset(self, dataset_label: str) -> xr.Dataset: - """Returns the result dataset for the given dataset label. + """Return the result dataset for the given dataset label. Warning ------- @@ -196,15 +220,25 @@ def get_dataset(self, dataset_label: str) -> xr.Dataset: Parameters ---------- - dataset_label : + dataset_label : str The label of the dataset. + + Returns + ------- + xr.Dataset + The dataset. + + Raises + ------ + ValueError + If the dataset_label is not in result datasets. """ try: return self.data[dataset_label] except KeyError: raise ValueError(f"Unknown dataset '{dataset_label}'") - def save(self, result_path: str | Path, overwrite: bool = False): + def save(self, result_path: str | Path, overwrite: bool = False) -> None: """Save the result to a given folder. Returns a list with paths of all saved items. @@ -215,10 +249,10 @@ def save(self, result_path: str | Path, overwrite: bool = False): Parameters ---------- - result : Result - Result instance to be saved. result_path : str | Path The path to the folder in which to save the result. + overwrite : bool + Weather to overwrite an existing folder. Raises ------ @@ -235,7 +269,7 @@ def save(self, result_path: str | Path, overwrite: bool = False): if not result_path.is_dir(): raise ValueError(f"The path '{result_path}' is not a directory.") - result_file_path = result_path / "gloataran_result.yml" + result_file_path = result_path / "glotaran_result.yml" save_result(self, result_file_path) model_path = result_path / "model.yml" diff --git a/glotaran/project/scheme.py b/glotaran/project/scheme.py index 17e0da2b0..acb423ff3 100644 --- a/glotaran/project/scheme.py +++ b/glotaran/project/scheme.py @@ -1,3 +1,4 @@ +"""The package for :class:``Scheme``.""" from __future__ import annotations import warnings @@ -22,15 +23,22 @@ @dataclass class SavingOptions: + """A collection of options for result saving.""" + level: Literal["minimal", "full"] = "full" data_filter: list[str] | None = None data_format: Literal["nc"] = "nc" - parameter_format: str = "csv" + parameter_format: Literal["csv"] = "csv" report: bool = True @dataclass class Scheme: + """A scheme is a collection of a model, parameters and a dataset. + + A scheme also holds options for optimization. + """ + model: Model | str parameters: ParameterGroup | str data: dict[str, xr.DataArray | xr.Dataset | str] @@ -51,20 +59,36 @@ class Scheme: result_path: str | None = None def problem_list(self) -> list[str]: - """Returns a list with all problems in the model and missing parameters.""" + """Return a list with all problems in the model and missing parameters. + + Returns + ------- + list[str] + A list of all problems found in the scheme's model. + + """ return self.model.problem_list(self.parameters) def validate(self) -> str: - """Returns a string listing all problems in the model and missing parameters.""" + """Return a string listing all problems in the model and missing parameters. + + Returns + ------- + str + A user-friendly string containing all the problems of a model if any. + Defaults to 'Your model is valid.' if no problems are found. + + """ return self.model.validate(self.parameters) - def valid(self, parameters: ParameterGroup = None) -> bool: - """Returns `True` if there are no problems with the model or the parameters, - else `False`.""" - return self.model.valid(parameters) + def markdown(self) -> MarkdownStr: + """Format the :class:`Scheme` as markdown string. - def markdown(self): - """Formats the :class:`Scheme` as markdown string.""" + Returns + ------- + MarkdownStr + The scheme as markdown string. + """ markdown_str = self.model.markdown(parameters=self.parameters) markdown_str += "\n\n" @@ -77,7 +101,13 @@ def markdown(self): return MarkdownStr(markdown_str) def is_grouped(self) -> bool: - """Returns whether the scheme should be grouped.""" + """Return whether the scheme should be grouped. + + Returns + ------- + bool + Weather the scheme should be grouped. + """ if self.group is not None and not self.group: return False is_groupable = self.model.is_groupable(self.parameters, self.data) @@ -86,7 +116,15 @@ def is_grouped(self) -> bool: return is_groupable def _repr_markdown_(self) -> str: - """Special method used by ``ipython`` to render markdown.""" + """Return a markdown representation str. + + Special method used by ``ipython`` to render markdown. + + Returns + ------- + str + The scheme as markdown string. + """ return str(self.markdown()) def __str__(self): diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py index a92e4e9c4..e2a520a4a 100644 --- a/glotaran/project/test/test_result.py +++ b/glotaran/project/test/test_result.py @@ -1,13 +1,16 @@ from __future__ import annotations import pytest +import xarray as xr from IPython.core.formatters import format_display_data from glotaran.analysis.optimize import optimize from glotaran.analysis.simulation import simulate from glotaran.analysis.test.models import ThreeDatasetDecay as suite +from glotaran.project import SavingOptions from glotaran.project import Scheme from glotaran.project.result import Result +from glotaran.project.scheme import default_data_filters @pytest.fixture(scope="session") @@ -59,5 +62,39 @@ def test_result_ipython_rendering(dummy_result: Result): assert rendered_markdown_return["text/markdown"].startswith("| Optimization Result") -def test_save_result(tmp_path, dummy_result: Result): - dummy_result.save(tmp_path / "test_result") +@pytest.mark.parametrize("level", ["minimal", "full"]) +@pytest.mark.parametrize("data_filter", [None, ["clp"]]) +@pytest.mark.parametrize("report", [True, False]) +def test_save_result(tmp_path, level, data_filter, report, dummy_result: Result): + result_path = tmp_path / "test_result" + dummy_result.scheme.saving = SavingOptions(level=level, data_filter=data_filter, report=report) + dummy_result.save(result_path) + files_must_exist = [ + "glotaran_result.yml", + "model.yml", + "optimized_parameters.csv", + "initial_parameters.csv", + ] + files_must_not_exist = [] + if report: + files_must_exist.append("result.md") + else: + files_must_not_exist.append("result.md") + + for file in files_must_exist: + assert (result_path / file).exists() + + for file in files_must_not_exist: + assert not (result_path / file).exists() + + for i in range(1, 4): + dataset_path = result_path / f"dataset{i}.nc" + assert dataset_path.exists() + dataset = xr.open_dataset(dataset_path) + if data_filter is not None: + assert len(data_filter) == len(dataset) + assert all(d in dataset for d in data_filter) + elif level == "minimal": + data_filter = default_data_filters[level] + assert len(data_filter) == len(dataset) + assert all(d in dataset for d in data_filter) From 1b714ab5c589ebd1403951fa5199ca7bd4db470f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 17:10:21 +0200 Subject: [PATCH 22/32] Added test for load result --- glotaran/analysis/optimize.py | 6 +- .../builtin/io/yml/test/test_save_result.py | 26 ++++++- glotaran/builtin/io/yml/yml.py | 67 +++++++++++-------- glotaran/examples/sequential.py | 7 ++ glotaran/model/model.py | 2 +- glotaran/project/result.py | 14 +++- 6 files changed, 86 insertions(+), 36 deletions(-) diff --git a/glotaran/analysis/optimize.py b/glotaran/analysis/optimize.py index 4af7ce6f7..c8b5bc0c6 100644 --- a/glotaran/analysis/optimize.py +++ b/glotaran/analysis/optimize.py @@ -95,13 +95,13 @@ def _create_result( ls_result.nfev if ls_result is not None else len(problem.parameter_history) ) number_of_jacobian_evaluation = ls_result.njev if success else None - optimality = ls_result.optimality if success else None + optimality = float(ls_result.optimality) if success else None number_of_data_points = ls_result.fun.size if success else None number_of_variables = ls_result.x.size if success else None degrees_of_freedom = number_of_data_points - number_of_variables if success else None - chi_square = np.sum(ls_result.fun ** 2) if success else None + chi_square = float(np.sum(ls_result.fun ** 2)) if success else None reduced_chi_square = chi_square / degrees_of_freedom if success else None - root_mean_square_error = np.sqrt(reduced_chi_square) if success else None + root_mean_square_error = float(np.sqrt(reduced_chi_square)) if success else None jacobian = ls_result.jac if success else None if success: diff --git a/glotaran/builtin/io/yml/test/test_save_result.py b/glotaran/builtin/io/yml/test/test_save_result.py index 7fc7e27b0..49c9687c9 100644 --- a/glotaran/builtin/io/yml/test/test_save_result.py +++ b/glotaran/builtin/io/yml/test/test_save_result.py @@ -3,8 +3,12 @@ from pathlib import Path from typing import TYPE_CHECKING +import pytest + +from glotaran.analysis.optimize import optimize +from glotaran.examples.sequential import scheme +from glotaran.io import load_result from glotaran.io import save_result -from glotaran.project.test.test_result import dummy_result # noqa: F401 if TYPE_CHECKING: from py.path import local as TmpDir @@ -12,9 +16,16 @@ from glotaran.project.result import Result +@pytest.fixture(scope="session") +def dummy_result(): + """Dummy result for testing.""" + scheme.maximum_number_function_evaluations = 1 + yield optimize(scheme) + + def test_save_result_yml( tmpdir: TmpDir, - dummy_result: Result, # noqa: F811 + dummy_result: Result, ): """Check all files exist.""" @@ -22,3 +33,14 @@ def test_save_result_yml( save_result(file_name=result_path, format_name="yml", result=dummy_result) assert result_path.exists() + + +def test_load_result( + tmp_path, + dummy_result: Result, +): + path = tmp_path / "test_result" + dummy_result.save(path) + result_path = path / "glotaran_result.yml" + print(result_path) # noqa T001 + load_result(result_path) diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index d627a1c6e..d63c4ab30 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -2,7 +2,6 @@ import dataclasses import pathlib -from typing import TYPE_CHECKING import yaml @@ -11,16 +10,15 @@ from glotaran.io import load_dataset from glotaran.io import load_model from glotaran.io import load_parameters +from glotaran.io import load_scheme from glotaran.io import register_project_io from glotaran.model import Model from glotaran.parameter import ParameterGroup +from glotaran.project import Result from glotaran.project import SavingOptions from glotaran.project import Scheme from glotaran.utils.sanitize import sanitize_yaml -if TYPE_CHECKING: - from glotaran.project import Result - @register_project_io(["yml", "yaml", "yml_str"]) class YmlProjectIo(ProjectIoInterface): @@ -38,12 +36,7 @@ def load_model(self, file_name: str) -> Model: The content of the file as dictionary. """ - if self.format == "yml_str": - spec = yaml.safe_load(file_name) - - else: - with open(file_name) as f: - spec = yaml.safe_load(f) + spec = self._load_yml(file_name) model_spec_deprecations(spec) @@ -64,13 +57,18 @@ def load_model(self, file_name: str) -> Model: return Model.from_dict(spec, megacomplex_types=None, default_megacomplex_type=None) + def load_result(self, file_name: str) -> Result: + + spec = self._load_yml(file_name) + + spec["scheme"] = load_scheme(spec["scheme"]) + spec["data"] = spec["scheme"].data + + return Result(**spec) + def load_parameters(self, file_name: str) -> ParameterGroup: - if self.format == "yml_str": - spec = yaml.safe_load(file_name) - else: - with open(file_name) as f: - spec = yaml.safe_load(f) + spec = self._load_yml(file_name) if isinstance(spec, list): return ParameterGroup.from_list(spec) @@ -146,7 +144,16 @@ def load_scheme(self, file_name: str) -> Scheme: ) def save_scheme(self, scheme: Scheme, file_name: str): - _write_dict(file_name, dataclasses.asdict(scheme)) + file_name = pathlib.Path(file_name) + scheme_dict = dataclasses.asdict( + dataclasses.replace( + scheme, + model=str(file_name.with_name("model.yml")), + parameters=str(file_name.with_name("initial_parameters.csv")), + data={label: str(file_name.with_name(f"{label}.nc")) for label in scheme.data}, + ) + ) + _write_dict(file_name, scheme_dict) def save_model(self, model: Model, file_name: str): model_dict = model.as_dict() @@ -180,27 +187,33 @@ def save_result(self, result: Result, file_name: str): dataset_format = options.data_format data_paths = { - label: result_file_path.with_name(f"{label}.{dataset_format}") for label in result.data + label: str(result_file_path.with_name(f"{label}.{dataset_format}")) + for label in result.data } - jacobian = result.jacobian.tolist() if result.jacobian is not None else None - covariance_matrix = ( - result.covariance_matrix.tolist() if result.covariance_matrix is not None else None - ) - result_dict = dataclasses.asdict( dataclasses.replace( result, - scheme=scheme_path, - initial_parameters=initial_parameters_path, - optimized_parameters=optimized_parameters_path, + scheme=str(scheme_path), + initial_parameters=str(initial_parameters_path), + optimized_parameters=str(optimized_parameters_path), data=data_paths, - jacobian=jacobian, - covariance_matrix=covariance_matrix, ) ) + del result_dict["additional_penalty"] + del result_dict["cost"] + del result_dict["jacobian"] + del result_dict["covariance_matrix"] _write_dict(result_file_path, result_dict) + def _load_yml(self, file_name: str) -> dict: + if self.format == "yml_str": + spec = yaml.safe_load(file_name) + else: + with open(file_name) as f: + spec = yaml.safe_load(f) + return spec + def _write_dict(file_name: str, d: dict): with open(file_name, "w") as f: diff --git a/glotaran/examples/sequential.py b/glotaran/examples/sequential.py index 0cba88b3c..d2589294a 100644 --- a/glotaran/examples/sequential.py +++ b/glotaran/examples/sequential.py @@ -7,6 +7,7 @@ from glotaran.io import load_parameters from glotaran.model import Model from glotaran.parameter import ParameterGroup +from glotaran.project import Scheme from glotaran.project.generators.generator import generate_model_yml sim_model = Model.from_dict( @@ -122,3 +123,9 @@ model_yml = generate_model_yml("decay-sequential", nr_species=3, irf=True) model = load_model(model_yml, format_name="yml_str") + +scheme = Scheme( + model=model, + parameters=parameter, + data={"dataset_1": dataset}, +) diff --git a/glotaran/model/model.py b/glotaran/model/model.py index 0280a7bc2..d0d9a4280 100644 --- a/glotaran/model/model.py +++ b/glotaran/model/model.py @@ -221,7 +221,7 @@ def _add_dataset_type(self): def as_dict(self) -> dict: model_dict = {} - model_dict["default_megacomplex"] = self.default_megacomplex + model_dict["default-megacomplex"] = self.default_megacomplex for name in self._model_items: items = getattr(self, name) diff --git a/glotaran/project/result.py b/glotaran/project/result.py index 388d6e2e1..70c079331 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -14,6 +14,7 @@ from glotaran.io import save_dataset from glotaran.io import save_model from glotaran.io import save_result +from glotaran.io import save_scheme from glotaran.model import Model from glotaran.parameter import ParameterGroup from glotaran.project.scheme import Scheme @@ -25,9 +26,6 @@ class Result: """The result of a global analysis.""" - additional_penalty: np.ndarray | None - """A vector with the value for each additional penalty, or None""" - cost: ArrayLike data: dict[str, xr.Dataset] """The resulting data as a dictionary of :xarraydoc:`Dataset`. @@ -53,6 +51,13 @@ class Result: """The glotaran version used to create the result.""" # The below can be none in case of unsuccessful optimization + + additional_penalty: np.ndarray | None = None + """A vector with the value for each additional penalty, or None""" + + cost: ArrayLike | None = None + """The final cost.""" + chi_square: float | None = None r"""The chi-square of the optimization. @@ -272,6 +277,9 @@ def save(self, result_path: str | Path, overwrite: bool = False) -> None: result_file_path = result_path / "glotaran_result.yml" save_result(self, result_file_path) + scheme_path = result_path / "scheme.yml" + save_scheme(self.scheme, scheme_path) + model_path = result_path / "model.yml" save_model(self.scheme.model, model_path) From 74921ee3795242210a4c11613868f83d5cfb10b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sat, 14 Aug 2021 17:36:08 +0200 Subject: [PATCH 23/32] Added Result.verify --- glotaran/project/result.py | 34 ++++++++++++++++++++++++++++ glotaran/project/test/test_result.py | 11 +++++++++ 2 files changed, 45 insertions(+) diff --git a/glotaran/project/result.py b/glotaran/project/result.py index 70c079331..af3b0039d 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -303,3 +303,37 @@ def save(self, result_path: str | Path, overwrite: bool = False) -> None: report_path = result_path / "result.md" with open(report_path, "w") as f: f.write(str(self.markdown())) + + def recreate(self) -> Result: + """Recrate a resulf from the initial parameters. + + Returns + ------- + Result : + The recreated result. + + """ + from glotaran.analysis.optimize import optimize + + return optimize(self.scheme) + + def verify(self) -> bool: + """Verify a result. + + Returns + ------- + bool : + Weather the recreated result is equal to this result. + + """ + recreated = self.recreate() + + if self.root_mean_square_error != recreated.root_mean_square_error: + return False + + for label, dataset in self.data.items(): + for attr, array in dataset.items(): + if not np.allclose(array, recreated.data[label][attr]): + return False + + return True diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py index e2a520a4a..70706d17a 100644 --- a/glotaran/project/test/test_result.py +++ b/glotaran/project/test/test_result.py @@ -42,6 +42,8 @@ def dummy_result(): def test_get_scheme(dummy_result: Result): scheme = dummy_result.get_scheme() + assert "residual" not in dummy_result.scheme.data["dataset1"] + assert "residual" not in scheme.data["dataset1"] assert all(scheme.parameters.to_dataframe() != dummy_result.scheme.parameters.to_dataframe()) assert all( scheme.parameters.to_dataframe() == dummy_result.optimized_parameters.to_dataframe() @@ -98,3 +100,12 @@ def test_save_result(tmp_path, level, data_filter, report, dummy_result: Result) data_filter = default_data_filters[level] assert len(data_filter) == len(dataset) assert all(d in dataset for d in data_filter) + + +def test_recreate(dummy_result): + recreated_result = dummy_result.recreate() + assert recreated_result.success + + +def test_verify(dummy_result): + assert dummy_result.verify() From 0e0bcac97c7e640ff3b40d25a8a13e94624a2c7f Mon Sep 17 00:00:00 2001 From: s-weigand Date: Sat, 14 Aug 2021 21:18:14 +0200 Subject: [PATCH 24/32] =?UTF-8?q?=F0=9F=94=A7=20Activated=20mypy=20for=20p?= =?UTF-8?q?roject=20submodule?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- setup.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.cfg b/setup.cfg index 5f5d1cebc..3804d3540 100644 --- a/setup.cfg +++ b/setup.cfg @@ -99,3 +99,6 @@ ignore_errors = False [mypy-glotaran.deprecation.*] ignore_errors = False + +[mypy-glotaran.project.*] +ignore_errors = False From dbbf88055f8a92e9773e3e8468e4e81f65118c77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sun, 15 Aug 2021 12:21:22 +0200 Subject: [PATCH 25/32] Created glotran.project.dataclasses module --- glotaran/project/dataclasses.py | 55 +++++++++++++++++++++++ glotaran/project/test/test_dataclasses.py | 25 +++++++++++ 2 files changed, 80 insertions(+) create mode 100644 glotaran/project/dataclasses.py create mode 100644 glotaran/project/test/test_dataclasses.py diff --git a/glotaran/project/dataclasses.py b/glotaran/project/dataclasses.py new file mode 100644 index 000000000..5fdb2cab6 --- /dev/null +++ b/glotaran/project/dataclasses.py @@ -0,0 +1,55 @@ +"""Contains helper methods for dataclasses.""" +from __future__ import annotations + +import dataclasses +from typing import Any + + +def serialize_to_file_name_field( + file_name: str, default: Any = dataclasses.MISSING +) -> dataclasses.Field: + """Create a dataclass field with file_name as metadata. + + The field will be replace with the file_name as value. within + :function:``glotaran.project.dataclasses.asdict``. + + Parameters + ---------- + file_name : str + The file_name with which the field gets replaced in asdict. + default : Any + The default value of the field. + + Returns + ------- + dataclasses.Field + The created field. + """ + return dataclasses.field(default=default, metadata={"file_name": file_name}) + + +def asdict(dataclass: object) -> dict[str, Any]: + """Create a dictinory from a dataclass. + + A wrappper for ``dataclasses.asdict`` which recognizes fields created + with :function:``glotaran.project.dataclasses.serialize_to_file_name_field``. + + Parameters + ---------- + dataclass : object + A dataclass instance. + + Returns + ------- + dict[str, Any] + The dataclass represented as a dictionary. + """ + fields = dataclasses.fields(dataclass) + + def dict_factory(values: list): + for i, field in enumerate(fields): + if "file_name" in field.metadata: + values[i] = (field.name, field.metadata["file_name"]) + return dict(values) + + return dataclasses.asdict(dataclass, dict_factory=dict_factory) diff --git a/glotaran/project/test/test_dataclasses.py b/glotaran/project/test/test_dataclasses.py new file mode 100644 index 000000000..c0be49e60 --- /dev/null +++ b/glotaran/project/test/test_dataclasses.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass + +from glotaran.project.dataclasses import asdict +from glotaran.project.dataclasses import serialize_to_file_name_field + + +def test_serialize_to_file_name_field(): + @dataclass + class DummyDataclass: + foo: int = serialize_to_file_name_field("foo.file") + bar: int = serialize_to_file_name_field("bar.file", default=42) + baz: int = 84 + + dummy_class = DummyDataclass(foo=21) + + dummy_class_dict = asdict(dummy_class) + + assert dummy_class_dict["foo"] == "foo.file" + assert dummy_class_dict["foo"] != dummy_class.foo + + assert dummy_class_dict["bar"] == "bar.file" + assert dummy_class_dict["bar"] != dummy_class.bar + + assert dummy_class_dict["baz"] == 84 + assert dummy_class_dict["baz"] == dummy_class.baz From 78d7078a258359af60c269d1fc854b59cb31300d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sun, 15 Aug 2021 13:15:15 +0200 Subject: [PATCH 26/32] Adressed mypy issues --- glotaran/parameter/parameter_group.py | 3 +- glotaran/project/dataclasses.py | 26 +++++++++ glotaran/project/project.py | 80 +++++++++++++++------------ glotaran/project/result.py | 3 +- glotaran/project/scheme.py | 22 +++++--- 5 files changed, 90 insertions(+), 44 deletions(-) diff --git a/glotaran/parameter/parameter_group.py b/glotaran/parameter/parameter_group.py index e76193ed4..1d4549798 100644 --- a/glotaran/parameter/parameter_group.py +++ b/glotaran/parameter/parameter_group.py @@ -3,6 +3,7 @@ from __future__ import annotations from copy import copy +from pathlib import Path from textwrap import indent from typing import Generator @@ -196,7 +197,7 @@ def to_dataframe(self) -> pd.DataFrame: parameter_dict["expression"].append(parameter.expression) return pd.DataFrame(parameter_dict) - def to_csv(self, filename: str, delimiter: str = ","): + def to_csv(self, filename: str | Path, delimiter: str = ","): """Writes a :class:`ParameterGroup` to a CSV file. Parameters diff --git a/glotaran/project/dataclasses.py b/glotaran/project/dataclasses.py index 5fdb2cab6..7c7b60196 100644 --- a/glotaran/project/dataclasses.py +++ b/glotaran/project/dataclasses.py @@ -28,6 +28,29 @@ def serialize_to_file_name_field( return dataclasses.field(default=default, metadata={"file_name": file_name}) +def serialize_to_file_name_dict_field( + file_suffix: str, default: Any = dataclasses.MISSING +) -> dataclasses.Field: + """Create a dataclass field with file_name as metadata. + + The field will be replace with the file_name as value. within + :function:``glotaran.project.dataclasses.asdict``. + + Parameters + ---------- + file_name : str + The file_name with which the field gets replaced in asdict. + default : Any + The default value of the field. + + Returns + ------- + dataclasses.Field + The created field. + """ + return dataclasses.field(default=default, metadata={"file_suffix": file_suffix}) + + def asdict(dataclass: object) -> dict[str, Any]: """Create a dictinory from a dataclass. @@ -50,6 +73,9 @@ def dict_factory(values: list): for i, field in enumerate(fields): if "file_name" in field.metadata: values[i] = (field.name, field.metadata["file_name"]) + elif "file_suffix" in field.metadata: + file_suffix = field.metadata["file_name"] + values[i] = (field.name, {key: f"{key}.{file_suffix}" for key in values[i][1]}) return dict(values) return dataclasses.asdict(dataclass, dict_factory=dict_factory) diff --git a/glotaran/project/project.py b/glotaran/project/project.py index e2dc2d615..6d59a61ac 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -22,8 +22,8 @@ from glotaran.model import ModelError from glotaran.parameter import ParameterGroup from glotaran.parameter.parameter import Keys +from glotaran.project.generators.generator import available_generators from glotaran.project.generators.generator import generate_model_yml -from glotaran.project.generators.generator import generators from glotaran.project.scheme import Scheme TEMPLATE = """version: {gta_version} @@ -42,11 +42,11 @@ class Project: """ - file: str | Path + file: Path name: str version: str - folder: str | Path = None + folder: Path def __post_init__(self): if isinstance(self.file, str): @@ -58,28 +58,29 @@ def __post_init__(self): pass @classmethod - def create(cls, name: str | None = None, project_folder: str | None = None): - if project_folder is None: - project_folder = getcwd() - project_folder = Path(project_folder) + def create(cls, name: str | None = None, folder: str | Path | None = None): + if folder is None: + folder = getcwd() + project_folder = Path(folder) name = name if name else project_folder.name project_file = project_folder / PROJECT_FILE_NAME with open(project_file, "w") as f: f.write(TEMPLATE.format(gta_version=gta_version, name=name)) - with open(project_file) as f: - project_dict = load(f) - project_dict["file"] = project_folder - return cls(**project_dict) + return cls.open(project_file) @classmethod - def open(cls, project_folder: str): - project_file = Path(project_folder) - if not project_file.match(PROJECT_FILE_NAME): - project_file = project_file / PROJECT_FILE_NAME - with open(project_file) as f: + def open(cls, project_folder_or_file: str | Path): + folder = Path(project_folder_or_file) + if folder.is_dir(): + file = folder / PROJECT_FILE_NAME + else: + folder, file = folder.parent, folder + + with open(file) as f: project_dict = load(f) - project_dict["file"] = project_file + project_dict["file"] = file + project_dict["folder"] = folder return cls(**project_dict) @property @@ -152,8 +153,13 @@ def load_model(self, name: str) -> Model: return load_model(model_path) def generate_model( - self, name: str, generator: Literal[generators.keys()], generator_arguments: dict[str, Any] + self, + name: str, + generator: str, + generator_arguments: dict[str, Any], ): + if generator not in available_generators: + raise ValueError(f"Unknown generator '{generator}'.") self.create_model_dir_if_not_exist() model = generate_model_yml(generator, **generator_arguments) with open(self.model_dir / f"{name}.yml", "w") as f: @@ -255,7 +261,7 @@ def parameters(self): if parameters_file.suffix in [".yml", ".yaml", ".csv"] } - def load_parameters(self, name: str) -> Model: + def load_parameters(self, name: str) -> ParameterGroup: try: parameters_path = next(p for p in self.parameters_dir.iterdir() if name in p.name) except StopIteration: @@ -266,11 +272,11 @@ def generate_parameters( self, model_name: str, name: str | None = None, - fmt: Literal[["yml", "yaml", "csv"]] = "csv", + fmt: Literal["yml", "yaml", "csv"] = "csv", ): self.create_parameters_dir_if_not_exist() model = self.load_model(model_name) - parameters = {} + parameters: dict | list = {} for parameter in model.get_parameters(): groups = parameter.split(".") label = groups.pop() @@ -281,19 +287,19 @@ def generate_parameters( ) elif isinstance(parameters, dict): parameters = [] - parameters.append( - [ - label, - 0.0, - { - Keys.EXPR: "None", - Keys.MAX: "None", - Keys.MIN: "None", - Keys.NON_NEG: "false", - Keys.VARY: "true", - }, - ] - ) + parameters.append( + [ + label, + 0.0, + { + Keys.EXPR: "None", + Keys.MAX: "None", + Keys.MIN: "None", + Keys.NON_NEG: "false", + Keys.VARY: "true", + }, + ] + ) else: if isinstance(parameters, list): raise ModelError( @@ -328,7 +334,11 @@ def generate_parameters( with open(parameter_file, "w") as f: f.write(parameter_yml) elif fmt == "csv": - parameter_group = ParameterGroup.from_dict(parameters) + parameter_group = ( + ParameterGroup.from_dict(parameters) + if isinstance(parameters, dict) + else ParameterGroup.from_list(parameters) + ) parameter_group.to_csv(parameter_file) def run(self, scheme_name: str): diff --git a/glotaran/project/result.py b/glotaran/project/result.py index af3b0039d..9183743f7 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -4,6 +4,7 @@ from dataclasses import dataclass from dataclasses import replace from pathlib import Path +from typing import Any import numpy as np import xarray as xr @@ -144,7 +145,7 @@ def markdown(self, with_model: bool = True, base_heading_level: int = 1) -> Mark MarkdownStr The scheme as markdown string. """ - general_table_rows = [ + general_table_rows: list[Any] = [ ["Number of residual evaluation", self.number_of_function_evaluations], ["Number of variables", self.number_of_variables], ["Number of datapoints", self.number_of_data_points], diff --git a/glotaran/project/scheme.py b/glotaran/project/scheme.py index acb423ff3..0c5b6c678 100644 --- a/glotaran/project/scheme.py +++ b/glotaran/project/scheme.py @@ -7,6 +7,9 @@ from glotaran.deprecation import deprecate from glotaran.io import load_scheme +from glotaran.project.dataclasses import asdict +from glotaran.project.dataclasses import serialize_to_file_name_dict_field +from glotaran.project.dataclasses import serialize_to_file_name_field from glotaran.utils.ipython import MarkdownStr if TYPE_CHECKING: @@ -39,9 +42,13 @@ class Scheme: A scheme also holds options for optimization. """ - model: Model | str - parameters: ParameterGroup | str - data: dict[str, xr.DataArray | xr.Dataset | str] + model: Model = serialize_to_file_name_field("model.yml") # type: ignore + parameters: ParameterGroup = serialize_to_file_name_field( + "initial_parameters.csv" + ) # type: ignore + data: dict[str, xr.DataArray | xr.Dataset] = serialize_to_file_name_dict_field( + "nc" + ) # type: ignore group: bool | None = None group_tolerance: float = 0.0 non_negative_least_squares: bool = False @@ -67,7 +74,8 @@ def problem_list(self) -> list[str]: A list of all problems found in the scheme's model. """ - return self.model.problem_list(self.parameters) + model: Model = self.model + return model.problem_list(self.parameters) def validate(self) -> str: """Return a string listing all problems in the model and missing parameters. @@ -89,16 +97,16 @@ def markdown(self) -> MarkdownStr: MarkdownStr The scheme as markdown string. """ - markdown_str = self.model.markdown(parameters=self.parameters) + model_markdown_str = self.model.markdown(parameters=self.parameters) - markdown_str += "\n\n" + markdown_str = "\n\n" markdown_str += "__Scheme__\n\n" markdown_str += f"* *nnls*: {self.non_negative_least_squares}\n" markdown_str += f"* *nfev*: {self.maximum_number_function_evaluations}\n" markdown_str += f"* *group_tolerance*: {self.group_tolerance}\n" - return MarkdownStr(markdown_str) + return model_markdown_str + MarkdownStr(markdown_str) def is_grouped(self) -> bool: """Return whether the scheme should be grouped. From 7c5dbeae2c8ab3c5d3a6a5ce5ee04c9266e45d49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Sun, 15 Aug 2021 13:25:44 +0200 Subject: [PATCH 27/32] Created stub for Model.py --- glotaran/model/glotaran/model/model.pyi | 61 +++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 glotaran/model/glotaran/model/model.pyi diff --git a/glotaran/model/glotaran/model/model.pyi b/glotaran/model/glotaran/model/model.pyi new file mode 100644 index 000000000..4ff846d90 --- /dev/null +++ b/glotaran/model/glotaran/model/model.pyi @@ -0,0 +1,61 @@ +from __future__ import annotations + +from typing import Any +from typing import Union + +import xarray as xr + +from glotaran.model.clp_penalties import EqualAreaPenalty +from glotaran.model.constraint import Constraint +from glotaran.model.dataset_model import DatasetModel +from glotaran.model.dataset_model import create_dataset_model_type +from glotaran.model.megacomplex import Megacomplex +from glotaran.model.megacomplex import create_model_megacomplex_type +from glotaran.model.relation import Relation +from glotaran.model.util import ModelError +from glotaran.model.weight import Weight +from glotaran.parameter import Parameter +from glotaran.parameter import ParameterGroup +from glotaran.utils.ipython import MarkdownStr + +default_model_items: Any +default_dataset_properties: Any + +class Model: + def __init__( + self, + megacomplex_types: dict[str, type[Megacomplex]], + *, + default_megacomplex_type: str | None = ..., + ) -> None: ... + @classmethod + def from_dict( + cls, + model_dict_ref: dict, + megacomplex_types: dict[str, type[Megacomplex]], + *, + default_megacomplex_type: str | None = ..., + ) -> Model: ... + def as_dict(self) -> dict: ... + @property + def default_megacomplex(self) -> str: ... + @property + def megacomplex_types(self) -> dict[str, type[Megacomplex]]: ... + @property + def model_items(self) -> dict[str, type[object]]: ... + @property + def global_megacomplex(self) -> dict[str, Megacomplex]: ... + def need_index_dependent(self) -> bool: ... + def is_groupable(self, parameters: ParameterGroup, data: dict[str, xr.DataArray]) -> bool: ... + def problem_list(self, parameters: ParameterGroup = ...) -> list[str]: ... + def validate(self, parameters: ParameterGroup = ..., raise_exception: bool = ...) -> str: ... + def valid(self, parameters: ParameterGroup = ...) -> bool: ... + def get_parameters(self) -> list[str]: ... + def markdown( + self, + parameters: ParameterGroup = ..., + initial_parameters: ParameterGroup = ..., + base_heading_level: int = ..., + ) -> MarkdownStr: ... + @property + def dataset(self) -> dict[str, DatasetModel]: ... From 48b0d3a92730aadc007a12d82651bb88de5fdd1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Thu, 19 Aug 2021 14:56:50 +0200 Subject: [PATCH 28/32] Changed scheme saving to new saving method --- .../builtin/io/yml/test/test_save_model.py | 7 +- .../builtin/io/yml/test/test_save_scheme.py | 66 ++++++++ glotaran/builtin/io/yml/yml.py | 153 +++++++++--------- glotaran/project/dataclasses.py | 96 +++++++---- glotaran/project/project.py | 20 ++- glotaran/project/scheme.py | 21 +-- glotaran/project/test/test_dataclasses.py | 28 +++- 7 files changed, 257 insertions(+), 134 deletions(-) create mode 100644 glotaran/builtin/io/yml/test/test_save_scheme.py diff --git a/glotaran/builtin/io/yml/test/test_save_model.py b/glotaran/builtin/io/yml/test/test_save_model.py index 32148b9c0..b15ee83ce 100644 --- a/glotaran/builtin/io/yml/test/test_save_model.py +++ b/glotaran/builtin/io/yml/test/test_save_model.py @@ -16,7 +16,7 @@ irf: gaussian_irf megacomplex: - megacomplex_parallel_decay -default_megacomplex: decay +default-megacomplex: decay initial_concentration: initial_concentration_dataset_1: compartments: @@ -38,8 +38,9 @@ k_matrix: k_matrix_sequential: matrix: - species_2: species_1 - species_3: species_3 + (species_2, species_1): decay.species_1 + (species_3, species_2): decay.species_2 + (species_3, species_3): decay.species_3 megacomplex: megacomplex_parallel_decay: dimension: time diff --git a/glotaran/builtin/io/yml/test/test_save_scheme.py b/glotaran/builtin/io/yml/test/test_save_scheme.py new file mode 100644 index 000000000..3120a49ab --- /dev/null +++ b/glotaran/builtin/io/yml/test/test_save_scheme.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import xarray as xr + +from glotaran.examples.sequential import dataset +from glotaran.examples.sequential import model +from glotaran.examples.sequential import parameter +from glotaran.io import load_scheme +from glotaran.io import save_dataset +from glotaran.io import save_model +from glotaran.io import save_parameters +from glotaran.io import save_scheme +from glotaran.project import Scheme + +if TYPE_CHECKING: + from py.path import local as TmpDir + + +want = """add_svd: true +data_files: + dataset_1: d.nc +ftol: 1.0e-08 +group: null +group_tolerance: 0.0 +gtol: 1.0e-08 +maximum_number_function_evaluations: null +model_file: m.yml +non_negative_least_squares: false +optimization_method: TrustRegionReflection +parameters_file: p.csv +result_path: null +saving: + data_filter: null + data_format: nc + level: full + parameter_format: csv + report: true +xtol: 1.0e-08 +""" + + +def test_save_scheme(tmpdir: TmpDir): + scheme = Scheme( + model, + parameter, + {"dataset_1": dataset}, + model_file="m.yml", + parameters_file="p.csv", + data_files={"dataset_1": "d.nc"}, + ) + save_model(model, tmpdir / "m.yml") + save_parameters(parameter, tmpdir / "p.csv") + save_dataset(dataset, tmpdir / "d.nc") + scheme_path = tmpdir / "testscheme.yml" + save_scheme(file_name=scheme_path, format_name="yml", scheme=scheme) + + assert scheme_path.exists() + with open(scheme_path) as f: + got = f.read() + assert got == want + loaded = load_scheme(scheme_path) + print(loaded.model.validate(loaded.parameters)) + assert loaded.model.valid(loaded.parameters) + assert isinstance(scheme.data["dataset_1"], xr.Dataset) diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index d63c4ab30..41f8f08bf 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -7,16 +7,14 @@ from glotaran.deprecation.modules.builtin_io_yml import model_spec_deprecations from glotaran.io import ProjectIoInterface -from glotaran.io import load_dataset -from glotaran.io import load_model -from glotaran.io import load_parameters from glotaran.io import load_scheme from glotaran.io import register_project_io from glotaran.model import Model from glotaran.parameter import ParameterGroup from glotaran.project import Result -from glotaran.project import SavingOptions from glotaran.project import Scheme +from glotaran.project.dataclasses import asdict +from glotaran.project.dataclasses import fromdict from glotaran.utils.sanitize import sanitize_yaml @@ -76,83 +74,79 @@ def load_parameters(self, file_name: str) -> ParameterGroup: return ParameterGroup.from_dict(spec) def load_scheme(self, file_name: str) -> Scheme: - if self.format == "yml_str": - yml = file_name - else: - try: - with open(file_name) as f: - yml = f.read() - except Exception as e: - raise OSError(f"Error opening scheme: {e}") - - try: - scheme = yaml.safe_load(yml) - except Exception as e: - raise ValueError(f"Error parsing scheme: {e}") - - if "model" not in scheme: - raise ValueError("Model file not specified.") - - try: - model = load_model(scheme["model"]) - except Exception as e: - raise ValueError(f"Error loading model: {e}") - - if "parameters" not in scheme: - raise ValueError("Parameters file not specified.") - - try: - parameters = load_parameters(scheme["parameters"]) - except Exception as e: - raise ValueError(f"Error loading parameters: {e}") - - if "data" not in scheme: - raise ValueError("No data specified.") - - data = {} - for label, path in scheme["data"].items(): - data_format = scheme.get("data_format", None) - path = str(pathlib.Path(path).resolve()) - - try: - data[label] = load_dataset(path, format_name=data_format) - except Exception as e: - raise ValueError(f"Error loading dataset '{label}': {e}") - - optimization_method = scheme.get("optimization_method", "TrustRegionReflection") - nnls = scheme.get("non_negative_least_squares", False) - nfev = scheme.get("maximum_number_function_evaluations", None) - ftol = scheme.get("ftol", 1e-8) - gtol = scheme.get("gtol", 1e-8) - xtol = scheme.get("xtol", 1e-8) - group = scheme.get("group", False) - group_tolerance = scheme.get("group_tolerance", 0.0) - saving = SavingOptions(**scheme.get("saving", {})) - return Scheme( - model=model, - parameters=parameters, - data=data, - non_negative_least_squares=nnls, - maximum_number_function_evaluations=nfev, - ftol=ftol, - gtol=gtol, - xtol=xtol, - group=group, - group_tolerance=group_tolerance, - optimization_method=optimization_method, - saving=saving, - ) + spec = self._load_yml(file_name) + file_path = pathlib.Path(file_name) + return fromdict(Scheme, spec, folder=file_path.parent) + # if self.format == "yml_str": + # yml = file_name + # else: + # try: + # with open(file_name) as f: + # yml = f.read() + # except Exception as e: + # raise OSError(f"Error opening scheme: {e}") + # + # try: + # scheme = yaml.safe_load(yml) + # except Exception as e: + # raise ValueError(f"Error parsing scheme: {e}") + # + # if "model" not in scheme: + # raise ValueError("Model file not specified.") + # + # try: + # model = load_model(scheme["model"]) + # except Exception as e: + # raise ValueError(f"Error loading model: {e}") + # + # if "parameters" not in scheme: + # raise ValueError("Parameters file not specified.") + # + # try: + # parameters = load_parameters(scheme["parameters"]) + # except Exception as e: + # raise ValueError(f"Error loading parameters: {e}") + # + # if "data" not in scheme: + # raise ValueError("No data specified.") + # + # data = {} + # for label, path in scheme["data"].items(): + # data_format = scheme.get("data_format", None) + # path = str(pathlib.Path(path).resolve()) + # + # try: + # data[label] = load_dataset(path, format_name=data_format) + # except Exception as e: + # raise ValueError(f"Error loading dataset '{label}': {e}") + # + # optimization_method = scheme.get("optimization_method", "TrustRegionReflection") + # nnls = scheme.get("non_negative_least_squares", False) + # nfev = scheme.get("maximum_number_function_evaluations", None) + # ftol = scheme.get("ftol", 1e-8) + # gtol = scheme.get("gtol", 1e-8) + # xtol = scheme.get("xtol", 1e-8) + # group = scheme.get("group", False) + # group_tolerance = scheme.get("group_tolerance", 0.0) + # saving = SavingOptions(**scheme.get("saving", {})) + # return Scheme( + # model=model, + # parameters=parameters, + # data=data, + # non_negative_least_squares=nnls, + # maximum_number_function_evaluations=nfev, + # ftol=ftol, + # gtol=gtol, + # xtol=xtol, + # group=group, + # group_tolerance=group_tolerance, + # optimization_method=optimization_method, + # saving=saving, + # ) def save_scheme(self, scheme: Scheme, file_name: str): file_name = pathlib.Path(file_name) - scheme_dict = dataclasses.asdict( - dataclasses.replace( - scheme, - model=str(file_name.with_name("model.yml")), - parameters=str(file_name.with_name("initial_parameters.csv")), - data={label: str(file_name.with_name(f"{label}.nc")) for label in scheme.data}, - ) - ) + scheme_dict = asdict(scheme) _write_dict(file_name, scheme_dict) def save_model(self, model: Model, file_name: str): @@ -165,7 +159,8 @@ def save_model(self, model: Model, file_name: str): for item in item_iterator: for prop_name, prop in item.items(): if isinstance(prop, dict) and any(isinstance(k, tuple) for k in prop): - item[prop_name] = {str(k): v for k, v in prop} + keys = [f"({k[0]}, {k[1]})" for k in prop] + item[prop_name] = {f"{k}": v for k, v in zip(keys, prop.values())} _write_dict(file_name, model_dict) def save_result(self, result: Result, file_name: str): diff --git a/glotaran/project/dataclasses.py b/glotaran/project/dataclasses.py index 7c7b60196..b0fa99860 100644 --- a/glotaran/project/dataclasses.py +++ b/glotaran/project/dataclasses.py @@ -2,21 +2,16 @@ from __future__ import annotations import dataclasses +from pathlib import Path from typing import Any +from typing import Callable -def serialize_to_file_name_field( - file_name: str, default: Any = dataclasses.MISSING -) -> dataclasses.Field: - """Create a dataclass field with file_name as metadata. - - The field will be replace with the file_name as value. within - :function:``glotaran.project.dataclasses.asdict``. +def exclude_from_dict_field(default: Any = dataclasses.MISSING) -> dataclasses.Field: + """Create a dataclass field with which will be excluded from ``asdict``. Parameters ---------- - file_name : str - The file_name with which the field gets replaced in asdict. default : Any The default value of the field. @@ -25,21 +20,20 @@ def serialize_to_file_name_field( dataclasses.Field The created field. """ - return dataclasses.field(default=default, metadata={"file_name": file_name}) + return dataclasses.field(default=default, metadata={"exclude_from_dict": True}) -def serialize_to_file_name_dict_field( - file_suffix: str, default: Any = dataclasses.MISSING +def file_representation_field( + target: str, loader: Callable[[str], Any], default: Any = dataclasses.MISSING ) -> dataclasses.Field: - """Create a dataclass field with file_name as metadata. - - The field will be replace with the file_name as value. within - :function:``glotaran.project.dataclasses.asdict``. + """Creates a dataclass field with target and loader as metadata. Parameters ---------- - file_name : str - The file_name with which the field gets replaced in asdict. + target : str + The name of the represented field. + loader : Callable[[str] + A function to load the target field froma file. default : Any The default value of the field. @@ -48,14 +42,11 @@ def serialize_to_file_name_dict_field( dataclasses.Field The created field. """ - return dataclasses.field(default=default, metadata={"file_suffix": file_suffix}) + return dataclasses.field(default=default, metadata={"target": target, "loader": loader}) def asdict(dataclass: object) -> dict[str, Any]: - """Create a dictinory from a dataclass. - - A wrappper for ``dataclasses.asdict`` which recognizes fields created - with :function:``glotaran.project.dataclasses.serialize_to_file_name_field``. + """Creates a dictinory containing all dfields of the dataclass. Parameters ---------- @@ -69,13 +60,54 @@ def asdict(dataclass: object) -> dict[str, Any]: """ fields = dataclasses.fields(dataclass) - def dict_factory(values: list): - for i, field in enumerate(fields): - if "file_name" in field.metadata: - values[i] = (field.name, field.metadata["file_name"]) - elif "file_suffix" in field.metadata: - file_suffix = field.metadata["file_name"] - values[i] = (field.name, {key: f"{key}.{file_suffix}" for key in values[i][1]}) - return dict(values) + dataclass_dict = {} + for field in fields: + if "exclude_from_dict" not in field.metadata: + value = getattr(dataclass, field.name) + dataclass_dict[field.name] = ( + asdict(value) if dataclasses.is_dataclass(value) else value + ) + + return dataclass_dict + + +def fromdict(dataclass_type: type, dataclass_dict: dict, folder: Path = None) -> object: + """Creates a dataclass instance from a dict and loads all file represented fields. - return dataclasses.asdict(dataclass, dict_factory=dict_factory) + Parameters + ---------- + dataclass_type : type + A dataclass type. + dataclass_dict : dict + A dict for instancing the the dataclass. + folder : Path + The root folder for file paths. If ``None`` file paths are consider absolute. + + Returns + ------- + object + Created instance of dataclass_type. + """ + fields = dataclasses.fields(dataclass_type) + + for field in fields: + if "target" in field.metadata and "loader" in field.metadata: + file_path = dataclass_dict.get(field.name) + if file_path is None: + continue + elif isinstance(file_path, list): + dataclass_dict[field.metadata["target"]] = [ + field.metadata["loader"](f if folder is None else folder / f) + for f in file_path + ] + elif isinstance(file_path, dict): + dataclass_dict[field.metadata["target"]] = { + k: field.metadata["loader"](f if folder is None else folder / f) + for k, f in file_path.items() + } + else: + dataclass_dict[field.metadata["target"]] = field.metadata["loader"]( + file_path if folder is None else folder / file_path + ) + + return dataclass_type(**dataclass_dict) diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 6d59a61ac..76d39a6c0 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -58,7 +58,23 @@ def __post_init__(self): pass @classmethod - def create(cls, name: str | None = None, folder: str | Path | None = None): + def create(cls, name: str | None = None, folder: str | Path | None = None) -> Project: + """Creates a new project. + + Parameters + ---------- + name : str | None + The name of the project. If ``None``, the name of the project folder will be used. + folder : str | Path | None + The folder where the project will be created. If ``None``, the current work + directory will be used. + + Returns + ------- + Project : + The created project. + + """ if folder is None: folder = getcwd() project_folder = Path(folder) @@ -225,7 +241,7 @@ def create_scheme( data = self.data datasets = {} - for dataset in load_model(model).dataset: + for dataset in load_model(model).dataset: # type: ignore if dataset not in data: raise ValueError(f"Data missing for dataset '{dataset}'") datasets[dataset] = str(data[dataset]) diff --git a/glotaran/project/scheme.py b/glotaran/project/scheme.py index 0c5b6c678..0a5aafbe0 100644 --- a/glotaran/project/scheme.py +++ b/glotaran/project/scheme.py @@ -6,10 +6,12 @@ from typing import TYPE_CHECKING from glotaran.deprecation import deprecate +from glotaran.io import load_dataset +from glotaran.io import load_model +from glotaran.io import load_parameters from glotaran.io import load_scheme -from glotaran.project.dataclasses import asdict -from glotaran.project.dataclasses import serialize_to_file_name_dict_field -from glotaran.project.dataclasses import serialize_to_file_name_field +from glotaran.project.dataclasses import exclude_from_dict_field +from glotaran.project.dataclasses import file_representation_field from glotaran.utils.ipython import MarkdownStr if TYPE_CHECKING: @@ -42,13 +44,12 @@ class Scheme: A scheme also holds options for optimization. """ - model: Model = serialize_to_file_name_field("model.yml") # type: ignore - parameters: ParameterGroup = serialize_to_file_name_field( - "initial_parameters.csv" - ) # type: ignore - data: dict[str, xr.DataArray | xr.Dataset] = serialize_to_file_name_dict_field( - "nc" - ) # type: ignore + model: Model = exclude_from_dict_field() # type: ignore + parameters: ParameterGroup = exclude_from_dict_field() # type: ignore + data: dict[str, xr.DataArray | xr.Dataset] = exclude_from_dict_field() # type: ignore + model_file: str = file_representation_field("model", load_model, default=None) # type: ignore # noqa E501 + parameters_file: str = file_representation_field("parameters", load_parameters, None) # type: ignore # noqa E501 + data_files: dict[str, str] = file_representation_field("data", load_dataset, None) # type: ignore # noqa E501 group: bool | None = None group_tolerance: float = 0.0 non_negative_least_squares: bool = False diff --git a/glotaran/project/test/test_dataclasses.py b/glotaran/project/test/test_dataclasses.py index c0be49e60..f04b626e0 100644 --- a/glotaran/project/test/test_dataclasses.py +++ b/glotaran/project/test/test_dataclasses.py @@ -1,25 +1,37 @@ from dataclasses import dataclass from glotaran.project.dataclasses import asdict -from glotaran.project.dataclasses import serialize_to_file_name_field +from glotaran.project.dataclasses import exclude_from_dict_field +from glotaran.project.dataclasses import file_representation_field +from glotaran.project.dataclasses import fromdict + + +def dummy_loader(file: str) -> int: + return {"foo.file": 21, "bar.file": 42}[file] def test_serialize_to_file_name_field(): @dataclass class DummyDataclass: - foo: int = serialize_to_file_name_field("foo.file") - bar: int = serialize_to_file_name_field("bar.file", default=42) + foo: int = exclude_from_dict_field() + foo_file: int = file_representation_field("foo", dummy_loader) + bar: int = exclude_from_dict_field(default=42) + bar_file: int = file_representation_field("bar", dummy_loader, default="bar.file") baz: int = 84 - dummy_class = DummyDataclass(foo=21) + dummy_class = DummyDataclass(foo=21, foo_file="foo.file") dummy_class_dict = asdict(dummy_class) - assert dummy_class_dict["foo"] == "foo.file" - assert dummy_class_dict["foo"] != dummy_class.foo + assert "foo" not in dummy_class_dict + assert dummy_class_dict["foo_file"] == "foo.file" - assert dummy_class_dict["bar"] == "bar.file" - assert dummy_class_dict["bar"] != dummy_class.bar + assert "bar" not in dummy_class_dict + assert dummy_class_dict["bar_file"] == "bar.file" assert dummy_class_dict["baz"] == 84 assert dummy_class_dict["baz"] == dummy_class.baz + + loaded = fromdict(DummyDataclass, dummy_class_dict) + + assert loaded == dummy_class From ce3c74ae4e0c2b3ea50e41e884a5ec7a7d05686a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Thu, 19 Aug 2021 15:49:10 +0200 Subject: [PATCH 29/32] Added save/load result file to io --- .../builtin/io/yml/test/test_save_result.py | 18 +- glotaran/builtin/io/yml/yml.py | 117 +----------- glotaran/io/__init__.py | 2 + glotaran/io/interface.py | 39 +++- .../plugin_system/project_io_registration.py | 67 ++++++- .../test/test_project_io_registration.py | 4 +- glotaran/project/dataclasses.py | 10 +- glotaran/project/result.py | 179 ++++++++++-------- glotaran/project/test/test_scheme.py | 6 +- 9 files changed, 222 insertions(+), 220 deletions(-) diff --git a/glotaran/builtin/io/yml/test/test_save_result.py b/glotaran/builtin/io/yml/test/test_save_result.py index 49c9687c9..2a987c428 100644 --- a/glotaran/builtin/io/yml/test/test_save_result.py +++ b/glotaran/builtin/io/yml/test/test_save_result.py @@ -5,10 +5,11 @@ import pytest +from glotaran import __version__ as gta_version from glotaran.analysis.optimize import optimize from glotaran.examples.sequential import scheme -from glotaran.io import load_result -from glotaran.io import save_result +from glotaran.io import load_result_file +from glotaran.io import save_result_file if TYPE_CHECKING: from py.path import local as TmpDir @@ -30,17 +31,16 @@ def test_save_result_yml( """Check all files exist.""" result_path = Path(tmpdir / "testresult.yml") - save_result(file_name=result_path, format_name="yml", result=dummy_result) + save_result_file(file_name=result_path, format_name="yml", result=dummy_result) assert result_path.exists() def test_load_result( - tmp_path, + tmpdir: TmpDir, dummy_result: Result, ): - path = tmp_path / "test_result" - dummy_result.save(path) - result_path = path / "glotaran_result.yml" - print(result_path) # noqa T001 - load_result(result_path) + result_path = tmpdir / "glotaran_result.yml" + save_result_file(dummy_result, result_path) + loaded = load_result_file(result_path) + assert loaded.glotaran_version == gta_version diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index 41f8f08bf..3503984bf 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -1,13 +1,11 @@ from __future__ import annotations -import dataclasses import pathlib import yaml from glotaran.deprecation.modules.builtin_io_yml import model_spec_deprecations from glotaran.io import ProjectIoInterface -from glotaran.io import load_scheme from glotaran.io import register_project_io from glotaran.model import Model from glotaran.parameter import ParameterGroup @@ -55,14 +53,9 @@ def load_model(self, file_name: str) -> Model: return Model.from_dict(spec, megacomplex_types=None, default_megacomplex_type=None) - def load_result(self, file_name: str) -> Result: - + def load_result_file(self, file_name: str) -> Result: spec = self._load_yml(file_name) - - spec["scheme"] = load_scheme(spec["scheme"]) - spec["data"] = spec["scheme"].data - - return Result(**spec) + return fromdict(Result, spec) def load_parameters(self, file_name: str) -> ParameterGroup: @@ -77,72 +70,6 @@ def load_scheme(self, file_name: str) -> Scheme: spec = self._load_yml(file_name) file_path = pathlib.Path(file_name) return fromdict(Scheme, spec, folder=file_path.parent) - # if self.format == "yml_str": - # yml = file_name - # else: - # try: - # with open(file_name) as f: - # yml = f.read() - # except Exception as e: - # raise OSError(f"Error opening scheme: {e}") - # - # try: - # scheme = yaml.safe_load(yml) - # except Exception as e: - # raise ValueError(f"Error parsing scheme: {e}") - # - # if "model" not in scheme: - # raise ValueError("Model file not specified.") - # - # try: - # model = load_model(scheme["model"]) - # except Exception as e: - # raise ValueError(f"Error loading model: {e}") - # - # if "parameters" not in scheme: - # raise ValueError("Parameters file not specified.") - # - # try: - # parameters = load_parameters(scheme["parameters"]) - # except Exception as e: - # raise ValueError(f"Error loading parameters: {e}") - # - # if "data" not in scheme: - # raise ValueError("No data specified.") - # - # data = {} - # for label, path in scheme["data"].items(): - # data_format = scheme.get("data_format", None) - # path = str(pathlib.Path(path).resolve()) - # - # try: - # data[label] = load_dataset(path, format_name=data_format) - # except Exception as e: - # raise ValueError(f"Error loading dataset '{label}': {e}") - # - # optimization_method = scheme.get("optimization_method", "TrustRegionReflection") - # nnls = scheme.get("non_negative_least_squares", False) - # nfev = scheme.get("maximum_number_function_evaluations", None) - # ftol = scheme.get("ftol", 1e-8) - # gtol = scheme.get("gtol", 1e-8) - # xtol = scheme.get("xtol", 1e-8) - # group = scheme.get("group", False) - # group_tolerance = scheme.get("group_tolerance", 0.0) - # saving = SavingOptions(**scheme.get("saving", {})) - # return Scheme( - # model=model, - # parameters=parameters, - # data=data, - # non_negative_least_squares=nnls, - # maximum_number_function_evaluations=nfev, - # ftol=ftol, - # gtol=gtol, - # xtol=xtol, - # group=group, - # group_tolerance=group_tolerance, - # optimization_method=optimization_method, - # saving=saving, - # ) def save_scheme(self, scheme: Scheme, file_name: str): file_name = pathlib.Path(file_name) @@ -163,43 +90,9 @@ def save_model(self, model: Model, file_name: str): item[prop_name] = {f"{k}": v for k, v in zip(keys, prop.values())} _write_dict(file_name, model_dict) - def save_result(self, result: Result, file_name: str): - options = result.scheme.saving - - result_file_path = pathlib.Path(file_name) - if result_file_path.exists(): - raise FileExistsError(f"The path '{file_name}' is already existing.") - - scheme_path = result_file_path.with_name("scheme.yml") - - parameters_format = options.parameter_format - initial_parameters_path = result_file_path.with_name( - f"initial_parameters.{parameters_format}" - ) - optimized_parameters_path = result_file_path.with_name( - f"optimized_parameters.{parameters_format}" - ) - - dataset_format = options.data_format - data_paths = { - label: str(result_file_path.with_name(f"{label}.{dataset_format}")) - for label in result.data - } - - result_dict = dataclasses.asdict( - dataclasses.replace( - result, - scheme=str(scheme_path), - initial_parameters=str(initial_parameters_path), - optimized_parameters=str(optimized_parameters_path), - data=data_paths, - ) - ) - del result_dict["additional_penalty"] - del result_dict["cost"] - del result_dict["jacobian"] - del result_dict["covariance_matrix"] - _write_dict(result_file_path, result_dict) + def save_result_file(self, result: Result, file_name: str): + result_dict = asdict(result) + _write_dict(file_name, result_dict) def _load_yml(self, file_name: str) -> dict: if self.format == "yml_str": diff --git a/glotaran/io/__init__.py b/glotaran/io/__init__.py index 07cd786f6..e7eebe67f 100644 --- a/glotaran/io/__init__.py +++ b/glotaran/io/__init__.py @@ -21,12 +21,14 @@ from glotaran.plugin_system.project_io_registration import load_model from glotaran.plugin_system.project_io_registration import load_parameters from glotaran.plugin_system.project_io_registration import load_result +from glotaran.plugin_system.project_io_registration import load_result_file from glotaran.plugin_system.project_io_registration import load_scheme from glotaran.plugin_system.project_io_registration import project_io_plugin_table from glotaran.plugin_system.project_io_registration import register_project_io from glotaran.plugin_system.project_io_registration import save_model from glotaran.plugin_system.project_io_registration import save_parameters from glotaran.plugin_system.project_io_registration import save_result +from glotaran.plugin_system.project_io_registration import save_result_file from glotaran.plugin_system.project_io_registration import save_scheme from glotaran.plugin_system.project_io_registration import set_project_plugin from glotaran.plugin_system.project_io_registration import show_project_io_method_help diff --git a/glotaran/io/interface.py b/glotaran/io/interface.py index 9035e9f15..85da23e52 100644 --- a/glotaran/io/interface.py +++ b/glotaran/io/interface.py @@ -199,7 +199,42 @@ def save_scheme(self, scheme: Scheme, file_name: str): """ raise NotImplementedError(f"Cannot save scheme with format {self.format!r}") - def load_result(self, file_name: str) -> Result: + def load_result(self, folder: str) -> Result: + """Create a Result instance from a result folder (**NOT IMPLEMENTED**). + + Parameters + ---------- + folder : str + Folder containing the result specs. + + Returns + ------- + Result + Result instance created from the file. + + + .. # noqa: DAR202 + .. # noqa: DAR401 + """ + raise NotImplementedError(f"Cannot read result with format {self.format!r}") + + def save_result(self, result: Result, folder: str): + """Save a Result instance to a folder (**NOT IMPLEMENTED**). + + Parameters + ---------- + result : Result + Result instance to save to specs file. + folder : str + Folder to write the result specs to. + + + .. # noqa: DAR101 + .. # noqa: DAR401 + """ + raise NotImplementedError(f"Cannot save result with format {self.format!r}") + + def load_result_file(self, file_name: str) -> Result: """Create a Result instance from the specs defined in a file (**NOT IMPLEMENTED**). Parameters @@ -218,7 +253,7 @@ def load_result(self, file_name: str) -> Result: """ raise NotImplementedError(f"Cannot read result with format {self.format!r}") - def save_result(self, result: Result, file_name: str): + def save_result_file(self, result: Result, file_name: str): """Save a Result instance to a spec file (**NOT IMPLEMENTED**). Parameters diff --git a/glotaran/plugin_system/project_io_registration.py b/glotaran/plugin_system/project_io_registration.py index 30f194a0b..9f95564de 100644 --- a/glotaran/plugin_system/project_io_registration.py +++ b/glotaran/plugin_system/project_io_registration.py @@ -364,7 +364,9 @@ def save_scheme( @not_implemented_to_value_error -def load_result(file_name: str | PathLike[str], format_name: str = None, **kwargs: Any) -> Result: +def load_result_file( + file_name: str | PathLike[str], format_name: str = None, **kwargs: Any +) -> Result: """Create a :class:`Result` instance from the specs defined in a file. Parameters @@ -384,11 +386,11 @@ def load_result(file_name: str | PathLike[str], format_name: str = None, **kwarg :class:`Result` instance created from the saved format. """ io = get_project_io(format_name or inferr_file_format(file_name)) - return io.load_result(str(file_name), **kwargs) # type: ignore[call-arg] + return io.load_result_file(str(file_name), **kwargs) # type: ignore[call-arg] @not_implemented_to_value_error -def save_result( +def save_result_file( result: Result, file_name: str | PathLike[str], format_name: str = None, @@ -417,13 +419,70 @@ def save_result( io = get_project_io( format_name or inferr_file_format(file_name, needs_to_exist=False, allow_folder=True) ) - io.save_result( # type: ignore[call-arg] + io.save_result_file( # type: ignore[call-arg] file_name=str(file_name), result=result, **kwargs, ) +@not_implemented_to_value_error +def load_result(folder: str | PathLike[str], format_name: str, **kwargs: Any) -> Result: + """Create a :class:`Result` instance from the specs defined in a folder. + + Parameters + ---------- + folder : str | PathLike[str] + Path containing the result data. + format_name : str + Format the result is in. + **kwargs : Any + Additional keyword arguments passes to the ``load_result`` implementation + of the project io plugin. + + Returns + ------- + Result + :class:`Result` instance created from the saved format. + """ + io = get_project_io(format_name) + return io.load_result(str(folder), **kwargs) # type: ignore[call-arg] + + +@not_implemented_to_value_error +def save_result( + result: Result, + folder: str | PathLike[str], + format_name: str, + *, + allow_overwrite: bool = False, + **kwargs: Any, +) -> None: + """Write a :class:`Result` instance to a spec file. + + Parameters + ---------- + result : Result + :class:`Result` instance to write. + folder : str | PathLike[str] + Path to write the result data to. + format_name : str + Format the result should be saved in. + allow_overwrite : bool + Whether or not to allow overwriting existing files, by default False + **kwargs : Any + Additional keyword arguments passes to the ``save_result`` implementation + of the project io plugin. + """ + protect_from_overwrite(folder, allow_overwrite=allow_overwrite) + io = get_project_io(format_name) + io.save_result( # type: ignore[call-arg] + folder=str(folder), + result=result, + **kwargs, + ) + + def get_project_io_method(format_name: str, method_name: ProjectIoMethods) -> Callable[..., Any]: """Retrieve implementation of project io functionality for the format 'format_name'. diff --git a/glotaran/plugin_system/test/test_project_io_registration.py b/glotaran/plugin_system/test/test_project_io_registration.py index 03de607ef..07cd81aa4 100644 --- a/glotaran/plugin_system/test/test_project_io_registration.py +++ b/glotaran/plugin_system/test/test_project_io_registration.py @@ -102,10 +102,10 @@ def save_scheme( # type:ignore[override] } ) - def load_result(self, file_name: str | PathLike[str], **kwargs: Any) -> Result: + def load_result_file(self, file_name: str | PathLike[str], **kwargs: Any) -> Result: return {"file_name": file_name, **kwargs} # type:ignore[return-value] - def save_result( # type:ignore[override] + def save_result_file( # type:ignore[override] self, result: Result, result_path: str | PathLike[str], diff --git a/glotaran/project/dataclasses.py b/glotaran/project/dataclasses.py index b0fa99860..74b557ae6 100644 --- a/glotaran/project/dataclasses.py +++ b/glotaran/project/dataclasses.py @@ -26,7 +26,7 @@ def exclude_from_dict_field(default: Any = dataclasses.MISSING) -> dataclasses.F def file_representation_field( target: str, loader: Callable[[str], Any], default: Any = dataclasses.MISSING ) -> dataclasses.Field: - """Creates a dataclass field with target and loader as metadata. + """Create a dataclass field with target and loader as metadata. Parameters ---------- @@ -46,7 +46,7 @@ def file_representation_field( def asdict(dataclass: object) -> dict[str, Any]: - """Creates a dictinory containing all dfields of the dataclass. + """Create a dictinory containing all dfields of the dataclass. Parameters ---------- @@ -55,7 +55,7 @@ def asdict(dataclass: object) -> dict[str, Any]: Returns ------- - dict[str, Any] + dict[str, Any] : The dataclass represented as a dictionary. """ fields = dataclasses.fields(dataclass) @@ -72,7 +72,7 @@ def asdict(dataclass: object) -> dict[str, Any]: def fromdict(dataclass_type: type, dataclass_dict: dict, folder: Path = None) -> object: - """Creates a dataclass instance from a dict and loads all file represented fields. + """Create a dataclass instance from a dict and loads all file represented fields. Parameters ---------- @@ -109,5 +109,7 @@ def fromdict(dataclass_type: type, dataclass_dict: dict, folder: Path = None) -> dataclass_dict[field.metadata["target"]] = field.metadata["loader"]( file_path if folder is None else folder / file_path ) + elif dataclasses.is_dataclass(field.default) and field.name in dataclass_dict: + dataclass_dict[field.name] = type(field.default)(**dataclass_dict[field.name]) return dataclass_type(**dataclass_dict) diff --git a/glotaran/project/result.py b/glotaran/project/result.py index 9183743f7..a8b8bb3bd 100644 --- a/glotaran/project/result.py +++ b/glotaran/project/result.py @@ -3,7 +3,6 @@ from dataclasses import dataclass from dataclasses import replace -from pathlib import Path from typing import Any import numpy as np @@ -12,14 +11,14 @@ from tabulate import tabulate from glotaran.deprecation import deprecate -from glotaran.io import save_dataset -from glotaran.io import save_model -from glotaran.io import save_result -from glotaran.io import save_scheme +from glotaran.io import load_dataset +from glotaran.io import load_parameters +from glotaran.io import load_scheme from glotaran.model import Model from glotaran.parameter import ParameterGroup +from glotaran.project.dataclasses import exclude_from_dict_field +from glotaran.project.dataclasses import file_representation_field from glotaran.project.scheme import Scheme -from glotaran.project.scheme import default_data_filters from glotaran.utils.ipython import MarkdownStr @@ -27,22 +26,8 @@ class Result: """The result of a global analysis.""" - data: dict[str, xr.Dataset] - """The resulting data as a dictionary of :xarraydoc:`Dataset`. - - Notes - ----- - The actual content of the data depends on the actual model and can be found in the - documentation for the model. - """ - free_parameter_labels: list[str] - """List of labels of the free parameters used in optimization.""" number_of_function_evaluations: int """The number of function evaluations.""" - initial_parameters: ParameterGroup - optimized_parameters: ParameterGroup - """The optimized parameters, organized in a :class:`ParameterGroup`""" - scheme: Scheme success: bool """Indicates if the optimization was successful.""" termination_reason: str @@ -51,25 +36,51 @@ class Result: glotaran_version: str """The glotaran version used to create the result.""" + scheme: Scheme = exclude_from_dict_field(None) # type: ignore + scheme_file: str | None = file_representation_field("scheme", load_scheme, None) # type: ignore # noqa E501 + + initial_parameters: ParameterGroup = exclude_from_dict_field(None) # type: ignore + initial_parameters_file: str | None = file_representation_field( + "initial_parameters", load_parameters, None + ) # type: ignore + + optimized_parameters: ParameterGroup = exclude_from_dict_field(None) # type: ignore + """The optimized parameters, organized in a :class:`ParameterGroup`""" + optimized_parameters_file: str | None = file_representation_field( + "optimized_parameters", load_parameters, None + ) # type: ignore + + data: dict[str, xr.Dataset] = exclude_from_dict_field(None) # type: ignore + """The resulting data as a dictionary of :xarraydoc:`Dataset`. + + Notes + ----- + The actual content of the data depends on the actual model and can be found in the + documentation for the model. + """ + data_files: dict[str, str] | None = file_representation_field("data", load_dataset, None) # type: ignore # noqa E501 + + free_parameter_labels: list[str] = exclude_from_dict_field(None) # type: ignore + """List of labels of the free parameters used in optimization.""" # The below can be none in case of unsuccessful optimization - additional_penalty: np.ndarray | None = None + additional_penalty: np.ndarray | None = exclude_from_dict_field(None) # type: ignore """A vector with the value for each additional penalty, or None""" - cost: ArrayLike | None = None + cost: ArrayLike | None = exclude_from_dict_field(None) # type: ignore """The final cost.""" chi_square: float | None = None r"""The chi-square of the optimization. :math:`\chi^2 = \sum_i^N [{Residual}_i]^2`.""" - covariance_matrix: ArrayLike | list | None = None + covariance_matrix: ArrayLike | list | None = exclude_from_dict_field(None) # type: ignore """Covariance matrix. The rows and columns are corresponding to :attr:`free_parameter_labels`.""" degrees_of_freedom: int | None = None """Degrees of freedom in optimization :math:`N - N_{vars}`.""" - jacobian: ArrayLike | list | None = None + jacobian: ArrayLike | list | None = exclude_from_dict_field(None) # type: ignore """Modified Jacobian matrix at the solution See also: :func:`scipy.optimize.least_squares` @@ -244,66 +255,66 @@ def get_dataset(self, dataset_label: str) -> xr.Dataset: except KeyError: raise ValueError(f"Unknown dataset '{dataset_label}'") - def save(self, result_path: str | Path, overwrite: bool = False) -> None: - """Save the result to a given folder. - - Returns a list with paths of all saved items. - The following files are saved: - * `result.md`: The result with the model formatted as markdown text. - * `optimized_parameters.csv`: The optimized parameter as csv file. - * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. - - Parameters - ---------- - result_path : str | Path - The path to the folder in which to save the result. - overwrite : bool - Weather to overwrite an existing folder. - - Raises - ------ - ValueError - If ``result_path`` is a file. - FileExistsError - If ``result_path`` exists and ``overwrite`` is ``False``. - """ - result_path = Path(result_path) if isinstance(result_path, str) else result_path - if result_path.exists() and not overwrite: - raise FileExistsError(f"The path '{result_path}' exists.") - else: - result_path.mkdir() - if not result_path.is_dir(): - raise ValueError(f"The path '{result_path}' is not a directory.") - - result_file_path = result_path / "glotaran_result.yml" - save_result(self, result_file_path) - - scheme_path = result_path / "scheme.yml" - save_scheme(self.scheme, scheme_path) - - model_path = result_path / "model.yml" - save_model(self.scheme.model, model_path) - - initial_parameters_path = result_path / "initial_parameters.csv" - self.initial_parameters.to_csv(initial_parameters_path) - - optimized_parameters_path = result_path / "optimized_parameters.csv" - self.optimized_parameters.to_csv(optimized_parameters_path) - - save_level = self.scheme.saving.level - data_filter = self.scheme.saving.data_filter or default_data_filters[save_level] - datasets = {} - for label, dataset in self.data.items(): - dataset_path = result_path / f"{label}.nc" - datasets[label] = dataset_path - if data_filter is not None: - dataset = dataset[data_filter] - save_dataset(dataset, dataset_path) - - if self.scheme.saving.report: - report_path = result_path / "result.md" - with open(report_path, "w") as f: - f.write(str(self.markdown())) + # def save(self, result_path: str | Path, overwrite: bool = False) -> None: + # """Save the result to a given folder. + # + # Returns a list with paths of all saved items. + # The following files are saved: + # * `result.md`: The result with the model formatted as markdown text. + # * `optimized_parameters.csv`: The optimized parameter as csv file. + # * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. + # + # Parameters + # ---------- + # result_path : str | Path + # The path to the folder in which to save the result. + # overwrite : bool + # Weather to overwrite an existing folder. + # + # Raises + # ------ + # ValueError + # If ``result_path`` is a file. + # FileExistsError + # If ``result_path`` exists and ``overwrite`` is ``False``. + # """ + # result_path = Path(result_path) if isinstance(result_path, str) else result_path + # if result_path.exists() and not overwrite: + # raise FileExistsError(f"The path '{result_path}' exists.") + # else: + # result_path.mkdir() + # if not result_path.is_dir(): + # raise ValueError(f"The path '{result_path}' is not a directory.") + # + # result_file_path = result_path / "glotaran_result.yml" + # save_result(self, result_file_path) + # + # scheme_path = result_path / "scheme.yml" + # save_scheme(self.scheme, scheme_path) + # + # model_path = result_path / "model.yml" + # save_model(self.scheme.model, model_path) + # + # initial_parameters_path = result_path / "initial_parameters.csv" + # self.initial_parameters.to_csv(initial_parameters_path) + # + # optimized_parameters_path = result_path / "optimized_parameters.csv" + # self.optimized_parameters.to_csv(optimized_parameters_path) + # + # save_level = self.scheme.saving.level + # data_filter = self.scheme.saving.data_filter or default_data_filters[save_level] + # datasets = {} + # for label, dataset in self.data.items(): + # dataset_path = result_path / f"{label}.nc" + # datasets[label] = dataset_path + # if data_filter is not None: + # dataset = dataset[data_filter] + # save_dataset(dataset, dataset_path) + # + # if self.scheme.saving.report: + # report_path = result_path / "result.md" + # with open(report_path, "w") as f: + # f.write(str(self.markdown())) def recreate(self) -> Result: """Recrate a resulf from the initial parameters. diff --git a/glotaran/project/test/test_scheme.py b/glotaran/project/test/test_scheme.py index 2cc826039..1a83bfa46 100644 --- a/glotaran/project/test/test_scheme.py +++ b/glotaran/project/test/test_scheme.py @@ -33,11 +33,11 @@ def mock_scheme(tmpdir): ).to_netcdf(dataset_path) scheme = f""" - model: {model_path} - parameters: {parameter_path} + model_file: {model_path} + parameters_file: {parameter_path} non_negative_least_squares: True maximum_number_function_evaluations: 42 - data: + data_files: dataset1: {dataset_path} saving: From 1765841d5423e11f70cc4fda59ac67938637e2e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Wei=C3=9Fenborn?= Date: Thu, 19 Aug 2021 15:57:18 +0200 Subject: [PATCH 30/32] Added data_filters to save_dataset --- glotaran/builtin/io/netCDF/netCDF.py | 26 ++++++------------- .../plugin_system/data_io_registration.py | 11 +++++--- 2 files changed, 15 insertions(+), 22 deletions(-) diff --git a/glotaran/builtin/io/netCDF/netCDF.py b/glotaran/builtin/io/netCDF/netCDF.py index 69f9b6b54..68f92bfe6 100644 --- a/glotaran/builtin/io/netCDF/netCDF.py +++ b/glotaran/builtin/io/netCDF/netCDF.py @@ -1,11 +1,11 @@ from __future__ import annotations +import os + import xarray as xr from glotaran.io import DataIoInterface from glotaran.io import register_data_io -from glotaran.project import SavingOptions -from glotaran.project import default_data_filters @register_data_io("nc") @@ -17,22 +17,12 @@ def save_dataset( self, dataset: xr.Dataset, file_name: str, + data_filters: list[str] | None = None, *, - saving_options: SavingOptions = SavingOptions(), + allow_overwrite: bool = False, ): + if not allow_overwrite and os.path.exists(file_name): + raise FileExistsError - data_to_save = dataset - - data_filter = ( - saving_options.data_filter - if saving_options.data_filter is not None - else default_data_filters[saving_options.level] - ) - - if data_filter is not None: - - data_to_save = xr.Dataset() - for item in data_filter: - data_to_save[item] = dataset[item] - - data_to_save.to_netcdf(file_name) + data_to_save = dataset if data_filters is None else dataset[data_filters] + data_to_save.to_netcdf(file_name, mode="w") diff --git a/glotaran/plugin_system/data_io_registration.py b/glotaran/plugin_system/data_io_registration.py index 732002254..18d45896f 100644 --- a/glotaran/plugin_system/data_io_registration.py +++ b/glotaran/plugin_system/data_io_registration.py @@ -171,7 +171,7 @@ def get_data_io(format_name: str) -> DataIoInterface: @not_implemented_to_value_error def load_dataset( - file_name: str | PathLike[str], format_name: str = None, **kwargs: Any + file_name: str | PathLike[str], format_name: str | None = None, **kwargs: Any ) -> xr.Dataset | xr.DataArray: """Read data from a file to :xarraydoc:`Dataset` or :xarraydoc:`DataArray`. @@ -179,7 +179,7 @@ def load_dataset( ---------- file_name : str | PathLike[str] File containing the data. - format_name : str + format_name : str | None Format the file is in, if not provided it will be inferred from the file extension. **kwargs : Any Additional keyword arguments passes to the ``read_dataset`` implementation @@ -199,7 +199,8 @@ def load_dataset( def save_dataset( dataset: xr.Dataset | xr.DataArray, file_name: str | PathLike[str], - format_name: str = None, + format_name: str | None = None, + data_filters: list[str] | None = None, *, allow_overwrite: bool = False, **kwargs: Any, @@ -212,8 +213,10 @@ def save_dataset( Data to be written to file. file_name : str | PathLike[str] File to write the data to. - format_name : str + format_name : str | None Format the file should be in, if not provided it will be inferred from the file extension. + data_filters : list[str] | None + Optional list of items in the dataset to be saved. allow_overwrite : bool Whether or not to allow overwriting existing files, by default False **kwargs : Any From 968ebb8a000940a91f6477f7cfa4d25e8677f38e Mon Sep 17 00:00:00 2001 From: Joern Weissenborn Date: Fri, 20 Aug 2021 14:42:47 +0200 Subject: [PATCH 31/32] Readded folder plugin --- glotaran/builtin/io/folder/__init__.py | 1 + glotaran/builtin/io/folder/folder_plugin.py | 94 +++++++++++++++++++ .../io/folder/test/test_folder_plugin.py | 66 +++++++++++++ glotaran/builtin/io/yml/yml.py | 42 +++++++++ glotaran/io/__init__.py | 3 + glotaran/io/interface.py | 25 +++-- glotaran/model/model.py | 4 +- .../plugin_system/project_io_registration.py | 39 +++++--- glotaran/project/__init__.py | 3 +- glotaran/project/project.py | 20 ++-- glotaran/project/scheme.py | 14 --- glotaran/project/test/test_scheme.py | 14 +-- setup.cfg | 3 +- 13 files changed, 266 insertions(+), 62 deletions(-) create mode 100644 glotaran/builtin/io/folder/__init__.py create mode 100644 glotaran/builtin/io/folder/folder_plugin.py create mode 100644 glotaran/builtin/io/folder/test/test_folder_plugin.py diff --git a/glotaran/builtin/io/folder/__init__.py b/glotaran/builtin/io/folder/__init__.py new file mode 100644 index 000000000..41b2c688f --- /dev/null +++ b/glotaran/builtin/io/folder/__init__.py @@ -0,0 +1 @@ +"""Plugin to dump pyglotaran object as files in a folder.""" diff --git a/glotaran/builtin/io/folder/folder_plugin.py b/glotaran/builtin/io/folder/folder_plugin.py new file mode 100644 index 000000000..21722a2c8 --- /dev/null +++ b/glotaran/builtin/io/folder/folder_plugin.py @@ -0,0 +1,94 @@ +"""Implementation of the folder Io plugin. + +The current implementation is an exact copy of how ``Result.save(path)`` +worked in glotaran 0.3.x and meant as an compatibility function. +""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING + +from glotaran.io import save_dataset +from glotaran.io import save_model +from glotaran.io import save_parameters +from glotaran.io import save_result_file +from glotaran.io import save_scheme +from glotaran.io.interface import ProjectIoInterface +from glotaran.plugin_system.project_io_registration import register_project_io + +if TYPE_CHECKING: + from glotaran.io import SavingOptions + from glotaran.project import Result + + +@register_project_io(["folder", "legacy"]) +class FolderProjectIo(ProjectIoInterface): + """Project Io plugin to save result data to a folder. + + There won't be a serialization of the Result object, but simply + a markdown summary output and the important data saved to files. + """ + + def save_result( + self, result: Result, folder: str, saving_options: SavingOptions, allow_overwrite: bool + ): + """Save the result to a given folder. + + Returns a list with paths of all saved items. + The following files are saved: + * `result.md`: The result with the model formatted as markdown text. + * `optimized_parameters.csv`: The optimized parameter as csv file. + * `{dataset_label}.nc`: The result data for each dataset as NetCDF file. + + Parameters + ---------- + result : Result + Result instance to be saved. + folder : str + The path to the folder in which to save the result. + saving_options : SavingOptions + Options for saving the the result. + allow_overwrite : bool + Whether or not to allow overwriting existing files, by default False + + Raises + ------ + ValueError + If ``folder`` is a file. + FileExistsError + If ``folder`` is exists and ``allow_overwrite`` is ``False``. + """ + result_folder = Path(folder) + if not result_folder.exists(): + result_folder.mkdir() + elif result_folder.is_file(): + raise ValueError(f"The path '{result_folder}' is not a directory.") + elif not allow_overwrite: + raise FileExistsError + + if saving_options.report: + report_file = result_folder / "result.md" + with open(report_file, "w") as f: + f.write(str(result.markdown())) + + result.scheme.model_file = "model.yml" + save_model(result.scheme.model, result_folder / result.scheme.model_file) + result.scheme.parameters_file = "initial_parameters.csv" + result.initial_parameters_file = result.scheme.parameters_file + save_parameters(result.scheme.parameters, result_folder / result.scheme.parameters_file) + result.optimized_parameters_file = "optimized_parameters.csv" + save_parameters( + result.optimized_parameters, result_folder / result.optimized_parameters_file + ) + result.scheme_file = "scheme.yml" + save_scheme(result.scheme, result_folder / result.scheme_file) + + result.data_files = { + label: f"{label}.{saving_options.data_format}" for label in result.data + } + + for label, data_file in result.data_files.items(): + save_dataset(result.data[label], result_folder / data_file) + + save_result_file(result, result_folder / "glotaran_result.yml") diff --git a/glotaran/builtin/io/folder/test/test_folder_plugin.py b/glotaran/builtin/io/folder/test/test_folder_plugin.py new file mode 100644 index 000000000..f047eb3ab --- /dev/null +++ b/glotaran/builtin/io/folder/test/test_folder_plugin.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from glotaran.analysis.optimize import optimize +from glotaran.examples.sequential import scheme +from glotaran.io import save_result + +if TYPE_CHECKING: + + from py.path import local as TmpDir + + from glotaran.project.result import Result + + +@pytest.fixture(scope="session") +def dummy_result(): + """Dummy result for testing.""" + scheme.maximum_number_function_evaluations = 1 + yield optimize(scheme) + + +def test_save_result_folder( + tmpdir: TmpDir, + dummy_result: Result, +): + """Check all files exist.""" + + result_dir = tmpdir / "test_result" + save_result(dummy_result, str(result_dir), format_name="folder") + + assert result_dir.exists() + + wanted_files = [ + "result.md", + "glotaran_result.yml", + "scheme.yml", + "model.yml", + "initial_parameters.csv", + "optimized_parameters.csv", + "dataset_1.nc", + ] + for wanted in wanted_files: + assert (result_dir / wanted).exists() + + +# @pytest.mark.parametrize("format_name", ("folder", "legacy")) +# def test_save_result_folder_error_path_is_file( +# tmpdir: TmpDir, +# dummy_result: Result, +# format_name: Literal["folder", "legacy"], +# ): +# """Raise error if result_path is a file without extension and overwrite is true.""" +# +# result_dir = Path(tmpdir / "testresult") +# result_dir.touch() +# +# with pytest.raises(ValueError, match="The path '.+?' is not a directory."): +# save_result( +# result_path=str(result_dir), +# format_name=format_name, +# result=dummy_result, +# allow_overwrite=True, +# ) diff --git a/glotaran/builtin/io/yml/yml.py b/glotaran/builtin/io/yml/yml.py index 3503984bf..3ffd8bd7a 100644 --- a/glotaran/builtin/io/yml/yml.py +++ b/glotaran/builtin/io/yml/yml.py @@ -54,10 +54,34 @@ def load_model(self, file_name: str) -> Model: return Model.from_dict(spec, megacomplex_types=None, default_megacomplex_type=None) def load_result_file(self, file_name: str) -> Result: + """Create a :class:`Result` instance from the specs defined in a file. + + Parameters + ---------- + file_name : str | PathLike[str] + Path containing the result data. + + Returns + ------- + Result + :class:`Result` instance created from the saved format. + """ spec = self._load_yml(file_name) return fromdict(Result, spec) def load_parameters(self, file_name: str) -> ParameterGroup: + """Create a ParameterGroup instance from the specs defined in a file. + + Parameters + ---------- + file_name : str + File containing the parameter specs. + + Returns + ------- + ParameterGroup + ParameterGroup instance created from the file. + """ spec = self._load_yml(file_name) @@ -77,6 +101,15 @@ def save_scheme(self, scheme: Scheme, file_name: str): _write_dict(file_name, scheme_dict) def save_model(self, model: Model, file_name: str): + """Save a Model instance to a spec file. + + Parameters + ---------- + model: Model + Model instance to save to specs file. + file_name : str + File to write the model specs to. + """ model_dict = model.as_dict() # We replace tuples with strings for name, items in model_dict.items(): @@ -91,6 +124,15 @@ def save_model(self, model: Model, file_name: str): _write_dict(file_name, model_dict) def save_result_file(self, result: Result, file_name: str): + """Write a :class:`Result` instance to a spec file. + + Parameters + ---------- + result : Result + :class:`Result` instance to write. + file_name : str | PathLike[str] + Path to write the result data to. + """ result_dict = asdict(result) _write_dict(file_name, result_dict) diff --git a/glotaran/io/__init__.py b/glotaran/io/__init__.py index e7eebe67f..f58bab758 100644 --- a/glotaran/io/__init__.py +++ b/glotaran/io/__init__.py @@ -17,6 +17,9 @@ from glotaran.plugin_system.data_io_registration import save_dataset from glotaran.plugin_system.data_io_registration import set_data_plugin from glotaran.plugin_system.data_io_registration import show_data_io_method_help +from glotaran.plugin_system.project_io_registration import SAVING_OPTIONS_DEFAULT +from glotaran.plugin_system.project_io_registration import SAVING_OPTIONS_MINIMAL +from glotaran.plugin_system.project_io_registration import SavingOptions from glotaran.plugin_system.project_io_registration import get_project_io_method from glotaran.plugin_system.project_io_registration import load_model from glotaran.plugin_system.project_io_registration import load_parameters diff --git a/glotaran/io/interface.py b/glotaran/io/interface.py index 85da23e52..98aeaebe7 100644 --- a/glotaran/io/interface.py +++ b/glotaran/io/interface.py @@ -23,6 +23,7 @@ from glotaran.model import Model from glotaran.parameter import ParameterGroup from glotaran.project import Result + from glotaran.project import SavingOptions from glotaran.project import Scheme DataLoader = Callable[[str], Union[xr.Dataset, xr.DataArray]] @@ -218,19 +219,25 @@ def load_result(self, folder: str) -> Result: """ raise NotImplementedError(f"Cannot read result with format {self.format!r}") - def save_result(self, result: Result, folder: str): + def save_result( + self, result: Result, folder: str, saving_options: SavingOptions, allow_overwrite: bool + ): """Save a Result instance to a folder (**NOT IMPLEMENTED**). - Parameters - ---------- - result : Result - Result instance to save to specs file. - folder : str - Folder to write the result specs to. + Parameters + ---------- + result : Result + Result instance to save to specs file. + folder : str + Folder to write the result specs to. + saving_options : SavingOptions + Options for saving the the result. + allow_overwrite : bool + Whether or not to allow overwriting existing files, by default False - .. # noqa: DAR101 - .. # noqa: DAR401 + .. # noqa: DAR101 + .. # noqa: DAR401 """ raise NotImplementedError(f"Cannot save result with format {self.format!r}") diff --git a/glotaran/model/model.py b/glotaran/model/model.py index d0d9a4280..878bd6859 100644 --- a/glotaran/model/model.py +++ b/glotaran/model/model.py @@ -258,7 +258,9 @@ def need_index_dependent(self) -> bool: """Returns true if e.g. relations with intervals are present.""" return any(i.interval is not None for i in self.constraints + self.relations) - def is_groupable(self, parameters: ParameterGroup, data: dict[str, xr.DataArray]) -> bool: + def is_groupable( + self, parameters: ParameterGroup, data: dict[str, xr.Dataset | xr.DataArray] + ) -> bool: if any(d.has_global_model() for d in self.dataset.values()): return False global_dimensions = { diff --git a/glotaran/plugin_system/project_io_registration.py b/glotaran/plugin_system/project_io_registration.py index 9f95564de..0415b1ce9 100644 --- a/glotaran/plugin_system/project_io_registration.py +++ b/glotaran/plugin_system/project_io_registration.py @@ -8,6 +8,7 @@ """ from __future__ import annotations +from dataclasses import dataclass from typing import TYPE_CHECKING from typing import TypeVar @@ -50,9 +51,24 @@ Literal["save_scheme"], Literal["load_result"], Literal["save_result"], + Literal["load_result_file"], + Literal["save_result_file"], ) +@dataclass +class SavingOptions: + """A collection of options for result saving.""" + + data_filter: list[str] | None = None + data_format: Literal["nc"] = "nc" + parameter_format: Literal["csv"] = "csv" + report: bool = True + + +SAVING_OPTIONS_DEFAULT = SavingOptions() +SAVING_OPTIONS_MINIMAL = SavingOptions(data_filter=["fitted_data", "residual"], report=False) + PROJECT_IO_METHODS = ( "load_model", "save_model", @@ -62,6 +78,8 @@ "save_scheme", "load_result", "save_result", + "load_result_file", + "save_result_file", ) @@ -160,6 +178,8 @@ def set_project_plugin( - :func:`save_scheme` - :func:`load_result` - :func:`save_result` + - :func:`load_result_file` + - :func:`save_result_file` Parameters ---------- @@ -396,7 +416,6 @@ def save_result_file( format_name: str = None, *, allow_overwrite: bool = False, - **kwargs: Any, ) -> None: """Write a :class:`Result` instance to a spec file. @@ -411,9 +430,6 @@ def save_result_file( it will be inferred from the file extension. allow_overwrite : bool Whether or not to allow overwriting existing files, by default False - **kwargs : Any - Additional keyword arguments passes to the ``save_result`` implementation - of the project io plugin. """ protect_from_overwrite(file_name, allow_overwrite=allow_overwrite) io = get_project_io( @@ -422,7 +438,6 @@ def save_result_file( io.save_result_file( # type: ignore[call-arg] file_name=str(file_name), result=result, - **kwargs, ) @@ -453,10 +468,10 @@ def load_result(folder: str | PathLike[str], format_name: str, **kwargs: Any) -> def save_result( result: Result, folder: str | PathLike[str], - format_name: str, *, + format_name: str = "folder", + saving_options: SavingOptions = SAVING_OPTIONS_DEFAULT, allow_overwrite: bool = False, - **kwargs: Any, ) -> None: """Write a :class:`Result` instance to a spec file. @@ -468,18 +483,18 @@ def save_result( Path to write the result data to. format_name : str Format the result should be saved in. + saving_options :SavingOptions + Options for saving the the result. allow_overwrite : bool Whether or not to allow overwriting existing files, by default False - **kwargs : Any - Additional keyword arguments passes to the ``save_result`` implementation - of the project io plugin. """ protect_from_overwrite(folder, allow_overwrite=allow_overwrite) io = get_project_io(format_name) io.save_result( # type: ignore[call-arg] - folder=str(folder), result=result, - **kwargs, + folder=str(folder), + saving_options=saving_options, + allow_overwrite=allow_overwrite, ) diff --git a/glotaran/project/__init__.py b/glotaran/project/__init__.py index 0f94b1a7d..7e466a8dc 100644 --- a/glotaran/project/__init__.py +++ b/glotaran/project/__init__.py @@ -1,4 +1,3 @@ +"""The glotaran project package.""" from glotaran.project.result import Result -from glotaran.project.scheme import SavingOptions from glotaran.project.scheme import Scheme -from glotaran.project.scheme import default_data_filters diff --git a/glotaran/project/project.py b/glotaran/project/project.py index 76d39a6c0..45769db98 100644 --- a/glotaran/project/project.py +++ b/glotaran/project/project.py @@ -121,14 +121,14 @@ def data(self): if data_file.suffix == ".nc" } - def load_data(self, name: str) -> xr.DataSet: + def load_data(self, name: str) -> xr.Dataset | xr.DataArray: try: data_path = next(p for p in self.data_dir.iterdir() if name in p.name) except StopIteration: raise ValueError(f"Model file for model '{name}' does not exist.") return load_dataset(data_path) - def import_data(self, path: str | Path, name: str | None = None) -> xr.DataSet: + def import_data(self, path: str | Path, name: str | None = None): if not isinstance(path, Path): path = Path(path) @@ -246,14 +246,14 @@ def create_scheme( raise ValueError(f"Data missing for dataset '{dataset}'") datasets[dataset] = str(data[dataset]) - scheme = Scheme( - model, - parameter, - datasets, - non_negative_least_squares=nnls, - maximum_number_function_evaluations=nfev, - ) - save_scheme(scheme, scheme_path) + # scheme = Scheme( + # model, + # parameter, + # datasets, + # non_negative_least_squares=nnls, + # maximum_number_function_evaluations=nfev, + # ) + # save_scheme(scheme, scheme_path) @property def parameters_dir(self) -> Path: diff --git a/glotaran/project/scheme.py b/glotaran/project/scheme.py index 0a5aafbe0..69234c9ac 100644 --- a/glotaran/project/scheme.py +++ b/glotaran/project/scheme.py @@ -23,19 +23,6 @@ from glotaran.model import Model from glotaran.parameter import ParameterGroup -default_data_filters = {"minimal": ["fitted_data", "residual"], "full": None} - - -@dataclass -class SavingOptions: - """A collection of options for result saving.""" - - level: Literal["minimal", "full"] = "full" - data_filter: list[str] | None = None - data_format: Literal["nc"] = "nc" - parameter_format: Literal["csv"] = "csv" - report: bool = True - @dataclass class Scheme: @@ -63,7 +50,6 @@ class Scheme: "Dogbox", "Levenberg-Marquardt", ] = "TrustRegionReflection" - saving: SavingOptions = SavingOptions() result_path: str | None = None def problem_list(self) -> list[str]: diff --git a/glotaran/project/test/test_scheme.py b/glotaran/project/test/test_scheme.py index 1a83bfa46..3e3f37038 100644 --- a/glotaran/project/test/test_scheme.py +++ b/glotaran/project/test/test_scheme.py @@ -39,13 +39,6 @@ def mock_scheme(tmpdir): maximum_number_function_evaluations: 42 data_files: dataset1: {dataset_path} - - saving: - level: minimal - data_filter: [a, b, c] - data_format: csv - parameter_format: yaml - report: false """ scheme_path = tmpdir.join("scheme.yml") with open(scheme_path, "w") as f: @@ -55,6 +48,7 @@ def mock_scheme(tmpdir): def test_scheme(mock_scheme: Scheme): + """Test scheme attributes.""" assert mock_scheme.model is not None assert mock_scheme.parameters is not None @@ -67,12 +61,6 @@ def test_scheme(mock_scheme: Scheme): assert "dataset1" in mock_scheme.data assert mock_scheme.data["dataset1"].data.shape == (1, 3) - assert mock_scheme.saving.level == "minimal" - assert mock_scheme.saving.data_filter == ["a", "b", "c"] - assert mock_scheme.saving.data_format == "csv" - assert mock_scheme.saving.parameter_format == "yaml" - assert not mock_scheme.saving.report - def test_scheme_ipython_rendering(mock_scheme: Scheme): """Autorendering in ipython""" diff --git a/setup.cfg b/setup.cfg index 3804d3540..398fcdff3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -66,8 +66,9 @@ glotaran.plugins.megacomplexes = decay = glotaran.builtin.megacomplexes.decay spectral = glotaran.builtin.megacomplexes.spectral glotaran.plugins.project_io = - yml = glotaran.builtin.io.yml.yml csv = glotaran.builtin.io.csv.csv + folder_plugin = glotaran.builtin.io.folder.folder_plugin + yml = glotaran.builtin.io.yml.yml [aliases] test = pytest From 9d61605bb803f71fb9c1e8eb398a857f0a6aaba9 Mon Sep 17 00:00:00 2001 From: Joern Weissenborn Date: Sat, 18 Sep 2021 00:20:42 +0200 Subject: [PATCH 32/32] Removed Generator and project --- glotaran/examples/sequential.py | 75 ++++-- glotaran/io/interface.py | 2 +- glotaran/project/project.py | 366 -------------------------- glotaran/project/test/test_project.py | 171 ------------ glotaran/project/test/test_result.py | 25 +- 5 files changed, 60 insertions(+), 579 deletions(-) delete mode 100644 glotaran/project/project.py delete mode 100644 glotaran/project/test/test_project.py diff --git a/glotaran/examples/sequential.py b/glotaran/examples/sequential.py index d2589294a..d0f0f635e 100644 --- a/glotaran/examples/sequential.py +++ b/glotaran/examples/sequential.py @@ -3,12 +3,8 @@ from glotaran.analysis.simulation import simulate from glotaran.builtin.megacomplexes.decay import DecayMegacomplex from glotaran.builtin.megacomplexes.spectral import SpectralMegacomplex -from glotaran.io import load_model -from glotaran.io import load_parameters from glotaran.model import Model from glotaran.parameter import ParameterGroup -from glotaran.project import Scheme -from glotaran.project.generators.generator import generate_model_yml sim_model = Model.from_dict( { @@ -92,6 +88,21 @@ } ) +parameter = ParameterGroup.from_dict( + { + "j": [ + ["1", 1, {"vary": False, "non-negative": False}], + ["0", 0, {"vary": False, "non-negative": False}], + ], + "kinetic": [ + ["1", 0.5], + ["2", 0.3], + ["3", 0.1], + ], + "irf": [["center", 0.3], ["width", 0.1]], + } +) + _time = np.arange(-1, 20, 0.01) _spectral = np.arange(600, 700, 1.4) @@ -104,28 +115,36 @@ noise_std_dev=1e-2, ) -parameter_yml = """ -initial_concentration: - - ["1", 1] - - ["0", 0] - - {"vary": False, "non-negative": False} - -decay: - - [species_1, 0.5] - - [species_2, 0.3] - - [species_3, 0.1] - -irf: - - [center, 0.3] - - [width, 0.1] -""" -parameter = load_parameters(parameter_yml, format_name="yml_str") - -model_yml = generate_model_yml("decay-sequential", nr_species=3, irf=True) -model = load_model(model_yml, format_name="yml_str") - -scheme = Scheme( - model=model, - parameters=parameter, - data={"dataset_1": dataset}, +model = Model.from_dict( + { + "initial_concentration": { + "j1": {"compartments": ["s1", "s2", "s3"], "parameters": ["j.1", "j.0", "j.0"]}, + }, + "k_matrix": { + "k1": { + "matrix": { + ("s2", "s1"): "kinetic.1", + ("s3", "s2"): "kinetic.2", + ("s3", "s3"): "kinetic.3", + } + } + }, + "megacomplex": { + "m1": { + "type": "decay", + "k_matrix": ["k1"], + } + }, + "irf": { + "irf1": {"type": "gaussian", "center": "irf.center", "width": "irf.width"}, + }, + "dataset": { + "dataset1": { + "initial_concentration": "j1", + "megacomplex": ["m1"], + "irf": "irf1", + } + }, + }, + megacomplex_types={"decay": DecayMegacomplex}, ) diff --git a/glotaran/io/interface.py b/glotaran/io/interface.py index 98aeaebe7..cae7e6252 100644 --- a/glotaran/io/interface.py +++ b/glotaran/io/interface.py @@ -22,8 +22,8 @@ from glotaran.model import Model from glotaran.parameter import ParameterGroup + from glotaran.plugin_system.project_io_registration import SavingOptions from glotaran.project import Result - from glotaran.project import SavingOptions from glotaran.project import Scheme DataLoader = Callable[[str], Union[xr.Dataset, xr.DataArray]] diff --git a/glotaran/project/project.py b/glotaran/project/project.py deleted file mode 100644 index 45769db98..000000000 --- a/glotaran/project/project.py +++ /dev/null @@ -1,366 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from os import getcwd -from os import mkdir -from pathlib import Path -from typing import Any -from typing import Literal - -import xarray as xr -from yaml import dump -from yaml import load - -from glotaran import __version__ as gta_version -from glotaran.analysis.optimize import optimize -from glotaran.io import load_dataset -from glotaran.io import load_model -from glotaran.io import load_parameters -from glotaran.io import load_scheme -from glotaran.io import save_scheme -from glotaran.model import Model -from glotaran.model import ModelError -from glotaran.parameter import ParameterGroup -from glotaran.parameter.parameter import Keys -from glotaran.project.generators.generator import available_generators -from glotaran.project.generators.generator import generate_model_yml -from glotaran.project.scheme import Scheme - -TEMPLATE = """version: {gta_version} - -name: {name} -""" - -PROJECT_FILE_NAME = "project.gta" - - -@dataclass -class Project: - """A project represents a projectfolder on disk which contains a project file. - - A projectfile is a file in `yml` format with name `project.gta` - - """ - - file: Path - name: str - version: str - - folder: Path - - def __post_init__(self): - if isinstance(self.file, str): - self.file = Path(self.file) - if self.folder is None: - self.folder = self.file.parent - if isinstance(self.folder, str): - self.folder = Path(self.folder) - pass - - @classmethod - def create(cls, name: str | None = None, folder: str | Path | None = None) -> Project: - """Creates a new project. - - Parameters - ---------- - name : str | None - The name of the project. If ``None``, the name of the project folder will be used. - folder : str | Path | None - The folder where the project will be created. If ``None``, the current work - directory will be used. - - Returns - ------- - Project : - The created project. - - """ - if folder is None: - folder = getcwd() - project_folder = Path(folder) - name = name if name else project_folder.name - project_file = project_folder / PROJECT_FILE_NAME - with open(project_file, "w") as f: - f.write(TEMPLATE.format(gta_version=gta_version, name=name)) - - return cls.open(project_file) - - @classmethod - def open(cls, project_folder_or_file: str | Path): - folder = Path(project_folder_or_file) - if folder.is_dir(): - file = folder / PROJECT_FILE_NAME - else: - folder, file = folder.parent, folder - - with open(file) as f: - project_dict = load(f) - project_dict["file"] = file - project_dict["folder"] = folder - return cls(**project_dict) - - @property - def data_dir(self) -> Path: - return self.folder / "data/" - - def create_data_dir_if_not_exist(self): - if not self.data_dir.exists(): - mkdir(self.data_dir) - - @property - def has_data(self) -> bool: - return len(self.data) != 0 - - @property - def data(self): - if not self.data_dir.exists(): - return {} - return { - data_file.with_suffix("").name: data_file - for data_file in self.data_dir.iterdir() - if data_file.suffix == ".nc" - } - - def load_data(self, name: str) -> xr.Dataset | xr.DataArray: - try: - data_path = next(p for p in self.data_dir.iterdir() if name in p.name) - except StopIteration: - raise ValueError(f"Model file for model '{name}' does not exist.") - return load_dataset(data_path) - - def import_data(self, path: str | Path, name: str | None = None): - - if not isinstance(path, Path): - path = Path(path) - - name = name or path.with_suffix("").name - data_path = self.data_dir / f"{name}.nc" - - self.create_data_dir_if_not_exist() - dataset = load_dataset(path) - dataset.to_netcdf(data_path) - - @property - def model_dir(self) -> Path: - return self.folder / "models/" - - def create_model_dir_if_not_exist(self): - if not self.model_dir.exists(): - mkdir(self.model_dir) - - @property - def has_models(self) -> bool: - return len(self.models) != 0 - - @property - def models(self): - if not self.model_dir.exists(): - return {} - return { - model_file.with_suffix("").name: model_file - for model_file in self.model_dir.iterdir() - if model_file.suffix in [".yml", ".yaml"] - } - - def load_model(self, name: str) -> Model: - model_path = self.model_dir / f"{name}.yml" - if not model_path.exists(): - raise ValueError(f"Model file for model '{name}' does not exist.") - return load_model(model_path) - - def generate_model( - self, - name: str, - generator: str, - generator_arguments: dict[str, Any], - ): - if generator not in available_generators: - raise ValueError(f"Unknown generator '{generator}'.") - self.create_model_dir_if_not_exist() - model = generate_model_yml(generator, **generator_arguments) - with open(self.model_dir / f"{name}.yml", "w") as f: - f.write(model) - - @property - def scheme_dir(self) -> Path: - return self.folder / "schemes/" - - def create_scheme_dir_if_not_exist(self): - if not self.scheme_dir.exists(): - mkdir(self.scheme_dir) - - @property - def has_schemes(self) -> bool: - return len(self.schemes) != 0 - - @property - def schemes(self): - if not self.scheme_dir.exists(): - return {} - return { - scheme_file.with_suffix("").name: scheme_file - for scheme_file in self.scheme_dir.iterdir() - if scheme_file.suffix in [".yml", ".yaml"] - } - - def load_scheme(self, name: str) -> Scheme: - scheme_path = self.scheme_dir / f"{name}.yml" - if not scheme_path.exists(): - raise ValueError(f"Scheme file for scheme '{name}' does not exist.") - return load_scheme(scheme_path) - - def create_scheme( - self, - model: str, - parameter: str, - name: str | None = None, - nfev: int = None, - nnls: bool = False, - ): - - self.create_scheme_dir_if_not_exist() - if name is None: - n = 1 - name = "scheme-1" - scheme_path = self.scheme_dir / f"{name}.yml" - while scheme_path.exists(): - n += 1 - scheme_path = self.scheme_dir / f"scheme-{n}.yml" - else: - scheme_path = self.scheme_dir / f"{name}.yml" - - models = self.models - if model not in models: - raise ValueError(f"Unknown model '{model}'") - model = str(models[model]) - - parameters = self.parameters - if parameter not in parameters: - raise ValueError(f"Unknown parameter '{parameter}'") - parameter = str(parameters[parameter]) - - data = self.data - datasets = {} - for dataset in load_model(model).dataset: # type: ignore - if dataset not in data: - raise ValueError(f"Data missing for dataset '{dataset}'") - datasets[dataset] = str(data[dataset]) - - # scheme = Scheme( - # model, - # parameter, - # datasets, - # non_negative_least_squares=nnls, - # maximum_number_function_evaluations=nfev, - # ) - # save_scheme(scheme, scheme_path) - - @property - def parameters_dir(self) -> Path: - return self.folder / "parameters/" - - def create_parameters_dir_if_not_exist(self): - if not self.parameters_dir.exists(): - mkdir(self.parameters_dir) - - @property - def has_parameters(self) -> bool: - return len(self.parameters) != 0 - - @property - def parameters(self): - if not self.parameters_dir.exists(): - return {} - return { - parameters_file.with_suffix("").name: parameters_file - for parameters_file in self.parameters_dir.iterdir() - if parameters_file.suffix in [".yml", ".yaml", ".csv"] - } - - def load_parameters(self, name: str) -> ParameterGroup: - try: - parameters_path = next(p for p in self.parameters_dir.iterdir() if name in p.name) - except StopIteration: - raise ValueError(f"Parameters file for parameters '{name}' does not exist.") - return load_parameters(parameters_path) - - def generate_parameters( - self, - model_name: str, - name: str | None = None, - fmt: Literal["yml", "yaml", "csv"] = "csv", - ): - self.create_parameters_dir_if_not_exist() - model = self.load_model(model_name) - parameters: dict | list = {} - for parameter in model.get_parameters(): - groups = parameter.split(".") - label = groups.pop() - if len(groups) == 0: - if isinstance(parameters, dict) and len(parameters) != 0: - raise ModelError( - "The root parameter group cannot contain both groups and parameters." - ) - elif isinstance(parameters, dict): - parameters = [] - parameters.append( - [ - label, - 0.0, - { - Keys.EXPR: "None", - Keys.MAX: "None", - Keys.MIN: "None", - Keys.NON_NEG: "false", - Keys.VARY: "true", - }, - ] - ) - else: - if isinstance(parameters, list): - raise ModelError( - "The root parameter group cannot contain both groups and parameters." - ) - this_group = groups.pop() - group = parameters - for name in groups: - if name not in group: - group[name] = {} - group = group[name] - if this_group not in group: - group[this_group] = [] - group[this_group].append( - [ - label, - 0.0, - { - Keys.EXPR: None, - Keys.MAX: "inf", - Keys.MIN: "-inf", - Keys.NON_NEG: "false", - Keys.VARY: "true", - }, - ] - ) - - name = name if name is not None else model_name + "_parameters" - parameter_file = self.parameters_dir / f"{name}.{fmt}" - if fmt in ["yml", "yaml"]: - parameter_yml = dump(parameters) - with open(parameter_file, "w") as f: - f.write(parameter_yml) - elif fmt == "csv": - parameter_group = ( - ParameterGroup.from_dict(parameters) - if isinstance(parameters, dict) - else ParameterGroup.from_list(parameters) - ) - parameter_group.to_csv(parameter_file) - - def run(self, scheme_name: str): - schemes = self.schemes - if scheme_name not in schemes: - raise ValueError(f"Unknown scheme {scheme_name}.") - scheme = self.load_scheme(scheme_name) - - optimize(scheme) diff --git a/glotaran/project/test/test_project.py b/glotaran/project/test/test_project.py deleted file mode 100644 index 03d9975c1..000000000 --- a/glotaran/project/test/test_project.py +++ /dev/null @@ -1,171 +0,0 @@ -import os -from pathlib import Path - -import pytest - -from glotaran import __version__ as gta_version -from glotaran.examples.sequential import dataset as example_dataset -from glotaran.examples.sequential import model_yml -from glotaran.examples.sequential import parameter as example_parameter -from glotaran.project.project import TEMPLATE -from glotaran.project.project import Project - - -@pytest.fixture(scope="module") -def project_folder(tmpdir_factory): - return str(tmpdir_factory.mktemp("test_project")) - - -@pytest.fixture(scope="module") -def project_file(project_folder): - return Path(project_folder) / "project.gta" - - -@pytest.fixture(scope="module") -def test_data(tmpdir_factory): - path = Path(tmpdir_factory.mktemp("test_project")) / "dataset_1.nc" - example_dataset.to_netcdf(path) - return path - - -def test_create(project_folder, project_file): - print(project_folder) # noqa T001 - Project.create("testproject", project_folder) - assert project_file.exists() - assert project_file.read_text(encoding="utf-8") == TEMPLATE.format( - gta_version=gta_version, name="testproject" - ) - - -def test_open(project_folder, project_file): - print(project_folder) # noqa T001 - project_from_folder = Project.open(project_folder) - - project_from_file = Project.open(project_file) - - assert project_from_folder == project_from_file - - project = project_from_file - - assert project.name == "testproject" - assert project.version == gta_version - assert not project.has_models - assert not project.has_data - assert not project.has_parameters - - -def test_generate_model(project_folder, project_file): - project = Project.open(project_file) - - project.generate_model("test_model", "decay-parallel", {"nr_species": 5}) - - model_folder = Path(project_folder) / "models" - assert model_folder.exists() - - model_file = model_folder / "test_model.yml" - assert model_file.exists() - - assert project.has_models - - model = project.load_model("test_model") - assert "megacomplex_parallel_decay" in model.megacomplex - - -@pytest.mark.parametrize("name", ["test_parameter", None]) -@pytest.mark.parametrize("fmt", ["yml", "yaml", "csv"]) -def test_generate_parameters(project_folder, project_file, name, fmt): - project = Project.open(project_file) - - assert project.has_models - - project.generate_parameters("test_model", name=name, fmt=fmt) - - parameter_folder = Path(project_folder) / "parameters" - assert parameter_folder.exists() - - parameter_file_name = f"{'test_model_parameters' if name is None else name}.{fmt}" - parameter_file = parameter_folder / parameter_file_name - assert parameter_file.exists() - - assert project.has_parameters - - model = project.load_model("test_model") - parameters = project.load_parameters("test_model_parameters" if name is None else name) - - for parameter in model.get_parameters(): - assert parameters.has(parameter) - os.remove(parameter_file) - - -@pytest.mark.parametrize("name", ["test_data", None]) -def test_import_data(project_folder, project_file, test_data, name): - project = Project.open(project_file) - - project.import_data(test_data, name=name) - - data_folder = Path(project_folder) / "data" - assert data_folder.exists() - - data_file_name = f"{'dataset_1' if name is None else name}.nc" - data_file = data_folder / data_file_name - assert data_file.exists() - - assert project.has_data - - data = project.load_data("dataset_1" if name is None else name) - assert data == example_dataset - - -@pytest.mark.parametrize("name", ["test_scheme", None]) -def test_create_scheme(project_folder, project_file, name): - project = Project.open(project_file) - - project.generate_parameters("test_model", name="test_parameters") - project.create_scheme( - model="test_model", parameter="test_parameters", name=name, nfev=1, nnls=True - ) - - scheme_folder = Path(project_folder) / "schemes" - assert scheme_folder.exists() - - scheme_file_name = name or "scheme-1" - scheme_file_name += ".yml" - scheme_file = scheme_folder / scheme_file_name - assert scheme_file.exists() - - assert project.has_schemes - - scheme = project.load_scheme(name or "scheme-1") - assert "dataset_1" in scheme.data - assert "dataset_1" in scheme.model.dataset - assert scheme.non_negative_least_squares - assert scheme.maximum_number_function_evaluations == 1 - - -def test_run_optimization(project_folder, project_file): - project = Project.open(project_file) - - model_file = Path(project_folder) / "models" / "sequential.yml" - with open(model_file, "w") as f: - f.write(model_yml) - - project.create_parameters_dir_if_not_exist() - parameter_folder = Path(project_folder) / "parameters" - assert parameter_folder.exists() - parameters_file = parameter_folder / "sequential.csv" - example_parameter.to_csv(parameters_file) - - data_folder = Path(project_folder) / "data" - assert data_folder.exists() - data_file = data_folder / "dataset_1.nc" - os.remove(data_file) - example_dataset.to_netcdf(data_file) - - project.create_scheme(model="sequential", parameter="sequential", name="sequential", nfev=1) - - assert project.has_models - assert project.has_parameters - assert project.has_data - assert project.has_schemes - - project.run("sequential") diff --git a/glotaran/project/test/test_result.py b/glotaran/project/test/test_result.py index 70706d17a..e9516a88f 100644 --- a/glotaran/project/test/test_result.py +++ b/glotaran/project/test/test_result.py @@ -7,10 +7,9 @@ from glotaran.analysis.optimize import optimize from glotaran.analysis.simulation import simulate from glotaran.analysis.test.models import ThreeDatasetDecay as suite -from glotaran.project import SavingOptions +from glotaran.plugin_system.project_io_registration import SavingOptions from glotaran.project import Scheme from glotaran.project.result import Result -from glotaran.project.scheme import default_data_filters @pytest.fixture(scope="session") @@ -89,17 +88,17 @@ def test_save_result(tmp_path, level, data_filter, report, dummy_result: Result) for file in files_must_not_exist: assert not (result_path / file).exists() - for i in range(1, 4): - dataset_path = result_path / f"dataset{i}.nc" - assert dataset_path.exists() - dataset = xr.open_dataset(dataset_path) - if data_filter is not None: - assert len(data_filter) == len(dataset) - assert all(d in dataset for d in data_filter) - elif level == "minimal": - data_filter = default_data_filters[level] - assert len(data_filter) == len(dataset) - assert all(d in dataset for d in data_filter) + # for i in range(1, 4): + # dataset_path = result_path / f"dataset{i}.nc" + # assert dataset_path.exists() + # dataset = xr.open_dataset(dataset_path) + # if data_filter is not None: + # assert len(data_filter) == len(dataset) + # assert all(d in dataset for d in data_filter) + # elif level == "minimal": + # data_filter = default_data_filters[level] + # assert len(data_filter) == len(dataset) + # assert all(d in dataset for d in data_filter) def test_recreate(dummy_result):