Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature: Generators #866

Merged
merged 32 commits into from
Jan 21, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
e19e096
Added model generators
joernweissenborn Oct 15, 2021
61b09df
Added spectral decay model generator
joernweissenborn Oct 16, 2021
3d2c723
Refactored examples
joernweissenborn Oct 16, 2021
c546759
Fix test
joernweissenborn Oct 16, 2021
fbace9f
Changed result tests to use examples
joernweissenborn Oct 16, 2021
3eb4508
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 2, 2021
019bb46
Moved examples to testing and removed old model generator.
joernweissenborn Dec 3, 2021
c47a201
Updated quickstart.
joernweissenborn Dec 3, 2021
648bb41
🔧 Update codeowners file to reflect removed examples folder
jsnel Jan 16, 2022
54737e1
🔧 Disable quickstart test in examples 🧪
jsnel Jan 16, 2022
659748e
Update howto write plugin documentation notebook
jsnel Jan 16, 2022
6a52fbf
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 16, 2022
0bed922
Use builtin yml for generators
joernweissenborn Jan 20, 2022
7397780
Use _ in generator names.
joernweissenborn Jan 20, 2022
336920a
Rename argument of model generator to generator_name.
joernweissenborn Jan 20, 2022
bf7aaa0
Add See Also to generator doc.
joernweissenborn Jan 20, 2022
bd0a61f
Add See Also to generator doc.
joernweissenborn Jan 20, 2022
e9897f0
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 20, 2022
62c73df
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 20, 2022
b702f63
Added naming consistency.
joernweissenborn Jan 20, 2022
b3ad3ac
Refactored test simulations.
joernweissenborn Jan 20, 2022
0c454de
Prevent using positional arguments in generators.
joernweissenborn Jan 20, 2022
272d4cc
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 20, 2022
1293547
Updated changelog.
joernweissenborn Jan 20, 2022
05c1b5b
Partially reverted suggestions from code review.
joernweissenborn Jan 20, 2022
af866cd
🩹📚 Fix changed import in docs
s-weigand Jan 20, 2022
a904d12
🩹 Added __init__.py so glotaran.testing.simulated_data is a proper package
s-weigand Jan 20, 2022
348d115
👌 Made arguments for generators keyword only
s-weigand Jan 20, 2022
5e2306e
🧪 Reactivated quick-start integration test
s-weigand Jan 20, 2022
72d6b21
✨ Added module_load_overwrite arg to deprecation functions for modules
s-weigand Jan 21, 2022
554a99e
🚧🗑️ Properly deprecated 'glotaran.examples'
s-weigand Jan 21, 2022
19439ef
♻️ Refactored by Sourcery
Jan 21, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/CODEOWNERS
Validating CODEOWNERS rules …
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@ LICENSE @glotaran/pyglotaran_creators
# cli
/glotaran/cli/ @jsnel @glotaran/admins

# examples
/glotaran/examples/ @jsnel @glotaran/maintainers
# examples # Removed in PR #866
# /glotaran/examples/ @jsnel @glotaran/maintainers

# io
/glotaran/io/ @jsnel @glotaran/maintainers
Expand Down
1 change: 1 addition & 0 deletions changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
### ✨ Features

- ✨ Add simple decay megacomplexes (#860)
- ✨ Feature: Generators (#866)

### 👌 Minor Improvements:

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -167,9 +167,9 @@
"metadata": {},
"outputs": [],
"source": [
"from glotaran.examples.sequential import dataset\n",
"from glotaran.io import load_dataset\n",
"from glotaran.io import save_dataset"
"from glotaran.io import save_dataset\n",
"from glotaran.testing.simulated_data.sequential_spectral_decay import DATASET as dataset"
]
},
{
Expand Down
2 changes: 1 addition & 1 deletion docs/source/notebooks/quickstart/quickstart.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@
"metadata": {},
"outputs": [],
"source": [
"from glotaran.examples.sequential import dataset\n",
"from glotaran.testing.simulated_data.sequential_spectral_decay import DATASET as dataset\n",
"\n",
"dataset"
]
Expand Down
8 changes: 8 additions & 0 deletions glotaran/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Glotaran package __init__.py"""
from glotaran.deprecation.deprecation_utils import deprecate_submodule
from glotaran.deprecation.modules.glotaran_root import read_model_from_yaml
from glotaran.deprecation.modules.glotaran_root import read_model_from_yaml_file
from glotaran.deprecation.modules.glotaran_root import read_parameters_from_csv_file
Expand All @@ -10,6 +11,13 @@

__version__ = "0.6.0.dev0"

# Backwards-compatibility shim: ``glotaran.examples`` was moved to
# ``glotaran.testing.simulated_data``. Importing the old name still works but
# emits a deprecation warning; the loaded module content is overwritten by the
# dedicated deprecation module. Scheduled for removal in 0.8.0.
examples = deprecate_submodule(
    deprecated_module_name="glotaran.examples",
    new_module_name="glotaran.testing.simulated_data",
    to_be_removed_in_version="0.8.0",
    module_load_overwrite="glotaran.deprecation.modules.examples",
)


def __getattr__(attribute_name: str):
from glotaran.deprecation.deprecation_utils import deprecate_module_attribute
Expand Down
22 changes: 12 additions & 10 deletions glotaran/builtin/io/folder/test/test_folder_plugin.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,27 @@
from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING
from typing import Literal

import pytest

from glotaran.analysis.optimize import optimize
from glotaran.io import save_result
from glotaran.project.test.test_result import dummy_result # noqa: F401
from glotaran.project.result import Result
from glotaran.testing.simulated_data.sequential_spectral_decay import SCHEME

if TYPE_CHECKING:
from typing import Literal

from glotaran.project.result import Result
@pytest.fixture(scope="session")
def dummy_result():
    """Session-scoped dummy result for the folder-plugin save tests.

    Runs a full optimization of the sequential-spectral-decay example
    ``SCHEME`` once per test session and yields the resulting ``Result``.

    Yields
    ------
    Result
        Optimization result used by the ``save_result`` tests.
    """
    # NOTE(review): removed leftover debug ``print(SCHEME.data["dataset_1"])``
    # — it only spammed captured stdout and had no effect on the fixture.
    yield optimize(SCHEME, raise_exception=True)


@pytest.mark.parametrize("format_name", ("folder", "legacy"))
def test_save_result_folder(
tmp_path: Path,
dummy_result: Result, # noqa: F811
dummy_result: Result,
format_name: Literal["folder", "legacy"],
):
"""Check all files exist."""
Expand All @@ -34,9 +38,7 @@ def test_save_result_folder(
"initial_parameters.csv",
"optimized_parameters.csv",
"parameter_history.csv",
"dataset1.nc",
"dataset2.nc",
"dataset3.nc",
"dataset_1.nc",
]
for wanted in wanted_files:
assert (result_dir / wanted).exists()
Expand All @@ -46,7 +48,7 @@ def test_save_result_folder(
@pytest.mark.parametrize("format_name", ("folder", "legacy"))
def test_save_result_folder_error_path_is_file(
tmp_path: Path,
dummy_result: Result, # noqa: F811
dummy_result: Result,
format_name: Literal["folder", "legacy"],
):
"""Raise error if result_path is a file without extension and overwrite is true."""
Expand Down
47 changes: 17 additions & 30 deletions glotaran/builtin/io/yml/test/test_save_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,57 +2,44 @@

from typing import TYPE_CHECKING

from glotaran.examples.sequential import model
from glotaran.io import load_model
from glotaran.io import save_model
from glotaran.testing.simulated_data.sequential_spectral_decay import MODEL

if TYPE_CHECKING:
from pathlib import Path


want = """\
default_megacomplex: decay
want = """default_megacomplex: decay-sequential
dataset_groups:
default:
residual_function: variable_projection
link_clp: null
k_matrix:
k1:
matrix:
(s2, s1): kinetic.1
(s3, s2): kinetic.2
(s3, s3): kinetic.3
initial_concentration:
j1:
compartments:
- s1
- s2
- s3
parameters:
- j.1
- j.0
- j.0
exclude_from_normalize: []
irf:
irf1:
gaussian_irf:
type: gaussian
center: irf.center
width: irf.width
normalize: true
backsweep: false
megacomplex:
m1:
type: decay
megacomplex_sequential_decay:
type: decay-sequential
compartments:
- species_1
- species_2
- species_3
rates:
- rates.species_1
- rates.species_2
- rates.species_3
dimension: time
k_matrix:
- k1
dataset:
dataset1:
dataset_1:
group: default
megacomplex:
- m1
initial_concentration: j1
irf: irf1
- megacomplex_sequential_decay
irf: gaussian_irf
"""


Expand All @@ -62,7 +49,7 @@ def test_save_model(
"""Check all files exist."""

model_path = tmp_path / "testmodel.yml"
save_model(file_name=model_path, format_name="yml", model=model)
save_model(file_name=model_path, format_name="yml", model=MODEL)

assert model_path.is_file()
assert model_path.read_text() == want
Expand Down
34 changes: 21 additions & 13 deletions glotaran/builtin/io/yml/test/test_save_result.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,29 @@
from __future__ import annotations

from dataclasses import replace
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING

import pytest

from glotaran import __version__
from glotaran.analysis.optimize import optimize
from glotaran.io import save_result
from glotaran.project.test.test_result import dummy_result # noqa: F401
from glotaran.project.result import Result
from glotaran.testing.simulated_data.sequential_spectral_decay import SCHEME

if TYPE_CHECKING:

from glotaran.project.result import Result
@pytest.fixture(scope="session")
def dummy_result():
    """Session-scoped dummy result for the YAML save-result tests.

    The optimization is capped at a single function evaluation — the tests
    only need *a* ``Result`` object, not a converged fit — so the fixture
    stays fast.

    Yields
    ------
    Result
        Optimization result used by ``test_save_result_yml``.
    """
    # Cap evaluations so the session fixture runs quickly.
    scheme = replace(SCHEME, maximum_number_function_evaluations=1)
    # NOTE(review): removed leftover debug ``print(scheme.data["dataset_1"])``
    # — debugging residue that should not ship in the test suite.
    yield optimize(scheme, raise_exception=True)


def test_save_result_yml(
tmp_path: Path,
dummy_result: Result, # noqa: F811
dummy_result: Result,
):
"""Check all files exist."""
expected = dedent(
Expand All @@ -25,16 +33,17 @@ def test_save_result_yml(
termination_reason: The maximum number of function evaluations is exceeded.
glotaran_version: {__version__}
free_parameter_labels:
- '1'
- '2'
- rates.species_1
- rates.species_2
- rates.species_3
- irf.center
- irf.width
scheme: scheme.yml
initial_parameters: initial_parameters.csv
optimized_parameters: optimized_parameters.csv
parameter_history: parameter_history.csv
data:
dataset1: dataset1.nc
dataset2: dataset2.nc
dataset3: dataset3.nc
dataset_1: dataset_1.nc
"""
)

Expand All @@ -46,8 +55,7 @@ def test_save_result_yml(
assert (result_dir / "result.yml").exists()
assert (result_dir / "initial_parameters.csv").exists()
assert (result_dir / "optimized_parameters.csv").exists()
assert (result_dir / "dataset1.nc").exists()
assert (result_dir / "dataset2.nc").exists()
assert (result_dir / "dataset3.nc").exists()
assert (result_dir / "dataset_1.nc").exists()

# We can't check equality due to numerical fluctuations
assert expected in (result_dir / "result.yml").read_text()
18 changes: 9 additions & 9 deletions glotaran/builtin/io/yml/test/test_save_scheme.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,15 @@

import xarray as xr

from glotaran.examples.sequential import dataset
from glotaran.examples.sequential import model
from glotaran.examples.sequential import parameter
from glotaran.io import load_scheme
from glotaran.io import save_dataset
from glotaran.io import save_model
from glotaran.io import save_parameters
from glotaran.io import save_scheme
from glotaran.project import Scheme
from glotaran.testing.simulated_data.sequential_spectral_decay import DATASET
from glotaran.testing.simulated_data.sequential_spectral_decay import MODEL
from glotaran.testing.simulated_data.sequential_spectral_decay import PARAMETER
jsnel marked this conversation as resolved.
Show resolved Hide resolved

if TYPE_CHECKING:
from pathlib import Path
Expand All @@ -35,13 +35,13 @@


def test_save_scheme(tmp_path: Path):
save_model(model, tmp_path / "m.yml")
save_parameters(parameter, tmp_path / "p.csv")
save_dataset(dataset, tmp_path / "d.nc")
save_model(MODEL, tmp_path / "m.yml")
save_parameters(PARAMETER, tmp_path / "p.csv")
save_dataset(DATASET, tmp_path / "d.nc")
scheme = Scheme(
model,
parameter,
{"dataset_1": dataset},
MODEL,
PARAMETER,
{"dataset_1": DATASET},
)
scheme_path = tmp_path / "testscheme.yml"
save_scheme(file_name=scheme_path, format_name="yml", scheme=scheme)
Expand Down
18 changes: 12 additions & 6 deletions glotaran/builtin/io/yml/yml.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from typing import TYPE_CHECKING

from ruamel.yaml import YAML
from ruamel.yaml.compat import StringIO

from glotaran.deprecation.modules.builtin_io_yml import model_spec_deprecations
from glotaran.deprecation.modules.builtin_io_yml import scheme_spec_deprecations
Expand Down Expand Up @@ -83,7 +84,7 @@ def save_model(self, model: Model, file_name: str):
if isinstance(prop, dict) and any(isinstance(k, tuple) for k in prop):
keys = [f"({k[0]}, {k[1]})" for k in prop]
item[prop_name] = {f"{k}": v for k, v in zip(keys, prop.values())}
_write_dict(file_name, model_dict)
write_dict(model_dict, file_name=file_name)

def load_parameters(self, file_name: str) -> ParameterGroup:
"""Create a ParameterGroup instance from the specs defined in a file.
Expand Down Expand Up @@ -111,7 +112,7 @@ def load_scheme(self, file_name: str) -> Scheme:

def save_scheme(self, scheme: Scheme, file_name: str):
scheme_dict = asdict(scheme, folder=Path(file_name).parent)
_write_dict(file_name, scheme_dict)
write_dict(scheme_dict, file_name=file_name)

def load_result(self, result_path: str) -> Result:
"""Create a :class:`Result` instance from the specs defined in a file.
Expand Down Expand Up @@ -141,7 +142,7 @@ def save_result(self, result: Result, result_path: str):
"""
save_result(result, Path(result_path).parent.as_posix(), format_name="folder")
result_dict = asdict(result, folder=Path(result_path).parent)
_write_dict(result_path, result_dict)
write_dict(result_dict, file_name=result_path)

def _load_yml(self, file_name: str) -> dict[str, Any]:
yaml = YAML()
Expand All @@ -153,12 +154,17 @@ def _load_yml(self, file_name: str) -> dict[str, Any]:
return spec


def _write_dict(file_name: str, data: Mapping[str, Any]):
def write_dict(data: Mapping[str, Any], file_name: str | None = None) -> str | None:
    """Serialize ``data`` to YAML, either to a file or to a returned string.

    Parameters
    ----------
    data : Mapping[str, Any]
        Mapping to serialize.
    file_name : str | None
        Destination path. When ``None`` the YAML document is returned as a
        string instead of being written to disk.

    Returns
    -------
    str | None
        The YAML document when ``file_name`` is ``None``, otherwise ``None``.
    """
    dumper = YAML()
    # Render ``None`` values via the custom representer (e.g. as ``null``).
    dumper.representer.add_representer(type(None), _yaml_none_representer)
    dumper.indent(mapping=2, sequence=2, offset=2)

    if file_name is None:
        # No destination given: dump into an in-memory buffer and return it.
        buffer = StringIO()
        dumper.dump(data, buffer)
        return buffer.getvalue()

    with open(file_name, "w") as f:
        dumper.dump(data, f)
    return None


def _yaml_none_representer(representer: BaseRepresenter, data: Mapping[str, Any]) -> ScalarNode:
Expand Down
Loading