From b24bffe9deb6a0edaeb41711905010a87cfe2140 Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 11 Sep 2021 02:40:30 +0200
Subject: [PATCH 1/7] =?UTF-8?q?=F0=9F=A9=B9=20Fixed=20dimension=20mismatch?=
 =?UTF-8?q?ing=20of=20CoherentArtifactMegacomplex?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If the dataset_model was index_dependent, CoherentArtifactMegacomplex.finalize_data
crashed because it tried to cast data of dimensions
(global_dimension, model_dimension, "coherent_artifact_order") to the dimensions
(model_dimension, "coherent_artifact_order").
---
 .../coherent_artifact/coherent_artifact_megacomplex.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
index a1aa8a2d8..4d5f20cfd 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
@@ -73,8 +73,11 @@ def finalize_data(
         global_dimension = dataset_model.get_global_dimension()
         model_dimension = dataset_model.get_model_dimension()
         dataset.coords["coherent_artifact_order"] = np.arange(1, self.order + 1)
+        concentration_dimensions = (model_dimension, "coherent_artifact_order")
+        if dataset_model.index_dependent() is True:
+            concentration_dimensions = (global_dimension, *concentration_dimensions)
         dataset["coherent_artifact_concentration"] = (
-            (model_dimension, "coherent_artifact_order"),
+            concentration_dimensions,
             dataset.matrix.sel(clp_label=self.compartments()).values,
         )
         dataset["coherent_artifact_associated_spectra"] = (

From df052c87459d5e2782511ac708eb7821741c9fd8 Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 11 Sep 2021 14:26:33 +0200
Subject: [PATCH 2/7] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20Refactored=20test=5Fco?=
 =?UTF-8?q?herent=5Fartifact=20to=20be=20more=20readable?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../test/test_coherent_artifact.py | 26 +++++++++----------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
index 9a02ce4aa..f61bcb6ad 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
@@ -14,7 +14,7 @@
 def test_coherent_artifact():
     model_dict = {
         "initial_concentration": {
-            "j1": {"compartments": ["s1"], "parameters": ["2"]},
+            "j1": {"compartments": ["s1"], "parameters": ["irf_center"]},
         },
         "megacomplex": {
             "mc1": {"type": "decay", "k_matrix": ["k1"]},
@@ -23,15 +23,15 @@
         "k_matrix": {
             "k1": {
                 "matrix": {
-                    ("s1", "s1"): "1",
+                    ("s1", "s1"): "rate",
                 }
             }
         },
         "irf": {
             "irf1": {
-                "type": "spectral-gaussian",
-                "center": "2",
-                "width": "3",
+                "type": "spectral-multi-gaussian",
+                "center": ["irf_center"],
+                "width": ["irf_width"],
             },
         },
         "dataset": {
@@ -42,6 +42,13 @@
             },
         },
     }
+
+    parameter_list = [
+        ["rate", 101e-4],
+        ["irf_center", 10, {"vary": False, "non-negative": False}],
+        ["irf_width", 20, {"vary": False, "non-negative": False}],
+    ]
+
     model = Model.from_dict(
         model_dict.copy(),
         megacomplex_types={
             "decay": DecayMegacomplex,
             "coherent-artifact": CoherentArtifactMegacomplex,
         },
     )
-    parameters = ParameterGroup.from_list(
-        [
-            101e-4,
-            [10, {"vary": False, "non-negative": False}],
-            [20, {"vary": False, "non-negative": False}],
-            [30, {"vary": False, "non-negative": False}],
-        ]
-    )
+    parameters = ParameterGroup.from_list(parameter_list)

     time = np.arange(0, 50, 1.5)
     spectral = np.asarray([0])

From 7c390ee10be3b412286ea751bc96dfa04af952d4 Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 11 Sep 2021 15:58:03 +0200
Subject: [PATCH 3/7] =?UTF-8?q?=F0=9F=A7=AA=20Parametrized=20test=5Fcohere?=
 =?UTF-8?q?nt=5Fartifact=20to=20cover=20index=20dependent=20case?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../test/test_coherent_artifact.py | 37 ++++++++++++++-----
 1 file changed, 28 insertions(+), 9 deletions(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
index f61bcb6ad..433688f3c 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
@@ -1,4 +1,5 @@
 import numpy as np
+import pytest
 import xarray as xr

 from glotaran.analysis.optimize import optimize
@@ -11,7 +12,11 @@
 from glotaran.project import Scheme


-def test_coherent_artifact():
+@pytest.mark.parametrize(
+    "is_index_dependent",
+    (True, False),
+)
+def test_coherent_artifact(is_index_dependent: bool):
     model_dict = {
         "initial_concentration": {
             "j1": {"compartments": ["s1"], "parameters": ["irf_center"]},
@@ -49,6 +54,17 @@ def test_coherent_artifact(is_index_dependent: bool):
         ["irf_width", 20, {"vary": False, "non-negative": False}],
     ]

+    if is_index_dependent is True:
+        irf_spec = model_dict["irf"]["irf1"]
+        irf_spec["dispersion_center"] = "irf_dispc"
+        irf_spec["center_dispersion"] = ["irf_disp1", "irf_disp2"]
+
+        parameter_list += [
+            ["irf_dispc", 300, {"vary": False, "non-negative": False}],
+            ["irf_disp1", 0.01, {"vary": False, "non-negative": False}],
+            ["irf_disp2", 0.001, {"vary": False, "non-negative": False}],
+        ]
+
     model = Model.from_dict(
         model_dict.copy(),
         megacomplex_types={
             "decay": DecayMegacomplex,
             "coherent-artifact": CoherentArtifactMegacomplex,
         },
@@ -60,13 +76,13 @@ def test_coherent_artifact(is_index_dependent: bool):
     parameters = ParameterGroup.from_list(parameter_list)

     time = np.arange(0, 50, 1.5)
-    spectral = np.asarray([0])
+    spectral = np.asarray([200, 300, 400])
     coords = {"time": time, "spectral": spectral}

     dataset_model = model.dataset["dataset1"].fill(model, parameters)
     dataset_model.overwrite_global_dimension("spectral")
     dataset_model.set_coordinates(coords)
-    matrix = calculate_matrix(dataset_model, {})
+    matrix = calculate_matrix(dataset_model, {"spectral": [0, 1, 2]})
     compartments = matrix.clp_labels

     print(compartments)
@@ -77,9 +93,9 @@ def test_coherent_artifact(is_index_dependent: bool):
     assert matrix.matrix.shape == (time.size, 4)

     clp = xr.DataArray(
-        [[1, 1, 1, 1]],
+        np.ones((3, 4)),
         coords=[
-            ("spectral", [0]),
+            ("spectral", spectral),
             (
                 "clp_label",
                 [
@@ -102,17 +118,20 @@ def test_coherent_artifact(is_index_dependent: bool):
     print(result.optimized_parameters)

     for label, param in result.optimized_parameters.all():
-        assert np.allclose(param.value, parameters.get(label).value, rtol=1e-1)
+        assert np.allclose(param.value, parameters.get(label).value, rtol=1e-8)

     resultdata = result.data["dataset1"]
     assert np.array_equal(data.time, resultdata.time)
     assert np.array_equal(data.spectral, resultdata.spectral)
     assert data.data.shape == resultdata.data.shape
     assert data.data.shape == resultdata.fitted_data.shape
-    assert np.allclose(data.data, resultdata.fitted_data, rtol=1e-2)
+    assert np.allclose(data.data, resultdata.fitted_data)

     assert "coherent_artifact_concentration" in resultdata
-    assert resultdata["coherent_artifact_concentration"].shape == (time.size, 3)
+    if is_index_dependent:
+        assert resultdata["coherent_artifact_concentration"].shape == (spectral.size, time.size, 3)
+    else:
+        assert resultdata["coherent_artifact_concentration"].shape == (time.size, 3)

     assert "coherent_artifact_associated_spectra" in resultdata
-    assert resultdata["coherent_artifact_associated_spectra"].shape == (1, 3)
+    assert resultdata["coherent_artifact_associated_spectra"].shape == (3, 3)

From 4d5aebf2c2ae4a3c53282688ed50f2fb73d245a8 Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 11 Sep 2021 16:19:56 +0200
Subject: [PATCH 4/7] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20Renamed=20'DatasetMode?=
 =?UTF-8?q?l.index=5Fdependent'=20->'is=5Findex=5Fdependent'?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

(see #755)
---
 glotaran/analysis/problem.py | 2 +-
 glotaran/analysis/problem_grouped.py | 2 +-
 glotaran/analysis/problem_ungrouped.py | 10 +++++-----
 glotaran/analysis/simulation.py | 4 ++--
 glotaran/analysis/test/test_optimization.py | 14 +++++++-------
 .../coherent_artifact_megacomplex.py | 2 +-
 glotaran/model/dataset_model.py | 2 +-
 glotaran/model/dataset_model.pyi | 2 +-
 8 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/glotaran/analysis/problem.py b/glotaran/analysis/problem.py
index bc7463499..4d10d4c31 100644
--- a/glotaran/analysis/problem.py
+++ b/glotaran/analysis/problem.py
@@ -340,7 +340,7 @@ def create_result_dataset(self, label: str, copy: bool = True) -> xr.Dataset:
         model_dimension = dataset_model.get_model_dimension()
         if copy:
             dataset = dataset.copy()
-        if dataset_model.index_dependent():
+        if dataset_model.is_index_dependent():
             dataset = self.create_index_dependent_result_dataset(label, dataset)
         else:
             dataset = self.create_index_independent_result_dataset(label, dataset)
diff --git a/glotaran/analysis/problem_grouped.py b/glotaran/analysis/problem_grouped.py
index e898970b2..e26f33bea 100644
--- a/glotaran/analysis/problem_grouped.py
+++ b/glotaran/analysis/problem_grouped.py
@@ -48,7 +48,7 @@ def __init__(self, scheme: Scheme):
             raise ValueError(
                 f"Cannot group datasets. Model dimension '{model_dimensions}' do not match."
             )
-        self._index_dependent = any(d.index_dependent() for d in self.dataset_models.values())
+        self._index_dependent = any(d.is_index_dependent() for d in self.dataset_models.values())
         self._global_dimension = global_dimensions.pop()
         self._model_dimension = model_dimensions.pop()
         self._group_clp_labels = None
diff --git a/glotaran/analysis/problem_ungrouped.py b/glotaran/analysis/problem_ungrouped.py
index 8b7fedb18..8afd5f164 100644
--- a/glotaran/analysis/problem_ungrouped.py
+++ b/glotaran/analysis/problem_ungrouped.py
@@ -59,7 +59,7 @@ def calculate_matrices(

         for label, dataset_model in self.dataset_models.items():

-            if dataset_model.index_dependent():
+            if dataset_model.is_index_dependent():
                 self._calculate_index_dependent_matrix(label, dataset_model)
             else:
                 self._calculate_index_independent_matrix(label, dataset_model)
@@ -132,10 +132,10 @@ def _calculate_residual(self, label: str, dataset_model: DatasetModel):
         for i, index in enumerate(global_axis):
             reduced_clp_labels, reduced_matrix = (
                 self.reduced_matrices[label][i]
-                if dataset_model.index_dependent()
+                if dataset_model.is_index_dependent()
                 else self.reduced_matrices[label]
             )
-            if not dataset_model.index_dependent():
+            if not dataset_model.is_index_dependent():
                 reduced_matrix = reduced_matrix.copy()

             if dataset_model.scale is not None:
@@ -183,7 +183,7 @@ def _calculate_full_model_residual(self, label: str, dataset_model: DatasetModel
         model_matrix = self.matrices[label]
         global_matrix = self.global_matrices[label].matrix

-        if dataset_model.index_dependent():
+        if dataset_model.is_index_dependent():
             matrix = np.concatenate(
                 [
                     np.kron(global_matrix[i, :], model_matrix[i].matrix)
@@ -205,7 +205,7 @@ def _calculate_full_model_residual(self, label: str, dataset_model: DatasetModel
     def _get_clp_labels(self, label: str, index: int = 0):
         return (
             self.matrices[label][index].clp_labels
-            if self.dataset_models[label].index_dependent()
+            if self.dataset_models[label].is_index_dependent()
             else self.matrices[label].clp_labels
         )
diff --git a/glotaran/analysis/simulation.py b/glotaran/analysis/simulation.py
index 2bbdc0b00..22ebe0a38 100644
--- a/glotaran/analysis/simulation.py
+++ b/glotaran/analysis/simulation.py
@@ -93,7 +93,7 @@ def simulate_clp(
             )
             for index, _ in enumerate(global_axis)
         ]
-        if dataset_model.index_dependent()
+        if dataset_model.is_index_dependent()
         else calculate_matrix(dataset_model, {})
     )

@@ -108,7 +108,7 @@ def simulate_clp(
     )
     result = result.to_dataset(name="data")
     for i in range(global_axis.size):
-        index_matrix = matrices[i] if dataset_model.index_dependent() else matrices
+        index_matrix = matrices[i] if dataset_model.is_index_dependent() else matrices
         result.data[:, i] = np.dot(
             index_matrix.matrix,
             clp.isel({global_dimension: i}).sel({"clp_label": index_matrix.clp_labels}),
diff --git a/glotaran/analysis/test/test_optimization.py b/glotaran/analysis/test/test_optimization.py
index 2576b02e3..2a8e97ea7 100644
--- a/glotaran/analysis/test/test_optimization.py
+++ b/glotaran/analysis/test/test_optimization.py
@@ -12,7 +12,7 @@
 from glotaran.project import Scheme


-@pytest.mark.parametrize("index_dependent", [True, False])
+@pytest.mark.parametrize("is_index_dependent", [True, False])
 @pytest.mark.parametrize("grouped", [True, False])
 @pytest.mark.parametrize("weight", [True, False])
 @pytest.mark.parametrize(
@@ -27,16 +27,16 @@
     "suite",
     [OneCompartmentDecay, TwoCompartmentDecay, ThreeDatasetDecay, MultichannelMulticomponentDecay],
 )
-def test_optimization(suite, index_dependent, grouped, weight, method):
+def test_optimization(suite, is_index_dependent, grouped, weight, method):
     model = suite.model
-    model.megacomplex["m1"].is_index_dependent = index_dependent
+    model.megacomplex["m1"].is_index_dependent = is_index_dependent

     print("Grouped:", grouped)
-    print("Index dependent:", index_dependent)
+    print("Index dependent:", is_index_dependent)

     sim_model = suite.sim_model
-    sim_model.megacomplex["m1"].is_index_dependent = index_dependent
+    sim_model.megacomplex["m1"].is_index_dependent = is_index_dependent

     print(model.validate())
     assert model.valid()

@@ -54,8 +54,8 @@ def test_optimization(suite, index_dependent, grouped, weight, method):
     print(model.validate(initial_parameters))
     assert model.valid(initial_parameters)
     assert (
-        model.dataset["dataset1"].fill(model, initial_parameters).index_dependent()
-        == index_dependent
+        model.dataset["dataset1"].fill(model, initial_parameters).is_index_dependent()
+        == is_index_dependent
     )

     nr_datasets = 3 if issubclass(suite, ThreeDatasetDecay) else 1
diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
index 4d5f20cfd..66d9555ad 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
@@ -74,7 +74,7 @@ def finalize_data(
         model_dimension = dataset_model.get_model_dimension()
         dataset.coords["coherent_artifact_order"] = np.arange(1, self.order + 1)
         concentration_dimensions = (model_dimension, "coherent_artifact_order")
-        if dataset_model.index_dependent() is True:
+        if dataset_model.is_index_dependent() is True:
             concentration_dimensions = (global_dimension, *concentration_dimensions)
         dataset["coherent_artifact_concentration"] = (
             concentration_dimensions,
diff --git a/glotaran/model/dataset_model.py b/glotaran/model/dataset_model.py
index 2290ff3c9..443de05ae 100644
--- a/glotaran/model/dataset_model.py
+++ b/glotaran/model/dataset_model.py
@@ -140,7 +140,7 @@ def get_weight(self) -> np.ndarray | None:
         """Gets the dataset model's weight."""
         return self._weight

-    def index_dependent(self) -> bool:
+    def is_index_dependent(self) -> bool:
         """Indicates if the dataset model is index dependent."""
         if hasattr(self, "_index_dependent"):
             return self._index_dependent
diff --git a/glotaran/model/dataset_model.pyi b/glotaran/model/dataset_model.pyi
index c0a7b49f7..96cd84ed0 100644
--- a/glotaran/model/dataset_model.pyi
+++ b/glotaran/model/dataset_model.pyi
@@ -37,7 +37,7 @@ class DatasetModel:
     def set_data(self, dataset: xr.Dataset) -> DatasetModel: ...
     def get_data(self) -> np.ndarray: ...
     def get_weight(self) -> np.ndarray | None: ...
-    def index_dependent(self) -> bool: ...
+    def is_index_dependent(self) -> bool: ...
     def overwrite_index_dependent(self, index_dependent: bool): ...
     def has_global_model(self) -> bool: ...
     def set_coordinates(self, coords: dict[str, np.ndarray]): ...

From 8ffdcb0519db49dc24c6a15aa6cf35b96b7077de Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 11 Sep 2021 16:55:19 +0200
Subject: [PATCH 5/7] =?UTF-8?q?=F0=9F=94=A7=20Let=20mypy=20run=20normally?=
 =?UTF-8?q?=20in=20pre-commit?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .pre-commit-config.yaml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7f1d4d407..b3ac808be 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -96,8 +96,6 @@ repos:
       - id: mypy
         files: "^glotaran/(plugin_system|utils|deprecation)"
         exclude: "docs"
-        args: [glotaran]
-        pass_filenames: false
         additional_dependencies: [types-all]

   - repo: https://github.com/econchick/interrogate

From 8821699dcef43369635ad1f497992c0fd8eb031c Mon Sep 17 00:00:00 2001
From: s-weigand
Date: Sat, 11 Sep 2021 22:59:30 +0200
Subject: [PATCH 6/7] =?UTF-8?q?=F0=9F=91=8C=20Renamed=20'coherent=5Fartifa?=
 =?UTF-8?q?ct=5Fconcentration'=20to=20'coherent=5Fartifact=5Fresponse'?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Ref: https://github.com/glotaran/pyglotaran/pull/808#discussion_r706669563
---
 .../coherent_artifact/coherent_artifact_megacomplex.py | 8 ++++----
 .../coherent_artifact/test/test_coherent_artifact.py | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
index 66d9555ad..2c81cf93e 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/coherent_artifact_megacomplex.py
@@ -73,11 +73,11 @@ def finalize_data(
         global_dimension = dataset_model.get_global_dimension()
         model_dimension = dataset_model.get_model_dimension()
         dataset.coords["coherent_artifact_order"] = np.arange(1, self.order + 1)
-        concentration_dimensions = (model_dimension, "coherent_artifact_order")
+        response_dimensions = (model_dimension, "coherent_artifact_order")
         if dataset_model.is_index_dependent() is True:
-            concentration_dimensions = (global_dimension, *concentration_dimensions)
-        dataset["coherent_artifact_concentration"] = (
-            concentration_dimensions,
+            response_dimensions = (global_dimension, *response_dimensions)
+        dataset["coherent_artifact_response"] = (
+            response_dimensions,
             dataset.matrix.sel(clp_label=self.compartments()).values,
         )
         dataset["coherent_artifact_associated_spectra"] = (
diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
index 433688f3c..6e0dcfb0d 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
@@ -127,11 +127,11 @@ def test_coherent_artifact(is_index_dependent: bool):
     assert data.data.shape == resultdata.fitted_data.shape
     assert np.allclose(data.data, resultdata.fitted_data)

-    assert "coherent_artifact_concentration" in resultdata
+    assert "coherent_artifact_response" in resultdata
     if is_index_dependent:
-        assert resultdata["coherent_artifact_concentration"].shape == (spectral.size, time.size, 3)
+        assert resultdata["coherent_artifact_response"].shape == (spectral.size, time.size, 3)
     else:
-        assert resultdata["coherent_artifact_concentration"].shape == (time.size, 3)
+        assert resultdata["coherent_artifact_response"].shape == (time.size, 3)

     assert "coherent_artifact_associated_spectra" in resultdata
     assert resultdata["coherent_artifact_associated_spectra"].shape == (3, 3)

From 619bd4f4ec3eeb9c29a55fa905cad419e05c1074 Mon Sep 17 00:00:00 2001
From: Sourcery AI <>
Date: Sat, 11 Sep 2021 21:12:38 +0000
Subject: [PATCH 7/7] 'Refactored by Sourcery'

---
 .../coherent_artifact/test/test_coherent_artifact.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
index 6e0dcfb0d..770074091 100644
--- a/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
+++ b/glotaran/builtin/megacomplexes/coherent_artifact/test/test_coherent_artifact.py
@@ -54,7 +54,7 @@ def test_coherent_artifact(is_index_dependent: bool):
         ["irf_width", 20, {"vary": False, "non-negative": False}],
     ]

-    if is_index_dependent is True:
+    if is_index_dependent:
         irf_spec = model_dict["irf"]["irf1"]
         irf_spec["dispersion_center"] = "irf_dispc"
         irf_spec["center_dispersion"] = ["irf_disp1", "irf_disp2"]
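Illustration (added for readability; not one of the patches): the dimension handling that patches 1 and 6 converge on can be sketched in a few lines of Python. The helper name build_response_dims is hypothetical and exists only for this sketch; in pyglotaran the logic sits inline in CoherentArtifactMegacomplex.finalize_data, and the dimension names "time"/"spectral" are only example values.

    # Minimal sketch, assuming "time" is the model dimension and "spectral" the global one.
    def build_response_dims(model_dimension, global_dimension, is_index_dependent):
        # Index-independent matrices span (model_dimension, "coherent_artifact_order");
        # index-dependent matrices carry an extra leading global_dimension axis,
        # so the dimension tuple has to grow before the data is assigned to the dataset.
        dims = (model_dimension, "coherent_artifact_order")
        if is_index_dependent:
            dims = (global_dimension, *dims)
        return dims

    # build_response_dims("time", "spectral", False) -> ("time", "coherent_artifact_order")
    # build_response_dims("time", "spectral", True)  -> ("spectral", "time", "coherent_artifact_order")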