diff --git a/.github/workflows/test_benchmark_collection_models.yml b/.github/workflows/test_benchmark_collection_models.yml index 1071c5cbf5..5dc3658ba1 100644 --- a/.github/workflows/test_benchmark_collection_models.yml +++ b/.github/workflows/test_benchmark_collection_models.yml @@ -61,6 +61,12 @@ jobs: git clone --depth 1 https://github.com/benchmarking-initiative/Benchmark-Models-PEtab.git \ && export BENCHMARK_COLLECTION="$(pwd)/Benchmark-Models-PEtab/Benchmark-Models/" \ && AMICI_PARALLEL_COMPILE=2 tests/benchmark-models/test_benchmark_collection.sh + + # run gradient checks + - name: Run Gradient Checks + run: | + pip install git+https://github.com/ICB-DCM/fiddy.git \ + && cd tests/benchmark-models && pytest ./test_petab_benchmark.py # upload results - uses: actions/upload-artifact@v3 diff --git a/.gitignore b/.gitignore index 2559634002..9445cfa1b1 100644 --- a/.gitignore +++ b/.gitignore @@ -198,3 +198,6 @@ Benchmark-Models-PEtab/ /test_bmc/ CS_Signalling_ERBB_RAS_AKT/ +cache_fiddy/* +debug/* +tests/benchmark-models/cache_fiddy/* \ No newline at end of file diff --git a/python/sdist/amici/petab_objective.py b/python/sdist/amici/petab_objective.py index b1405a4d70..66d276d90e 100644 --- a/python/sdist/amici/petab_objective.py +++ b/python/sdist/amici/petab_objective.py @@ -24,7 +24,10 @@ from .logging import get_logger, log_execution_time from .petab_import import PREEQ_INDICATOR_ID, element_is_state from .parameter_mapping import ( - fill_in_parameters, ParameterMappingForCondition, ParameterMapping) + fill_in_parameters, + ParameterMappingForCondition, + ParameterMapping, +) logger = get_logger(__name__) @@ -53,9 +56,14 @@ def simulate_petab( log_level: int = logging.WARNING, num_threads: int = 1, failfast: bool = True, + scaled_gradients: bool = False, ) -> Dict[str, Any]: """Simulate PEtab model. + .. note:: + Regardless of `scaled_parameters`, unscaled sensitivities are returned, + unless `scaled_gradients=True`. + :param petab_problem: PEtab problem to work on. :param amici_model: @@ -87,6 +95,9 @@ def simulate_petab( :param failfast: Returns as soon as an integration failure is encountered, skipping any remaining simulations. + :param scaled_gradients: + Whether to compute gradients on parameter scale (``True``) or not + (``False``). :return: Dictionary of @@ -104,15 +115,13 @@ def simulate_petab( if solver is None: solver = amici_model.getSolver() - # Get parameters - if problem_parameters is None: - # Use PEtab nominal values as default - problem_parameters = {t.Index: getattr(t, NOMINAL_VALUE) for t in - petab_problem.parameter_df.itertuples()} - if scaled_parameters: - raise NotImplementedError( - "scaled_parameters=True in combination with " - "problem_parameters=None is currently not supported.") + # Switch to scaled parameters. + problem_parameters = _default_scaled_parameters( + petab_problem=petab_problem, + problem_parameters=problem_parameters, + scaled_parameters=scaled_parameters, + ) + scaled_parameters = True # number of amici simulations will be number of unique # (preequilibrationConditionId, simulationConditionId) pairs. 
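# Minimal usage sketch of the reworked parameter handling above (illustrative
# only, not part of the patch): `simulate_petab` now always converts the
# parameter vector to PEtab scale internally via `_default_scaled_parameters`,
# and the new `scaled_gradients` flag controls whether the returned `sllh` is
# on PEtab parameter scale or rescaled to linear scale. The YAML path below is
# a placeholder.
import amici
import amici.petab_import
import amici.petab_objective
import petab

petab_problem = petab.Problem.from_yaml("path/to/problem.yaml")  # placeholder
amici_model = amici.petab_import.import_petab_problem(petab_problem)
solver = amici_model.getSolver()
solver.setSensitivityOrder(amici.SensitivityOrder_first)

result = amici.petab_objective.simulate_petab(
    petab_problem=petab_problem,
    amici_model=amici_model,
    solver=solver,
    # problem_parameters=None now defaults to the PEtab nominal values,
    # also in combination with scaled_parameters=True
    scaled_parameters=True,
    scaled_gradients=True,  # gradient w.r.t. parameters on PEtab scale
)
llh = result[amici.petab_objective.LLH]
sllh = result[amici.petab_objective.SLLH]  # dict: PEtab parameter ID -> sensitivity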
@@ -153,6 +162,29 @@ def simulate_petab( # Compute total llh llh = sum(rdata['llh'] for rdata in rdatas) + # Compute total sllh + sllh = None + if solver.getSensitivityOrder() != amici.SensitivityOrder.none: + sllh = aggregate_sllh( + amici_model=amici_model, + rdatas=rdatas, + parameter_mapping=parameter_mapping, + petab_scale=scaled_parameters, + petab_problem=petab_problem, + edatas=edatas, + ) + if not scaled_gradients and sllh is not None: + sllh = { + parameter_id: rescale_sensitivity( + sensitivity=sensitivity, + parameter_value=problem_parameters[parameter_id], + old_scale=petab_problem.parameter_df.loc[ + parameter_id, PARAMETER_SCALE + ], + new_scale=LIN, + ) + for parameter_id, sensitivity in sllh.items() + } # Log results sim_cond = petab_problem.get_simulation_conditions_from_measurement_df() @@ -165,11 +197,158 @@ def simulate_petab( return { LLH: llh, + SLLH: sllh, RDATAS: rdatas, EDATAS: edatas, } +def aggregate_sllh( + amici_model: AmiciModel, + rdatas: Sequence[amici.ReturnDataView], + parameter_mapping: Optional[ParameterMapping], + edatas: List[AmiciExpData], + petab_scale: bool = True, + petab_problem: petab.Problem = None, +) -> Union[None, Dict[str, float]]: + """ + Aggregate likelihood gradient for all conditions, according to PEtab + parameter mapping. + + :param amici_model: + AMICI model from which ``rdatas`` were obtained. + :param rdatas: + Simulation results. + :param parameter_mapping: + PEtab parameter mapping to condition-specific simulation parameters. + :param edatas: + Experimental data used for simulation. + :param petab_scale: + Whether to check that sensitivities were computed with parameters on + the scales provided in the PEtab parameters table. + :param petab_problem: + The PEtab problem that defines the parameter scales. + + :return: + Aggregated likelihood sensitivities. + """ + accumulated_sllh = {} + model_parameter_ids = amici_model.getParameterIds() + + if petab_scale and petab_problem is None: + raise ValueError( + 'Please provide the PEtab problem, when using ' + '`petab_scale=True`.' + ) + + # Check for issues in all condition simulation results. + for rdata in rdatas: + # Condition failed during simulation. + if rdata.status != amici.AMICI_SUCCESS: + return None + # Condition simulation result does not provide SLLH. + if rdata.sllh is None: + raise ValueError( + 'The sensitivities of the likelihood for a condition were ' + 'not computed.' + ) + + for condition_parameter_mapping, edata, rdata in \ + zip(parameter_mapping, edatas, rdatas): + for sllh_parameter_index, condition_parameter_sllh in \ + enumerate(rdata.sllh): + # Get PEtab parameter ID + # Use ExpData if it provides a parameter list, else default to + # Model. + if edata.plist: + model_parameter_index = edata.plist[sllh_parameter_index] + else: + model_parameter_index = amici_model.plist(sllh_parameter_index) + model_parameter_id = model_parameter_ids[model_parameter_index] + petab_parameter_id = ( + condition_parameter_mapping + .map_sim_var[model_parameter_id] + ) + + # Initialize + if petab_parameter_id not in accumulated_sllh: + accumulated_sllh[petab_parameter_id] = 0 + + # Check that the scale is consistent + if petab_scale: + # `ParameterMappingForCondition` objects provide the scale in + # terms of `petab.C` constants already, not AMICI equivalents. 
+ model_parameter_scale = ( + condition_parameter_mapping + .scale_map_sim_var[model_parameter_id] + ) + petab_parameter_scale = ( + petab_problem + .parameter_df + .loc[petab_parameter_id, PARAMETER_SCALE] + ) + if model_parameter_scale != petab_parameter_scale: + raise ValueError( + f'The scale of the parameter `{petab_parameter_id}` ' + 'differs between the AMICI model ' + f'({model_parameter_scale}) and the PEtab problem ' + f'({petab_parameter_scale}).' + ) + + # Accumulate + accumulated_sllh[petab_parameter_id] += condition_parameter_sllh + + return accumulated_sllh + + +def rescale_sensitivity( + sensitivity: float, + parameter_value: float, + old_scale: str, + new_scale: str, +) -> float: + """Rescale a sensitivity between parameter scales. + + :param sensitivity: + The sensitivity corresponding to the parameter value. + :param parameter_value: + The parameter vector element, on ``old_scale``. + :param old_scale: + The scale of the parameter value. + :param new_scale: + The parameter scale on which to rescale the sensitivity. + + :return: + The rescaled sensitivity. + """ + LOG_E_10 = np.log(10) + + if old_scale == new_scale: + return sensitivity + + unscaled_parameter_value = petab.parameters.unscale( + parameter=parameter_value, + scale_str=old_scale, + ) + + scale = { + (LIN, LOG): lambda s: s * unscaled_parameter_value, + (LOG, LIN): lambda s: s / unscaled_parameter_value, + (LIN, LOG10): lambda s: s * (unscaled_parameter_value * LOG_E_10), + (LOG10, LIN): lambda s: s / (unscaled_parameter_value * LOG_E_10), + } + + scale[(LOG, LOG10)] = lambda s: scale[(LIN, LOG10)](scale[(LOG, LIN)](s)) + scale[(LOG10, LOG)] = lambda s: scale[(LIN, LOG)](scale[(LOG10, LIN)](s)) + + if (old_scale, new_scale) not in scale: + raise NotImplementedError( + f"Old scale: {old_scale}. New scale: {new_scale}." + ) + + return scale[(old_scale, new_scale)](sensitivity) + + def create_parameterized_edatas( amici_model: AmiciModel, petab_problem: petab.Problem, @@ -811,3 +990,39 @@ def rdatas_to_simulation_df( measurement_df=measurement_df) return df.rename(columns={MEASUREMENT: SIMULATION}) + + +def _default_scaled_parameters( + petab_problem: petab.Problem, + problem_parameters: Optional[Dict[str, float]] = None, + scaled_parameters: bool = False, +) -> Optional[Dict[str, float]]: + """ + Helper method to handle an unscaled or unspecified parameter vector. + + The parameter vector defaults to the nominal values in the PEtab + parameter table. + + Unscaled parameter values are scaled. + + :param petab_problem: + The PEtab problem. + :param problem_parameters: + Keys are PEtab parameter IDs, values are parameter values on the scale + defined in the PEtab parameter table. Defaults to the nominal values in + the PEtab parameter table. + :param scaled_parameters: + Whether `problem_parameters` are on the scale defined in the PEtab + parameter table. + + :return: + The scaled parameter vector. 
+ """ + if problem_parameters is None: + problem_parameters = dict(zip( + petab_problem.x_ids, + petab_problem.x_nominal_scaled, + )) + elif not scaled_parameters: + problem_parameters = petab_problem.scale_parameters(problem_parameters) + return problem_parameters diff --git a/python/tests/petab_test_problems/lotka_volterra/README.md b/python/tests/petab_test_problems/lotka_volterra/README.md new file mode 100644 index 0000000000..25fbc0cc17 --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/README.md @@ -0,0 +1,5 @@ +A Lotka-Volterra model, based on the model provided as an example in the `yaml2sbml` package: https://github.com/yaml2sbml-dev/yaml2sbml + +The PEtab problem can be created by running `bash model/convert_to_petab.sh` (test on Ubuntu 20.04) inside the `model` directory, in a Python 3 environment with `yaml2sbml`: https://pypi.org/project/yaml2sbml/ + +The model is augmented with new parameters `departure_prey` and `arrival_predator`, to allow for a steady-state under certain conditions. This results in a model that can pre-equilibrate, then switch to the expected oscillations of a Lotka-Volterra model. diff --git a/python/tests/petab_test_problems/lotka_volterra/model/convert_to_petab.sh b/python/tests/petab_test_problems/lotka_volterra/model/convert_to_petab.sh new file mode 100644 index 0000000000..7d1b4894ff --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/model/convert_to_petab.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -eou pipefail +script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +petab_path=$script_dir/../petab + +mkdir -p "${petab_path}" + +# Validate input yaml2sbml model +yaml2sbml_validate lotka_volterra.yaml + +# Copy measurements to PEtab directory +cp "${script_dir}/measurements.tsv" "$petab_path/measurements.tsv" + +# Write the PEtab problem +python "${script_dir}/writer.py" + +# Remove condition parameters from PEtab parameters table +for condition_parameter in beta delta departure_prey arrival_predator +do + sed -i "/^${condition_parameter}/d" "${petab_path}/parameter*" +done + +# Validate the PEtab problem +petablint -vy "${petab_path}/problem.yaml" diff --git a/python/tests/petab_test_problems/lotka_volterra/model/lotka_volterra.yaml b/python/tests/petab_test_problems/lotka_volterra/model/lotka_volterra.yaml new file mode 100644 index 0000000000..27fd6ec29a --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/model/lotka_volterra.yaml @@ -0,0 +1,69 @@ +odes: + - stateId: prey + rightHandSide: alpha * prey - beta * prey * predator - departure_prey * prey * prey + initialValue: 2 + + - stateId: predator + rightHandSide: delta * prey * predator - gamma * predator + arrival_predator * prey + initialValue: 2 + +parameters: + - parameterId: alpha + nominalValue: 2 + parameterScale: log10 + lowerBound: 0.1 + upperBound: 10 + estimate: 1 + + - parameterId: beta + nominalValue: 4 + parameterScale: log10 + estimate: 0 + + - parameterId: gamma + nominalValue: 3 + parameterScale: log10 + lowerBound: 0.1 + upperBound: 10 + estimate: 1 + + - parameterId: delta + nominalValue: 3 + parameterScale: log10 + estimate: 0 + + - parameterId: departure_prey + nominalValue: 3 + parameterScale: log10 + estimate: 0 + + - parameterId: arrival_predator + nominalValue: 3 + parameterScale: log10 + estimate: 0 + +observables: + - observableId: observable_prey + observableFormula: log10(prey) + observableTransformation: lin + noiseFormula: noiseParameter1_observable_prey + noiseDistribution: 
normal + +conditions: + - conditionId: weak_predator + beta: 2 + delta: 3 + departure_prey: 0 + arrival_predator: 0 + + - conditionId: strong_predator + beta: 4 + delta: 3 + departure_prey: 0 + arrival_predator: 0 + + - conditionId: no_interaction + beta: 0 + delta: 0 + departure_prey: 3 + arrival_predator: 3 diff --git a/python/tests/petab_test_problems/lotka_volterra/model/measurements.tsv b/python/tests/petab_test_problems/lotka_volterra/model/measurements.tsv new file mode 100644 index 0000000000..2875e3fe3d --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/model/measurements.tsv @@ -0,0 +1,43 @@ +observableId preequilibrationConditionId simulationConditionId measurement time noiseParameters +observable_prey no_interaction weak_predator 1 0 1.1 +observable_prey no_interaction weak_predator 1 0.1 1.1 +observable_prey no_interaction weak_predator 1 0.2 1.1 +observable_prey no_interaction weak_predator 1 0.3 1.1 +observable_prey no_interaction weak_predator 1 0.4 1.1 +observable_prey no_interaction weak_predator 1 0.5 1.2 +observable_prey no_interaction weak_predator 1 0.6 1.2 +observable_prey no_interaction weak_predator 1 0.7 1.2 +observable_prey no_interaction weak_predator 1 0.8 1.2 +observable_prey no_interaction weak_predator 1 0.9 1.2 +observable_prey no_interaction weak_predator 1 1.0 1.2 +observable_prey no_interaction weak_predator 1 1.1 1.2 +observable_prey no_interaction weak_predator 1 1.2 1.2 +observable_prey no_interaction weak_predator 1 1.3 1.2 +observable_prey no_interaction weak_predator 1 1.4 1.1 +observable_prey no_interaction weak_predator 1 1.5 1.1 +observable_prey no_interaction weak_predator 1 1.6 1.1 +observable_prey no_interaction weak_predator 1 1.7 1.1 +observable_prey no_interaction weak_predator 1 1.8 1.1 +observable_prey no_interaction weak_predator 1 1.9 1.1 +observable_prey no_interaction weak_predator 1 2.0 1.1 +observable_prey no_interaction strong_predator 0.5 0 1.1 +observable_prey no_interaction strong_predator 0.5 0.1 1.1 +observable_prey no_interaction strong_predator 0.5 0.2 1.1 +observable_prey no_interaction strong_predator 0.5 0.3 1.1 +observable_prey no_interaction strong_predator 0.5 0.4 1.1 +observable_prey no_interaction strong_predator 0.5 0.5 1.2 +observable_prey no_interaction strong_predator 0.5 0.6 1.2 +observable_prey no_interaction strong_predator 0.5 0.7 1.2 +observable_prey no_interaction strong_predator 0.5 0.8 1.2 +observable_prey no_interaction strong_predator 0.5 0.9 1.2 +observable_prey no_interaction strong_predator 0.5 1.0 1.2 +observable_prey no_interaction strong_predator 0.5 1.1 1.2 +observable_prey no_interaction strong_predator 0.5 1.2 1.2 +observable_prey no_interaction strong_predator 0.5 1.3 1.2 +observable_prey no_interaction strong_predator 0.5 1.4 1.1 +observable_prey no_interaction strong_predator 0.5 1.5 1.1 +observable_prey no_interaction strong_predator 0.5 1.6 1.1 +observable_prey no_interaction strong_predator 0.5 1.7 1.1 +observable_prey no_interaction strong_predator 0.5 1.8 1.1 +observable_prey no_interaction strong_predator 0.5 1.9 1.1 +observable_prey no_interaction strong_predator 0.5 2.0 1.1 diff --git a/python/tests/petab_test_problems/lotka_volterra/model/writer.py b/python/tests/petab_test_problems/lotka_volterra/model/writer.py new file mode 100644 index 0000000000..680cbe43e6 --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/model/writer.py @@ -0,0 +1,19 @@ +from pathlib import Path + +import petab +import yaml2sbml + + +yaml2sbml_yaml = 'lotka_volterra.yaml' 
+petab_path = Path(__file__).parent.parent / 'petab' +petab_yaml = 'problem.yaml' +measurements_tsv = 'measurements.tsv' +model_name = 'lotka_volterra' + +yaml2sbml.yaml2petab( + yaml_dir=yaml2sbml_yaml, + output_dir=str(petab_path), + sbml_name=model_name, + petab_yaml_name=petab_yaml, + measurement_table_name=measurements_tsv, +) diff --git a/python/tests/petab_test_problems/lotka_volterra/petab/experimental_conditions_lotka_volterra.tsv b/python/tests/petab_test_problems/lotka_volterra/petab/experimental_conditions_lotka_volterra.tsv new file mode 100644 index 0000000000..827bc0b491 --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/petab/experimental_conditions_lotka_volterra.tsv @@ -0,0 +1,4 @@ +conditionId beta delta departure_prey arrival_predator +weak_predator 2.0 3.0 0.0 0.0 +strong_predator 4.0 3.0 0.0 0.0 +no_interaction 0.0 0.0 3.0 3.0 diff --git a/python/tests/petab_test_problems/lotka_volterra/petab/lotka_volterra.xml b/python/tests/petab_test_problems/lotka_volterra/petab/lotka_volterra.xml new file mode 100644 index 0000000000..3553564e1e --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/petab/lotka_volterra.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + alpha + prey + + + + beta + prey + predator + + + + + departure_prey + prey + prey + + + + + + + + + + + + + delta + prey + predator + + + + gamma + predator + + + + + arrival_predator + prey + + + + + + + diff --git a/python/tests/petab_test_problems/lotka_volterra/petab/measurements.tsv b/python/tests/petab_test_problems/lotka_volterra/petab/measurements.tsv new file mode 100644 index 0000000000..2875e3fe3d --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/petab/measurements.tsv @@ -0,0 +1,43 @@ +observableId preequilibrationConditionId simulationConditionId measurement time noiseParameters +observable_prey no_interaction weak_predator 1 0 1.1 +observable_prey no_interaction weak_predator 1 0.1 1.1 +observable_prey no_interaction weak_predator 1 0.2 1.1 +observable_prey no_interaction weak_predator 1 0.3 1.1 +observable_prey no_interaction weak_predator 1 0.4 1.1 +observable_prey no_interaction weak_predator 1 0.5 1.2 +observable_prey no_interaction weak_predator 1 0.6 1.2 +observable_prey no_interaction weak_predator 1 0.7 1.2 +observable_prey no_interaction weak_predator 1 0.8 1.2 +observable_prey no_interaction weak_predator 1 0.9 1.2 +observable_prey no_interaction weak_predator 1 1.0 1.2 +observable_prey no_interaction weak_predator 1 1.1 1.2 +observable_prey no_interaction weak_predator 1 1.2 1.2 +observable_prey no_interaction weak_predator 1 1.3 1.2 +observable_prey no_interaction weak_predator 1 1.4 1.1 +observable_prey no_interaction weak_predator 1 1.5 1.1 +observable_prey no_interaction weak_predator 1 1.6 1.1 +observable_prey no_interaction weak_predator 1 1.7 1.1 +observable_prey no_interaction weak_predator 1 1.8 1.1 +observable_prey no_interaction weak_predator 1 1.9 1.1 +observable_prey no_interaction weak_predator 1 2.0 1.1 +observable_prey no_interaction strong_predator 0.5 0 1.1 +observable_prey no_interaction strong_predator 0.5 0.1 1.1 +observable_prey no_interaction strong_predator 0.5 0.2 1.1 +observable_prey no_interaction strong_predator 0.5 0.3 1.1 +observable_prey no_interaction strong_predator 0.5 0.4 1.1 +observable_prey no_interaction strong_predator 0.5 0.5 1.2 +observable_prey no_interaction strong_predator 0.5 0.6 1.2 +observable_prey no_interaction strong_predator 0.5 0.7 1.2 +observable_prey 
no_interaction strong_predator 0.5 0.8 1.2 +observable_prey no_interaction strong_predator 0.5 0.9 1.2 +observable_prey no_interaction strong_predator 0.5 1.0 1.2 +observable_prey no_interaction strong_predator 0.5 1.1 1.2 +observable_prey no_interaction strong_predator 0.5 1.2 1.2 +observable_prey no_interaction strong_predator 0.5 1.3 1.2 +observable_prey no_interaction strong_predator 0.5 1.4 1.1 +observable_prey no_interaction strong_predator 0.5 1.5 1.1 +observable_prey no_interaction strong_predator 0.5 1.6 1.1 +observable_prey no_interaction strong_predator 0.5 1.7 1.1 +observable_prey no_interaction strong_predator 0.5 1.8 1.1 +observable_prey no_interaction strong_predator 0.5 1.9 1.1 +observable_prey no_interaction strong_predator 0.5 2.0 1.1 diff --git a/python/tests/petab_test_problems/lotka_volterra/petab/observables_lotka_volterra.tsv b/python/tests/petab_test_problems/lotka_volterra/petab/observables_lotka_volterra.tsv new file mode 100644 index 0000000000..10cbcf2790 --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/petab/observables_lotka_volterra.tsv @@ -0,0 +1,2 @@ +observableId observableFormula noiseFormula observableTransformation noiseDistribution +observable_prey log10(prey) noiseParameter1_observable_prey lin normal diff --git a/python/tests/petab_test_problems/lotka_volterra/petab/parameters_lotka_volterra.tsv b/python/tests/petab_test_problems/lotka_volterra/petab/parameters_lotka_volterra.tsv new file mode 100644 index 0000000000..e5e65b225b --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/petab/parameters_lotka_volterra.tsv @@ -0,0 +1,3 @@ +parameterId parameterScale lowerBound upperBound estimate nominalValue +alpha log10 0.1 10.0 1.0 2.0 +gamma log10 0.1 10.0 1.0 3.0 diff --git a/python/tests/petab_test_problems/lotka_volterra/petab/problem.yaml b/python/tests/petab_test_problems/lotka_volterra/petab/problem.yaml new file mode 100644 index 0000000000..d980984881 --- /dev/null +++ b/python/tests/petab_test_problems/lotka_volterra/petab/problem.yaml @@ -0,0 +1,11 @@ +format_version: 1 +parameter_file: parameters_lotka_volterra.tsv +problems: +- condition_files: + - experimental_conditions_lotka_volterra.tsv + measurement_files: + - measurements.tsv + observable_files: + - observables_lotka_volterra.tsv + sbml_files: + - lotka_volterra.xml diff --git a/python/tests/test_petab_objective.py b/python/tests/test_petab_objective.py new file mode 100755 index 0000000000..5dfe7db890 --- /dev/null +++ b/python/tests/test_petab_objective.py @@ -0,0 +1,87 @@ +"""Tests for petab_objective.py.""" + +from functools import partial +from pathlib import Path + +import amici +import amici.petab_import +import amici.petab_objective +import numpy as np +import pandas as pd +import petab +import pytest +from amici.petab_objective import SLLH + + +# Absolute and relative tolerances for finite difference gradient checks. 
+ATOL: float = 1e-3 +RTOL: float = 1e-3 + + +@pytest.fixture +def lotka_volterra() -> petab.Problem: + return petab.Problem.from_yaml(str( + Path(__file__).parent + / 'petab_test_problems' + / 'lotka_volterra' + / 'petab' + / 'problem.yaml' + )) + + +def test_simulate_petab_sensitivities(lotka_volterra): + petab_problem = lotka_volterra + amici_model = amici.petab_import.import_petab_problem(petab_problem) + amici_solver = amici_model.getSolver() + + amici_solver.setSensitivityOrder(amici.SensitivityOrder_first) + amici_solver.setMaxSteps(int(1e5)) + + problem_parameters = dict(zip( + petab_problem.x_ids, + petab_problem.x_nominal, + )) + + results = {} + for scaled_parameters in [True, False]: + for scaled_gradients in [True, False]: + _problem_parameters = problem_parameters.copy() + if scaled_parameters: + _problem_parameters = \ + petab_problem.scale_parameters(problem_parameters) + results[(scaled_parameters, scaled_gradients)] = pd.Series( + amici.petab_objective.simulate_petab( + petab_problem=petab_problem, + amici_model=amici_model, + solver=amici_solver, + problem_parameters=_problem_parameters, + scaled_parameters=scaled_parameters, + scaled_gradients=scaled_gradients, + )[SLLH] + ) + + # Computed previously, is the same as a central difference gradient + # check, to >4 s.f. + expected_results_scaled = pd.Series({ + "alpha": -2.112626, + "gamma": 21.388535, + }) + expected_results_unscaled = pd.Series({ + "alpha": -0.458800, + "gamma": 3.096308, + }) + + assert_equal = partial(pd.testing.assert_series_equal, rtol=1e-3) + + # `scaled_gradients` affects gradients, `scaled_parameters` does not. + assert_equal(results[(True, True)], expected_results_scaled) + assert_equal(results[(False, True)], expected_results_scaled) + + assert_equal(results[(True, False)], expected_results_unscaled) + assert_equal(results[(False, False)], expected_results_unscaled) + + # The test gradients are scaled correctly. + assert_equal( + results[(True, True)], + results[(True, False)] * pd.Series(problem_parameters) * np.log(10), + ) diff --git a/scripts/installAmiciSource.sh b/scripts/installAmiciSource.sh index be027946ca..7f0433b7c6 100755 --- a/scripts/installAmiciSource.sh +++ b/scripts/installAmiciSource.sh @@ -30,6 +30,6 @@ fi pip install -U "setuptools<64" pip install --upgrade pip wheel pip install --upgrade pip scipy matplotlib coverage pytest \ - pytest-cov cmake_build_extension numpy + pytest-cov cmake_build_extension numpy pip install --verbose -e ${AMICI_PATH}/python/sdist[petab,test,pysb] --no-build-isolation deactivate diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py new file mode 100755 index 0000000000..6ed237190d --- /dev/null +++ b/tests/benchmark-models/test_petab_benchmark.py @@ -0,0 +1,173 @@ +"""Tests for simulate_petab on PEtab benchmark problems.""" + +from pathlib import Path + +import amici +import amici.petab_import +import amici.petab_objective +import numpy as np +import pandas as pd +import petab +import pytest + +from fiddy import get_derivative, MethodId +from fiddy.success import Consistency +from fiddy.derivative_check import NumpyIsCloseDerivativeCheck +from fiddy.extensions.amici import ( + simulate_petab_to_cached_functions, +) + + +# Absolute and relative tolerances for finite difference gradient checks. 
+ATOL: float = 1e-3 +RTOL: float = 1e-2 + +benchmark_path = Path(__file__).parent.parent.parent / "Benchmark-Models-PEtab" / "Benchmark-Models" +# reuse compiled models from test_benchmark_collection.sh +benchmark_outdir = Path(__file__).parent.parent.parent / "test_bmc" +models = [ + str(petab_path.stem) + for petab_path in benchmark_path.glob("*") if petab_path.is_dir() + if str(petab_path.stem) not in ( + # excluded due to excessive runtime + 'Bachmann_MSB2011', + 'Chen_MSB2009', + 'Froehlich_CellSystems2018', + 'Raimundez_PCB2020', + 'Lucarelli_CellSystems2018', + 'Isensee_JCB2018', + 'Beer_MolBioSystems2014', + 'Alkan_SciSignal2018', + # excluded due to excessive numerical failures + 'Crauste_CellSystems2017', + 'Fujita_SciSignal2010', + ) +] + +debug = False +if debug: + debug_path = Path(__file__).parent / "debug" + debug_path.mkdir(exist_ok=True, parents=True) + + +@pytest.mark.parametrize("scale", (True, False)) +@pytest.mark.parametrize("model", models) +def test_benchmark_gradient(model, scale): + if not scale and model in ( + 'Smith_BMCSystBiol2013', + 'Brannmark_JBC2010', + 'Elowitz_Nature2000', + 'Borghans_BiophysChem1997', + 'Sneyd_PNAS2002', + 'Bertozzi_PNAS2020', + 'Okuonghae_ChaosSolitonsFractals2020', + ): + # not really worth the effort trying to fix these cases if they + # only fail on linear scale + pytest.skip() + + petab_problem = petab.Problem.from_yaml(benchmark_path / model / (model + ".yaml")) + petab.flatten_timepoint_specific_output_overrides(petab_problem) + + # Only compute gradient for estimated parameters. + parameter_df_free = petab_problem.parameter_df.loc[petab_problem.x_free_ids] + parameter_ids = list(parameter_df_free.index) + + # Setup AMICI objects. + amici_model = amici.petab_import.import_petab_problem( + petab_problem, + model_output_dir=benchmark_outdir / model, + ) + amici_solver = amici_model.getSolver() + amici_solver.setAbsoluteTolerance(1e-12) + amici_solver.setRelativeTolerance(1e-12) + if model in ( + 'Smith_BMCSystBiol2013', + 'Oliveira_NatCommun2021', + ): + amici_solver.setAbsoluteTolerance(1e-10) + amici_solver.setRelativeTolerance(1e-10) + elif model in ( + 'Okuonghae_ChaosSolitonsFractals2020', + ): + amici_solver.setAbsoluteTolerance(1e-14) + amici_solver.setRelativeTolerance(1e-14) + amici_solver.setMaxSteps(int(1e5)) + + if model in ( + 'Brannmark_JBC2010', + ): + amici_model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrationOnly) + + amici_function, amici_derivative = simulate_petab_to_cached_functions( + petab_problem=petab_problem, + parameter_ids=parameter_ids, + amici_model=amici_model, + solver=amici_solver, + scaled_parameters=scale, + scaled_gradients=scale, + cache=not debug, + ) + + noise_level = 0.1 + + np.random.seed(0) + if scale: + point = np.asarray(list( + petab_problem.scale_parameters(dict(parameter_df_free.nominalValue)).values() + )) + point_noise = np.random.randn(len(point)) * noise_level + else: + point = parameter_df_free.nominalValue.values + point_noise = np.random.randn(len(point)) * point * noise_level + point += point_noise # avoid small gradients at nominal value + + expected_derivative = amici_derivative(point) + + sizes = [ + 1e-1, + 1e-2, + 1e-3, + 1e-4, + 1e-5, + ] + if model in ( + 'Okuonghae_ChaosSolitonsFractals2020', + ): + sizes.insert(0, 0.2) + + derivative = get_derivative( + function=amici_function, + point=point, + sizes=sizes, + direction_ids=parameter_ids, + method_ids=[MethodId.CENTRAL, MethodId.FORWARD, MethodId.BACKWARD], + 
success_checker=Consistency(atol=1e-4, rtol=0.2), + expected_result=expected_derivative, + relative_sizes=not scale, + ) + success = False + if derivative.df.success.all(): + check = NumpyIsCloseDerivativeCheck( + derivative=derivative, + expectation=expected_derivative, + point=point, + ) + success = check(rtol=RTOL, atol=ATOL) + + if debug: + df = pd.DataFrame([ + { + ('fd', r.metadata['size_absolute'], str(r.method_id)): r.value + for c in d.computers + for r in c.results + } for d in derivative.directional_derivatives + ], index=parameter_ids) + df[('fd', 'full', '')] = derivative.series.values + df[('amici', '', '')] = expected_derivative + + file_name = f"{model}_scale={scale}.tsv" + df.to_csv(debug_path / file_name, sep='\t') + + # The gradients for all parameters are correct. + assert success, derivative.df
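# Numeric sanity check (illustrative and standalone; `f` below is a
# hypothetical objective, not an AMICI call) of the chain rule implemented by
# `rescale_sensitivity` and asserted at the end of test_petab_objective.py:
# for a parameter p on log10 scale, d(llh)/d(log10 p) = d(llh)/dp * p * ln(10).
import numpy as np


def f(p: float) -> float:
    # hypothetical smooth objective of a single parameter
    return np.sin(p) + p ** 2


p = 2.0
eps = 1e-6

# gradient on linear scale, central finite differences in p
dfdp = (f(p + eps) - f(p - eps)) / (2 * eps)

# gradient on log10 scale, central finite differences in x = log10(p)
x = np.log10(p)
dfdx = (f(10 ** (x + eps)) - f(10 ** (x - eps))) / (2 * eps)

# agreement via the (LIN, LOG10) rescaling rule: s_log10 = s_lin * p * ln(10)
assert np.isclose(dfdx, dfdp * p * np.log(10), rtol=1e-6)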