diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..dfe077042 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/.gitignore b/.gitignore index 7978daaa3..838f372b0 100644 --- a/.gitignore +++ b/.gitignore @@ -301,3 +301,6 @@ $RECYCLE.BIN/ *.lnk # End of https://www.toptal.com/developers/gitignore/api/python,macos,windows,linux,c + +# Visual Studio Code settings +.vscode/* diff --git a/examples/scripts/parameter_estimation_from_Chen_data.py b/examples/scripts/parameter_estimation_from_Chen_data.py new file mode 100644 index 000000000..aea0e2b89 --- /dev/null +++ b/examples/scripts/parameter_estimation_from_Chen_data.py @@ -0,0 +1,64 @@ +import pybop +import pandas as pd +import numpy as np +from os import path + +# Load dataset +data_path = path.join(pybop.script_path, "..", "examples/scripts/Chen_example.csv") +measurements = pd.read_csv(data_path, comment="#").to_numpy() +observations = [ + pybop.Dataset("Time [s]", measurements[:, 0]), + pybop.Dataset("Current function [A]", measurements[:, 1]), + pybop.Dataset("Voltage [V]", measurements[:, 2]), +] + +# Define model +# parameter_set = pybop.ParameterSet("pybamm", "Chen2020") +model = pybop.models.lithium_ion.SPM() + +# Define fitting parameters +params = [ + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.75, 0.05), + bounds=[0.65, 0.85], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.65, 0.05), + bounds=[0.55, 0.75], + ), +] + +# Define the cost to optimise +cost = pybop.RMSE() +signal = "Voltage [V]" + +# Select optimiser +optimiser = pybop.NLoptOptimize(x0=params) + +# Build the optimisation problem +parameterisation = pybop.Optimisation( + cost=cost, + dataset=observations, + signal=signal, + model=model, + optimiser=optimiser, + fit_parameters=params, +) + +# Run the optimisation problem +x, output, final_cost, num_evals = parameterisation.run() + +print("Estimated parameters:", x) +print("Final cost:", final_cost) + + +# get MAP estimate, starting at a random initial point in parameter space +# optimisation.map(x0=[p.sample() for p in params]) + +# or sample from posterior +# optimisation.sample(1000, n_chains=4, ....) 
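As a usage sketch following the Chen-data example above, the fitted values returned by run() can be pushed back through the model and overlaid on the measured voltage. This assumes matplotlib is available (it is not a dependency added by this diff) and relies on the model.simulate(parameters=...) API and model.times attribute introduced later in this diff.

import matplotlib.pyplot as plt  # assumed available; not a dependency added by this diff

# Re-simulate with the fitted values and overlay the result on the measurements
for param, value in zip(params, x):
    param.update(value=value)
prediction = model.simulate(parameters=params)[signal].data

plt.plot(measurements[:, 0], measurements[:, 2], label="Measured")
plt.plot(model.times, prediction, "--", label="Fitted SPM")
plt.xlabel("Time [s]")
plt.ylabel("Voltage [V]")
plt.legend()
plt.show()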
+ +# or SOBER +# optimisation.sober() diff --git a/examples/scripts/parameter_estimation_from_synthetic_data.py b/examples/scripts/parameter_estimation_from_synthetic_data.py new file mode 100644 index 000000000..900dea623 --- /dev/null +++ b/examples/scripts/parameter_estimation_from_synthetic_data.py @@ -0,0 +1,90 @@ +import pybop +import pybamm +import pandas as pd +import numpy as np + + +def getdata(x0): + # Define the "ground truth" model with the default parameter set + model = pybamm.lithium_ion.SPM() + params = model.default_parameter_values + + # Overwrite the uncertain parameters + params.update( + { + "Negative electrode active material volume fraction": x0[0], + "Positive electrode active material volume fraction": x0[1], + } + ) + + # Define the experimental protocol + experiment = pybamm.Experiment( + [ + ( + "Discharge at 2C for 5 minutes (1 second period)", + "Rest for 2 minutes (1 second period)", + "Charge at 1C for 5 minutes (1 second period)", + "Rest for 2 minutes (1 second period)", + ), + ] + * 2 + ) + + # Run a forward simulation + sim = pybamm.Simulation(model, experiment=experiment, parameter_values=params) + + # Return the simulation results + return sim.solve() + + +# Define the initial values of the uncertain parameters +x0 = np.array([0.55, 0.63]) + +# Generate observations +solution = getdata(x0) +observations = [ + pybop.Dataset("Time [s]", solution["Time [s]"].data), + pybop.Dataset("Current function [A]", solution["Current [A]"].data), + pybop.Dataset("Voltage [V]", solution["Terminal voltage [V]"].data), +] + +# Define the model with the default parameter set +model = pybop.models.lithium_ion.SPM() +model.parameter_set = model.pybamm_model.default_parameter_values + +# Initialise the fitting parameters +params = [ + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.5, 0.05), + bounds=[0.35, 0.75], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.65, 0.05), + bounds=[0.45, 0.85], + ), +] + +# Define the cost to optimise +cost = pybop.RMSE() +signal = "Voltage [V]" + +# Select optimiser +optimiser = pybop.NLoptOptimize(x0=params) + +# Build the optimisation problem +parameterisation = pybop.Optimisation( + cost=cost, + dataset=observations, + signal=signal, + model=model, + optimiser=optimiser, + fit_parameters=params, +) + +# Run the optimisation problem +x, output, final_cost, num_evals = parameterisation.run() + +print("Estimated parameters:", x) # x = [0.54452026, 0.63064801] +print("Final cost:", final_cost) diff --git a/examples/scripts/rmse-estimisation.py b/examples/scripts/rmse-estimisation.py deleted file mode 100644 index 1e82d1df6..000000000 --- a/examples/scripts/rmse-estimisation.py +++ /dev/null @@ -1,50 +0,0 @@ -import pybop -import pandas as pd -import numpy as np - -# Form observations -Measurements = pd.read_csv("examples/scripts/Chen_example.csv", comment="#").to_numpy() -observations = [ - pybop.Observed("Time [s]", Measurements[:, 0]), - pybop.Observed("Current function [A]", Measurements[:, 1]), - pybop.Observed("Voltage [V]", Measurements[:, 2]), -] - -# Define model -# parameter_set = pybop.ParameterSet("pybamm", "Chen2020") -model = pybop.models.lithium_ion.SPM() - -# Fitting parameters -params = [ - pybop.Parameter( - "Negative electrode active material volume fraction", - prior=pybop.Gaussian(0.75, 0.05), - bounds=[0.65, 0.85], - ), - pybop.Parameter( - "Positive electrode active material volume fraction", - 
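One refinement to the synthetic-data example above (a sketch, not part of this diff): corrupting the simulated voltage with zero-mean Gaussian noise before fitting gives a more realistic test of the estimator than fitting noise-free data.

import numpy as np

# sigma is an assumed noise level, chosen for illustration only
sigma = 0.005
noisy_voltage = solution["Terminal voltage [V]"].data + np.random.normal(
    0, sigma, len(solution["Terminal voltage [V]"].data)
)
observations[2] = pybop.Dataset("Voltage [V]", noisy_voltage)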
prior=pybop.Gaussian(0.65, 0.05), - bounds=[0.55, 0.75], - ), -] - -parameterisation = pybop.Parameterisation( - model, observations=observations, fit_parameters=params -) - -# get RMSE estimate using NLOpt -results, last_optim, num_evals = parameterisation.rmse( - signal="Voltage [V]", method="nlopt" -) - -# get MAP estimate, starting at a random initial point in parameter space -# parameterisation.map(x0=[p.sample() for p in params]) - -# or sample from posterior -# parameterisation.sample(1000, n_chains=4, ....) - -# or SOBER -# parameterisation.sober() - - -# Optimisation = pybop.optimisation(model, cost=cost, parameters=parameters, observation=observation) diff --git a/pybop/__init__.py b/pybop/__init__.py index 6b5cfe509..e00e3d3c1 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -1,5 +1,5 @@ # -# Root of the pybop module. +# Root of the PyBOP module. # Provides access to all shared functionality (models, solvers, etc.). # # This file is adapted from Pints @@ -7,7 +7,7 @@ # import sys -import os +from os import path # # Version info @@ -21,31 +21,50 @@ # loss of information FLOAT_FORMAT = "{: .17e}" # Absolute path to the PyBOP repo -script_path = os.path.abspath(__file__) +script_path = path.dirname(__file__) # -# Model Classes +# Cost function class # -from .models import BaseModel, lithium_ion +from .costs.error_costs import RMSE # -# Parameterisation class +# Dataset class # -from .identification import Parameterisation, ParameterSet, Parameter, Observed +from .datasets.base_dataset import Dataset # -# Priors class +# Model classes # -from .priors import Gaussian, Uniform, Exponential +from .models.base_model import BaseModel +from .models import lithium_ion # -# Optimisation class +# Main optimisation class # -from .optimisation import BaseOptimisation -from .optimisation.NLoptOptimize import NLoptOptimize -from .optimisation.SciPyMinimize import SciPyMinimize +from .optimisation import Optimisation # -# Remove any imported modules, so we don't expose them as part of pybop +# Optimiser class +# +from .optimisers.base_optimiser import BaseOptimiser +from .optimisers.nlopt_optimize import NLoptOptimize +from .optimisers.scipy_minimize import SciPyMinimize +from .optimisers.pints_optimiser import PintsOptimiser, PintsError, PintsBoundaries + +# +# Parameter classes +# +from .parameters.base_parameter import Parameter +from .parameters.base_parameter_set import ParameterSet +from .parameters.priors import Gaussian, Uniform, Exponential + +# +# Plotting class +# +from .plotting.quick_plot import QuickPlot + +# +# Remove any imported modules, so we don't expose them as part of PyBOP # del sys diff --git a/pybop/optimisation/__init__.py b/pybop/costs/__init__.py similarity index 100% rename from pybop/optimisation/__init__.py rename to pybop/costs/__init__.py diff --git a/pybop/costs/error_costs.py b/pybop/costs/error_costs.py new file mode 100644 index 000000000..42b5c8bed --- /dev/null +++ b/pybop/costs/error_costs.py @@ -0,0 +1,93 @@ +import pybop +import numpy as np + + +class RMSE: + """ + Defines the root mean square error cost function. 
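For reference, the quantity computed by RMSE.compute below is sqrt(mean((prediction - target)**2)); a minimal standalone check with NumPy:

import numpy as np

prediction = np.array([3.70, 3.60, 3.50])
target = np.array([3.71, 3.58, 3.52])
rmse = np.sqrt(np.mean((prediction - target) ** 2))  # approximately 0.0173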
+ """ + + def __init__(self): + self.name = "RMSE" + + def compute(self, prediction, target): + # Check compatibility + if len(prediction) != len(target): + print( + "Length of vectors:", + len(prediction), + len(target), + ) + raise ValueError( + "Measurement and simulated data length mismatch, potentially due to reaching a voltage cut-off" + ) + + print("Last Values:", prediction[-1], target[-1]) + + # Compute the cost + try: + cost = np.sqrt(np.mean((prediction - target) ** 2)) + except: + print("Error in RMSE calculation") + + return cost + + +class MLE: + """ + Defines the cost function for maximum likelihood estimation. + """ + + def __init__(self): + self.name = "MLE" + + def compute(self, prediction, target): + # Compute the cost + try: + cost = 0 # update with MLE residual + except: + print("Error in MLE calculation") + + return cost + + +class PEM: + """ + Defines the cost function for prediction error minimisation. + """ + + def __init__(self): + self.name = "PEM" + + def compute(self, prediction, target): + # Compute the cost + try: + cost = 0 # update with MLE residual + except: + print("Error in PEM calculation") + + return cost + + +class MAP: + """ + Defines the cost function for maximum a posteriori estimation. + """ + + def __init__(self): + self.name = "MAP" + + def compute(self, prediction, target): + # Compute the cost + try: + cost = 0 # update with MLE residual + except: + print("Error in MAP calculation") + + return cost + + def sample(self, n_chains): + """ + Sample from the posterior distribution. + """ + pass diff --git a/pybop/datasets/__init__.py b/pybop/datasets/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pybop/identification/observations.py b/pybop/datasets/base_dataset.py similarity index 76% rename from pybop/identification/observations.py rename to pybop/datasets/base_dataset.py index e6db914bf..f5b2e49a3 100644 --- a/pybop/identification/observations.py +++ b/pybop/datasets/base_dataset.py @@ -3,9 +3,9 @@ import numpy as np -class Observed: +class Dataset: """ - Class for experimental Observations. + Class for experimental observations. """ def __init__(self, name, data): @@ -13,7 +13,7 @@ def __init__(self, name, data): self.data = data def __repr__(self): - return f"Observation: {self.name} \n Data: {self.data}" + return f"Dataset: {self.name} \n Data: {self.data}" def Interpolant(self): if self.variable == "time": diff --git a/pybop/identification/__init__.py b/pybop/identification/__init__.py deleted file mode 100644 index 4bcb89c1d..000000000 --- a/pybop/identification/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .parameter_set import ParameterSet -from .parameter import Parameter -from .observations import Observed -from .parameterisation import Parameterisation diff --git a/pybop/identification/parameterisation.py b/pybop/identification/parameterisation.py deleted file mode 100644 index 4a2cee9d5..000000000 --- a/pybop/identification/parameterisation.py +++ /dev/null @@ -1,128 +0,0 @@ -import pybop -import numpy as np - - -class Parameterisation: - """ - Parameterisation class for pybop. 
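The MLE, PEM and MAP classes above are placeholders (cost = 0); one concrete form the MLE residual could take, assuming i.i.d. Gaussian measurement noise with a known standard deviation, is the negative log-likelihood sketched here (not part of this diff):

import numpy as np


def gaussian_negative_log_likelihood(prediction, target, sigma=0.01):
    # Negative log-likelihood under i.i.d. Gaussian noise; sigma is an assumed noise level
    residuals = prediction - target
    n = len(residuals)
    return 0.5 * n * np.log(2 * np.pi * sigma**2) + np.sum(residuals**2) / (2 * sigma**2)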
- """ - - def __init__( - self, - model, - observations, - fit_parameters, - x0=None, - check_model=True, - init_soc=None, - verbose=False, - ): - self.model = model - self.verbose = verbose - self.fit_dict = {} - self.fit_parameters = {o.name: o for o in fit_parameters} - self.observations = {o.name: o for o in observations} - self.model.n_parameters = len(self.fit_dict) - - # Check that the observations contain time and current - for name in ["Time [s]", "Current function [A]"]: - if name not in self.observations: - raise ValueError(f"expected {name} in list of observations") - - # Set bounds - self.bounds = dict( - lower=[self.fit_parameters[p].bounds[0] for p in self.fit_parameters], - upper=[self.fit_parameters[p].bounds[1] for p in self.fit_parameters], - ) - - # Sample from prior for x0 - if x0 is None: - self.x0 = np.zeros(len(self.fit_parameters)) - for i, j in enumerate(self.fit_parameters): - self.x0[i] = self.fit_parameters[j].prior.rvs(1)[ - 0 - ] # Updt to capture dimensions per parameter - - # Align initial guess with sample from prior - for i, j in enumerate(self.fit_parameters): - self.fit_dict[j] = {j: self.x0[i]} - - # Build model with observations and fitting_parameters - self.model.build( - self.observations, - self.fit_parameters, - check_model=check_model, - init_soc=init_soc, - ) - - def step(self, signal, x, grad): - for i, p in enumerate(self.fit_dict): - self.fit_dict[p] = x[i] - - y_hat = self.model.solver.solve( - self.model.built_model, self.model.time_data, inputs=self.fit_dict - )[signal].data - - print( - "Last Voltage Values:", y_hat[-1], self.observations["Voltage [V]"].data[-1] - ) - - if len(y_hat) != len(self.observations["Voltage [V]"].data): - print( - "len of vectors:", - len(y_hat), - len(self.observations["Voltage [V]"].data), - ) - raise ValueError( - "Measurement and simulated data length mismatch, potentially due to reaching a voltage cut-off" - ) - - try: - res = np.sqrt(np.mean((self.observations["Voltage [V]"].data - y_hat) ** 2)) - except: - print("Error in RMSE calculation") - - if self.verbose: - print(self.fit_dict) - - return res - - def map(self, x0): - """ - Maximum a posteriori estimation. - """ - pass - - def sample(self, n_chains): - """ - Sample from the posterior distribution. - """ - pass - - def pem(self, method=None): - """ - Predictive error minimisation. - """ - pass - - def rmse(self, signal, method=None): - """ - Calculate the root mean squared error. - """ - - if method == "nlopt": - optim = pybop.NLoptOptimize(x0=self.x0) - results = optim.optimise( - lambda x, grad: self.step(signal, x, grad), self.x0, self.bounds - ) - - else: - optim = pybop.NLoptOptimize(method) - results = optim.optimise(self.step, self.x0, self.bounds) - return results - - def mle(self, method=None): - """ - Maximum likelihood estimation. - """ - pass diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py new file mode 100644 index 000000000..75cd423e7 --- /dev/null +++ b/pybop/models/base_model.py @@ -0,0 +1,79 @@ +import pybop + + +class BaseModel: + """ + Base class for PyBOP models. + """ + + def __init__(self, name="Base Model"): + self.name = name + + def build( + self, + dataset=None, + parameters=None, + check_model=True, + init_soc=None, + ): + """ + Build the model (if not built already). + """ + self.dataset = dataset + self.parameters = parameters + + raise ValueError("Not yet implemented") + + def set_init_soc(self, init_soc): + """ + Set the initial state of charge. 
+ """ + raise ValueError("Not yet implemented") + + def set_params(self): + """ + Set each parameter in the model either equal to its value + or mark it as an input. + """ + raise ValueError("Not yet implemented") + + def simulate(self, inputs=None, t_eval=None, parameter_set=None, experiment=None): + """ + Run the forward model and return the result in Numpy array format + aligning with Pints' ForwardModel simulate method. + """ + raise ValueError("Not yet implemented") + + def _simulate(self, parameter_set=None, experiment=None): + """ + Return the results of a simulation. + """ + raise ValueError("Not yet implemented") + + def n_parameters(self): + """ + Returns the dimension of the parameter space. + """ + return len(self.parameters) + + def n_outputs(self): + """ + Returns the number of outputs this model has. The default is 1. + """ + return 1 + + @property + def built_model(self): + return self._built_model + + @property + def parameter_set(self): + return self._parameter_set + + @parameter_set.setter + def parameter_set(self, parameter_set): + self._parameter_set = parameter_set.copy() + + @property + def model_with_set_params(self): + return self._model_with_set_params diff --git a/pybop/models/lithium_ion/__init__.py b/pybop/models/lithium_ion/__init__.py index 50f61b1e9..69b51653b 100644 --- a/pybop/models/lithium_ion/__init__.py +++ b/pybop/models/lithium_ion/__init__.py @@ -1,5 +1,4 @@ # # Import lithium ion based models # - from .base_echem import SPM, SPMe diff --git a/pybop/models/lithium_ion/base_echem.py b/pybop/models/lithium_ion/base_echem.py index 25f6526c9..17d4121a7 100644 --- a/pybop/models/lithium_ion/base_echem.py +++ b/pybop/models/lithium_ion/base_echem.py @@ -1,9 +1,9 @@ import pybop import pybamm -from ..BaseModel import BaseModel +from ..pybamm_model import PybammModel -class SPM(BaseModel): +class SPM(PybammModel): """ Composition of the PyBaMM Single Particle Model class. @@ -43,7 +43,7 @@ def __init__( self._disc = None -class SPMe(BaseModel): +class SPMe(PybammModel): """ Composition of the PyBaMM Single Particle Model with Electrolyte class. diff --git a/pybop/models/BaseModel.py b/pybop/models/pybamm_model.py similarity index 54% rename from pybop/models/BaseModel.py rename to pybop/models/pybamm_model.py index 7d529b3cf..22ef2c60e 100644 --- a/pybop/models/BaseModel.py +++ b/pybop/models/pybamm_model.py @@ -1,30 +1,34 @@ import pybop import pybamm +from pybop import BaseModel -class BaseModel: +class PybammModel(BaseModel): """ - Base class for PyBOP models + Wrapper class for PyBaMM model classes. Extends the BaseModel class. + """ - def __init__(self, name="Base Model"): - self.name = name + def __init__(self): + super().__init__() self.pybamm_model = None - self.fit_parameters = None - self.observations = None def build( self, - observations=None, - fit_parameters=None, + dataset=None, + experiment=None, + parameters=None, check_model=True, init_soc=None, ): """ Build the model (if not built already). + + Specifiy either a dataset or an experiment. 
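As a usage sketch of the two build paths described in this docstring (dataset, parameters and experiment follow the naming used in the examples elsewhere in this diff):

# Data-driven current: the dataset supplies "Time [s]" and "Current function [A]"
model.build(dataset=dataset, parameters=parameters)

# Protocol-driven current: a pybamm.Experiment defines the applied current instead
model.build(experiment=experiment, parameters=parameters)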
""" - self.fit_parameters = fit_parameters - self.observations = observations + self.dataset = dataset + self.experiment = experiment + self.parameters = parameters if init_soc is not None: self.set_init_soc(init_soc) @@ -35,6 +39,7 @@ def build( elif self.pybamm_model.is_discretised: self._model_with_set_params = self.pybamm_model self._built_model = self.pybamm_model + else: self.set_params() self._mesh = pybamm.Mesh(self.geometry, self.submesh_types, self.var_pts) @@ -68,24 +73,25 @@ def set_init_soc(self, init_soc): def set_params(self): """ - Set the parameters in the model. + Set each parameter in the model either equal to its value + or mark it as an input. """ if self.model_with_set_params: return - if self.fit_parameters is not None: - # set input parameters in parameter set from fitting parameters - for i in self.fit_parameters: - self.parameter_set[i] = "[input]" + if self.parameters is not None: + # Set input parameters in parameter set from fitting parameters + for i, Param in enumerate(self.parameters): + self.parameter_set[Param.name] = "[input]" - if self.observations is not None and self.fit_parameters is not None: + if self.dataset is not None: self.parameter_set["Current function [A]"] = pybamm.Interpolant( - self.observations["Time [s]"].data, - self.observations["Current function [A]"].data, + self.dataset["Time [s]"].data, + self.dataset["Current function [A]"].data, pybamm.t, ) - # Set t_eval - self.time_data = self._parameter_set["Current function [A]"].x[0] + # Set times + self.times = self._parameter_set["Current function [A]"].x[0] self._model_with_set_params = self._parameter_set.process_model( self._unprocessed_model, inplace=False @@ -93,37 +99,79 @@ def set_params(self): self._parameter_set.process_geometry(self.geometry) self.pybamm_model = self._model_with_set_params - def simulate(self, inputs=None, t_eval=None, parameter_set=None, experiment=None): + def simulate(self, parameters=None, times=None, experiment=None): """ Run the forward model and return the result in Numpy array format aligning with Pints' ForwardModel simulate method. + + Inputs + ---------- + parameters : A collection of fitting parameters to pass to the model when + solving + times : numeric type, optional + The times (in seconds) at which to compute the solution. Can be + provided as an array of times at which to return the solution, or as a + list `[t0, tf]` where `t0` is the initial time and `tf` is the final time. + If provided as a list the solution is returned at 100 points within the + interval `[t0, tf]`. + + If not using an experiment or running a drive cycle simulation (current + provided as data) `times` *must* be provided. + + If running an experiment the values in `times` are ignored, and the + solution times are specified by the experiment. + + If None and the parameter "Current function [A]" is read from data + (i.e. drive cycle simulation) the model will be solved at the times + provided in the data. 
+ experiment : of the PyBaMM Experiment class (for PyBaMM models only) """ - parameter_set = parameter_set or self.parameter_set - if inputs is None: - return self._simulate(parameter_set, experiment).solve(t_eval=t_eval) - else: - if self._built_model is None: - self.build(fit_parameters=inputs.keys()) - return self.solver.solve(self.built_model, inputs=inputs, t_eval=t_eval) - def _simulate(self, parameter_set=None, experiment=None): + # Run the simulation + prediction = self._simulate(parameters, times, experiment) + + return prediction + + def _simulate(self, parameters, times, experiment): """ - Create a PyBaMM simulation object and return it. + Create a PyBaMM simulation object and run it. """ - if self.pybamm_model is not None: - return pybamm.Simulation( - self.pybamm_model, - experiment=experiment, - parameter_values=parameter_set, - ) - else: + if self.pybamm_model is None: raise ValueError("This sim method currently only supports PyBaMM models") + else: + # Build the model if necessary + if self._built_model is None: + self.build() + + # Pass the input parameters + if parameters is not None: + inputs = {} + for i, Param in enumerate(parameters): + inputs[Param.name] = Param.value + + # Define the simulation + if experiment is None: + sim = pybamm.Simulation(self.pybamm_model) + self.times = self.dataset["Time [s]"].data + t_eval = times or self.times + else: + sim = pybamm.Simulation(self.pybamm_model, experiment=experiment) + t_eval = None + + # Run the simulation + if parameters is None: + prediction = sim.solve(t_eval=t_eval) + else: + prediction = sim.solve(t_eval=t_eval, inputs=inputs) + + return prediction + def n_parameters(self): """ Returns the dimension of the parameter space. """ - return len(self.fit_parameters) + return len(self.parameters) def n_outputs(self): """ diff --git a/pybop/optimisation.py b/pybop/optimisation.py new file mode 100644 index 000000000..c817744e0 --- /dev/null +++ b/pybop/optimisation.py @@ -0,0 +1,99 @@ +import pybop +import numpy as np + + +class Optimisation: + """ + Optimisation class for PyBOP. + """ + + def __init__( + self, + cost, + model, + optimiser, + parameters, + x0=None, + dataset=None, + signal=None, + check_model=True, + init_soc=None, + verbose=False, + ): + self.cost = cost + self.model = model + self.optimiser = optimiser + self.parameters = parameters + self.x0 = x0 + self.dataset = {o.name: o for o in dataset} + self.signal = signal + self.n_parameters = len(self.parameters) + self.verbose = verbose + + # Check that the dataset contains time and current + for name in ["Time [s]", "Current function [A]"]: + if name not in self.dataset: + raise ValueError(f"expected {name} in list of dataset") + + # Set bounds + self.bounds = dict( + lower=[Param.bounds[0] for Param in self.parameters], + upper=[Param.bounds[1] for Param in self.parameters], + ) + + # Sample from prior for x0 + if x0 is None: + self.x0 = np.zeros(self.n_parameters) + for i, Param in enumerate(self.parameters): + self.x0[i] = Param.prior.rvs(1)[0] + # Update to capture dimensions per parameter + + # Add the initial values to the parameter definitions + for i, Param in enumerate(self.parameters): + Param.update(value=self.x0[i]) + + # Build model with dataset and fitting parameters + self.model.build( + dataset=self.dataset, + parameters=self.parameters, + check_model=check_model, + init_soc=init_soc, + ) + + def run(self): + """ + Run the optimisation algorithm. 
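Before calling run(), the objective wired together here can be spot-checked directly; a small sketch, where parameterisation is an Optimisation instance built as in the examples above:

# Evaluate the cost once at the prior-sampled starting point as a sanity check
initial_cost = parameterisation.cost_function(parameterisation.x0)
print("Cost at x0:", initial_cost)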
+ """ + + results = self.optimiser.optimise( + self.cost_function, # lambda x, grad: self.cost_function(x, grad), + self.x0, + self.bounds, + ) + + return results + + def cost_function(self, x, grad=None): + """ + Compute a model prediction and associated value of the cost. + """ + + # Unpack the target dataset + target = self.dataset[self.signal].data + + # Update the parameter dictionary + for i, Param in enumerate(self.parameters): + Param.update(value=self.x0[i]) + + # Make prediction + prediction = self.model.simulate(parameters=self.parameters)[self.signal].data + + # Add simulation error handling here + + # Compute cost + res = self.cost.compute(prediction, target) + + if self.verbose: + print("Parameter estimates: ", self.parameters.value, "\n") + + return res diff --git a/pybop/optimisation/NLoptOptimize.py b/pybop/optimisation/NLoptOptimize.py deleted file mode 100644 index 6d0f5ba87..000000000 --- a/pybop/optimisation/NLoptOptimize.py +++ /dev/null @@ -1,43 +0,0 @@ -import pybop -import nlopt -from .BaseOptimisation import BaseOptimisation - - -class NLoptOptimize(BaseOptimisation): - """ - Wrapper class for the NLOpt optimisation class. Extends the BaseOptimisation class. - """ - - def __init__(self, method=None, x0=None, xtol=None): - super().__init__() - self.name = "NLOpt Optimisation" - - if method is not None: - self.opt = nlopt.opt(method, len(x0)) - else: - self.opt = nlopt.opt(nlopt.LN_BOBYQA, len(x0)) - - if xtol is not None: - self.opt.set_xtol_rel(xtol) - else: - self.opt.set_xtol_rel(1e-5) - - def _runoptimise(self, cost_function, x0, bounds): - """ - Run the NLOpt opt method. - - Parameters - ---------- - cost_function: function for optimising - method: optimisation method - x0: Initialisation array - bounds: bounds array - """ - - self.opt.set_min_objective(cost_function) - self.opt.set_lower_bounds(bounds["lower"]) - self.opt.set_upper_bounds(bounds["upper"]) - results = self.opt.optimize(x0) - num_evals = self.opt.get_numevals() - - return results, self.opt.last_optimum_value(), num_evals diff --git a/pybop/optimisation/SciPyMinimize.py b/pybop/optimisation/SciPyMinimize.py deleted file mode 100644 index ee4c57067..000000000 --- a/pybop/optimisation/SciPyMinimize.py +++ /dev/null @@ -1,42 +0,0 @@ -import pybop -from scipy.optimize import minimize -from .BaseOptimisation import BaseOptimisation - - -class SciPyMinimize(BaseOptimisation): - """ - Wrapper class for the Scipy optimisation class. Extends the BaseOptimisation class. - """ - - def __init__(self, cost_function, x0, bounds=None, options=None): - super().__init__() - self.cost_function = cost_function - self.method = options.optmethod - self.x0 = x0 or cost_function.x0 - self.bounds = bounds - self.options = options - self.name = "Scipy Optimisation" - - def _runoptimise(self): - """ - Run the Scipy opt method. 
- - Parameters - ---------- - cost_function: function for optimising - method: optimisation method - x0: Initialisation array - options: options dictionary - bounds: bounds array - """ - - if self.method is not None and self.bounds is not None: - opt = minimize( - self.cost_function, self.x0, method=self.method, bounds=self.bounds - ) - elif self.method is not None: - opt = minimize(self.cost_function, self.x0, method=self.method) - else: - opt = minimize(self.cost_function, self.x0, method="BFGS") - - return opt diff --git a/pybop/optimisers/__init__.py b/pybop/optimisers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pybop/optimisation/BaseOptimisation.py b/pybop/optimisers/base_optimiser.py similarity index 67% rename from pybop/optimisation/BaseOptimisation.py rename to pybop/optimisers/base_optimiser.py index 14364a787..91c55ee20 100644 --- a/pybop/optimisation/BaseOptimisation.py +++ b/pybop/optimisers/base_optimiser.py @@ -1,7 +1,7 @@ import pybop -class BaseOptimisation: +class BaseOptimiser: """ Base class for the optimisation methods. @@ -9,17 +9,15 @@ class BaseOptimisation: """ def __init__(self): - self.name = "Base Optimisation" + self.name = "Base Optimiser" - def optimise(self, cost_function, x0, bounds, method=None): + def optimise(self, cost_function, x0, bounds): """ - Optimise method to be overloaded by child classes. + Optimisiation method to be overloaded by child classes. """ - # Set up optimisation self.cost_function = cost_function self.x0 = x0 - self.method = method self.bounds = bounds # Run optimisation diff --git a/pybop/optimisers/nlopt_optimize.py b/pybop/optimisers/nlopt_optimize.py new file mode 100644 index 000000000..7909efd0c --- /dev/null +++ b/pybop/optimisers/nlopt_optimize.py @@ -0,0 +1,50 @@ +import pybop +import nlopt +from pybop.optimisers.base_optimiser import BaseOptimiser + + +class NLoptOptimize(BaseOptimiser): + """ + Wrapper class for the NLOpt optimiser class. Extends the BaseOptimiser class. + """ + + def __init__(self, x0, xtol=None, method=None): + super().__init__() + self.name = "NLOpt Optimiser" + + if method is not None: + self.optim = nlopt.opt(method, len(x0)) + else: + self.optim = nlopt.opt(nlopt.LN_BOBYQA, len(x0)) + + if xtol is not None: + self.optim.set_xtol_rel(xtol) + else: + self.optim.set_xtol_rel(1e-5) + + def _runoptimise(self, cost_function, x0, bounds): + """ + Run the NLOpt optimisation method. + + Inputs + ---------- + cost_function: function for optimising + method: optimisation algorithm + x0: initialisation array + bounds: bounds array + """ + + # Pass settings to the optimiser + self.optim.set_min_objective(cost_function) + self.optim.set_lower_bounds(bounds["lower"]) + self.optim.set_upper_bounds(bounds["upper"]) + + # Run the optimser + x = self.optim.optimize(x0) + + # Get performance statistics + output = self.optim + final_cost = self.optim.last_optimum_value() + num_evals = self.optim.get_numevals() + + return x, output, final_cost, num_evals diff --git a/pybop/optimisers/pints_optimiser.py b/pybop/optimisers/pints_optimiser.py new file mode 100644 index 000000000..e83ce0f82 --- /dev/null +++ b/pybop/optimisers/pints_optimiser.py @@ -0,0 +1,158 @@ +import pybop +import pints +from pybop.optimisers.base_optimiser import BaseOptimiser +from pints import ErrorMeasure, Boundaries + + +class PintsOptimiser(BaseOptimiser): + """ + Wrapper class for the PINTS optimisation class. Extends the BaseOptimiser class. 
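Both wrappers default to a fixed algorithm (BOBYQA for NLopt, PSO for PINTS) but accept a method argument; a usage sketch, with x0 as in the examples above:

import nlopt
import pints

# Swap in alternative algorithms via the method argument
optimiser = pybop.NLoptOptimize(x0=x0, method=nlopt.LN_COBYLA, xtol=1e-6)
optimiser = pybop.PintsOptimiser(x0=x0, method=pints.CMAES)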
+ """ + + def __init__(self, x0, xtol=None, method=None): + super().__init__() + self.name = "PINTS Optimiser" + + if method is not None: + self.method = method + else: + self.method = pints.PSO + + def _runoptimise(self, cost_function, x0, bounds): + """ + Run the PINTS optimisation method. + + Inputs + ---------- + cost_function: function for optimising + method: optimisation algorithm + x0: initialisation array + bounds: bounds array + """ + + # Wrap bounds + boundaries = pybop.PintsBoundaries(bounds, x0) + + # Wrap error measure + error = pybop.PintsError(cost_function, x0) + + # Set up optimisation controller + controller = pints.OptimisationController( + error, x0, boundaries=boundaries, method=self.method + ) + controller.set_max_unchanged_iterations(20) # default 200 + + # Run the optimser + x, final_cost = controller.run() + + # Get performance statistics + # output = *pass all output* + # final_cost + # num_evals + output = None + num_evals = None + + return x, output, final_cost, num_evals + + +class PintsError(ErrorMeasure): + """ + An interface class for PyBOP that extends the PINTS ErrorMeasure class. + + From PINTS: + Abstract base class for objects that calculate some scalar measure of + goodness-of-fit (for a model and a data set), such that a smaller value + means a better fit. + + ErrorMeasures are callable objects: If ``e`` is an instance of an + :class:`ErrorMeasure` class you can calculate the error by calling ``e(p)`` + where ``p`` is a point in parameter space. + """ + + def __init__(self, cost_function, x0): + self.cost_function = cost_function + self.x0 = x0 + + def __call__(self, x): + cost = self.cost_function(x) + + return cost + + def evaluateS1(self, x): + """ + Evaluates this error measure, and returns the result plus the partial + derivatives of the result with respect to the parameters. + + The returned data has the shape ``(e, e')`` where ``e`` is a scalar + value and ``e'`` is a sequence of length ``n_parameters``. + + *This is an optional method that is not always implemented.* + """ + raise NotImplementedError + + def n_parameters(self): + """ + Returns the dimension of the parameter space this measure is defined + over. + """ + return len(self.x0) + + +class PintsBoundaries(object): + """ + An interface class for PyBOP that extends the PINTS ErrorMeasure class. + + From PINTS: + Abstract class representing boundaries on a parameter space. + """ + + def __init__(self, bounds, x0): + self.bounds = bounds + self.x0 = x0 + + def check(self, parameters): + """ + Returns ``True`` if and only if the given point in parameter space is + within the boundaries. + + Parameters + ---------- + parameters + A point in parameter space + """ + result = False + if ( + parameters[0] >= self.bounds["lower"][0] + and parameters[1] >= self.bounds["lower"][1] + and parameters[0] <= self.bounds["upper"][0] + and parameters[1] <= self.bounds["upper"][1] + ): + result = True + + return result + + def n_parameters(self): + """ + Returns the dimension of the parameter space these boundaries are + defined on. + """ + return len(self.x0) + + def sample(self, n=1): + """ + Returns ``n`` random samples from within the boundaries, for example to + use as starting points for an optimisation. + + The returned value is a NumPy array with shape ``(n, d)`` where ``n`` + is the requested number of samples, and ``d`` is the dimension of the + parameter space these boundaries are defined on. 
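The check() method above is written for exactly two parameters; a dimension-agnostic variant (a sketch, not part of this diff) could be:

import numpy as np

def check(self, parameters):
    # True only if every parameter lies within its lower/upper bound
    lower = np.asarray(self.bounds["lower"])
    upper = np.asarray(self.bounds["upper"])
    parameters = np.asarray(parameters)
    return bool(np.all(parameters >= lower) and np.all(parameters <= upper))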
+ + *Note that implementing :meth:`sample()` is optional, so some boundary + types may not support it.* + + Parameters + ---------- + n : int + The number of points to sample + """ + raise NotImplementedError diff --git a/pybop/optimisers/scipy_minimize.py b/pybop/optimisers/scipy_minimize.py new file mode 100644 index 000000000..7a860f525 --- /dev/null +++ b/pybop/optimisers/scipy_minimize.py @@ -0,0 +1,54 @@ +import pybop +from scipy.optimize import minimize +from pybop.optimisers.base_optimiser import BaseOptimiser + + +class SciPyMinimize(BaseOptimiser): + """ + Wrapper class for the SciPy optimisation class. Extends the BaseOptimiser class. + """ + + def __init__(self, x0, xtol=None, method=None, options=None): + super().__init__() + self.name = "Scipy Optimiser" + + if method is None: + self.method = method + else: + self.method = "BFGS" + + if xtol is not None: + self.xtol = xtol + else: + self.xtol = 1e-5 + + self.options = options + + def _runoptimise(self, cost_function, x0, bounds): + """ + Run the SciPy optimisation method. + + Inputs + ---------- + cost_function: function for optimising + method: optimisation algorithm + x0: initialisation array + bounds: bounds array + """ + + # Reformat bounds + bounds = ( + (lower, upper) for lower, upper in zip(bounds["lower"], bounds["upper"]) + ) + + # Run the optimser + output = minimize( + cost_function, x0, method=self.method, bounds=bounds, tol=self.xtol + ) + + # Get performance statistics + x = output.x + final_cost = output.fun + num_evals = output.nfev + + return x, output, final_cost, num_evals diff --git a/pybop/parameters/__init__.py b/pybop/parameters/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pybop/identification/parameter.py b/pybop/parameters/base_parameter.py similarity index 64% rename from pybop/identification/parameter.py rename to pybop/parameters/base_parameter.py index e0933a6c0..1de1a6f3f 100644 --- a/pybop/identification/parameter.py +++ b/pybop/parameters/base_parameter.py @@ -5,11 +5,11 @@ class Parameter: """ "" - Class for creating parameters in pybop. + Class for creating parameters in PyBOP. """ - def __init__(self, param, value=None, prior=None, bounds=None): - self.name = param + def __init__(self, name, value=None, prior=None, bounds=None): + self.name = name self.prior = prior self.value = value self.bounds = bounds @@ -20,5 +20,8 @@ def __init__(self, param, value=None, prior=None, bounds=None): # bounds checks and set defaults # implement methods to assign and retrieve parameters + def update(self, value): + self.value = value + def __repr__(self): - return f"Parameter: {self.name} \n Prior: {self.prior} \n Bounds: {self.bounds}" + return f"Parameter: {self.name} \n Prior: {self.prior} \n Bounds: {self.bounds} \n Value: {self.value}" diff --git a/pybop/identification/parameter_set.py b/pybop/parameters/base_parameter_set.py similarity index 74% rename from pybop/identification/parameter_set.py rename to pybop/parameters/base_parameter_set.py index eb84b9da1..215b3cc9f 100644 --- a/pybop/identification/parameter_set.py +++ b/pybop/parameters/base_parameter_set.py @@ -4,7 +4,7 @@ class ParameterSet: """ - Class for creating parameter sets in pybop. + Class for creating parameter sets in PyBOP. 
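A usage sketch for this wrapper ("Chen2020" is one of the parameter sets bundled with PyBaMM):

# Load a named PyBaMM parameter set and attach it to a PyBOP model
parameter_set = pybop.ParameterSet("pybamm", "Chen2020")
model = pybop.lithium_ion.SPM()
model.parameter_set = parameter_set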
""" def __new__(cls, method, name): @@ -14,4 +14,4 @@ def __new__(cls, method, name): except: raise ValueError("Parameter set not found") else: - raise ValueError("Only PybaMM parameter sets are currently implemented") + raise ValueError("Only PyBaMM parameter sets are currently implemented") diff --git a/pybop/priors.py b/pybop/parameters/priors.py similarity index 98% rename from pybop/priors.py rename to pybop/parameters/priors.py index b23f2c38e..835e79934 100644 --- a/pybop/priors.py +++ b/pybop/parameters/priors.py @@ -59,7 +59,7 @@ def __repr__(self): class Exponential: """ - exponential prior class. + Exponential prior class. """ def __init__(self, scale): diff --git a/run-tests.py b/run-tests.py new file mode 100644 index 000000000..bcd44d1c4 --- /dev/null +++ b/run-tests.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +# +# Runs all unit tests included in PyBOP. +# +# This file is adapted from PINTS (https://github.com/pints-team/pints/). +# +import argparse +import datetime +import os +import re +import subprocess +import sys +import unittest + + +def run_unit_tests(): + """ + Runs unit tests (without subprocesses). + """ + # tests = os.path.join('pybop', 'tests') + tests = os.path.join(os.path.dirname(__file__), "tests", "unit") + suite = unittest.defaultTestLoader.discover(tests, pattern="test*.py") + res = unittest.TextTestRunner(verbosity=2).run(suite) + sys.exit(0 if res.wasSuccessful() else 1) + + +# def run_flake8(): + +# def run_copyright_checks(): + +# def run_doctests(): + +# def doctest_sphinx(): + +# def doctest_examples_readme(): + +# def doctest_rst_and_public_interface(): + +# def check_exposed_symbols(module, submodule_names, doc_symbols): + +# def get_all_documented_symbols(): + +# def run_notebook_tests(): + +# def run_notebook_interfaces_tests(): + +# def list_notebooks(root, recursive=True, ignore_list=None, notebooks=None): + +# def test_notebook(path): + +# def export_notebook(ipath, opath): + + +if __name__ == "__main__": + # Prevent CI from hanging on multiprocessing tests + import multiprocessing + + multiprocessing.set_start_method("spawn") + + # Set up argument parsing + parser = argparse.ArgumentParser( + description="Run unit tests for PyBOP.", + epilog="To run individual unit tests, use e.g." + " $ python tests/unit/test_parameterisation.py", + ) + # Unit tests + parser.add_argument( + "--unit", + action="store_true", + help="Run all unit tests using the `python` interpreter.", + ) + # # Notebook tests + # parser.add_argument( + # '--books', + # action='store_true', + # help='Test only the fast Jupyter notebooks in `examples`.', + # ) + # parser.add_argument( + # '--interfaces', + # action='store_true', + # help='Test only the fast Jupyter notebooks in `examples/interfaces`.', + # ) + # parser.add_argument( + # '-debook', + # nargs=2, + # metavar=('in', 'out'), + # help='Export a Jupyter notebook to a Python file for manual testing.', + # ) + # # Doctests + # parser.add_argument( + # '--doctest', + # action='store_true', + # help='Run any doctests, check if docs can be built', + # ) + # # Copyright checks + # parser.add_argument( + # '--copyright', + # action='store_true', + # help='Check copyright runs to the current year', + # ) + # Combined test sets + parser.add_argument( + "--quick", + action="store_true", + help="Run quick checks (unit tests)", # , flake8, docs)', + ) + + # Parse! 
+ args = parser.parse_args() + + # Run tests + has_run = False + # Unit tests + if args.unit: + has_run = True + run_unit_tests() + # # Doctests + # if args.doctest: + # has_run = True + # run_doctests() + # # Copyright checks + # if args.copyright: + # has_run = True + # run_copyright_checks() + # # Notebook tests + # elif args.books: + # has_run = True + # run_notebook_tests() + # if args.interfaces: + # has_run = True + # run_notebook_interfaces_tests() + # if args.debook: + # has_run = True + # export_notebook(*args.debook) + # Combined test sets + if args.quick: + has_run = True + # run_flake8() + run_unit_tests() + # run_doctests() + # Help + if not has_run: + parser.print_help() diff --git a/setup.py b/setup.py index 28900d55e..bc3978cf7 100644 --- a/setup.py +++ b/setup.py @@ -30,6 +30,7 @@ "scipy>=1.3", "pandas>=1.0", "nlopt>=2.6", + "pints>=0.5", ], # https://pypi.org/classifiers/ classifiers=[], diff --git a/tests/unit/test_parameterisation.py b/tests/unit/test_parameterisation.py index 5cbadf84a..ad57f2b0c 100644 --- a/tests/unit/test_parameterisation.py +++ b/tests/unit/test_parameterisation.py @@ -1,18 +1,24 @@ +import unittest import pybop import pybamm import numpy as np -class TestParameterisation: - def getdata(self, model, x0): - model.parameter_set = model.pybamm_model.default_parameter_values +class TestParameterisation(unittest.TestCase): + """ + Tests the parameterisation functionality of PyBOP. + """ + def getdata(self, model, x0): + # Update fitting parameters model.parameter_set.update( { "Negative electrode active material volume fraction": x0[0], "Positive electrode active material volume fraction": x0[1], } ) + + # Define experimental protocol experiment = pybamm.Experiment( [ ( @@ -24,87 +30,167 @@ def getdata(self, model, x0): ] * 2 ) - sim = model.simulate(experiment=experiment) - return sim - def test_spm(self): + # Simulate model to generate test dataset + prediction = model.simulate(experiment=experiment) + return prediction + + def test_spm_nlopt(self): # Define model model = pybop.lithium_ion.SPM() model.parameter_set = model.pybamm_model.default_parameter_values - # Form observations + # Form dataset x0 = np.array([0.52, 0.63]) solution = self.getdata(model, x0) - - observations = [ - pybop.Observed("Time [s]", solution["Time [s]"].data), - pybop.Observed("Current function [A]", solution["Current [A]"].data), - pybop.Observed("Voltage [V]", solution["Terminal voltage [V]"].data), + dataset = [ + pybop.Dataset("Time [s]", solution["Time [s]"].data), + pybop.Dataset("Current function [A]", solution["Current [A]"].data), + pybop.Dataset("Voltage [V]", solution["Terminal voltage [V]"].data), ] - # Fitting parameters - params = [ + # Define fitting parameters + parameters = [ pybop.Parameter( - "Negative electrode active material volume fraction", + name="Negative electrode active material volume fraction", prior=pybop.Gaussian(0.5, 0.02), bounds=[0.375, 0.625], ), pybop.Parameter( - "Positive electrode active material volume fraction", + name="Positive electrode active material volume fraction", prior=pybop.Gaussian(0.65, 0.02), bounds=[0.525, 0.75], ), ] - parameterisation = pybop.Parameterisation( - model, observations=observations, fit_parameters=params - ) + # Define the cost to optimise + cost = pybop.RMSE() + signal = "Voltage [V]" + + # Select optimiser + optimiser = pybop.NLoptOptimize(x0=x0) - # get RMSE estimate using NLOpt - results, last_optim, num_evals = parameterisation.rmse( - signal="Voltage [V]", method="nlopt" + # Build the optimisation 
problem + parameterisation = pybop.Optimisation( + cost=cost, + model=model, + optimiser=optimiser, + parameters=parameters, + dataset=dataset, + signal=signal, ) - # Assertions - np.testing.assert_allclose(last_optim, 1e-3, atol=1e-2) - np.testing.assert_allclose(results, x0, atol=1e-1) - def test_spme(self): + # Run the optimisation problem + x, output, final_cost, num_evals = parameterisation.run() + + # Check assertions + np.testing.assert_allclose(final_cost, 1e-3, atol=1e-2) + np.testing.assert_allclose(x, x0, atol=1e-1) + + def test_spme_scipy(self): # Define model model = pybop.lithium_ion.SPMe() model.parameter_set = model.pybamm_model.default_parameter_values - # Form observations + # Form dataset x0 = np.array([0.52, 0.63]) solution = self.getdata(model, x0) - - observations = [ - pybop.Observed("Time [s]", solution["Time [s]"].data), - pybop.Observed("Current function [A]", solution["Current [A]"].data), - pybop.Observed("Voltage [V]", solution["Terminal voltage [V]"].data), + dataset = [ + pybop.Dataset("Time [s]", solution["Time [s]"].data), + pybop.Dataset("Current function [A]", solution["Current [A]"].data), + pybop.Dataset("Voltage [V]", solution["Terminal voltage [V]"].data), ] - # Fitting parameters - params = [ + # Define fitting parameters + parameters = [ pybop.Parameter( - "Negative electrode active material volume fraction", + name="Negative electrode active material volume fraction", prior=pybop.Gaussian(0.5, 0.02), bounds=[0.375, 0.625], ), pybop.Parameter( - "Positive electrode active material volume fraction", + name="Positive electrode active material volume fraction", prior=pybop.Gaussian(0.65, 0.02), bounds=[0.525, 0.75], ), ] - parameterisation = pybop.Parameterisation( - model, observations=observations, fit_parameters=params + # Define the cost to optimise + cost = pybop.RMSE() + signal = "Voltage [V]" + + # Select optimiser + optimiser = pybop.SciPyMinimize(x0=x0) + + # Build the optimisation problem + parameterisation = pybop.Optimisation( + cost=cost, + model=model, + optimiser=optimiser, + parameters=parameters, + dataset=dataset, + signal=signal, ) - # get RMSE estimate using NLOpt - results, last_optim, num_evals = parameterisation.rmse( - signal="Voltage [V]", method="nlopt" + # Run the optimisation problem + x, output, final_cost, num_evals = parameterisation.run() + + # Check assertions + np.testing.assert_allclose(final_cost, 1e-3, atol=1e-2) + np.testing.assert_allclose(x, x0, atol=1e-1) + + def test_spm_pints(self): + # Define model + model = pybop.lithium_ion.SPMe() + model.parameter_set = model.pybamm_model.default_parameter_values + + # Form dataset + x0 = np.array([0.52, 0.63]) + solution = self.getdata(model, x0) + dataset = [ + pybop.Dataset("Time [s]", solution["Time [s]"].data), + pybop.Dataset("Current function [A]", solution["Current [A]"].data), + pybop.Dataset("Voltage [V]", solution["Terminal voltage [V]"].data), + ] + + # Define fitting parameters + parameters = [ + pybop.Parameter( + name="Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.5, 0.02), + bounds=[0.375, 0.625], + ), + pybop.Parameter( + name="Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.65, 0.02), + bounds=[0.525, 0.75], + ), + ] + + # Define the cost to optimise + cost = pybop.RMSE() + signal = "Voltage [V]" + + # Select optimiser + optimiser = pybop.PintsOptimiser(x0=x0) + + # Build the optimisation problem + parameterisation = pybop.Optimisation( + cost=cost, + model=model, + optimiser=optimiser, + 
parameters=parameters, + dataset=dataset, + signal=signal, ) - # Assertions - np.testing.assert_allclose(last_optim, 1e-3, atol=1e-2) - np.testing.assert_allclose(results, x0, atol=1e-1) + + # Run the optimisation problem + x, output, final_cost, num_evals = parameterisation.run() + + # Check assertions + np.testing.assert_allclose(final_cost, 1e-3, atol=1e-2) + np.testing.assert_allclose(x, x0, atol=1e-1) + + +if __name__ == "__main__": + unittest.main()
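Since the rewritten test class now subclasses unittest.TestCase and carries a unittest.main() hook, the suite can also be run programmatically; a sketch equivalent to `python run-tests.py --unit`:

import unittest

# Discover and run the unit tests added in this diff
suite = unittest.defaultTestLoader.discover("tests/unit", pattern="test*.py")
unittest.TextTestRunner(verbosity=2).run(suite)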