Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Set minimising for each cost and make a property of BaseOptimiser only #588

Merged
merged 12 commits into from
Dec 18, 2024
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

## Optimisations

- [#588](https://github.com/pybop-team/PyBOP/pull/588) - Makes `minimising` a property of `BaseOptimiser` set by the cost class.
- [#513](https://github.com/pybop-team/PyBOP/pull/513) - Refactors `LogPosterior` with attributes pointing to composed likelihood object.
- [#551](https://github.com/pybop-team/PyBOP/pull/551) - Refactors Optimiser arguments, `population_size` and `max_iterations` as default args, improves optimiser docstrings

Expand Down
4 changes: 1 addition & 3 deletions examples/standalone/optimiser.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,12 +67,10 @@ def callback(x):
)

return OptimisationResult(
optim=self,
x=result.x,
cost=self.cost,
final_cost=self.cost(result.x),
n_iterations=result.nit,
scipy_result=result,
optim=self,
)

def name(self):
Expand Down
1 change: 1 addition & 0 deletions pybop/costs/_likelihoods.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ class BaseLikelihood(BaseCost):
def __init__(self, problem: BaseProblem):
    """
    Initialise the base likelihood from a problem instance.

    Parameters
    ----------
    problem : BaseProblem
        The problem providing the data the likelihood is evaluated against.
    """
    super().__init__(problem)
    # Number of data points, used when normalising/scaling the likelihood.
    self.n_data = problem.n_data
    # Likelihoods are maximised: minimising=False makes the optimiser-facing
    # cost call negate the value (see BaseCost.__call__ with for_optimiser=True).
    self.minimising = False


class BaseMetaLikelihood(BaseLikelihood):
Expand Down
13 changes: 9 additions & 4 deletions pybop/costs/_weighted_cost.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import numpy as np

from pybop import BaseCost, BaseLikelihood, DesignCost
from pybop import BaseCost


class WeightedCost(BaseCost):
Expand Down Expand Up @@ -32,9 +32,6 @@ def __init__(self, *costs, weights: Optional[list[float]] = None):
self.costs = [cost for cost in costs]
if len(set(type(cost.problem) for cost in self.costs)) > 1:
raise TypeError("All problems must be of the same class type.")
self.minimising = not any(
isinstance(cost, (BaseLikelihood, DesignCost)) for cost in self.costs
)

# Check if weights are provided
if weights is not None:
Expand Down Expand Up @@ -62,6 +59,14 @@ def __init__(self, *costs, weights: Optional[list[float]] = None):
for cost in self.costs:
self.join_parameters(cost.parameters)

# Apply the minimising property from each cost
for i, cost in enumerate(self.costs):
self.weights[i] = self.weights[i] * (1 if cost.minimising else -1)
if all(not cost.minimising for cost in self.costs):
# If all costs are maximising, convert the weighted cost to maximising
self.weights = -self.weights
self.minimising = False

# Weighted costs do not use this functionality
self._has_separable_problem = False

Expand Down
20 changes: 18 additions & 2 deletions pybop/costs/base_cost.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ class BaseCost:
_de : float
The gradient of the cost function to use if an error occurs during
evaluation. Defaults to 1.0.
minimising : bool, optional, default=True
NicolaCourtier marked this conversation as resolved.
Show resolved Hide resolved
If False, switches the sign of the cost and gradient to perform maximisation
instead of minimisation.
"""

def __init__(self, problem: Optional[BaseProblem] = None):
Expand All @@ -43,6 +46,7 @@ def __init__(self, problem: Optional[BaseProblem] = None):
self.y = None
self.dy = None
self._de = 1.0
self.minimising = True
if isinstance(self.problem, BaseProblem):
self._target = self.problem.target
self._parameters.join(self.problem.parameters)
Expand All @@ -58,6 +62,7 @@ def __call__(
inputs: Union[Inputs, list],
calculate_grad: bool = False,
apply_transform: bool = False,
for_optimiser: bool = False,
) -> Union[float, tuple[float, np.ndarray]]:
"""
This method calls the forward model via problem.evaluate(inputs),
Expand All @@ -73,6 +78,9 @@ def __call__(
cost is computed.
apply_transform : bool, optional, default=False
If True, applies a transformation to the inputs before evaluating the model.
for_optimiser : bool, optional, default=False
If True, returns the cost value if self.minimising=True and the negative of
the cost value if self.minimising=False (i.e. the cost is being maximised).

Returns
-------
Expand All @@ -96,6 +104,9 @@ def __call__(
else:
model_inputs = inputs

# Check whether we are maximising or minimising
NicolaCourtier marked this conversation as resolved.
Show resolved Hide resolved
minimising = self.minimising or not for_optimiser

# Validate inputs, update parameters
model_inputs = self.parameters.verify(model_inputs)
self.parameters.update(values=list(model_inputs.values()))
Expand All @@ -110,10 +121,15 @@ def __call__(
jac = self.transformation.jacobian(inputs)
grad = np.matmul(grad, jac)

return cost, grad
return cost * (1 if minimising else -1), grad * (
1 if minimising else -1
)

y = self.problem.evaluate(self.problem.parameters.as_dict())
return self.compute(y, dy=dy, calculate_grad=calculate_grad)

return self.compute(y, dy=dy, calculate_grad=calculate_grad) * (
1 if minimising else -1
)

def compute(self, y: dict, dy: ndarray, calculate_grad: bool = False):
"""
Expand Down
15 changes: 6 additions & 9 deletions pybop/costs/design_costs.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,23 +19,22 @@ class DesignCost(BaseCost):

def __init__(self, problem):
    """
    Initialises the design cost calculator with a problem.

    Parameters
    ----------
    problem : object
        The problem instance containing the model and data.
    """
    super().__init__(problem)
    # NOTE(review): likely redundant — the base class already stores the
    # problem; confirm against BaseCost.__init__ before removing.
    self.problem = problem
    # Design costs (energy/power densities) are maximised, so flag the cost
    # as not minimising; the optimiser reads this via cost.minimising.
    self.minimising = False


class GravimetricEnergyDensity(DesignCost):
"""
Calculates the gravimetric energy density (specific energy) of a battery cell,
when applied to a normalised discharge from upper to lower voltage limits. The
goal of maximising the energy density is achieved by setting minimising = False
in the optimiser settings.
goal of maximising the energy density is achieved with self.minimising=False.

The gravimetric energy density [Wh.kg-1] is calculated as

Expand Down Expand Up @@ -92,8 +91,7 @@ class VolumetricEnergyDensity(DesignCost):
"""
Calculates the (volumetric) energy density of a battery cell, when applied to a
normalised discharge from upper to lower voltage limits. The goal of maximising
the energy density is achieved by setting minimising = False in the optimiser
settings.
the energy density is achieved with self.minimising = False.

The volumetric energy density [Wh.m-3] is calculated as

Expand Down Expand Up @@ -150,8 +148,7 @@ class GravimetricPowerDensity(DesignCost):
"""
Calculates the gravimetric power density (specific power) of a battery cell,
when applied to a discharge from upper to lower voltage limits. The goal of
maximising the power density is achieved by setting minimising = False in the
optimiser settings.
maximising the power density is achieved with self.minimising=False.

The time-averaged gravimetric power density [W.kg-1] is calculated as

Expand Down Expand Up @@ -214,7 +211,7 @@ class VolumetricPowerDensity(DesignCost):
"""
Calculates the (volumetric) power density of a battery cell, when applied to a
discharge from upper to lower voltage limits. The goal of maximising the power
density is achieved by setting minimising = False in the optimiser settings.
density is achieved with self.minimising=False.

The time-averaged volumetric power density [W.m-3] is calculated as

Expand Down
86 changes: 59 additions & 27 deletions pybop/optimisers/base_optimiser.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,7 @@
import numpy as np
from scipy.optimize import OptimizeResult

from pybop import (
BaseCost,
BaseLikelihood,
DesignCost,
Inputs,
Parameter,
Parameters,
WeightedCost,
)
from pybop import BaseCost, Inputs, Parameter, Parameters


class BaseOptimiser:
Expand Down Expand Up @@ -43,9 +35,6 @@ class BaseOptimiser:
Not all methods will use this information.
verbose : bool, optional
If True, the optimisation progress is printed (default: False).
minimising : bool, optional
If True, the target is to minimise the cost, else target is to maximise by minimising
the negative cost (default: True).
physical_viability : bool, optional
If True, the feasibility of the optimised parameters is checked (default: False).
allow_infeasible_solutions : bool, optional
Expand All @@ -62,13 +51,13 @@ def __init__(
# First set attributes to default values
self.parameters = Parameters()
self.x0 = optimiser_kwargs.get("x0", [])
self.log = dict(x=[], x_best=[], cost=[], cost_best=[], x0=[])
self.log = dict(x=[], x_best=[], x_search=[], x0=[], cost=[], cost_best=[])
self.bounds = None
self.sigma0 = 0.02
self.verbose = True
self.minimising = True
self._transformation = None
self._needs_sensitivities = False
self._minimising = True
self.physical_viability = False
self.allow_infeasible_solutions = False
self.default_max_iterations = 1000
Expand All @@ -79,10 +68,7 @@ def __init__(
self.parameters = self.cost.parameters
self._transformation = self.cost.transformation
self.set_allow_infeasible_solutions()
if isinstance(cost, WeightedCost):
self.minimising = cost.minimising
if isinstance(cost, (BaseLikelihood, DesignCost)):
self.minimising = False
self._minimising = self.cost.minimising

else:
try:
Expand All @@ -98,7 +84,6 @@ def __init__(
self.parameters.add(
Parameter(name=f"Parameter {i}", initial_value=value)
)
self.minimising = True

except Exception as e:
raise Exception(
Expand Down Expand Up @@ -147,7 +132,6 @@ def set_base_options(self):

# Set other options
self.verbose = self.unset_options.pop("verbose", self.verbose)
self.minimising = self.unset_options.pop("minimising", self.minimising)
if "allow_infeasible_solutions" in self.unset_options.keys():
self.set_allow_infeasible_solutions(
self.unset_options.pop("allow_infeasible_solutions")
Expand All @@ -169,6 +153,38 @@ def _set_up_optimiser(self):
"""
raise NotImplementedError

def cost_call(
    self,
    x: "Union[Inputs, list]",
    calculate_grad: bool = False,
) -> "Union[float, tuple[float, np.ndarray]]":
    """
    Call the cost function to minimise, applying any given transformation to the
    input parameters.

    Parameters
    ----------
    x : Inputs or list-like
        The input parameters for which the cost and optionally the gradient
        will be computed.
    calculate_grad : bool, optional, default=False
        If True, both the cost and gradient will be computed. Otherwise, only the
        cost is computed.

    Returns
    -------
    float or tuple
        - If `calculate_grad` is False, returns the computed cost (float).
        - If `calculate_grad` is True, returns a tuple containing the cost (float)
          and the gradient (np.ndarray).
    """
    # apply_transform maps the optimiser's search-space point into the model's
    # parameter space; for_optimiser=True lets the cost negate its value (and
    # gradient) when cost.minimising is False, so optimisers always minimise.
    return self.cost(
        x,
        calculate_grad=calculate_grad,
        apply_transform=True,
        for_optimiser=True,
    )

def run(self):
"""
Run the optimisation and return the optimised parameters and final cost.
Expand Down Expand Up @@ -243,6 +259,7 @@ def apply_transformation(values):

if x is not None:
x = convert_to_list(x)
self.log["x_search"].extend(x)
x = apply_transformation(x)
self.log["x"].extend(x)

Expand All @@ -252,10 +269,17 @@ def apply_transformation(values):

if cost is not None:
cost = convert_to_list(cost)
cost = [
internal_cost * (1 if self.minimising else -1) for internal_cost in cost
]
self.log["cost"].extend(cost)

if cost_best is not None:
cost_best = convert_to_list(cost_best)
cost_best = [
internal_cost * (1 if self.minimising else -1)
for internal_cost in cost_best
]
self.log["cost_best"].extend(cost_best)

if x0 is not None:
Expand Down Expand Up @@ -302,6 +326,10 @@ def set_allow_infeasible_solutions(self, allow: bool = True):
def needs_sensitivities(self):
return self._needs_sensitivities

@property
def minimising(self):
    """
    bool: Whether the optimiser minimises the cost. Read-only; set during
    construction from ``cost.minimising`` when a BaseCost is supplied.
    """
    return self._minimising


class OptimisationResult:
"""
Expand All @@ -321,22 +349,26 @@ class OptimisationResult:

def __init__(
self,
optim: BaseOptimiser,
x: Union[Inputs, np.ndarray] = None,
cost: Union[BaseCost, None] = None,
final_cost: Optional[float] = None,
n_iterations: Optional[int] = None,
optim: Optional[BaseOptimiser] = None,
time: Optional[float] = None,
scipy_result=None,
):
self.x = x
self.cost = cost
self.optim = optim
self.cost = self.optim.cost
self.minimising = self.optim.minimising
self._transformation = self.optim._transformation # noqa: SLF001

self.x = self._transformation.to_model(x) if self._transformation else x
self.final_cost = (
final_cost if final_cost is not None else self._calculate_final_cost()
final_cost * (1 if self.minimising else -1)
if final_cost is not None
else self._calculate_final_cost()
)
self.n_iterations = n_iterations
self.scipy_result = scipy_result
self.optim = optim
self.time = time
if isinstance(self.optim, BaseOptimiser):
self.x0 = self.optim.parameters.initial_value()
Expand Down Expand Up @@ -458,7 +490,7 @@ def add_run(self, result: OptimisationResult):
def best_run(self) -> "Optional[OptimisationResult]":
    """
    Return the result with the best final cost.

    "Best" is the minimum final cost when the runs were minimising, and the
    maximum when they were maximising.

    Returns
    -------
    OptimisationResult or None
        The best result, or None if no run produced a valid final cost
        (previously min/max would raise ValueError on an empty list).
    """
    valid_results = [res for res in self.results if res.final_cost is not None]
    if not valid_results:
        # No run produced a usable final cost.
        return None

    # All runs share the same optimisation direction; inspect the first.
    if self.results[0].minimising:
        return min(valid_results, key=lambda res: res.final_cost)

    return max(valid_results, key=lambda res: res.final_cost)
Expand Down
Loading
Loading