Improvement to landscape plots, MCMC summary, x0 log entry #570

Merged: 7 commits, Dec 4, 2024
Changes from all commits
CHANGELOG.md (1 addition, 0 deletions)

@@ -2,6 +2,7 @@

## Features

+- [#570](https://github.com/pybop-team/PyBOP/pull/570) - Updates the contour and surface plots, adds mixed chain effective sample size computation, and adds x0 to optim.log
- [#566](https://github.com/pybop-team/PyBOP/pull/566) - Adds `UnitHyperCube` transformation class, fixes incorrect application of gradient transformation.
- [#569](https://github.com/pybop-team/PyBOP/pull/569) - Adds parameter specific learning rate functionality to GradientDescent optimiser.
- [#282](https://github.com/pybop-team/PyBOP/issues/282) - Restructures the examples directory.
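Taken together, the user-facing effect of this PR can be sketched as follows. This is illustrative only: `cost` stands for any pre-built two-parameter PyBOP cost object, and the behaviour shown is the intent of the diffs below rather than verbatim library output.

import pybop

# Assumes `cost` is a two-parameter pybop cost, as in the bundled examples.
optim = pybop.XNES(cost, max_iterations=100)
results = optim.run()

# New in this PR: the starting point is recorded in the optimiser's log.
print(optim.log["x0"])

# The updated plots now mark the initial values ("x") and final values ("cross").
pybop.plot.contour(optim)
pybop.plot.surface(optim)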
examples/scripts/comparison_examples/spm_XNES.py (3 additions, 0 deletions)

@@ -52,3 +52,6 @@

# Plot the cost landscape with optimisation path
pybop.plot.surface(optim)
+
+# Plot contour
+pybop.plot.contour(optim)
pybop/optimisers/base_optimiser.py (9 additions, 7 deletions)

@@ -61,11 +61,11 @@ def __init__(
    ):
        # First set attributes to default values
        self.parameters = Parameters()
-        self.x0 = None
+        self.x0 = optimiser_kwargs.get("x0", [])
+        self.log = dict(x=[], x_best=[], cost=[], cost_best=[], x0=[])
        self.bounds = None
        self.sigma0 = 0.02
        self.verbose = True
-        self.log = dict(x=[], x_best=[], cost=[], cost_best=[])
        self.minimising = True
        self._transformation = None
        self._needs_sensitivities = False
@@ -86,7 +86,6 @@ def __init__

        else:
            try:
-                self.x0 = optimiser_kwargs.get("x0", [])
                cost_test = cost(self.x0)
                warnings.warn(
                    "The cost is not an instance of pybop.BaseCost, but let's continue "
@@ -132,6 +131,7 @@ def set_base_options(self):
"""
# Set initial values, if x0 is None, initial values are unmodified.
self.parameters.update(initial_values=self.unset_options.pop("x0", None))
self.log_update(x0=self.parameters.reset_initial_value())
self.x0 = self.parameters.reset_initial_value(apply_transform=True)

# Set default bounds (for all or no parameters)
@@ -197,7 +197,7 @@ def _run(self):
"""
raise NotImplementedError

def log_update(self, x=None, x_best=None, cost=None, cost_best=None):
def log_update(self, x=None, x_best=None, cost=None, cost_best=None, x0=None):
"""
Update the log with new values.

@@ -234,9 +234,8 @@ def apply_transformation(values):
self.log["x"].extend(x)

if x_best is not None:
x_best = convert_to_list(x_best)
x_best = apply_transformation(x_best)
self.log["x_best"].extend([x_best])
x_best = apply_transformation([x_best])
self.log["x_best"].extend(x_best)

if cost is not None:
cost = convert_to_list(cost)
@@ -246,6 +245,9 @@
            cost_best = convert_to_list(cost_best)
            self.log["cost_best"].extend(cost_best)

+        if x0 is not None:
+            self.log["x0"].extend(x0)
+
    def name(self):
        """
        Returns the name of the optimiser, to be overwritten by child classes.
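For orientation, the logging pattern introduced above reduces to a small dict of parallel lists. A standalone sketch, not the PyBOP class itself, of how entries accumulate:

# Minimal stand-in for the optimiser log introduced above (illustrative only).
log = dict(x=[], x_best=[], cost=[], cost_best=[], x0=[])

def log_update(log, x=None, x_best=None, cost=None, cost_best=None, x0=None):
    """Append new values to the log, mirroring BaseOptimiser.log_update."""
    if x is not None:
        log["x"].extend(x)
    if x_best is not None:
        log["x_best"].extend([x_best])
    if cost is not None:
        log["cost"].extend(cost if isinstance(cost, list) else [cost])
    if cost_best is not None:
        log["cost_best"].extend(cost_best if isinstance(cost_best, list) else [cost_best])
    if x0 is not None:
        log["x0"].extend(x0)

log_update(log, x0=[0.6, 0.5])                 # recorded once, when options are set
log_update(log, x=[[0.61, 0.52]], cost=0.42)   # recorded as iterations proceed
print(log["x0"])   # [0.6, 0.5]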
pybop/plot/contour.py (34 additions, 10 deletions)

@@ -36,6 +36,8 @@ def contour(
    bounds : numpy.ndarray, optional
        A 2x2 array specifying the [min, max] bounds for each parameter. If None, uses
        `cost.parameters.get_bounds_for_plotly`.
+    apply_transform : bool, optional
+        If True, plots the cost landscape in the transformed search space used by the optimiser.
    steps : int, optional
        The number of grid points to divide the parameter space into along each dimension (default: 10).
    show : bool, optional
@@ -156,11 +158,12 @@
    layout_options = dict(
        title="Cost Landscape",
        title_x=0.5,
-        title_y=0.9,
+        title_y=0.905,
        width=600,
        height=600,
        xaxis=dict(range=bounds[0], showexponent="last", exponentformat="e"),
        yaxis=dict(range=bounds[1], showexponent="last", exponentformat="e"),
+        legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="right", x=1),
    )
    if hasattr(cost, "parameters"):
        name = cost.parameters.keys()
@@ -170,7 +173,8 @@

    # Create contour plot and update the layout
    fig = go.Figure(
-        data=[go.Contour(x=x, y=y, z=costs, connectgaps=True)], layout=layout
+        data=[go.Contour(x=x, y=y, z=costs, colorscale="Viridis", connectgaps=True)],
+        layout=layout,
    )

    if plot_optim:
@@ -184,29 +188,49 @@
mode="markers",
marker=dict(
color=[i / len(optim_trace) for i in range(len(optim_trace))],
colorscale="YlOrBr",
colorscale="Greys",
size=8,
showscale=False,
),
showlegend=False,
)
)

# Plot the initial guess
if optim.x0 is not None:
if optim.log["x0"] is not None:
fig.add_trace(
go.Scatter(
x=[optim.x0[0]],
y=[optim.x0[1]],
x=[optim.log["x0"][0]],
y=[optim.log["x0"][1]],
mode="markers",
marker_symbol="circle",
marker_symbol="x",
marker=dict(
color="mediumspringgreen",
line_color="mediumspringgreen",
color="white",
line_color="black",
line_width=1,
size=14,
showscale=False,
),
showlegend=False,
name="Initial values",
)
)

# Plot optimised value
if optim.log["x_best"] is not None:
fig.add_trace(
go.Scatter(
x=[optim.log["x_best"][-1][0]],
y=[optim.log["x_best"][-1][1]],
mode="markers",
marker_symbol="cross",
marker=dict(
color="black",
line_color="white",
line_width=1,
size=14,
showscale=False,
),
name="Final values",
)
)

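A typical call to the updated plot, assuming a completed optimisation such as the spm_XNES.py example above. The `steps` and `apply_transform` arguments are taken from the docstring; the rest is a usage sketch, not verbatim library documentation.

# `optim` is assumed to be a completed pybop optimisation.
pybop.plot.contour(optim, steps=25)              # landscape in the original search space
pybop.plot.contour(optim, apply_transform=True)  # landscape in the optimiser's transformed space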
pybop/plot/voronoi.py (30 additions, 9 deletions)

@@ -293,9 +293,9 @@ def surface(
            y=y_optim,
            mode="markers",
            marker=dict(
-                color=[i / len(x_optim) for i in range(len(x_optim))],
-                colorscale="Greys",
                size=8,
+                symbol="x-thin",
+                line=dict(color="white", width=1),
                showscale=False,
            ),
            text=[f"f={val:.2f}" for val in f],
@@ -308,31 +308,52 @@
    if optim.x0 is not None:
        fig.add_trace(
            go.Scatter(
-                x=[optim.x0[0]],
-                y=[optim.x0[1]],
+                x=[optim.log["x0"][0]],
+                y=[optim.log["x0"][1]],
                mode="markers",
-                marker_symbol="circle",
+                marker_symbol="x",
                marker=dict(
-                    color="mediumspringgreen",
-                    line_color="mediumspringgreen",
+                    color="white",
+                    line_color="black",
+                    line_width=1,
                    size=14,
                    showscale=False,
                ),
-                showlegend=False,
+                name="Initial values",
            )
        )

+    # Plot optimised value
+    if optim.log["x_best"] is not None:
+        fig.add_trace(
+            go.Scatter(
+                x=[optim.log["x_best"][-1][0]],
+                y=[optim.log["x_best"][-1][1]],
+                mode="markers",
+                marker_symbol="cross",
+                marker=dict(
+                    color="black",
+                    line_color="white",
+                    line_width=1,
+                    size=14,
+                    showscale=False,
+                ),
+                name="Final values",
+            )
+        )
+
    names = optim.cost.parameters.keys()
    fig.update_layout(
        title="Voronoi Cost Landscape",
        title_x=0.5,
-        title_y=0.9,
+        title_y=0.905,
        xaxis_title=names[0],
        yaxis_title=names[1],
        width=600,
        height=600,
        xaxis=dict(range=xlim, showexponent="last", exponentformat="e"),
        yaxis=dict(range=ylim, showexponent="last", exponentformat="e"),
+        legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="right", x=1),
    )
    fig.update_layout(**layout_kwargs)
    if show:
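Because the function ends with fig.update_layout(**layout_kwargs), callers can override any of the defaults set above. A sketch, with the keyword forwarding inferred from that call and the exact signature of pybop.plot.surface left as an assumption:

# Assumes `optim` is a completed optimisation; title/width/height are standard Plotly layout keys.
pybop.plot.surface(optim, title="Voronoi landscape, SPM fit", width=800, height=800)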
pybop/samplers/mcmc_summary.py (38 additions, 10 deletions)

@@ -65,7 +65,7 @@ def get_summary_statistics(self):
            for key, func in summary_funs.items()
        }

-    def plot_trace(self):
+    def plot_trace(self, **kwargs):
        """
        Plot trace plots for the posterior samples.
        """
@@ -83,9 +83,10 @@
xaxis_title="Sample Index",
yaxis_title="Value",
)
fig.update_layout(**kwargs)
fig.show()

def plot_chains(self):
def plot_chains(self, **kwargs):
"""
Plot posterior distributions for each chain.
"""
@@ -117,9 +118,10 @@
xaxis_title="Value",
yaxis_title="Density",
)
fig.update_layout(**kwargs)
fig.show()

def plot_posterior(self):
def plot_posterior(self, **kwargs):
"""
Plot the summed posterior distribution across chains.
"""
@@ -142,7 +144,9 @@
xaxis_title="Value",
yaxis_title="Density",
)
fig.update_layout(**kwargs)
fig.show()
return fig

def summary_table(self):
"""
@@ -192,21 +196,45 @@ def _autocorrelate_negative(self, autocorrelation):
            negative_indices[0] if negative_indices.size > 0 else len(autocorrelation)
        )

-    def effective_sample_size(self):
-        """
-        Computes the effective sample size for each parameter in each chain.
+    def effective_sample_size(self, mixed_chains=False):
+        """
+        Computes the effective sample size (ESS) for each parameter in each chain.
+
+        Parameters
+        ----------
+        mixed_chains : bool, optional
+            If True, the ESS is computed for all samples mixed into a single chain.
+            Defaults to False.
+
+        Returns
+        -------
+        list
+            A list of effective sample sizes for each parameter in each chain,
+            or for the mixed chain if `mixed_chains` is True.

        Raises
        ------
        ValueError
            If there are fewer than two samples in the data.
        """
        if self.all_samples.shape[0] < 2:
            raise ValueError("At least two samples must be given.")

-        ess = []
-        for _, chain in enumerate(self.chains):
+        def compute_ess(samples):
+            """Helper function to compute the ESS for a single set of samples."""
+            ess = []
            for j in range(self.num_parameters):
-                rho = self.autocorrelation(chain[:, j])
+                rho = self.autocorrelation(samples[:, j])
                T = self._autocorrelate_negative(rho)
-                ess.append(len(chain[:, j]) / (1 + 2 * rho[:T].sum()))
+                ess.append(len(samples[:, j]) / (1 + 2 * rho[:T].sum()))
+            return ess
+
+        if mixed_chains:
+            return compute_ess(self.all_samples)
+
+        ess = []
+        for chain in self.chains:
+            ess.extend(compute_ess(chain))
        return ess

    def rhat(self):
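The estimator behind effective_sample_size is the standard truncated-autocorrelation form: for n draws of one parameter with lag-t autocorrelation rho_t, truncated at the first negative lag T, ESS = n / (1 + 2 * sum_{t=1..T} rho_t). A self-contained NumPy sketch of the same idea follows; it is not the PyBOP implementation, and PyBOP's autocorrelation helper may handle lag 0 differently.

import numpy as np

def ess_1d(samples):
    """ESS for a single parameter: autocorrelations are summed up to the
    first negative lag, mirroring the truncation rule used above."""
    n = len(samples)
    x = samples - samples.mean()
    # Normalised autocorrelations at lags 1..n-1
    acf = np.correlate(x, x, mode="full")[n:] / (x @ x)
    negative = np.where(acf < 0)[0]
    T = negative[0] if negative.size > 0 else len(acf)
    return n / (1 + 2 * acf[:T].sum())

rng = np.random.default_rng(0)
print(ess_1d(rng.normal(size=1000)))  # close to 1000 for independent draws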
tests/unit/test_sampling.py (22 additions, 8 deletions)

@@ -153,18 +153,32 @@ def test_initialisation_and_run(
        assert samples.shape == (chains, 1, 2)

    @pytest.mark.unit
-    def test_effectie_sample_size(self, log_posterior):
-        sampler = pybop.SliceStepoutMCMC(
-            log_pdf=log_posterior,
-            chains=1,
-            max_iterations=1,
-        )
-        results = sampler.run()
-        summary = pybop.PosteriorSummary(results)
+    def test_effective_sample_size(self, log_posterior):
+        chains = np.asarray([[[0, 0]]])
+        summary = pybop.PosteriorSummary(chains)
+
+        with pytest.raises(ValueError, match="At least two samples must be given."):
+            summary.effective_sample_size()
+
+        n_chains = 3
+        sampler = pybop.HaarioBardenetACMC(
+            log_pdf=log_posterior,
+            chains=n_chains,
+            max_iterations=3,
+        )
+        chains = sampler.run()
+        summary = pybop.PosteriorSummary(chains)
+
+        # Non mixed chains
+        ess = summary.effective_sample_size()
+        assert len(ess) == log_posterior.n_parameters * n_chains
+        assert all(e > 0 for e in ess)  # ESS should be positive
+
+        # Mixed chains
+        ess = summary.effective_sample_size(mixed_chains=True)
+        assert len(ess) == log_posterior.n_parameters
+        assert all(e > 0 for e in ess)

    @pytest.mark.unit
    def test_single_parameter_sampling(self, model, dataset, MCMC, chains):
        parameters = pybop.Parameters(