Skip to content

Commit

Permalink
Refactor calibration logs
Browse files Browse the repository at this point in the history
Introduce two different ways of logging calibration results:
* As a 3 column table.
  benchmark_0 | strategy_0 | performance_00
  benchmark_0 | strategy_1 | performance_01
  benchmark_1 | strategy_0 | performance_10
  benchmark_1 | strategy_1 | performance_11

* As a Cartesian product of benchmark/strategy. In this case
  the performance is shown at the intersection of a particular
  benchmark and a particular strategy.
  |            | benchmark_0    | benchmark_1    |
  | strategy_0 | performance_00 | performance_01 |
  | strategy_1 | performance_10 | performance_11 |

Benchmarks, strategies and performances are rendered as multiline
stringified dicts. This makes the calibration logging easy to
maintain for all new benchmarks/strategies, as it only requires
implementing to_dict methods for them.

Fixes #2012
  • Loading branch information
kozhukalov committed Oct 26, 2023
1 parent 696da9e commit 4022f5f
Show file tree
Hide file tree
Showing 3 changed files with 139 additions and 84 deletions.
167 changes: 88 additions & 79 deletions mitiq/calibration/calibrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,10 @@
# LICENSE file in the root directory of this source tree.

import warnings
from itertools import product
from operator import itemgetter
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Expand Down Expand Up @@ -101,88 +99,96 @@ def unique_techniques(self) -> Set[MitigationTechnique]:
collection of experiment results."""
return set(strategy.technique for strategy in self.strategies)

def _technique_results(
self, technique: MitigationTechnique
) -> Iterator[Tuple[BenchmarkProblem, Strategy, str, float, float]]:
"""Yields the results from this collection of experiment results,
limited to a specific technique."""
for strategy, problem in product(self.strategies, self.problems):
if strategy.technique is technique:
performance_symbol, nerr, merr = self._get_performance(
def log_results(self) -> None:
    """Print calibration results as a three-column table.

    Each row pairs one benchmark problem with one strategy:

        benchmark | strategy | performance

    where the benchmark and strategy cells are the multiline ``str()``
    forms of those objects and the performance cell shows the success
    symbol plus the noisy and mitigated errors. Within each benchmark
    the strategies are sorted best-first, i.e. by ascending
    ``mitigated_error - noisy_error``.
    """
    table: List[List[str | float]] = []
    headers: List[str] = ["benchmark", "strategy", "performance"]
    for problem in self.problems:
        # Collect this benchmark's rows separately so they can be
        # sorted best-strategy-first before being added to the table.
        row_group: List[List[str | float]] = []
        for strategy in self.strategies:
            perf, nerr, merr = self._get_performance(
                strategy.id, problem.id
            )
            row_group.append(
                [
                    str(problem),
                    str(strategy),
                    # Bug fix: the second fragment was missing the
                    # ``f`` prefix, so "{round(merr, 4)}" was printed
                    # literally instead of the mitigated error value.
                    f"{perf}\nNoisy error: {round(nerr, 4)}\n"
                    f"Mitigated error: {round(merr, 4)}",
                    # Sort key only; stripped below after sorting.
                    merr - nerr,
                ]
            )
        row_group.sort(key=itemgetter(-1))
        table.extend([row[:-1] for row in row_group])
    print(tabulate(table, headers, tablefmt="simple_grid"))

def log_results_cartesian(self) -> None:
    """Print calibration results as a strategy-by-benchmark matrix.

    Benchmark problems label the columns and strategies label the
    rows; the cell at their intersection shows that combination's
    performance (success symbol, noisy error, mitigated error). Row
    and column headers are the multiline ``str()`` forms of the
    strategy/problem objects.
    """
    table: List[List[str]] = []
    headers: List[str] = ["strategy\\benchmark"]
    for problem in self.problems:
        headers.append(str(problem))
    for strategy in self.strategies:
        row: List[str] = [str(strategy)]
        for problem in self.problems:
            perf, nerr, merr = self._get_performance(
                strategy.id, problem.id
            )
            row.append(
                # Bug fix: the second fragment was missing the ``f``
                # prefix, so "{round(merr, 4)}" was printed literally
                # instead of the mitigated error value.
                f"{perf}\nNoisy error: {round(nerr, 4)}\n"
                f"Mitigated error: {round(merr, 4)}"
            )
        table.append(row)
    print(tabulate(table, headers, tablefmt="simple_grid"))

def is_missing_data(self) -> bool:
"""Method to check if there is any missing data that was expected from
Expand Down Expand Up @@ -306,7 +312,7 @@ def get_cost(self) -> Dict[str, int]:
"ideal_executions": ideal,
}

def run(self, log: bool = False) -> None:
def run(self, log: bool = False, log_cartesian: bool = False) -> None:
"""Runs all the circuits required for calibration."""
if not self.results.is_missing_data():
self.results.reset_data()
Expand Down Expand Up @@ -340,6 +346,9 @@ def run(self, log: bool = False) -> None:
if log:
self.results.log_results()

if log_cartesian:
self.results.log_results_cartesian()

def best_strategy(self) -> Strategy:
"""Finds the best strategy by using the parameters that had the
smallest error.
Expand Down
14 changes: 14 additions & 0 deletions mitiq/calibration/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,13 @@ def to_dict(self) -> Dict[str, Any]:
def __repr__(self) -> str:
return str(self.to_dict())

def __str__(self) -> str:
    """Return a human-readable, multiline rendering of ``to_dict()``.

    Each ``key: value`` pair is placed on its own line, with the key
    converted from snake_case to a capitalized, space-separated title.
    The result ends with a trailing newline.
    """
    return "".join(
        f"{key.replace('_', ' ').capitalize()}: {value}\n"
        for key, value in self.to_dict().items()
    )


@dataclass
class Strategy:
Expand Down Expand Up @@ -239,6 +246,13 @@ def to_pretty_dict(self) -> Dict[str, str]:
def __repr__(self) -> str:
return str(self.to_dict())

def __str__(self) -> str:
    """Return a human-readable, multiline rendering of ``to_dict()``.

    Each ``key: value`` pair is placed on its own line, with the key
    converted from snake_case to a capitalized, space-separated title.
    The result ends with a trailing newline.
    """
    pieces: List[str] = []
    for field_name, field_value in self.to_dict().items():
        label = field_name.replace("_", " ").capitalize()
        pieces.append(f"{label}: {field_value}\n")
    return "".join(pieces)

def num_circuits_required(self) -> int:
summary = self.to_dict()
if self.technique is MitigationTechnique.ZNE:
Expand Down
42 changes: 37 additions & 5 deletions mitiq/calibration/tests/test_calibration.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
# LICENSE file in the root directory of this source tree.

"""Tests for the Clifford data regression top-level API."""
import re
from functools import partial

import cirq
Expand Down Expand Up @@ -378,11 +379,42 @@ def test_logging(capfd):
)
cal.run(log=True)
captured = capfd.readouterr()
assert "ZNE results:" in captured.out
assert "PEC results:" in captured.out
assert settings.get_strategy(0).technique.name in captured.out
assert "noisy error" in captured.out
assert "mitigated error" in captured.out
for s in cal.strategies:
for line in str(s).split():
assert line in captured.out
for p in cal.problems:
for line in str(p).split():
assert line in captured.out
mcount = 0
ncount = 0
for line in captured.out.split("\n"):
if "Mitigated error: " in line:
mcount += 1
if "Noisy error: " in line:
ncount += 1
assert mcount == (len(cal.strategies) * len(cal.problems))
assert ncount == (len(cal.strategies) * len(cal.problems))


def test_logging_cartesian(capfd):
    """Smoke-test the cartesian calibration log.

    Checks that every whitespace-separated token of each strategy's and
    problem's ``str()`` form appears in the captured output, and that
    every printed error line contains one entry per benchmark problem
    (i.e. the table really is a strategy-by-benchmark matrix).
    """
    calibrator = Calibrator(
        damping_execute, frontend="cirq", settings=light_combined_settings
    )
    calibrator.run(log_cartesian=True)
    output = capfd.readouterr().out
    for obj in (*calibrator.strategies, *calibrator.problems):
        for token in str(obj).split():
            assert token in output
    num_problems = len(calibrator.problems)
    for printed_line in output.split("\n"):
        if "Mitigated error: " in printed_line:
            assert (
                len(re.findall("Mitigated error: ", printed_line))
                == num_problems
            )
        if "Noisy error: " in printed_line:
            assert (
                len(re.findall("Noisy error: ", printed_line))
                == num_problems
            )


def test_ExperimentResults_reset_data():
Expand Down

0 comments on commit 4022f5f

Please sign in to comment.