Skip to content

Commit

Permalink
Refactor calibration logs
Browse files Browse the repository at this point in the history
Introduce two different ways of logging calibration results:
* As a 3 column table.
  benchmark_0 | strategy_0 | performance_00
  benchmark_0 | strategy_1 | performance_01
  benchmark_1 | strategy_0 | performance_10
  benchmark_1 | strategy_1 | performance_11

* As a cartesian product of benchmark/strategy. In this case
  the performance is at the intersection of a particular benchmark
  and a particular strategy.
  |            | benchmark_0    | benchmark_1    |
  | strategy_0 | performance_00 | performance_01 |
  | strategy_1 | performance_10 | performance_11 |

Benchmarks, strategies and performances are multiline
stringified dicts. This makes it easy to maintain the calibration
logging for all new benchmarks/strategies, as it only requires
them to implement to_dict methods.

Fixes #2012
  • Loading branch information
kozhukalov committed Oct 29, 2023
1 parent c461501 commit 095741f
Show file tree
Hide file tree
Showing 4 changed files with 201 additions and 104 deletions.
187 changes: 89 additions & 98 deletions mitiq/calibration/calibrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,8 @@
# LICENSE file in the root directory of this source tree.

import warnings
from itertools import product
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
from operator import itemgetter
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union, cast

import cirq
import numpy as np
Expand All @@ -33,7 +21,6 @@
)
from mitiq.calibration.settings import (
BenchmarkProblem,
MitigationTechnique,
Settings,
Strategy,
ZNESettings,
Expand Down Expand Up @@ -96,95 +83,96 @@ def _get_performance(
performance_symbol = "✔" if mitigation_worked else "✘"
return performance_symbol, noisy_error, mitigated_error

def unique_techniques(self) -> Set[MitigationTechnique]:
"""Returns the unique mitigation techniques used across this
collection of experiment results."""
return set(strategy.technique for strategy in self.strategies)

def _technique_results(
self, technique: MitigationTechnique
) -> Iterator[Tuple[BenchmarkProblem, Strategy, str, float, float]]:
"""Yields the results from this collection of experiment results,
limited to a specific technique."""
for strategy, problem in product(self.strategies, self.problems):
if strategy.technique is technique:
performance_symbol, nerr, merr = self._get_performance(
def log_results(self) -> None:
"""Prints calibration results in the following form
┌────────────────────────────────────────────┬────────────────────────────────┬─────────────────────────┐
│ benchmark │ strategy │ performance │
├────────────────────────────────────────────┼────────────────────────────────┼─────────────────────────┤
│ Type: rb │ Technique: ZNE │ ✔ │
│ Ideal distribution: {'00': 1.0} │ Factory: RichardsonFactory │ Noisy error: 0.1053 │
│ Num qubits: 2 │ Scale factors: [1.0, 2.0, 3.0] │ Mitigated error: 0.0146 │
│ Circuit depth: 326 │ Scale method: fold_global │ │
│ Two qubit gate count: 79 │ │ │
├────────────────────────────────────────────┼────────────────────────────────┼─────────────────────────┤
│ Type: rb │ Technique: ZNE │ ✔ │
│ Ideal distribution: {'00': 1.0} │ Factory: RichardsonFactory │ Noisy error: 0.1053 │
│ Num qubits: 2 │ Scale factors: [1.0, 3.0, 5.0] │ Mitigated error: 0.0422 │
│ Circuit depth: 326 │ Scale method: fold_global │ │
│ Two qubit gate count: 79 │ │ │
├────────────────────────────────────────────┼────────────────────────────────┼─────────────────────────┤
│ Type: ghz │ Technique: ZNE │ ✔ │
│ Ideal distribution: {'00': 0.5, '11': 0.5} │ Factory: RichardsonFactory │ Noisy error: 0.0157 │
│ Num qubits: 2 │ Scale factors: [1.0, 2.0, 3.0] │ Mitigated error: 0.0018 │
│ Circuit depth: 2 │ Scale method: fold_global │ │
│ Two qubit gate count: 1 │ │ │
├────────────────────────────────────────────┼────────────────────────────────┼─────────────────────────┤
│ Type: ghz │ Technique: ZNE │ ✔ │
│ Ideal distribution: {'00': 0.5, '11': 0.5} │ Factory: RichardsonFactory │ Noisy error: 0.0157 │
│ Num qubits: 2 │ Scale factors: [1.0, 3.0, 5.0] │ Mitigated error: 0.0091 │
│ Circuit depth: 2 │ Scale method: fold_global │ │
│ Two qubit gate count: 1 │ │ │
└────────────────────────────────────────────┴────────────────────────────────┴─────────────────────────┘
""" # noqa: E501
table: List[List[str | float]] = []
headers: List[str] = ["benchmark", "strategy", "performance"]
for problem in self.problems:
row_group: List[List[str | float]] = []
for strategy in self.strategies:
perf, nerr, merr = self._get_performance(
strategy.id, problem.id
)
yield problem, strategy, performance_symbol, nerr, merr

def log_technique(self, technique: MitigationTechnique) -> str:
"""Creates a table displaying all results of a given mitigation
technique."""
table: List[List[Union[str, float]]] = []
for (
problem,
strategy,
performance_symbol,
noisy_error,
mitigated_error,
) in self._technique_results(technique):
row: List[Union[str, float]] = [
performance_symbol,
problem.type,
technique.name,
]
summary_dict = strategy.to_pretty_dict()
if strategy.technique is MitigationTechnique.ZNE:
row.extend(
row_group.append(
[
summary_dict["factory"],
summary_dict["scale_factors"],
summary_dict["scale_method"],
str(problem),
str(strategy),
f"{perf}\nNoisy error: {round(nerr, 4)}\n"
f"Mitigated error: {round(merr, 4)}",
# this is only for sorting
# removed after sorting
merr - nerr,
]
)
elif strategy.technique is MitigationTechnique.PEC:
row.extend(
[
summary_dict["noise_level"],
summary_dict["noise_bias"],
summary_dict["representation_function"],
]
row_group.sort(key=itemgetter(-1))
table.extend([r[:-1] for r in row_group])
return print(tabulate(table, headers, tablefmt="simple_grid"))

def log_results_cartesian(self) -> None:
"""Prints calibration results in the following form
┌────────────────────────────────┬───────────────────────────────────┬──────────────────────────────────────────────┐
│ strategy\benchmark │ Type: rb │ Type: ghz │
│ │ Ideal distribution: {'00': 1.0} │ Ideal distribution: {'00': 0.5, '11': 0.5} │
│ │ Num qubits: 2 │ Num qubits: 2 │
│ │ Circuit depth: 326 │ Circuit depth: 2 │
│ │ Two qubit gate count: 79 │ Two qubit gate count: 1 │
│ │ │ │
├────────────────────────────────┼───────────────────────────────────┼──────────────────────────────────────────────┤
│ Technique: ZNE │ ✔ │ ✔ │
│ Factory: RichardsonFactory │ Noisy error: 0.1053 │ Noisy error: 0.0157 │
│ Scale factors: [1.0, 2.0, 3.0] │ Mitigated error: 0.0146 │ Mitigated error: 0.0018 │
│ Scale method: fold_global │ │ │
├────────────────────────────────┼───────────────────────────────────┼──────────────────────────────────────────────┤
│ Technique: ZNE │ ✔ │ ✔ │
│ Factory: RichardsonFactory │ Noisy error: 0.1053 │ Noisy error: 0.0157 │
│ Scale factors: [1.0, 3.0, 5.0] │ Mitigated error: 0.0422 │ Mitigated error: 0.0091 │
│ Scale method: fold_global │ │ │
└────────────────────────────────┴───────────────────────────────────┴──────────────────────────────────────────────┘
""" # noqa: E501
table: List[List[str]] = []
headers: List[str] = ["strategy\\benchmark"]
for problem in self.problems:
headers.append(str(problem))
for strategy in self.strategies:
row: List[str] = [str(strategy)]
for problem in self.problems:
perf, nerr, merr = self._get_performance(
strategy.id, problem.id
)
row.append(
f"{perf}\nNoisy error: {round(nerr, 4)}\n"
f"Mitigated error: {round(merr, 4)}",
)
row.extend([noisy_error, mitigated_error])
table.append(row)

def _sort_best_perf(row: List[Any]) -> float:
return row[-1] - row[-2]

table.sort(key=_sort_best_perf)

if technique is MitigationTechnique.ZNE:
headers = [
"performance",
"circuit type",
"method",
"extrapolation",
"scale_factors",
"scale method",
]
elif technique is MitigationTechnique.PEC:
headers = [
"performance",
"circuit type",
"method",
"noise level",
"noise bias",
"noise representation",
]

headers.extend(["noisy error", "mitigated error"])

return tabulate(table, headers, tablefmt="simple_grid")

def log_results(self) -> None:
"""Log results from entire calibration run. Logging is performed on
each mitigation technique individually to avoid confusion when many
techniques are used."""
for mitigation_technique in self.unique_techniques():
print(f"{mitigation_technique.name} results:")
print(self.log_technique(mitigation_technique))
print()
return print(tabulate(table, headers, tablefmt="simple_grid"))

def is_missing_data(self) -> bool:
"""Method to check if there is any missing data that was expected from
Expand Down Expand Up @@ -308,7 +296,7 @@ def get_cost(self) -> Dict[str, int]:
"ideal_executions": ideal,
}

def run(self, log: bool = False) -> None:
def run(self, log: bool = False, log_cartesian: bool = False) -> None:
"""Runs all the circuits required for calibration."""
if not self.results.is_missing_data():
self.results.reset_data()
Expand Down Expand Up @@ -342,6 +330,9 @@ def run(self, log: bool = False) -> None:
if log:
self.results.log_results()

if log_cartesian:
self.results.log_results_cartesian()

def best_strategy(self) -> Strategy:
"""Finds the best strategy by using the parameters that had the
smallest error.
Expand Down
14 changes: 14 additions & 0 deletions mitiq/calibration/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,13 @@ def to_dict(self) -> Dict[str, Any]:
def __repr__(self) -> str:
return str(self.to_dict())

def __str__(self) -> str:
    """Return a human-readable, multiline rendering of this problem.

    Each entry of ``to_dict()`` becomes one ``Title: value`` line, where
    the title is the dict key with underscores replaced by spaces and
    the first letter capitalized; the trailing newline is stripped.
    """
    pieces = [
        f"{key.replace('_', ' ').capitalize()}: {value}\n"
        for key, value in self.to_dict().items()
    ]
    return "".join(pieces).rstrip()


@dataclass
class Strategy:
Expand Down Expand Up @@ -239,6 +246,13 @@ def to_pretty_dict(self) -> Dict[str, str]:
def __repr__(self) -> str:
return str(self.to_dict())

def __str__(self) -> str:
    """Return a human-readable, multiline rendering of this strategy.

    Each entry of ``to_pretty_dict()`` becomes one ``Title: value`` line,
    where the title is the dict key with underscores replaced by spaces
    and the first letter capitalized; the trailing newline is stripped.
    """
    pieces = [
        f"{key.replace('_', ' ').capitalize()}: {value}\n"
        for key, value in self.to_pretty_dict().items()
    ]
    return "".join(pieces).rstrip()

def num_circuits_required(self) -> int:
summary = self.to_dict()
if self.technique is MitigationTechnique.ZNE:
Expand Down
42 changes: 37 additions & 5 deletions mitiq/calibration/tests/test_calibration.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
# LICENSE file in the root directory of this source tree.

"""Tests for the Clifford data regression top-level API."""
import re
from functools import partial

import cirq
Expand Down Expand Up @@ -378,11 +379,42 @@ def test_logging(capfd):
)
cal.run(log=True)
captured = capfd.readouterr()
assert "ZNE results:" in captured.out
assert "PEC results:" in captured.out
assert settings.get_strategy(0).technique.name in captured.out
assert "noisy error" in captured.out
assert "mitigated error" in captured.out
for s in cal.strategies:
for line in str(s).split():
assert line in captured.out
for p in cal.problems:
for line in str(p).split():
assert line in captured.out
mcount = 0
ncount = 0
for line in captured.out.split("\n"):
if "Mitigated error: " in line:
mcount += 1
if "Noisy error: " in line:
ncount += 1
assert mcount == (len(cal.strategies) * len(cal.problems))
assert ncount == (len(cal.strategies) * len(cal.problems))


def test_logging_cartesian(capfd):
    """The cartesian log must show every strategy/problem and one
    noisy/mitigated error cell per problem column on each table row."""
    cal = Calibrator(
        damping_execute, frontend="cirq", settings=light_combined_settings
    )
    cal.run(log_cartesian=True)
    out = capfd.readouterr().out
    # Every whitespace-separated token of each strategy/problem summary
    # must appear somewhere in the printed table.
    for strategy in cal.strategies:
        for token in str(strategy).split():
            assert token in out
    for problem in cal.problems:
        for token in str(problem).split():
            assert token in out
    n_problems = len(cal.problems)
    for line in out.split("\n"):
        if "Mitigated error: " in line:
            assert line.count("Mitigated error: ") == n_problems
        if "Noisy error: " in line:
            assert line.count("Noisy error: ") == n_problems


def test_ExperimentResults_reset_data():
Expand Down
62 changes: 61 additions & 1 deletion mitiq/calibration/tests/test_settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,13 +122,73 @@ def test_basic_settings():
assert ghz_problem.two_qubit_gate_count == 1
assert ghz_problem.ideal_distribution == {"00": 0.5, "11": 0.5}

lines = str(ghz_problem).split("\n")
ghz_problem_dict = ghz_problem.to_dict()
for line in lines:
[title, value] = line.split(":", 1)
key = title.lower().replace(" ", "_")
value = value.strip()
assert key in ghz_problem_dict
assert value == str(ghz_problem_dict[key])

strategies = settings.make_strategies()
num_strategies = 4
assert len(strategies) == num_strategies

strategy_summary = str(strategies[0]).replace("'", '"')
strategy_summary = repr(strategies[0]).replace("'", '"')
assert isinstance(json.loads(strategy_summary), dict)

lines = str(strategies[0]).split("\n")
strategy_dict = strategies[0].to_pretty_dict()
for line in lines:
[title, value] = line.split(":")
key = title.lower().replace(" ", "_")
value = value.strip()
assert key in strategy_dict
assert value == str(strategy_dict[key])


def test_settings_pretty_dict():
    """to_pretty_dict() should abbreviate raw to_dict() values.

    Iterates over *all* generated strategies: the previous version only
    inspected ``strategies[0]`` (the ZNE strategy), so the PEC branch
    and its assertions were dead code.
    """
    settings = Settings(
        benchmarks=[
            {
                "circuit_type": "ghz",
                "num_qubits": 2,
                "circuit_depth": 999,
            }
        ],
        strategies=[
            {
                "technique": "zne",
                "scale_noise": fold_global,
                "factory": RichardsonFactory([1.0, 2.0, 3.0]),
            },
            {
                "technique": "pec",
                "representation_function": (
                    represent_operation_with_local_depolarizing_noise
                ),
                "is_qubit_dependent": False,
                "noise_level": 0.001,
                "num_samples": 200,
            },
        ],
    )
    for strategy in settings.make_strategies():
        _dict = strategy.to_dict()
        pretty_dict = strategy.to_pretty_dict()
        if pretty_dict["technique"] == "ZNE":
            # e.g. "RichardsonFactory" -> "Richardson": the 7-char
            # "Factory" suffix is dropped in the pretty form.
            assert pretty_dict["factory"] == _dict["factory"][:-7]
            # Scale factors are rendered without the list brackets.
            assert (
                pretty_dict["scale_factors"]
                == str(_dict["scale_factors"])[1:-1]
            )
        elif pretty_dict["technique"] == "PEC":
            assert pretty_dict["noise_bias"] == _dict.get(
                "noise_bias", "N/A"
            )
            # The 25-char "represent_operation_with_" prefix is stripped
            # from the representation-function name.
            assert (
                pretty_dict["representation_function"]
                == _dict["representation_function"][25:]
            )


def test_make_circuits_qv_circuits():
settings = Settings(
Expand Down

0 comments on commit 095741f

Please sign in to comment.