diff --git a/mitiq/rem/inverse_confusion_matrix.py b/mitiq/rem/inverse_confusion_matrix.py
index ac52910fd1..96e7271558 100644
--- a/mitiq/rem/inverse_confusion_matrix.py
+++ b/mitiq/rem/inverse_confusion_matrix.py
@@ -4,7 +4,7 @@
 # LICENSE file in the root directory of this source tree.
 
 from functools import reduce
-from typing import List, Sequence
+from typing import Sequence
 
 import numpy as np
 import numpy.typing as npt
@@ -14,38 +14,35 @@
 
 
 def sample_probability_vector(
-    probability_vector: npt.NDArray[np.float64], samples: int
-) -> List[Bitstring]:
+    probability_vector: Sequence[float], samples: int
+) -> list[str]:
     """Generate a number of samples from a probability distribution as
     bitstrings.
 
     Args:
         probability_vector: A probability vector.
+        samples: The number of samples to generate.
 
     Returns:
         A list of sampled bitstrings.
+
+    Example:
+        >>> sample_probability_vector([0, 1/2, 1/4, 1/4], 4)
+        ['01', '10', '11', '11']
     """
-    # sample using the probability distribution given
     num_values = len(probability_vector)
-    choices = np.random.choice(num_values, size=samples, p=probability_vector)
-
-    # convert samples to binary strings
-    bit_width = int(np.log2(num_values))
-    binary_repr_vec = np.vectorize(np.binary_repr)
-    binary_strings = binary_repr_vec(choices, width=bit_width)
-
-    # split the binary strings into an array of ints
-    bitstrings = (
-        np.apply_along_axis(  # type: ignore
-            func1d=np.fromstring,  # type: ignore
-            axis=1,
-            arr=binary_strings[:, None],
-            dtype="U1",  # type: ignore
+    if not np.log2(num_values).is_integer():
+        raise ValueError(
+            "The length of the probability vector must be a power of 2."
         )
-        .astype(np.uint8)
-        .tolist()
+
+    sampled_indices = np.random.choice(
+        num_values, size=samples, p=probability_vector
     )
+    bit_width = int(np.log2(num_values))
+    bitstrings = [format(index, f"0{bit_width}b") for index in sampled_indices]
+
     return bitstrings
 
 
@@ -60,7 +57,7 @@ def bitstrings_to_probability_vector(
         bitstrings: All measured bitstrings.
 
     Returns:
-        A probabiity vector corresponding to the measured bitstrings.
+        A probability vector corresponding to the measured bitstrings.
     """
     pv = np.zeros(2 ** len(bitstrings[0]))
     for bs in bitstrings:
@@ -100,7 +97,7 @@ def generate_inverse_confusion_matrix(
 
 
 def generate_tensored_inverse_confusion_matrix(
-    num_qubits: int, confusion_matrices: List[npt.NDArray[np.float64]]
+    num_qubits: int, confusion_matrices: list[npt.NDArray[np.float64]]
 ) -> npt.NDArray[np.float64]:
     """
     Generates the inverse confusion matrix utilizing the supplied
@@ -132,7 +129,7 @@ def generate_tensored_inverse_confusion_matrix(
 
 def closest_positive_distribution(
     quasi_probabilities: npt.NDArray[np.float64],
-) -> npt.NDArray[np.float64]:
+) -> list[float]:
     """Given the input quasi-probability distribution returns the closest
     positive probability distribution (with respect to the total variation
     distance).
@@ -163,7 +160,7 @@ def distance(probabilities: npt.NDArray[np.float64]) -> np.float64:
         raise ValueError(
             "REM failed to determine the closest positive distribution."
         )
-    return result.x
+    return result.x.tolist()
 
 
 def mitigate_measurements(
diff --git a/mitiq/rem/tests/test_inverse_confusion_matrix.py b/mitiq/rem/tests/test_inverse_confusion_matrix.py
index bd3371c468..7e982469f6 100644
--- a/mitiq/rem/tests/test_inverse_confusion_matrix.py
+++ b/mitiq/rem/tests/test_inverse_confusion_matrix.py
@@ -22,29 +22,35 @@
 )
 
 
+def test_sample_probability_vector_invalid_size():
+    with pytest.raises(ValueError, match="power of 2"):
+        sample_probability_vector([1 / 3, 1 / 3, 1 / 3], 3)
+
+
 def test_sample_probability_vector_single_qubit():
     bitstrings = sample_probability_vector(np.array([1, 0]), 10)
-    assert all([b == [0] for b in bitstrings])
+    assert all(b == "0" for b in bitstrings)
 
     bitstrings = sample_probability_vector(np.array([0, 1]), 10)
-    assert all([b == [1] for b in bitstrings])
+    assert all(b == "1" for b in bitstrings)
 
+    np.random.seed(0)
     bitstrings = sample_probability_vector(np.array([0.5, 0.5]), 1000)
-    assert isclose(sum([b[0] for b in bitstrings]), 500, rel_tol=0.1)
+    assert sum(int(b) for b in bitstrings) == 483
 
 
 def test_sample_probability_vector_two_qubits():
     bitstrings = sample_probability_vector(np.array([1, 0, 0, 0]), 10)
-    assert all([b == [0, 0] for b in bitstrings])
+    assert all(b == "00" for b in bitstrings)
 
     bitstrings = sample_probability_vector(np.array([0, 1, 0, 0]), 10)
-    assert all([b == [0, 1] for b in bitstrings])
+    assert all(b == "01" for b in bitstrings)
 
     bitstrings = sample_probability_vector(np.array([0, 0, 1, 0]), 10)
-    assert all([b == [1, 0] for b in bitstrings])
+    assert all(b == "10" for b in bitstrings)
 
     bitstrings = sample_probability_vector(np.array([0, 0, 0, 1]), 10)
-    assert all([b == [1, 1] for b in bitstrings])
+    assert all(b == "11" for b in bitstrings)
 
 
 def test_bitstrings_to_probability_vector():
@@ -64,20 +70,20 @@ def test_bitstrings_to_probability_vector():
     assert (pv == np.array([0, 0, 0, 1])).all()
 
 
-def test_probability_vector_roundtrip():
-    for _ in range(10):
-        pv = np.random.rand(4)
-        pv /= np.sum(pv)
-        assert isclose(
-            np.linalg.norm(
-                pv
-                - bitstrings_to_probability_vector(
-                    sample_probability_vector(pv, 1000)
-                )
-            ),
-            0,
-            abs_tol=0.1,
-        )
+@pytest.mark.parametrize("_", range(10))
+def test_probability_vector_roundtrip(_):
+    pv = np.random.rand(4)
+    pv /= np.sum(pv)
+    assert isclose(
+        np.linalg.norm(
+            pv
+            - bitstrings_to_probability_vector(
+                sample_probability_vector(pv, 1000)
+            )
+        ),
+        0,
+        abs_tol=0.1,
+    )
 
 
 def test_generate_inverse_confusion_matrix():
@@ -137,12 +143,12 @@ def test_generate_tensored_inverse_confusion_matrix(
             num_qubits, confusion_matrices
         )
     else:
-        assert np.isclose(
+        assert np.allclose(
            generate_tensored_inverse_confusion_matrix(
                num_qubits, confusion_matrices
            ),
            expected,
-        ).all()
+        )
 
 
 def test_mitigate_measurements():
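
A minimal round-trip sketch of the updated API, mirroring test_probability_vector_roundtrip above (not part of the diff; assumes mitiq is installed, with the import path taken from the module changed here):

    import numpy as np

    from mitiq.rem.inverse_confusion_matrix import (
        bitstrings_to_probability_vector,
        sample_probability_vector,
    )

    np.random.seed(0)  # seed for reproducibility, as the updated tests do

    # A valid probability vector over two qubits (length is a power of 2;
    # other lengths now raise a ValueError).
    pv = np.array([0.0, 0.5, 0.25, 0.25])

    # Bitstrings now come back as strings such as '01', not lists of ints.
    bitstrings = sample_probability_vector(pv, 1000)

    # Converting back should approximately recover the input distribution.
    recovered = bitstrings_to_probability_vector(bitstrings)
    assert np.linalg.norm(pv - recovered) < 0.1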