diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst
index d877c4c28d..229a859f09 100644
--- a/doc/source/api-reference/qibo.rst
+++ b/doc/source/api-reference/qibo.rst
@@ -1740,6 +1740,12 @@ Classical relative entropy
 .. autofunction:: qibo.quantum_info.classical_relative_entropy
 
 
+Classical mutual information
+""""""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.classical_mutual_information
+
+
 Classical Rényi entropy
 """""""""""""""""""""""
 
@@ -1785,6 +1791,12 @@ Relative von Neumann entropy
     an error will be raised when using `cupy` backend.
 
 
+Mutual information
+""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.mutual_information
+
+
 Rényi entropy
 """""""""""""
 
diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py
index c9c889b603..b779084287 100644
--- a/src/qibo/quantum_info/entropies.py
+++ b/src/qibo/quantum_info/entropies.py
@@ -30,7 +30,7 @@ def shannon_entropy(prob_dist, base: float = 2, backend=None):
             Defaults to ``None``.
 
     Returns:
-        (float): Shannon entropy :math:`H(\\mathcal{p})`.
+        float: Shannon entropy :math:`H(\\mathcal{p})`.
     """
     backend = _check_backend(backend)
 
@@ -143,6 +143,40 @@ def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen
     return entropy_p - relative
 
 
+def classical_mutual_information(
+    prob_dist_joint, prob_dist_p, prob_dist_q, base: float = 2, backend=None
+):
+    """Calculates the classical mutual information of two random variables.
+
+    Given two random variables :math:`(X, \\, Y)`, their mutual information is given by
+
+    .. math::
+        I(X, \\, Y) \\equiv H(p(x)) + H(q(y)) - H(p(x, \\, y)) \\, ,
+
+    where :math:`p(x, \\, y)` is the joint probability distribution of :math:`(X, Y)`,
+    :math:`p(x)` is the marginal probability distribution of :math:`X`,
+    :math:`q(y)` is the marginal probability distribution of :math:`Y`,
+    and :math:`H(\\cdot)` is the :func:`qibo.quantum_info.entropies.shannon_entropy`.
+
+    Args:
+        prob_dist_joint (ndarray): joint probability distribution :math:`p(x, \\, y)`.
+        prob_dist_p (ndarray): marginal probability distribution :math:`p(x)`.
+        prob_dist_q (ndarray): marginal probability distribution :math:`q(y)`.
+        base (float, optional): the base of the log. Defaults to :math:`2`.
+        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used
+            in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`.
+            Defaults to ``None``.
+
+    Returns:
+        float: Mutual information :math:`I(X, \\, Y)`.
+    """
+    return (
+        shannon_entropy(prob_dist_p, base, backend)
+        + shannon_entropy(prob_dist_q, base, backend)
+        - shannon_entropy(prob_dist_joint, base, backend)
+    )
+
+
 def classical_renyi_entropy(
     prob_dist, alpha: Union[float, int], base: float = 2, backend=None
 ):
@@ -580,6 +614,50 @@ def relative_von_neumann_entropy(
     return float(backend.np.real(entropy_state - relative))
 
 
+def mutual_information(
+    state, partition, base: float = 2, check_hermitian: bool = False, backend=None
+):
+    """Calculates the mutual information of a bipartite state.
+
+    Given a qubit ``partition`` :math:`A`, the mutual information
+    of state :math:`\\rho` is given by
+
+    .. math::
+        I(\\rho) \\equiv S(\\rho_{A}) + S(\\rho_{B}) - S(\\rho) \\, ,
+
+    where :math:`B` is the set of qubits not in partition :math:`A`,
+    and :math:`S(\\cdot)` is the :func:`qibo.quantum_info.von_neumann_entropy`.
+
+    Args:
+        state (ndarray): statevector or density matrix.
+        partition (Union[List[int], Tuple[int]]): indices of qubits in partition :math:`A`.
+        base (float, optional): the base of the log. Defaults to :math:`2`.
+        check_hermitian (bool, optional): if ``True``, checks if ``state`` is Hermitian.
+            If ``False``, it assumes ``state`` is Hermitian. Defaults to ``False``.
+        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used
+            in the execution. If ``None``, it uses
+            :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.
+
+    Returns:
+        float: Mutual information :math:`I(\\rho)` of ``state`` :math:`\\rho`.
+    """
+    nqubits = np.log2(len(state))
+
+    if not nqubits.is_integer():
+        raise_error(ValueError, "dimensions of ``state`` must be a power of 2.")
+
+    partition_b = set(range(int(nqubits))) ^ set(partition)
+
+    state_a = partial_trace(state, partition_b, backend)
+    state_b = partial_trace(state, partition, backend)
+
+    return (
+        von_neumann_entropy(state_a, base, check_hermitian, False, backend)
+        + von_neumann_entropy(state_b, base, check_hermitian, False, backend)
+        - von_neumann_entropy(state, base, check_hermitian, False, backend)
+    )
+
+
 def renyi_entropy(state, alpha: Union[float, int], base: float = 2, backend=None):
     """Calculates the Rényi entropy :math:`H_{\\alpha}` of a quantum state :math:`\\rho`.
 
diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py
index 84129e3c6d..a0644a17c4 100644
--- a/tests/test_quantum_info_entropies.py
+++ b/tests/test_quantum_info_entropies.py
@@ -5,11 +5,13 @@
 from qibo.config import PRECISION_TOL
 from qibo.quantum_info.entropies import (
     _matrix_power,
+    classical_mutual_information,
     classical_relative_entropy,
     classical_relative_renyi_entropy,
     classical_renyi_entropy,
     classical_tsallis_entropy,
     entanglement_entropy,
+    mutual_information,
     relative_renyi_entropy,
     relative_von_neumann_entropy,
     renyi_entropy,
@@ -125,6 +127,27 @@ def test_classical_relative_entropy(backend, base, kind):
     backend.assert_allclose(divergence, target, atol=1e-5)
 
 
+@pytest.mark.parametrize("base", [2, 10, np.e, 5])
+def test_classical_mutual_information(backend, base):
+    prob_p = np.random.rand(10)
+    prob_q = np.random.rand(10)
+    prob_p /= np.sum(prob_p)
+    prob_q /= np.sum(prob_q)
+
+    joint_dist = np.kron(prob_p, prob_q)
+    joint_dist /= np.sum(joint_dist)
+
+    prob_p = backend.cast(prob_p, dtype=prob_p.dtype)
+    prob_q = backend.cast(prob_q, dtype=prob_q.dtype)
+    joint_dist = backend.cast(joint_dist, dtype=joint_dist.dtype)
+
+    backend.assert_allclose(
+        classical_mutual_information(joint_dist, prob_p, prob_q, base, backend),
+        0.0,
+        atol=1e-10,
+    )
+
+
 @pytest.mark.parametrize("kind", [None, list])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
 @pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
@@ -499,6 +522,25 @@ def test_relative_entropy(backend, base, check_hermitian):
     )
 
 
+@pytest.mark.parametrize("check_hermitian", [False, True])
+@pytest.mark.parametrize("base", [2, 10, np.e, 5])
+def test_mutual_information(backend, base, check_hermitian):
+    with pytest.raises(ValueError):
+        state = np.ones((3, 3))
+        state = backend.cast(state, dtype=state.dtype)
+        test = mutual_information(state, [0], backend=backend)
+
+    state_a = random_density_matrix(4, backend=backend)
+    state_b = random_density_matrix(4, backend=backend)
+    state = backend.np.kron(state_a, state_b)
+
+    backend.assert_allclose(
+        mutual_information(state, [0, 1], base, check_hermitian, backend),
+        0.0,
+        atol=1e-10,
+    )
+
+
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
 @pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
 def test_renyi_entropy(backend, alpha, base):
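
As a quick sanity check of the new API, here is a minimal usage sketch of both functions once this diff is applied. It assumes, as the `.rst` entries above indicate, that both are re-exported under `qibo.quantum_info`, and it runs on the default `numpy` backend; the values in the comments follow from the formulas in the docstrings, not from output I have verified:

```python
import numpy as np

from qibo.quantum_info import (
    classical_mutual_information,
    mutual_information,
    random_density_matrix,
)

# Classical case: for a product joint distribution p(x, y) = p(x) q(y),
# H(joint) = H(p) + H(q), so the mutual information I(X, Y) vanishes.
prob_p = np.random.rand(4)
prob_q = np.random.rand(4)
prob_p /= np.sum(prob_p)
prob_q /= np.sum(prob_q)
joint = np.kron(prob_p, prob_q)
print(classical_mutual_information(joint, prob_p, prob_q))  # ~0.0

# Quantum case: for a product state rho_A (x) rho_B, I(rho) = 0.
rho_a = random_density_matrix(2)
rho_b = random_density_matrix(2)
product_state = np.kron(rho_a, rho_b)
print(mutual_information(product_state, [0]))  # ~0.0

# For a maximally entangled Bell state, S(rho_A) = S(rho_B) = 1 and
# S(rho) = 0, so I(rho) = 2 in the default base 2.
bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
bell = np.outer(bell, bell.conj())
print(mutual_information(bell, [0]))  # ~2.0
```

The product-state cases mirror the new tests, while the Bell-state case exercises a nonzero value of the quantum quantity.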