Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add mutual_information and classical_mutual_information to quantum_info.entropies #1455

Merged
merged 11 commits into from
Sep 20, 2024
12 changes: 12 additions & 0 deletions doc/source/api-reference/qibo.rst
Original file line number Diff line number Diff line change
Expand Up @@ -1740,6 +1740,12 @@ Classical relative entropy
.. autofunction:: qibo.quantum_info.classical_relative_entropy


Classical mutual information
""""""""""""""""""""""""""""

.. autofunction:: qibo.quantum_info.classical_mutual_information


Classical Rényi entropy
"""""""""""""""""""""""

Expand Down Expand Up @@ -1785,6 +1791,12 @@ Relative von Neumann entropy
an error will be raised when using `cupy` backend.


Mutual information
""""""""""""""""""

.. autofunction:: qibo.quantum_info.mutual_information


Rényi entropy
"""""""""""""

Expand Down
80 changes: 79 additions & 1 deletion src/qibo/quantum_info/entropies.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def shannon_entropy(prob_dist, base: float = 2, backend=None):
Defaults to ``None``.

Returns:
(float): Shannon entropy :math:`H(\\mathcal{p})`.
float: Shannon entropy :math:`H(\\mathcal{p})`.
"""
backend = _check_backend(backend)

Expand Down Expand Up @@ -143,6 +143,40 @@ def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen
return entropy_p - relative


def classical_mutual_information(
    prob_dist_joint, prob_dist_p, prob_dist_q, base: float = 2, backend=None
):
    """Calculates the classical mutual information of two random variables.

    Given two random variables :math:`(X, \\, Y)`, their mutual information is given by

    .. math::
        I(X, \\, Y) \\equiv H(p(x)) + H(q(y)) - H(p(x, \\, y)) \\, ,

    where :math:`p(x, \\, y)` is the joint probability distribution of :math:`(X, Y)`,
    :math:`p(x)` is the marginal probability distribution of :math:`X`,
    :math:`q(y)` is the marginal probability distribution of :math:`Y`,
    and :math:`H(\\cdot)` is the :func:`qibo.quantum_info.entropies.shannon_entropy`.

    Args:
        prob_dist_joint (ndarray): joint probability distribution :math:`p(x, \\, y)`.
        prob_dist_p (ndarray): marginal probability distribution :math:`p(x)`.
        prob_dist_q (ndarray): marginal probability distribution :math:`q(y)`.
        base (float): the base of the log. Defaults to :math:`2`.
        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used
            in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`.
            Defaults to ``None``.

    Returns:
        float: Mutual information :math:`I(X, \\, Y)`.
    """
    # I(X, Y) = H(p) + H(q) - H(joint); each term delegates to shannon_entropy,
    # which also validates the distributions and handles the backend dispatch.
    entropy_marginal_p = shannon_entropy(prob_dist_p, base, backend)
    entropy_marginal_q = shannon_entropy(prob_dist_q, base, backend)
    entropy_joint = shannon_entropy(prob_dist_joint, base, backend)

    return entropy_marginal_p + entropy_marginal_q - entropy_joint


def classical_renyi_entropy(
prob_dist, alpha: Union[float, int], base: float = 2, backend=None
):
Expand Down Expand Up @@ -580,6 +614,50 @@ def relative_von_neumann_entropy(
return float(backend.np.real(entropy_state - relative))


def mutual_information(
    state, partition, base: float = 2, check_hermitian: bool = False, backend=None
):
    """Calculates the mutual information of a bipartite state.

    Given a qubit ``partition`` :math:`A`, the mutual information
    of state :math:`\\rho` is given by

    .. math::
        I(\\rho) \\equiv S(\\rho_{A}) + S(\\rho_{B}) - S(\\rho) \\, ,

    where :math:`B` is the remaining qubits that are not in partition :math:`A`,
    and :math:`S(\\cdot)` is the :func:`qibo.quantum_info.von_neumann_entropy`.

    Args:
        state (ndarray): statevector or density matrix.
        partition (Union[List[int], Tuple[int]]): indices of qubits in partition :math:`A`.
        base (float, optional): the base of the log. Defaults to :math:`2`.
        check_hermitian (bool, optional): if ``True``, checks if ``state`` is Hermitian.
            If ``False``, it assumes ``state`` is Hermitian. Defaults to ``False``.
        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used
            in the execution. If ``None``, it uses
            :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.

    Returns:
        float: Mutual information :math:`I(\\rho)` of ``state`` :math:`\\rho`.

    Raises:
        ValueError: if the dimension of ``state`` is not a power of :math:`2`.
    """
    nqubits = np.log2(len(state))

    if not nqubits.is_integer():
        # Plain string: there is nothing to interpolate, so no f-prefix needed.
        raise_error(ValueError, "dimensions of ``state`` must be a power of 2.")

    # Partition B is the complement of partition A within the qubit register.
    # Set difference (not symmetric difference) guarantees that an out-of-range
    # index in ``partition`` can never leak into the complement.
    partition_b = set(range(int(nqubits))) - set(partition)

    state_a = partial_trace(state, partition_b, backend)
    state_b = partial_trace(state, partition, backend)

    # I(A:B) = S(A) + S(B) - S(AB)
    return (
        von_neumann_entropy(state_a, base, check_hermitian, False, backend)
        + von_neumann_entropy(state_b, base, check_hermitian, False, backend)
        - von_neumann_entropy(state, base, check_hermitian, False, backend)
    )


def renyi_entropy(state, alpha: Union[float, int], base: float = 2, backend=None):
"""Calculates the Rényi entropy :math:`H_{\\alpha}` of a quantum state :math:`\\rho`.

Expand Down
37 changes: 37 additions & 0 deletions tests/test_quantum_info_entropies.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@
from qibo.config import PRECISION_TOL
from qibo.quantum_info.entropies import (
_matrix_power,
classical_mutual_information,
classical_relative_entropy,
classical_relative_renyi_entropy,
classical_renyi_entropy,
classical_tsallis_entropy,
entanglement_entropy,
mutual_information,
relative_renyi_entropy,
relative_von_neumann_entropy,
renyi_entropy,
Expand Down Expand Up @@ -125,6 +127,27 @@ def test_classical_relative_entropy(backend, base, kind):
backend.assert_allclose(divergence, target, atol=1e-5)


@pytest.mark.parametrize("base", [2, 10, np.e, 5])
def test_classical_mutual_information(backend, base):
    """Mutual information of independent variables (product distribution) is zero."""
    marginal_p = np.random.rand(10)
    marginal_q = np.random.rand(10)
    marginal_p = marginal_p / np.sum(marginal_p)
    marginal_q = marginal_q / np.sum(marginal_q)

    # Kronecker product of the marginals is the joint distribution of
    # two independent variables.
    joint = np.kron(marginal_p, marginal_q)
    joint = joint / np.sum(joint)

    marginal_p = backend.cast(marginal_p, dtype=marginal_p.dtype)
    marginal_q = backend.cast(marginal_q, dtype=marginal_q.dtype)
    joint = backend.cast(joint, dtype=joint.dtype)

    backend.assert_allclose(
        classical_mutual_information(joint, marginal_p, marginal_q, base, backend),
        0.0,
        atol=1e-10,
    )


@pytest.mark.parametrize("kind", [None, list])
@pytest.mark.parametrize("base", [2, 10, np.e, 5])
@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
Expand Down Expand Up @@ -499,6 +522,20 @@ def test_relative_entropy(backend, base, check_hermitian):
)


@pytest.mark.parametrize("check_hermitian", [False, True])
@pytest.mark.parametrize("base", [2, 10, np.e, 5])
def test_mutual_information(backend, base, check_hermitian):
    """Mutual information of a product state across the product cut is zero."""
    rho_a = random_density_matrix(4, backend=backend)
    rho_b = random_density_matrix(4, backend=backend)
    product_state = backend.np.kron(rho_a, rho_b)

    # Partition [0, 1] is exactly the subsystem holding ``rho_a``.
    result = mutual_information(product_state, [0, 1], base, check_hermitian, backend)

    backend.assert_allclose(result, 0.0, atol=1e-10)


@pytest.mark.parametrize("base", [2, 10, np.e, 5])
@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
def test_renyi_entropy(backend, alpha, base):
Expand Down