From 68b5c56ac99f31e15f8374be15ceab5f1d658d2a Mon Sep 17 00:00:00 2001 From: Jake Lishman Date: Tue, 8 Oct 2024 18:41:18 +0100 Subject: [PATCH] Implement arithmetic on `SparseObservable` The simple arithmetic operators of addition, subtraction, multiplication, division and tensor product are in line with other `quantum_info` objects, including coercion from other objects (though better behaved on failed coercions, in many cases). Where appopriate, in-place operations will re-use the existing allocations. The tensor product cannot really be done in place, so it doesn't define a special `__rxor__` operator, but it inherits the standard Python behaviour of this being derived from `__xor__`. There are further mathematical operations to add around composition and evolution, and to apply a `TranspileLayout` to the observable. All of these, in the `quantum_info` style either directly deal with a layout, or take a `qargs` argument that is effectively a sublayout for the right-hand side of the operation. These will be added in a follow-up. --- crates/accelerate/src/sparse_observable.rs | 887 +++++++++++++++++- .../quantum_info/test_sparse_observable.py | 635 ++++++++++++- 2 files changed, 1478 insertions(+), 44 deletions(-) diff --git a/crates/accelerate/src/sparse_observable.rs b/crates/accelerate/src/sparse_observable.rs index e452f19b3235..4533504d2324 100644 --- a/crates/accelerate/src/sparse_observable.rs +++ b/crates/accelerate/src/sparse_observable.rs @@ -13,14 +13,14 @@ use std::collections::btree_map; use num_complex::Complex64; +use num_traits::Zero; use thiserror::Error; use numpy::{ PyArray1, PyArrayDescr, PyArrayDescrMethods, PyArrayLike1, PyReadonlyArray1, PyReadonlyArray2, PyUntypedArrayMethods, }; - -use pyo3::exceptions::{PyTypeError, PyValueError}; +use pyo3::exceptions::{PyTypeError, PyValueError, PyZeroDivisionError}; use pyo3::intern; use pyo3::prelude::*; use pyo3::sync::GILOnceCell; @@ -67,7 +67,7 @@ static SPARSE_PAULI_OP_TYPE: ImportOnceCell = /// return `PyArray1` at Python-space boundaries) so that it's clear when we're doing /// the transmute, and we have to be explicit about the safety of that. #[repr(u8)] -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] pub enum BitTerm { /// Pauli X operator. X = 0b00_10, @@ -367,8 +367,8 @@ impl From for PyErr { /// The allowed alphabet forms an overcomplete basis of the operator space. This means that there /// is not a unique summation to represent a given observable. By comparison, /// :class:`.SparsePauliOp` uses a precise basis of the operator space, so (after combining terms of -/// the same Pauli string, removing zeros, and sorting the terms to some canonical order) there is -/// only one representation of any operator. +/// the same Pauli string, removing zeros, and sorting the terms to :ref:`some canonical order +/// `) there is only one representation of any operator. /// /// :class:`SparseObservable` uses its particular overcomplete basis with the aim of making /// "efficiency of measurement" equivalent to "efficiency of representation". For example, the @@ -537,6 +537,44 @@ impl From for PyErr { /// >>> obs.bit_terms[:] = obs.bit_terms[:] & 0b00_11 /// >>> assert obs == SparseObservable.from_list([("XZY", 1.5j), ("XZY", -0.5)]) /// +/// .. 
_sparse-observable-canonical-order: +/// +/// Canonical ordering +/// ------------------ +/// +/// For any given mathematical observable, there are several ways of representing it with +/// :class:`SparseObservable`. For example, the same set of single-bit terms and their +/// corresponding indices might appear multiple times in the observable. Mathematically, this is +/// equivalent to having only a single term with all the coefficients summed. Similarly, the terms +/// of the sum in a :class:`SparseObservable` can be in any order while representing the same +/// observable, since addition is commutative (although while floating-point addition is not +/// associative, :class:`SparseObservable` makes no guarantees about the summation order). +/// +/// These two categories of representation degeneracy can cause the ``==`` operator to claim that +/// two observables are not equal, despite representating the same object. In these cases, it can +/// be convenient to define some *canonical form*, which allows observables to be compared +/// structurally. +/// +/// You can put a :class:`SparseObservable` in canonical form by using the :meth:`simplify` method. +/// The precise ordering of terms in canonical ordering is not specified, and may change between +/// versions of Qiskit. Within the same version of Qiskit, however, you can compare two observables +/// structurally by comparing their simplified forms. +/// +/// .. note:: +/// +/// If you wish to account for floating-point tolerance in the comparison, it is safest to use +/// a recipe such as:: +/// +/// def equivalent(left, right, tol): +/// return (left - right).simplify(tol) == SparseObservable.zero(left.num_qubits) +/// +/// .. note:: +/// +/// The canonical form produced by :meth:`simplify` will still not universally detect all +/// observables that are equivalent due to the over-complete basis alphabet; it is not +/// computationally feasible to do this at scale. For example, on observable built from ``+`` +/// and ``-`` components will not canonicalize to a single ``X`` term. +/// /// /// Construction /// ============ @@ -599,6 +637,58 @@ impl From for PyErr { /// /// :meth:`identity` The identity operator on a given number of qubits. /// ============================ ================================================================ +/// +/// +/// Mathematical manipulation +/// ========================= +/// +/// :class:`SparseObservable` supports the standard set of Python mathematical operators like other +/// :mod:`~qiskit.quantum_info` operators. +/// +/// In basic arithmetic, you can: +/// +/// * add two observables using ``+`` +/// * subtract two observables using ``-`` +/// * multiply or divide by an :class:`int`, :class:`float` or :class:`complex` using ``*`` and ``/`` +/// * negate all the coefficients in an observable with unary ``-`` +/// +/// Each of the basic binary arithmetic operators has a corresponding specialized in-place method, +/// which mutates the left-hand side in-place. Using these is typically more efficient than the +/// infix operators, especially for building an observable in a loop. +/// +/// The tensor product is calculated with :meth:`tensor` (for standard, juxtaposition ordering of +/// Pauli labels) or :meth:`expand` (for the reverse order). The ``^`` operator is overloaded to be +/// equivalent to :meth:`tensor`. +/// +/// .. 
note:: +/// +/// When using the binary operators ``^`` (:meth:`tensor`) and ``&`` (:meth:`compose`), beware +/// that `Python's operator-precedence rules +/// `__ may cause the +/// evaluation order to be different to your expectation. In particular, the operator ``+`` +/// binds more tightly than ``^`` or ``&``, just like ``*`` binds more tightly than ``+``. +/// +/// When using the operators in mixed expressions, it is safest to use parentheses to group the +/// operands of tensor products. +/// +/// A :class:`SparseObservable` has a well-defined :meth:`adjoint`. The notions of scalar complex +/// conjugation (:meth:`conjugate`) and real-value transposition (:meth:`transpose`) are defined +/// analogously to the matrix representation of other Pauli operators in Qiskit. +/// +/// +/// Efficiency notes +/// ---------------- +/// +/// Internally, :class:`SparseObservable` is in-place mutable, including using over-allocating +/// growable vectors for extending the number of terms. This means that the cost of appending to an +/// observable using ``+=`` is amortised linear in the total number of terms added, rather than +/// the quadratic complexity that the binary ``+`` would require. +/// +/// Additions and subtractions are implemented by a term-stacking operation; there is no automatic +/// "simplification" (summing of like terms), because the majority of additions to build up an +/// observable generate only a small number of duplications, and like-term detection has additional +/// costs. If this does not fit your use cases, you can either periodically call :meth:`simplify`, +/// or discuss further APIs with us for better building of observables. #[pyclass(module = "qiskit.quantum_info")] #[derive(Clone, Debug, PartialEq)] pub struct SparseObservable { @@ -699,6 +789,23 @@ impl SparseObservable { } } + /// Create a zero operator with pre-allocated space for the given number of summands and + /// single-qubit bit terms. + #[inline] + pub fn with_capacity(num_qubits: u32, num_terms: usize, num_bit_terms: usize) -> Self { + Self { + num_qubits, + coeffs: Vec::with_capacity(num_terms), + bit_terms: Vec::with_capacity(num_bit_terms), + indices: Vec::with_capacity(num_bit_terms), + boundaries: { + let mut boundaries = Vec::with_capacity(num_terms + 1); + boundaries.push(0); + boundaries + }, + } + } + /// Get an iterator over the individual terms of the operator. pub fn iter(&'_ self) -> impl ExactSizeIterator> + '_ { self.coeffs.iter().enumerate().map(|(i, coeff)| { @@ -712,6 +819,82 @@ impl SparseObservable { }) } + /// Get an iterator over the individual terms of the operator that allows mutation of each term + /// in place without affecting its length or indices, both of which would allow breaking data + /// coherence. + pub fn iter_mut(&mut self) -> IterMut<'_> { + self.into() + } + + /// Reduce the observable to its canonical form. + /// + /// This sums like terms, removing them if the final complex coefficient's absolute value is + /// less than or equal to the tolerance. The terms are reordered to some canonical ordering. + /// + /// This function is idempotent. 
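+    ///
+    /// A sketch of the intended use (everything here is the existing API on this type; the
+    /// coefficients are chosen so the floating-point sum is exact):
+    ///
+    /// ```
+    /// let mut obs = SparseObservable::zero(4);
+    /// obs.add_dense_label("IXZY", Complex64::new(0.5, 0.0));
+    /// obs.add_dense_label("IXZY", Complex64::new(0.25, 0.0));
+    ///
+    /// let mut expected = SparseObservable::zero(4);
+    /// expected.add_dense_label("IXZY", Complex64::new(0.75, 0.0));
+    ///
+    /// // Like terms are summed, so both observables share the same canonical form.
+    /// assert_eq!(obs.canonicalize(1e-10), expected.canonicalize(1e-10));
+    /// ```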
+ pub fn canonicalize(&self, tol: f64) -> SparseObservable { + let mut terms = btree_map::BTreeMap::new(); + for term in self.iter() { + terms + .entry((term.indices, term.bit_terms)) + .and_modify(|c| *c += term.coeff) + .or_insert(term.coeff); + } + let mut out = SparseObservable::zero(self.num_qubits); + for ((indices, bit_terms), coeff) in terms { + if coeff.norm_sqr() <= tol * tol { + continue; + } + out.coeffs.push(coeff); + out.bit_terms.extend_from_slice(bit_terms); + out.indices.extend_from_slice(indices); + out.boundaries.push(out.indices.len()); + } + out + } + + /// Tensor product of `self` with `other`. + /// + /// The bit ordering is defined such that the qubit indices of `other` will remain the same, and + /// the indices of `self` will be offset by the number of qubits in `other`. This is the same + /// convention as used by the rest of Qiskit's `quantum_info` operators. + /// + /// Put another way, in the simplest case of two observables formed of dense labels: + /// + /// ``` + /// let mut left = SparseObservable::zero(5); + /// left.add_dense_label("IXY+Z", Complex64::new(1.0, 0.0)); + /// let mut right = SparseObservable::zero(6); + /// right.add_dense_label("IIrl01", Complex64::new(1.0, 0.0)); + /// + /// // The result is the concatenation of the two labels. + /// let mut out = SparseObservable::zero(11); + /// out.add_dense_label("IXY+ZIIrl01", Complex64::new(1.0, 0.0)); + /// + /// assert_eq!(left.tensor(right), out); + /// ``` + pub fn tensor(&self, other: &SparseObservable) -> SparseObservable { + let mut out = SparseObservable::with_capacity( + self.num_qubits + other.num_qubits, + self.coeffs.len() * other.coeffs.len(), + other.coeffs.len() * self.bit_terms.len() + self.coeffs.len() * other.bit_terms.len(), + ); + let mut self_indices = Vec::new(); + for self_term in self.iter() { + self_indices.clear(); + self_indices.extend(self_term.indices.iter().map(|i| i + other.num_qubits)); + for other_term in other.iter() { + out.coeffs.push(self_term.coeff * other_term.coeff); + out.indices.extend_from_slice(other_term.indices); + out.indices.extend_from_slice(&self_indices); + out.bit_terms.extend_from_slice(other_term.bit_terms); + out.bit_terms.extend_from_slice(self_term.bit_terms); + out.boundaries.push(out.bit_terms.len()); + } + } + out + } + /// Add the term implied by a dense string label onto this observable. pub fn add_dense_label>( &mut self, @@ -747,13 +930,25 @@ impl SparseObservable { self.boundaries.push(self.bit_terms.len()); Ok(()) } + + /// Return a suitable Python error if two observables do not have equal numbers of qubits. + fn check_equal_qubits(&self, other: &SparseObservable) -> PyResult<()> { + if self.num_qubits != other.num_qubits { + Err(PyValueError::new_err(format!( + "incompatible numbers of qubits: {} and {}", + self.num_qubits, other.num_qubits + ))) + } else { + Ok(()) + } + } } #[pymethods] impl SparseObservable { #[pyo3(signature = (data, /, num_qubits=None))] #[new] - fn py_new(data: Bound, num_qubits: Option) -> PyResult { + fn py_new(data: &Bound, num_qubits: Option) -> PyResult { let py = data.py(); let check_num_qubits = |data: &Bound| -> PyResult<()> { let Some(num_qubits) = num_qubits else { @@ -769,11 +964,11 @@ impl SparseObservable { }; if data.is_instance(PAULI_TYPE.get_bound(py))? { - check_num_qubits(&data)?; + check_num_qubits(data)?; return Self::py_from_pauli(data); } if data.is_instance(SPARSE_PAULI_OP_TYPE.get_bound(py))? 
{ - check_num_qubits(&data)?; + check_num_qubits(data)?; return Self::py_from_sparse_pauli_op(data); } if let Ok(label) = data.extract::() { @@ -788,7 +983,7 @@ impl SparseObservable { return Self::py_from_label(&label).map_err(PyErr::from); } if let Ok(observable) = data.downcast::() { - check_num_qubits(&data)?; + check_num_qubits(data)?; return Ok(observable.borrow().clone()); } // The type of `vec` is inferred from the subsequent calls to `Self::py_from_list` or @@ -907,13 +1102,7 @@ impl SparseObservable { #[pyo3(signature = (/, num_qubits))] #[staticmethod] pub fn zero(num_qubits: u32) -> Self { - Self { - num_qubits, - coeffs: vec![], - bit_terms: vec![], - indices: vec![], - boundaries: vec![0], - } + Self::with_capacity(num_qubits, 0, 0) } /// Get the identity operator over the given number of qubits. @@ -936,6 +1125,25 @@ impl SparseObservable { } } + /// Clear all the terms from this operator, making it equal to the zero operator again. + /// + /// This does not change the capacity of the internal allocations, so subsequent addition or + /// substraction operations may not need to reallocate. + /// + /// Examples: + /// + /// .. code-block:: python + /// + /// >>> obs = SparseObservable.from_list([("IX+-rl", 2.0), ("01YZII", -1j)]) + /// >>> obs.clear() + /// >>> assert obs == SparseObservable.zero(obs.num_qubits) + pub fn clear(&mut self) { + self.coeffs.clear(); + self.bit_terms.clear(); + self.indices.clear(); + self.boundaries.truncate(1); + } + fn __repr__(&self) -> String { let num_terms = format!( "{} term{}", @@ -998,6 +1206,133 @@ impl SparseObservable { slf.borrow().eq(&other.borrow()) } + fn __add__(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? else { + return Ok(py.NotImplemented()); + }; + let other = other.borrow(); + self.check_equal_qubits(&other)?; + Ok((self + &other).into_py(py)) + } + fn __radd__(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? else { + return Ok(py.NotImplemented()); + }; + let other = other.borrow(); + self.check_equal_qubits(&other)?; + Ok((<&SparseObservable as ::std::ops::Add>::add(&other, self)).into_py(py)) + } + fn __iadd__(slf_: Bound, other: &Bound) -> PyResult<()> { + if slf_.is(other) { + *slf_.borrow_mut() *= Complex64::new(2.0, 0.0); + } else { + let mut slf_ = slf_.borrow_mut(); + let Some(other) = coerce_to_observable(other)? else { + // This is not well behaved - we _should_ return `NotImplemented` to Python space + // without an exception, but limitations in PyO3 prevent this at the moment. See + // https://github.com/PyO3/pyo3/issues/4605. + return Err(PyTypeError::new_err(format!( + "invalid object for in-place addition of 'SparseObservable': {}", + other.repr()? + ))); + }; + let other = other.borrow(); + slf_.check_equal_qubits(&other)?; + *slf_ += &other; + } + Ok(()) + } + + fn __sub__(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? else { + return Ok(py.NotImplemented()); + }; + let other = other.borrow(); + self.check_equal_qubits(&other)?; + Ok((self - &other).into_py(py)) + } + fn __rsub__(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? 
else { + return Ok(py.NotImplemented()); + }; + let other = other.borrow(); + self.check_equal_qubits(&other)?; + Ok((<&SparseObservable as ::std::ops::Sub>::sub(&other, self)).into_py(py)) + } + fn __isub__(slf_: Bound, other: &Bound) -> PyResult<()> { + if slf_.is(other) { + // This is not strictly the same thing as `a - a` if `a` contains non-finite + // floating-point values (`inf - inf` is `NaN`, for example); we don't really have a + // clear view on what floating-point guarantees we're going to make right now. + slf_.borrow_mut().clear() + } else { + let mut slf_ = slf_.borrow_mut(); + let Some(other) = coerce_to_observable(other)? else { + // This is not well behaved - we _should_ return `NotImplemented` to Python space + // without an exception, but limitations in PyO3 prevent this at the moment. See + // https://github.com/PyO3/pyo3/issues/4605. + return Err(PyTypeError::new_err(format!( + "invalid object for in-place subtraction of 'SparseObservable': {}", + other.repr()? + ))); + }; + let other = other.borrow(); + slf_.check_equal_qubits(&other)?; + *slf_ -= &other; + } + Ok(()) + } + + fn __pos__(&self) -> SparseObservable { + self.clone() + } + fn __neg__(&self) -> SparseObservable { + -self + } + + fn __mul__(&self, other: Complex64) -> SparseObservable { + self * other + } + fn __rmul__(&self, other: Complex64) -> SparseObservable { + other * self + } + fn __imul__(&mut self, other: Complex64) { + *self *= other; + } + + fn __truediv__(&self, other: Complex64) -> PyResult { + if other.is_zero() { + return Err(PyZeroDivisionError::new_err("complex division by zero")); + } + Ok(self / other) + } + fn __itruediv__(&mut self, other: Complex64) -> PyResult<()> { + if other.is_zero() { + return Err(PyZeroDivisionError::new_err("complex division by zero")); + } + *self /= other; + Ok(()) + } + + fn __xor__(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? else { + return Ok(py.NotImplemented()); + }; + Ok(self.tensor(&other.borrow()).into_py(py)) + } + fn __rxor__(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? else { + return Ok(py.NotImplemented()); + }; + Ok(other.borrow().tensor(self).into_py(py)) + } + // This doesn't actually have any interaction with Python space, but uses the `py_` prefix on // its name to make it clear it's different to the Rust concept of `Copy`. /// Get a copy of this observable. 
@@ -1116,14 +1451,7 @@ impl SparseObservable { Some(num_qubits) => num_qubits, None => iter[0].0.len() as u32, }; - let mut out = Self { - num_qubits, - coeffs: Vec::with_capacity(iter.len()), - bit_terms: Vec::new(), - indices: Vec::new(), - boundaries: Vec::with_capacity(iter.len() + 1), - }; - out.boundaries.push(0); + let mut out = Self::with_capacity(num_qubits, iter.len(), 0); for (label, coeff) in iter { out.add_dense_label(&label, coeff)?; } @@ -1244,7 +1572,7 @@ impl SparseObservable { /// >>> assert SparseObservable.from_label(label) == SparseObservable.from_pauli(pauli) #[staticmethod] #[pyo3(name = "from_pauli", signature = (pauli, /))] - fn py_from_pauli(pauli: Bound) -> PyResult { + fn py_from_pauli(pauli: &Bound) -> PyResult { let py = pauli.py(); let num_qubits = pauli.getattr(intern!(py, "num_qubits"))?.extract::()?; let z = pauli @@ -1311,7 +1639,7 @@ impl SparseObservable { /// #[staticmethod] #[pyo3(name = "from_sparse_pauli_op", signature = (op, /))] - fn py_from_sparse_pauli_op(op: Bound) -> PyResult { + fn py_from_sparse_pauli_op(op: &Bound) -> PyResult { let py = op.py(); let pauli_list_ob = op.getattr(intern!(py, "paulis"))?; let coeffs = op @@ -1454,11 +1782,411 @@ impl SparseObservable { Ok(unsafe { Self::new_unchecked(num_qubits, coeffs, bit_terms, indices, boundaries) }) } } + + /// Sum any like terms in this operator, removing them if the resulting complex coefficient has + /// an absolute value within tolerance of zero. + /// + /// As a side effect, this sorts the operator into :ref:`canonical order + /// `. + /// + /// .. note:: + /// + /// When using this for equality comparisons, note that floating-point rounding and the + /// non-associativity fo floating-point addition may cause non-zero coefficients of summed + /// terms to compare unequal. To compare two observables up to a tolerance, it is safest to + /// compare the canonicalized difference of the two observables to zero. + /// + /// Args: + /// tol (float): after summing like terms, any coefficients whose absolute value is less + /// than the given absolute tolerance will be suppressed from the output. + /// + /// Examples: + /// + /// Using :meth:`simplify` to compare two operators that represent the same observable, but + /// would compare unequal due to the structural tests by default:: + /// + /// >>> base = SparseObservable.from_sparse_list([ + /// ... ("XZ", (2, 1), 1e-10), # value too small + /// ... ("+-", (3, 1), 2j), + /// ... ("+-", (3, 1), 2j), # can be combined with the above + /// ... ("01", (3, 1), 0.5), # out of order compared to `expected` + /// ... ], num_qubits=5) + /// >>> expected = SparseObservable.from_list([("I0I1I", 0.5), ("I+I-I", 4j)]) + /// >>> assert base != expected # non-canonical comparison + /// >>> assert base.simplify() == expected.simplify() + /// + /// Note that in the above example, the coefficients are chosen such that all floating-point + /// calculations are exact, and there are no intermediate rounding or associativity + /// concerns. 
If this cannot be guaranteed to be the case, the safer form is:: + /// + /// >>> left = SparseObservable.from_list([("XYZ", 1.0/3.0)] * 3) # sums to 1.0 + /// >>> right = SparseObservable.from_list([("XYZ", 1.0/7.0)] * 7) # doesn't sum to 1.0 + /// >>> assert left.simplify() != right.simplify() + /// >>> assert (left - right).simplify() == SparseObservable.zero(left.num_qubits) + #[pyo3( + signature = (/, tol=1e-8), + name = "simplify", + )] + fn py_simplify(&self, tol: f64) -> SparseObservable { + self.canonicalize(tol) + } + + /// Tensor product of two observables. + /// + /// The bit ordering is defined such that the qubit indices of the argument will remain the + /// same, and the indices of ``self`` will be offset by the number of qubits in ``other``. This + /// is the same convention as used by the rest of Qiskit's :mod:`~qiskit.quantum_info` + /// operators. + /// + /// This function is used for the infix ``^`` operator. If using this operator, beware that + /// `Python's operator-precedence rules + /// `__ may cause the + /// evaluation order to be different to your expectation. In particular, the operator ``+`` + /// binds more tightly than ``^``, just like ``*`` binds more tightly than ``+``. Use + /// parentheses to fix the evaluation order, if needed. + /// + /// The argument will be cast to :class:`SparseObservable` using its default constructor, if it + /// is not already in the correct form. + /// + /// Args: + /// + /// other: the observable to put on the right-hand side of the tensor product. + /// + /// Examples: + /// + /// The bit ordering of this is such that the tensor product of two observables made from a + /// single label "looks like" an observable made by concatenating the two strings:: + /// + /// >>> left = SparseObservable.from_label("XYZ") + /// >>> right = SparseObservable.from_label("+-IIrl") + /// >>> assert left.tensor(right) == SparseObservable.from_label("XYZ+-IIrl") + /// + /// You can also use the infix ``^`` operator for tensor products, which will similarly cast + /// the right-hand side of the operation if it is not already a :class:`SparseObservable`:: + /// + /// >>> assert SparseObservable("rl") ^ Pauli("XYZ") == SparseObservable("rlXYZ") + /// + /// See also: + /// :meth:`expand` + /// + /// The same function, but with the order of arguments flipped. This can be useful if + /// you like using the casting behavior for the argument, but you want your existing + /// :class:`SparseObservable` to be on the right-hand side of the tensor ordering. + #[pyo3(signature = (other, /), name = "tensor")] + fn py_tensor(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? else { + return Err(PyTypeError::new_err(format!( + "unknown type for tensor: {}", + other.get_type().repr()? + ))); + }; + Ok(self.tensor(&other.borrow()).into_py(py)) + } + + /// Reverse-order tensor product. + /// + /// This is equivalent to ``other.tensor(self)``, except that ``other`` will first be type-cast + /// to :class:`SparseObservable` if it isn't already one (by calling the default constructor). + /// + /// Args: + /// + /// other: the observable to put on the left-hand side of the tensor product. 
+ /// + /// Examples: + /// + /// This is equivalent to :meth:`tensor` with the order of the arguments flipped:: + /// + /// >>> left = SparseObservable.from_label("XYZ") + /// >>> right = SparseObservable.from_label("+-IIrl") + /// >>> assert left.tensor(right) == right.expand(left) + /// + /// See also: + /// :meth:`tensor` + /// + /// The same function with the order of arguments flipped. :meth:`tensor` is the more + /// standard argument ordering, and matches Qiskit's other conventions. + #[pyo3(signature = (other, /), name = "expand")] + fn py_expand(&self, other: &Bound) -> PyResult> { + let py = other.py(); + let Some(other) = coerce_to_observable(other)? else { + return Err(PyTypeError::new_err(format!( + "unknown type for expand: {}", + other.get_type().repr()? + ))); + }; + Ok(other.borrow().tensor(self).into_py(py)) + } + + /// Calculate the adjoint of this observable. + /// + /// This is well defined in the abstract mathematical sense. All the terms of the single-qubit + /// alphabet are self-adjoint, so the result of this operation is the same observable, except + /// its coefficients are all their complex conjugates. + /// + /// Examples: + /// + /// .. code-block:: + /// + /// >>> left = SparseObservable.from_list([("XY+-", 1j)]) + /// >>> right = SparseObservable.from_list([("XY+-", -1j)]) + /// >>> assert left.adjoint() == right + fn adjoint(&self) -> SparseObservable { + SparseObservable { + num_qubits: self.num_qubits, + coeffs: self.coeffs.iter().map(|c| c.conj()).collect(), + bit_terms: self.bit_terms.clone(), + indices: self.indices.clone(), + boundaries: self.boundaries.clone(), + } + } + + /// Calculate the complex conjugation of this observable. + /// + /// This operation is defined in terms of the standard matrix conventions of Qiskit, in that the + /// matrix form is taken to be in the $Z$ computational basis. The $X$- and $Z$-related + /// alphabet terms are unaffected by the complex conjugation, but $Y$-related terms modify their + /// alphabet terms. Precisely: + /// + /// * :math:`Y` conjguates to :math:`-Y` + /// * :math:`\lvert r\rangle\langle r\rvert` conjugates to :math:`\lvert l\rangle\langle l\rvert` + /// * :math:`\lvert l\rangle\langle l\rvert` conjugates to :math:`\lvert r\rangle\langle r\rvert` + /// + /// Additionally, all coefficients are conjugated. + /// + /// Examples: + /// + /// .. code-block:: + /// + /// >>> obs = SparseObservable([("III", 1j), ("Yrl", 0.5)]) + /// >>> assert obs.conjugate() == SparseObservable([("III", -1j), ("Ylr", -0.5)]) + fn conjugate(&self) -> SparseObservable { + let mut out = self.transpose(); + for coeff in out.coeffs.iter_mut() { + *coeff = coeff.conj(); + } + out + } + + /// Calculate the matrix transposition of this observable. + /// + /// This operation is defined in terms of the standard matrix conventions of Qiskit, in that the + /// matrix form is taken to be in the $Z$ computational basis. The $X$- and $Z$-related + /// alphabet terms are unaffected by the transposition, but $Y$-related terms modify their + /// alphabet terms. Precisely: + /// + /// * :math:`Y` transposes to :math:`-Y` + /// * :math:`\lvert r\rangle\langle r\rvert` transposes to :math:`\lvert l\rangle\langle l\rvert` + /// * :math:`\lvert l\rangle\langle l\rvert` transposes to :math:`\lvert r\rangle\langle r\rvert` + /// + /// Examples: + /// + /// .. 
code-block:: + /// + /// >>> obs = SparseObservable([("III", 1j), ("Yrl", 0.5)]) + /// >>> assert obs.transpose() == SparseObservable([("III", 1j), ("Ylr", -0.5)]) + fn transpose(&self) -> SparseObservable { + let mut out = self.clone(); + for term in out.iter_mut() { + for bit_term in term.bit_terms { + match bit_term { + BitTerm::Y => { + *term.coeff = -*term.coeff; + } + BitTerm::Right => { + *bit_term = BitTerm::Left; + } + BitTerm::Left => { + *bit_term = BitTerm::Right; + } + _ => (), + } + } + } + out + } +} + +impl ::std::ops::Add<&SparseObservable> for SparseObservable { + type Output = SparseObservable; + + fn add(mut self, rhs: &SparseObservable) -> SparseObservable { + self += rhs; + self + } +} +impl ::std::ops::Add for &SparseObservable { + type Output = SparseObservable; + + fn add(self, rhs: &SparseObservable) -> SparseObservable { + let mut out = SparseObservable::with_capacity( + self.num_qubits, + self.coeffs.len() + rhs.coeffs.len(), + self.bit_terms.len() + rhs.bit_terms.len(), + ); + out += self; + out += rhs; + out + } +} +impl ::std::ops::AddAssign<&SparseObservable> for SparseObservable { + fn add_assign(&mut self, rhs: &SparseObservable) { + if self.num_qubits != rhs.num_qubits { + panic!("attempt to add two operators with incompatible qubit counts"); + } + self.coeffs.extend_from_slice(&rhs.coeffs); + self.bit_terms.extend_from_slice(&rhs.bit_terms); + self.indices.extend_from_slice(&rhs.indices); + // We only need to write out the new endpoints, not the initial zero. + let offset = self.boundaries[self.boundaries.len() - 1]; + self.boundaries + .extend(rhs.boundaries[1..].iter().map(|boundary| offset + boundary)); + } +} + +impl ::std::ops::Sub<&SparseObservable> for SparseObservable { + type Output = SparseObservable; + + fn sub(mut self, rhs: &SparseObservable) -> SparseObservable { + self -= rhs; + self + } } +impl ::std::ops::Sub for &SparseObservable { + type Output = SparseObservable; -/// A view object onto a single term of a `SparseObservable`. + fn sub(self, rhs: &SparseObservable) -> SparseObservable { + let mut out = SparseObservable::with_capacity( + self.num_qubits, + self.coeffs.len() + rhs.coeffs.len(), + self.bit_terms.len() + rhs.bit_terms.len(), + ); + out += self; + out -= rhs; + out + } +} +impl ::std::ops::SubAssign<&SparseObservable> for SparseObservable { + fn sub_assign(&mut self, rhs: &SparseObservable) { + if self.num_qubits != rhs.num_qubits { + panic!("attempt to subtract two operators with incompatible qubit counts"); + } + self.coeffs.extend(rhs.coeffs.iter().map(|coeff| -coeff)); + self.bit_terms.extend_from_slice(&rhs.bit_terms); + self.indices.extend_from_slice(&rhs.indices); + // We only need to write out the new endpoints, not the initial zero. 
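+        // As a hypothetical illustration: if `self.boundaries` currently ends in `7` and
+        // `rhs.boundaries` is `[0, 2, 5]`, the two appended boundaries are `9` and `12`.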
+ let offset = self.boundaries[self.boundaries.len() - 1]; + self.boundaries + .extend(rhs.boundaries[1..].iter().map(|boundary| offset + boundary)); + } +} + +impl ::std::ops::Mul for SparseObservable { + type Output = SparseObservable; + + fn mul(mut self, rhs: Complex64) -> SparseObservable { + self *= rhs; + self + } +} +impl ::std::ops::Mul for &SparseObservable { + type Output = SparseObservable; + + fn mul(self, rhs: Complex64) -> SparseObservable { + if rhs == Complex64::new(0.0, 0.0) { + SparseObservable::zero(self.num_qubits) + } else { + SparseObservable { + num_qubits: self.num_qubits, + coeffs: self.coeffs.iter().map(|c| c * rhs).collect(), + bit_terms: self.bit_terms.clone(), + indices: self.indices.clone(), + boundaries: self.boundaries.clone(), + } + } + } +} +impl ::std::ops::Mul for Complex64 { + type Output = SparseObservable; + + fn mul(self, mut rhs: SparseObservable) -> SparseObservable { + rhs *= self; + rhs + } +} +impl ::std::ops::Mul<&SparseObservable> for Complex64 { + type Output = SparseObservable; + + fn mul(self, rhs: &SparseObservable) -> SparseObservable { + rhs * self + } +} +impl ::std::ops::MulAssign for SparseObservable { + fn mul_assign(&mut self, rhs: Complex64) { + if rhs == Complex64::new(0.0, 0.0) { + self.coeffs.clear(); + self.bit_terms.clear(); + self.indices.clear(); + self.boundaries.clear(); + self.boundaries.push(0); + } else { + self.coeffs.iter_mut().for_each(|c| *c *= rhs) + } + } +} + +impl ::std::ops::Div for SparseObservable { + type Output = SparseObservable; + + fn div(mut self, rhs: Complex64) -> SparseObservable { + self /= rhs; + self + } +} +impl ::std::ops::Div for &SparseObservable { + type Output = SparseObservable; + + fn div(self, rhs: Complex64) -> SparseObservable { + SparseObservable { + num_qubits: self.num_qubits, + coeffs: self.coeffs.iter().map(|c| c / rhs).collect(), + bit_terms: self.bit_terms.clone(), + indices: self.indices.clone(), + boundaries: self.boundaries.clone(), + } + } +} +impl ::std::ops::DivAssign for SparseObservable { + fn div_assign(&mut self, rhs: Complex64) { + self.coeffs.iter_mut().for_each(|c| *c /= rhs) + } +} + +impl ::std::ops::Neg for &SparseObservable { + type Output = SparseObservable; + + fn neg(self) -> SparseObservable { + SparseObservable { + num_qubits: self.num_qubits, + coeffs: self.coeffs.iter().map(|c| -c).collect(), + bit_terms: self.bit_terms.clone(), + indices: self.indices.clone(), + boundaries: self.boundaries.clone(), + } + } +} +impl ::std::ops::Neg for SparseObservable { + type Output = SparseObservable; + + fn neg(mut self) -> SparseObservable { + self.coeffs.iter_mut().for_each(|c| *c = -*c); + self + } +} + +/// A view object onto a single term of a [SparseObservable]. /// -/// The lengths of `bit_terms` and `indices` are guaranteed to be created equal, but might be zero +/// The lengths of [bit_terms] and [indices] are guaranteed to be created equal, but might be zero /// (in the case that the term is proportional to the identity). #[derive(Clone, Copy, Debug)] pub struct SparseTermView<'a> { @@ -1467,6 +2195,78 @@ pub struct SparseTermView<'a> { pub indices: &'a [u32], } +/// A mutable view object onto a single term of a [SparseObservable]. +/// +/// The lengths of [bit_terms] and [indices] are guaranteed to be created equal, but might be zero +/// (in the case that the term is proportional to the identity). [indices] is not mutable because +/// this would allow data coherence to be broken. 
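+///
+/// A sketch of in-place mutation through [SparseObservable::iter_mut], which yields these views
+/// (here `obs` is assumed to be some existing [SparseObservable]):
+///
+/// ```
+/// // Double every coefficient without touching the term structure.
+/// for term in obs.iter_mut() {
+///     *term.coeff *= Complex64::new(2.0, 0.0);
+/// }
+/// ```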
+#[derive(Debug)] +pub struct SparseTermViewMut<'a> { + pub coeff: &'a mut Complex64, + pub bit_terms: &'a mut [BitTerm], + pub indices: &'a [u32], +} + +/// Iterator type allowing in-place mutation of the [SparseObservable]. +/// +/// Created by [SparseObservable::iter_mut]. +#[derive(Debug)] +pub struct IterMut<'a> { + coeffs: &'a mut [Complex64], + bit_terms: &'a mut [BitTerm], + indices: &'a [u32], + boundaries: &'a [usize], + i: usize, +} +impl<'a> From<&'a mut SparseObservable> for IterMut<'a> { + fn from(value: &mut SparseObservable) -> IterMut { + IterMut { + coeffs: &mut value.coeffs, + bit_terms: &mut value.bit_terms, + indices: &value.indices, + boundaries: &value.boundaries, + i: 0, + } + } +} +impl<'a> Iterator for IterMut<'a> { + type Item = SparseTermViewMut<'a>; + + fn next(&mut self) -> Option { + // The trick here is that the lifetime of the 'self' borrow is shorter than the lifetime of + // the inner borrows. We can't give out mutable references to our inner borrow, because + // after the lifetime on 'self' expired, there'd be nothing preventing somebody using the + // 'self' borrow to see _another_ mutable borrow of the inner data, which would be an + // aliasing violation. Instead, we keep splitting the inner views we took out so the + // mutable references we return don't overlap with the ones we continue to hold. + let coeffs = ::std::mem::take(&mut self.coeffs); + let (coeff, other_coeffs) = coeffs.split_first_mut()?; + self.coeffs = other_coeffs; + + let len = self.boundaries[self.i + 1] - self.boundaries[self.i]; + self.i += 1; + + let all_bit_terms = ::std::mem::take(&mut self.bit_terms); + let all_indices = ::std::mem::take(&mut self.indices); + let (bit_terms, rest_bit_terms) = all_bit_terms.split_at_mut(len); + let (indices, rest_indices) = all_indices.split_at(len); + self.bit_terms = rest_bit_terms; + self.indices = rest_indices; + + Some(SparseTermViewMut { + coeff, + bit_terms, + indices, + }) + } + + fn size_hint(&self) -> (usize, Option) { + (self.coeffs.len(), Some(self.coeffs.len())) + } +} +impl<'a> ExactSizeIterator for IterMut<'a> {} +impl<'a> ::std::iter::FusedIterator for IterMut<'a> {} + /// Helper class of `ArrayView` that denotes the slot of the `SparseObservable` we're looking at. #[derive(Clone, Copy, PartialEq, Eq)] enum ArraySlot { @@ -1686,6 +2486,37 @@ fn cast_array_type<'py, T>( .map(|obj| obj.into_any()) } +/// Attempt to coerce an arbitrary Python object to a [SparseObservable]. +/// +/// This returns: +/// +/// * `Ok(Some(obs))` if the coercion was completely successful. +/// * `Ok(None)` if the input value was just completely the wrong type and no coercion could be +/// attempted. +/// * `Err` if the input was a valid type for coercion, but the coercion failed with a Python +/// exception. +/// +/// The purpose of this is for conversion the arithmetic operations, which should return +/// [PyNotImplemented] if the type is not valid for coercion. 
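+///
+/// A sketch of the calling pattern used by the binary operators (`other` is the bound Python
+/// argument and `py` its GIL token):
+///
+/// ```
+/// let Some(other) = coerce_to_observable(other)? else {
+///     return Ok(py.NotImplemented());
+/// };
+/// // ... operate on `other.borrow()` ...
+/// ```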
+fn coerce_to_observable<'py>( + value: &Bound<'py, PyAny>, +) -> PyResult>> { + let py = value.py(); + if let Ok(obs) = value.downcast_exact::() { + return Ok(Some(obs.clone())); + } + match SparseObservable::py_new(value, None) { + Ok(obs) => Ok(Some(Bound::new(py, obs)?)), + Err(e) => { + if e.is_instance_of::(py) { + Ok(None) + } else { + Err(e) + } + } + } +} + pub fn sparse_observable(m: &Bound) -> PyResult<()> { m.add_class::()?; Ok(()) diff --git a/test/python/quantum_info/test_sparse_observable.py b/test/python/quantum_info/test_sparse_observable.py index 656d60020bc7..551ea414998e 100644 --- a/test/python/quantum_info/test_sparse_observable.py +++ b/test/python/quantum_info/test_sparse_observable.py @@ -20,9 +20,35 @@ import numpy as np from qiskit.circuit import Parameter +from qiskit.exceptions import QiskitError from qiskit.quantum_info import SparseObservable, SparsePauliOp, Pauli -from test import QiskitTestCase # pylint: disable=wrong-import-order +from test import QiskitTestCase, combine # pylint: disable=wrong-import-order + + +def single_cases(): + return [ + SparseObservable.zero(0), + SparseObservable.zero(10), + SparseObservable.identity(0), + SparseObservable.identity(1_000), + SparseObservable.from_label("IIXIZI"), + SparseObservable.from_list([("YIXZII", -0.25), ("01rl+-", 0.25 + 0.5j)]), + # Includes a duplicate entry. + SparseObservable.from_list([("IXZ", -0.25), ("01I", 0.25 + 0.5j), ("IXZ", 0.75)]), + ] + + +class AllowRightArithmetic: + """Some type that implements only the right-hand-sided arithmatic operations, and allows + `SparseObservable` to pass through them. + + The purpose of this is to detect that `SparseObservable` is correctly delegating binary + operators to the other type if given an object it cannot coerce because of its type.""" + + SENTINEL = object() + + __radd__ = __rsub__ = __rmul__ = __rtruediv__ = __rxor__ = lambda self, other: self.SENTINEL @ddt.ddt @@ -176,14 +202,7 @@ def test_bit_term_enum(self): } self.assertEqual({label: SparseObservable.BitTerm[label] for label in labels}, labels) - @ddt.data( - SparseObservable.zero(0), - SparseObservable.zero(10), - SparseObservable.identity(0), - SparseObservable.identity(1_000), - SparseObservable.from_label("IIXIZI"), - SparseObservable.from_list([("YIXZII", -0.25), ("01rl+-", 0.25 + 0.5j)]), - ) + @ddt.idata(single_cases()) def test_pickle(self, observable): self.assertEqual(observable, copy.copy(observable)) self.assertIsNot(observable, copy.copy(observable)) @@ -577,13 +596,7 @@ def test_identity(self): np.testing.assert_equal(id_0.indices, np.array([], dtype=np.uint32)) np.testing.assert_equal(id_0.boundaries, np.array([0, 0], dtype=np.uintp)) - @ddt.data( - SparseObservable.zero(0), - SparseObservable.zero(5), - SparseObservable.identity(0), - SparseObservable.identity(1_000_000), - SparseObservable.from_list([("+-rl01", -0.5), ("IXZYZI", 1.0j)]), - ) + @ddt.idata(single_cases()) def test_copy(self, obs): self.assertEqual(obs, obs.copy()) self.assertIsNot(obs, obs.copy()) @@ -930,3 +943,593 @@ def test_attributes_repr(self): self.assertIn("bit_terms", repr(obs.bit_terms)) self.assertIn("indices", repr(obs.indices)) self.assertIn("boundaries", repr(obs.boundaries)) + + @combine( + obs=single_cases(), + # This includes some elements that aren't native `complex`, but still should be cast. 
+ coeff=[0.5, 3j, 2, 0.25 - 0.75j], + ) + def test_multiply(self, obs, coeff): + obs = obs.copy() + initial = obs.copy() + expected = obs.copy() + expected.coeffs[:] = np.asarray(expected.coeffs) * complex(coeff) + self.assertEqual(obs * coeff, expected) + self.assertEqual(coeff * obs, expected) + # Check that nothing applied in-place. + self.assertEqual(obs, initial) + obs *= coeff + self.assertEqual(obs, expected) + self.assertIs(obs * AllowRightArithmetic(), AllowRightArithmetic.SENTINEL) + + @ddt.idata(single_cases()) + def test_multiply_zero(self, obs): + initial = obs.copy() + self.assertEqual(obs * 0.0, SparseObservable.zero(initial.num_qubits)) + self.assertEqual(0.0 * obs, SparseObservable.zero(initial.num_qubits)) + self.assertEqual(obs, initial) + + obs *= 0.0 + self.assertEqual(obs, SparseObservable.zero(initial.num_qubits)) + + @combine( + obs=single_cases(), + # This includes some elements that aren't native `complex`, but still should be cast. Be + # careful that the floating-point operation should not involve rounding. + coeff=[0.5, 4j, 2, -0.25], + ) + def test_divide(self, obs, coeff): + obs = obs.copy() + initial = obs.copy() + expected = obs.copy() + expected.coeffs[:] = np.asarray(expected.coeffs) / complex(coeff) + self.assertEqual(obs / coeff, expected) + # Check that nothing applied in-place. + self.assertEqual(obs, initial) + obs /= coeff + self.assertEqual(obs, expected) + self.assertIs(obs / AllowRightArithmetic(), AllowRightArithmetic.SENTINEL) + + @ddt.idata(single_cases()) + def test_divide_zero_raises(self, obs): + with self.assertRaises(ZeroDivisionError): + _ = obs / 0.0j + with self.assertRaises(ZeroDivisionError): + obs /= 0.0j + + def test_add_simple(self): + num_qubits = 12 + terms = [ + ("ZXY", (5, 2, 1), 1.5j), + ("+r", (8, 0), -0.25), + ("-0l1", (10, 9, 4, 3), 0.5 + 1j), + ("XZ", (7, 5), 0.75j), + ("rl01", (5, 3, 1, 0), 0.25j), + ] + expected = SparseObservable.from_sparse_list(terms, num_qubits=num_qubits) + for pivot in range(1, len(terms) - 1): + left = SparseObservable.from_sparse_list(terms[:pivot], num_qubits=num_qubits) + left_initial = left.copy() + right = SparseObservable.from_sparse_list(terms[pivot:], num_qubits=num_qubits) + right_initial = right.copy() + # Addition is documented to be term-stacking, so structural equality without `simplify` + # should hold. + self.assertEqual(left + right, expected) + # This is a different order, so check the simplification and canonicalisation works. + self.assertEqual((right + left).simplify(), expected.simplify()) + # Neither was modified in place. + self.assertEqual(left, left_initial) + self.assertEqual(right, right_initial) + + left += right + self.assertEqual(left, expected) + self.assertEqual(right, right_initial) + + @ddt.idata(single_cases()) + def test_add_self(self, obs): + """Test that addition to `self` works fine, including in-place mutation. 
This is a case + where we might fall afoul of Rust's borrowing rules.""" + initial = obs.copy() + expected = (2.0 * obs).simplify() + self.assertEqual((obs + obs).simplify(), expected) + self.assertEqual(obs, initial) + + obs += obs + self.assertEqual(obs.simplify(), expected) + + @ddt.idata(single_cases()) + def test_add_zero(self, obs): + expected = obs.copy() + zero = SparseObservable.zero(obs.num_qubits) + self.assertEqual(obs + zero, expected) + self.assertEqual(zero + obs, expected) + + obs += zero + self.assertEqual(obs, expected) + zero += obs + self.assertEqual(zero, expected) + + def test_add_coercion(self): + """Other quantum-info operators coerce with the ``+`` operator, so we do too.""" + base = SparseObservable.zero(9) + + pauli_label = "IIIXYZIII" + expected = SparseObservable.from_label(pauli_label) + self.assertEqual(base + pauli_label, expected) + self.assertEqual(pauli_label + base, expected) + + pauli = Pauli(pauli_label) + self.assertEqual(base + pauli, expected) + self.assertEqual(pauli + base, expected) + + spo = SparsePauliOp(pauli_label) + self.assertEqual(base + spo, expected) + with self.assertRaisesRegex(QiskitError, "Invalid input data for Pauli"): + # This doesn't work because `SparsePauliOp` is badly behaved in its coercion (it gets + # first dibs at `__add__`, not our `__radd__`), and will not return `NotImplemented` for + # bad types. This _shouldn't_ raise, and this test here is to remind us to flip it to a + # proper assertion of correctness if `Pauli` starts playing nicely. + _ = spo + base + + obs_label = "10+-rlXYZ" + expected = SparseObservable.from_label(obs_label) + self.assertEqual(base + obs_label, expected) + self.assertEqual(obs_label + base, expected) + + with self.assertRaises(TypeError): + _ = base + {} + with self.assertRaises(TypeError): + _ = {} + base + with self.assertRaisesRegex(ValueError, "only contain letters from the alphabet"): + _ = base + "$$$" + with self.assertRaisesRegex(ValueError, "only contain letters from the alphabet"): + _ = "$$$" + base + + self.assertIs(base + AllowRightArithmetic(), AllowRightArithmetic.SENTINEL) + with self.assertRaisesRegex(TypeError, "invalid object for in-place addition"): + # This actually _shouldn't_ be a `TypeError` - `__iadd_` should defer to + # `AllowRightArithmetic.__radd__` in the same way that `__add__` does, but a limitation + # in PyO3 (see PyO3/pyo3#4605) prevents this. + base += AllowRightArithmetic() + + def test_add_failures(self): + with self.assertRaisesRegex(ValueError, "incompatible numbers of qubits"): + _ = SparseObservable.zero(4) + SparseObservable.zero(6) + with self.assertRaisesRegex(ValueError, "incompatible numbers of qubits"): + _ = SparseObservable.zero(6) + SparseObservable.zero(4) + + def test_sub_simple(self): + num_qubits = 12 + terms = [ + ("ZXY", (5, 2, 1), 1.5j), + ("+r", (8, 0), -0.25), + ("-0l1", (10, 9, 4, 3), 0.5 + 1j), + ("XZ", (7, 5), 0.75j), + ("rl01", (5, 3, 1, 0), 0.25j), + ] + for pivot in range(1, len(terms) - 1): + expected = SparseObservable.from_sparse_list( + [ + (label, indices, coeff if i < pivot else -coeff) + for i, (label, indices, coeff) in enumerate(terms) + ], + num_qubits=num_qubits, + ) + left = SparseObservable.from_sparse_list(terms[:pivot], num_qubits=num_qubits) + left_initial = left.copy() + right = SparseObservable.from_sparse_list(terms[pivot:], num_qubits=num_qubits) + right_initial = right.copy() + # Addition is documented to be term-stacking, so structural equality without `simplify` + # should hold. 
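+            # Subtraction stacks the negated right-hand terms, so the same structural check
+            # applies here.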
+ self.assertEqual(left - right, expected) + # This is a different order, so check the simplification and canonicalisation works. + self.assertEqual((right - left).simplify(), -expected.simplify()) + # Neither was modified in place. + self.assertEqual(left, left_initial) + self.assertEqual(right, right_initial) + + left -= right + self.assertEqual(left, expected) + self.assertEqual(right, right_initial) + + @ddt.idata(single_cases()) + def test_sub_self(self, obs): + """Test that subtraction of `self` works fine, including in-place mutation. This is a case + where we might fall afoul of Rust's borrowing rules.""" + initial = obs.copy() + expected = SparseObservable.zero(obs.num_qubits) + self.assertEqual((obs - obs).simplify(), expected) + self.assertEqual(obs, initial) + + obs -= obs + self.assertEqual(obs.simplify(), expected) + + @ddt.idata(single_cases()) + def test_sub_zero(self, obs): + expected = obs.copy() + zero = SparseObservable.zero(obs.num_qubits) + self.assertEqual(obs - zero, expected) + self.assertEqual(zero - obs, -expected) + + obs -= zero + self.assertEqual(obs, expected) + zero -= obs + self.assertEqual(zero, -expected) + + def test_sub_coercion(self): + """Other quantum-info operators coerce with the ``-`` operator, so we do too.""" + base = SparseObservable.zero(9) + + pauli_label = "IIIXYZIII" + expected = SparseObservable.from_label(pauli_label) + self.assertEqual(base - pauli_label, -expected) + self.assertEqual(pauli_label - base, expected) + + pauli = Pauli(pauli_label) + self.assertEqual(base - pauli, -expected) + self.assertEqual(pauli - base, expected) + + spo = SparsePauliOp(pauli_label) + self.assertEqual(base - spo, -expected) + with self.assertRaisesRegex(QiskitError, "Invalid input data for Pauli"): + # This doesn't work because `SparsePauliOp` is badly behaved in its coercion (it gets + # first dibs at `__add__`, not our `__radd__`), and will not return `NotImplemented` for + # bad types. This _shouldn't_ raise, and this test here is to remind us to flip it to a + # proper assertion of correctness if `Pauli` starts playing nicely. + _ = spo + base + + obs_label = "10+-rlXYZ" + expected = SparseObservable.from_label(obs_label) + self.assertEqual(base - obs_label, -expected) + self.assertEqual(obs_label - base, expected) + + with self.assertRaises(TypeError): + _ = base - {} + with self.assertRaises(TypeError): + _ = {} - base + with self.assertRaisesRegex(ValueError, "only contain letters from the alphabet"): + _ = base - "$$$" + with self.assertRaisesRegex(ValueError, "only contain letters from the alphabet"): + _ = "$$$" - base + + self.assertIs(base + AllowRightArithmetic(), AllowRightArithmetic.SENTINEL) + with self.assertRaisesRegex(TypeError, "invalid object for in-place subtraction"): + # This actually _shouldn't_ be a `TypeError` - `__isub_` should defer to + # `AllowRightArithmetic.__rsub__` in the same way that `__sub__` does, but a limitation + # in PyO3 (see PyO3/pyo3#4605) prevents this. 
+ base -= AllowRightArithmetic() + + def test_sub_failures(self): + with self.assertRaisesRegex(ValueError, "incompatible numbers of qubits"): + _ = SparseObservable.zero(4) - SparseObservable.zero(6) + with self.assertRaisesRegex(ValueError, "incompatible numbers of qubits"): + _ = SparseObservable.zero(6) - SparseObservable.zero(4) + + @ddt.idata(single_cases()) + def test_neg(self, obs): + initial = obs.copy() + expected = obs.copy() + expected.coeffs[:] = -np.asarray(expected.coeffs) + self.assertEqual(-obs, expected) + # Test that there's no in-place modification. + self.assertEqual(obs, initial) + + @ddt.idata(single_cases()) + def test_pos(self, obs): + initial = obs.copy() + self.assertEqual(+obs, initial) + self.assertIsNot(+obs, obs) + + @combine(left=single_cases(), right=single_cases()) + def test_tensor(self, left, right): + + def expected(left, right): + coeffs = [] + bit_terms = [] + indices = [] + boundaries = [0] + for left_ptr in range(left.num_terms): + left_start, left_end = left.boundaries[left_ptr], left.boundaries[left_ptr + 1] + for right_ptr in range(right.num_terms): + right_start = right.boundaries[right_ptr] + right_end = right.boundaries[right_ptr + 1] + coeffs.append(left.coeffs[left_ptr] * right.coeffs[right_ptr]) + bit_terms.extend(right.bit_terms[right_start:right_end]) + bit_terms.extend(left.bit_terms[left_start:left_end]) + indices.extend(right.indices[right_start:right_end]) + indices.extend(i + right.num_qubits for i in left.indices[left_start:left_end]) + boundaries.append(len(indices)) + return SparseObservable.from_raw_parts( + left.num_qubits + right.num_qubits, coeffs, bit_terms, indices, boundaries + ) + + # We deliberately have the arguments flipped when appropriate, here. + # pylint: disable=arguments-out-of-order + + left_initial = left.copy() + right_initial = right.copy() + self.assertEqual(left.tensor(right), expected(left, right)) + self.assertEqual(left, left_initial) + self.assertEqual(right, right_initial) + self.assertEqual(right.tensor(left), expected(right, left)) + + self.assertEqual(left.expand(right), expected(right, left)) + self.assertEqual(left, left_initial) + self.assertEqual(right, right_initial) + self.assertEqual(right.expand(left), expected(left, right)) + + self.assertEqual(left.tensor(right), right.expand(left)) + self.assertEqual(left.expand(right), right.tensor(left)) + + @combine( + obs=single_cases(), identity=[SparseObservable.identity(0), SparseObservable.identity(5)] + ) + def test_tensor_identity(self, obs, identity): + initial = obs.copy() + expected_left = SparseObservable.from_raw_parts( + obs.num_qubits + identity.num_qubits, + obs.coeffs, + obs.bit_terms, + [x + identity.num_qubits for x in obs.indices], + obs.boundaries, + ) + expected_right = SparseObservable.from_raw_parts( + obs.num_qubits + identity.num_qubits, + obs.coeffs, + obs.bit_terms, + obs.indices, + obs.boundaries, + ) + self.assertEqual(obs.tensor(identity), expected_left) + self.assertEqual(identity.tensor(obs), expected_right) + self.assertEqual(obs.expand(identity), expected_right) + self.assertEqual(identity.expand(obs), expected_left) + self.assertEqual(obs ^ identity, expected_left) + self.assertEqual(identity ^ obs, expected_right) + self.assertEqual(obs, initial) + obs ^= identity + self.assertEqual(obs, expected_left) + + @combine(obs=single_cases(), zero=[SparseObservable.zero(0), SparseObservable.zero(5)]) + def test_tensor_zero(self, obs, zero): + initial = obs.copy() + expected = SparseObservable.zero(obs.num_qubits + 
zero.num_qubits) + self.assertEqual(obs.tensor(zero), expected) + self.assertEqual(zero.tensor(obs), expected) + self.assertEqual(obs.expand(zero), expected) + self.assertEqual(zero.expand(obs), expected) + self.assertEqual(obs ^ zero, expected) + self.assertEqual(zero ^ obs, expected) + self.assertEqual(obs, initial) + obs ^= zero + self.assertEqual(obs, expected) + + def test_tensor_coercion(self): + """Other quantum-info operators coerce with the ``tensor`` method and operator, so we do + too.""" + base = SparseObservable.identity(0) + + pauli_label = "IIXYZII" + expected = SparseObservable.from_label(pauli_label) + self.assertEqual(base.tensor(pauli_label), expected) + self.assertEqual(base.expand(pauli_label), expected) + self.assertEqual(base ^ pauli_label, expected) + self.assertEqual(pauli_label ^ base, expected) + + pauli = Pauli(pauli_label) + self.assertEqual(base.tensor(pauli), expected) + self.assertEqual(base.expand(pauli), expected) + self.assertEqual(base ^ pauli, expected) + with self.assertRaisesRegex(QiskitError, "Invalid input data for Pauli"): + # This doesn't work because `Pauli` is badly behaved in its coercion (it gets first dibs + # at `__xor__`, not our `__rxor__`), and will not return `NotImplemented` for bad types. + # This _shouldn't_ raise, and this test here is to remind us to flip it to a proper + # assertion of correctness if `Pauli` starts playing nicely. + _ = pauli ^ base + + spo = SparsePauliOp(pauli_label) + self.assertEqual(base.tensor(spo), expected) + self.assertEqual(base.expand(spo), expected) + self.assertEqual(base ^ spo, expected) + with self.assertRaisesRegex(QiskitError, "Invalid input data for Pauli"): + # This doesn't work because `SparsePauliOp` is badly behaved in its coercion (it gets + # first dibs at `__xor__`, not our `__rxor__`), and will not return `NotImplemented` for + # bad types. This _shouldn't_ raise, and this test here is to remind us to flip it to a + # proper assertion of correctness if `Pauli` starts playing nicely. 
+ _ = spo ^ base + + obs_label = "10+-rlXYZ" + expected = SparseObservable.from_label(obs_label) + self.assertEqual(base.tensor(obs_label), expected) + self.assertEqual(base.expand(obs_label), expected) + self.assertEqual(base ^ obs_label, expected) + self.assertEqual(obs_label ^ base, expected) + + with self.assertRaises(TypeError): + _ = base ^ {} + with self.assertRaises(TypeError): + _ = {} ^ base + with self.assertRaisesRegex(ValueError, "only contain letters from the alphabet"): + _ = base ^ "$$$" + with self.assertRaisesRegex(ValueError, "only contain letters from the alphabet"): + _ = "$$$" ^ base + + self.assertIs(base ^ AllowRightArithmetic(), AllowRightArithmetic.SENTINEL) + + @ddt.idata(single_cases()) + def test_adjoint(self, obs): + initial = obs.copy() + expected = obs.copy() + expected.coeffs[:] = np.conjugate(expected.coeffs) + self.assertEqual(obs.adjoint(), expected) + self.assertEqual(obs, initial) + self.assertEqual(obs.adjoint().adjoint(), initial) + self.assertEqual(obs.adjoint(), obs.conjugate().transpose()) + self.assertEqual(obs.adjoint(), obs.transpose().conjugate()) + + @ddt.idata(single_cases()) + def test_conjugate(self, obs): + initial = obs.copy() + + term_map = {term: (term, 1.0) for term in SparseObservable.BitTerm} + term_map[SparseObservable.BitTerm.Y] = (SparseObservable.BitTerm.Y, -1.0) + term_map[SparseObservable.BitTerm.RIGHT] = (SparseObservable.BitTerm.LEFT, 1.0) + term_map[SparseObservable.BitTerm.LEFT] = (SparseObservable.BitTerm.RIGHT, 1.0) + + expected = obs.copy() + for i in range(expected.num_terms): + start, end = expected.boundaries[i], expected.boundaries[i + 1] + coeff = expected.coeffs[i] + for offset, bit_term in enumerate(expected.bit_terms[start:end]): + new_term, multiplier = term_map[bit_term] + coeff *= multiplier + expected.bit_terms[start + offset] = new_term + expected.coeffs[i] = coeff.conjugate() + + self.assertEqual(obs.conjugate(), expected) + self.assertEqual(obs, initial) + self.assertEqual(obs.conjugate().conjugate(), initial) + self.assertEqual(obs.conjugate(), obs.transpose().adjoint()) + self.assertEqual(obs.conjugate(), obs.adjoint().transpose()) + + def test_conjugate_explicit(self): + # The description of conjugation on the operator is not 100% trivial to see is correct, so + # here's an explicit case to verify. 
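+        # Each `Y` flips the sign, `r` and `l` swap with no extra sign, and the coefficient
+        # itself is complex-conjugated.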
+ obs = SparseObservable.from_sparse_list( + [ + ("Y", (1,), 2.0), + ("X+-", (5, 4, 3), 1.5), + ("Z01", (5, 4, 3), 1.5j), + ("YY", (2, 0), 0.25), + ("YY", (3, 1), 0.25j), + ("YYY", (3, 2, 1), 0.75), + ("rlrl", (4, 3, 2, 1), 1.0), + ("lrlr", (4, 3, 2, 1), 1.0j), + ("", (), 1.5j), + ], + num_qubits=6, + ) + expected = SparseObservable.from_sparse_list( + [ + ("Y", (1,), -2.0), + ("X+-", (5, 4, 3), 1.5), + ("Z01", (5, 4, 3), -1.5j), + ("YY", (2, 0), 0.25), + ("YY", (3, 1), -0.25j), + ("YYY", (3, 2, 1), -0.75), + ("lrlr", (4, 3, 2, 1), 1.0), + ("rlrl", (4, 3, 2, 1), -1.0j), + ("", (), -1.5j), + ], + num_qubits=6, + ) + self.assertEqual(obs.conjugate(), expected) + self.assertEqual(obs.conjugate().conjugate(), obs) + + @ddt.idata(single_cases()) + def test_transpose(self, obs): + initial = obs.copy() + + term_map = {term: (term, 1.0) for term in SparseObservable.BitTerm} + term_map[SparseObservable.BitTerm.Y] = (SparseObservable.BitTerm.Y, -1.0) + term_map[SparseObservable.BitTerm.RIGHT] = (SparseObservable.BitTerm.LEFT, 1.0) + term_map[SparseObservable.BitTerm.LEFT] = (SparseObservable.BitTerm.RIGHT, 1.0) + + expected = obs.copy() + for i in range(expected.num_terms): + start, end = expected.boundaries[i], expected.boundaries[i + 1] + coeff = expected.coeffs[i] + for offset, bit_term in enumerate(expected.bit_terms[start:end]): + new_term, multiplier = term_map[bit_term] + coeff *= multiplier + expected.bit_terms[start + offset] = new_term + expected.coeffs[i] = coeff + + self.assertEqual(obs.transpose(), expected) + self.assertEqual(obs, initial) + self.assertEqual(obs.transpose().transpose(), initial) + self.assertEqual(obs.transpose(), obs.conjugate().adjoint()) + self.assertEqual(obs.transpose(), obs.adjoint().conjugate()) + + def test_transpose_explicit(self): + # The description of transposition on the operator is not 100% trivial to see is correct, so + # here's a few explicit cases to verify. 
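+        # Each `Y` flips the sign and `r`/`l` swap; the coefficients are otherwise untouched.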
+ obs = SparseObservable.from_sparse_list( + [ + ("Y", (1,), 2.0), + ("X+-", (5, 4, 3), 1.5), + ("Z01", (5, 4, 3), 1.5j), + ("YY", (2, 0), 0.25), + ("YY", (3, 1), 0.25j), + ("YYY", (3, 2, 1), 0.75), + ("rlrl", (4, 3, 2, 1), 1.0), + ("lrlr", (4, 3, 2, 1), 1.0j), + ("", (), 1.5j), + ], + num_qubits=6, + ) + expected = SparseObservable.from_sparse_list( + [ + ("Y", (1,), -2.0), + ("X+-", (5, 4, 3), 1.5), + ("Z01", (5, 4, 3), 1.5j), + ("YY", (2, 0), 0.25), + ("YY", (3, 1), 0.25j), + ("YYY", (3, 2, 1), -0.75), + ("lrlr", (4, 3, 2, 1), 1.0), + ("rlrl", (4, 3, 2, 1), 1.0j), + ("", (), 1.5j), + ], + num_qubits=6, + ) + self.assertEqual(obs.transpose(), expected) + self.assertEqual(obs.transpose().transpose(), obs) + + def test_simplify(self): + self.assertEqual((1e-10 * SparseObservable("XX")).simplify(1e-8), SparseObservable.zero(2)) + self.assertEqual((1e-10j * SparseObservable("XX")).simplify(1e-8), SparseObservable.zero(2)) + self.assertEqual( + (1e-7 * SparseObservable("XX")).simplify(1e-8), 1e-7 * SparseObservable("XX") + ) + + exact_coeff = 2.0**-10 + self.assertEqual( + (exact_coeff * SparseObservable("XX")).simplify(exact_coeff), SparseObservable.zero(2) + ) + self.assertEqual( + (exact_coeff * 1j * SparseObservable("XX")).simplify(exact_coeff), + SparseObservable.zero(2), + ) + coeff = 3e-5 + 4e-5j + self.assertEqual( + (coeff * SparseObservable("ZZ")).simplify(abs(coeff)), SparseObservable.zero(2) + ) + + sum_alike = SparseObservable.from_list( + [ + ("XX", 1.0), + ("YY", 1j), + ("XX", -1.0), + ] + ) + self.assertEqual(sum_alike.simplify(), 1j * SparseObservable("YY")) + + terms = [ + ("XYIZI", 1.5), + ("+-IYI", 2.0), + ("XYIZI", 2j), + ("+-IYI", -2.0), + ("rlIZI", -2.0), + ] + canonical_forwards = SparseObservable.from_list(terms) + canonical_backwards = SparseObservable.from_list(list(reversed(terms))) + self.assertNotEqual(canonical_forwards.simplify(), canonical_forwards) + self.assertNotEqual(canonical_forwards, canonical_backwards) + self.assertEqual(canonical_forwards.simplify(), canonical_backwards.simplify()) + self.assertEqual(canonical_forwards.simplify(), canonical_forwards.simplify().simplify()) + + @ddt.idata(single_cases()) + def test_clear(self, obs): + num_qubits = obs.num_qubits + obs.clear() + self.assertEqual(obs, SparseObservable.zero(num_qubits))