From 9ad36a3da26ffe98453bd238618445e79307abd2 Mon Sep 17 00:00:00 2001
From: Jake Lishman
Date: Wed, 1 Nov 2023 17:07:19 +0000
Subject: [PATCH] Optimise `SparsePauliOp.from_operator`

This rewrites the `from_operator` handling (again!) from the initial Rust
implementation of the recursive matrix-addition form into an iterative
approach that re-uses the same scratch memory all the way down. This is
significantly faster, and allocates far less often, although in practice
the peak heap memory usage will not be dissimilar.

The algorithm is rewritten to be a manual stack-based iteration, rather
than a functional recursion. The size of a single stack entry in the
iteration is one `usize`, which is drastically smaller than whatever
per-function-call stack will have been used before.
---
 crates/accelerate/src/sparse_pauli_op.rs      | 609 +++++++++++++-----
 ...-pauli-decomposition-faf2be01a6e75fff.yaml |   7 +
 2 files changed, 462 insertions(+), 154 deletions(-)
 create mode 100644 releasenotes/notes/faster-pauli-decomposition-faf2be01a6e75fff.yaml

diff --git a/crates/accelerate/src/sparse_pauli_op.rs b/crates/accelerate/src/sparse_pauli_op.rs
index 73c4ab7a73d5..889ca422702c 100644
--- a/crates/accelerate/src/sparse_pauli_op.rs
+++ b/crates/accelerate/src/sparse_pauli_op.rs
@@ -20,10 +20,10 @@ use numpy::prelude::*;
 use numpy::{PyArray1, PyArray2, PyReadonlyArray1, PyReadonlyArray2, PyUntypedArrayMethods};
 
 use hashbrown::HashMap;
-use ndarray::{s, Array1, Array2, ArrayView1, ArrayView2, Axis};
+use ndarray::{s, ArrayView1, ArrayView2, Axis};
 use num_complex::Complex64;
 use num_traits::Zero;
-use qiskit_circuit::util::{c64, C_ONE, C_ZERO};
+use qiskit_circuit::util::{c64, C_ZERO};
 use rayon::prelude::*;
 
 use crate::rayon_ext::*;
@@ -70,14 +70,6 @@ pub fn unordered_unique(py: Python, array: PyReadonlyArray2<u16>) -> (PyObject,
     )
 }
 
-#[derive(Clone, Copy)]
-enum Pauli {
-    I,
-    X,
-    Y,
-    Z,
-}
-
 /// Pack a 2D array of Booleans into a given width. Returns an error if the input array is
 /// too large to be packed into u64.
 fn pack_bits(bool_arr: ArrayView2<bool>) -> Result<Vec<u64>, ()> {
@@ -188,10 +180,9 @@ impl ZXPaulis {
 }
 
 /// Intermediate structure that represents readonly views onto the Python-space sparse Pauli data.
-/// This is used in the chained methods so that the syntactical temporary lifetime extension can
-/// occur; we can't have the readonly array temporaries only live within a method that returns
-/// [ZXPaulisView], because otherwise the lifetimes of the [PyReadonlyArray] elements will be too
-/// short.
+/// This is used in the chained methods so that the lifetime extension can occur; we can't have the
+/// readonly array temporaries only live within a method that returns [ZXPaulisView], because
+/// otherwise the lifetimes of the [PyReadonlyArray] elements will be too short.
 pub struct ZXPaulisReadonly<'a> {
     x: PyReadonlyArray2<'a, bool>,
     z: PyReadonlyArray2<'a, bool>,
@@ -325,6 +316,16 @@ impl MatrixCompressedPaulis {
     }
 }
 
+struct DecomposeOut {
+    z: Vec<bool>,
+    x: Vec<bool>,
+    phases: Vec<u8>,
+    coeffs: Vec<Complex64>,
+    scale: f64,
+    tol: f64,
+    num_qubits: usize,
+}
+
 /// Decompose a dense complex operator into the symplectic Pauli representation in the
 /// ZX-convention.
 ///
@@ -337,163 +338,463 @@ pub fn decompose_dense(
     tolerance: f64,
 ) -> PyResult<ZXPaulis> {
     let num_qubits = operator.shape()[0].ilog2() as usize;
-    let size = 1 << num_qubits;
-    if operator.shape() != [size, size] {
+    let side = 1 << num_qubits;
+    if operator.shape() != [side, side] {
         return Err(PyValueError::new_err(format!(
-            "input with shape {:?} cannot be interpreted as a multiqubit operator",
+            "bad input shape for {} qubits: {:?}",
+            num_qubits,
             operator.shape()
         )));
     }
-    let mut paulis = vec![];
-    let mut coeffs = vec![];
-    if num_qubits > 0 {
-        decompose_dense_inner(
-            C_ONE,
-            num_qubits,
-            &[],
-            operator.as_array(),
-            &mut paulis,
-            &mut coeffs,
-            tolerance * tolerance,
-        );
-    }
-    if coeffs.is_empty() {
-        Ok(ZXPaulis {
-            z: PyArray2::zeros_bound(py, [0, num_qubits], false).into(),
-            x: PyArray2::zeros_bound(py, [0, num_qubits], false).into(),
-            phases: PyArray1::zeros_bound(py, [0], false).into(),
-            coeffs: PyArray1::zeros_bound(py, [0], false).into(),
-        })
-    } else {
-        // Constructing several arrays of different shapes at once is rather awkward in iterator
-        // logic, so we just loop manually.
-        let mut z = Array2::<bool>::uninit([paulis.len(), num_qubits]);
-        let mut x = Array2::<bool>::uninit([paulis.len(), num_qubits]);
-        let mut phases = Array1::<u8>::uninit(paulis.len());
-        for (i, paulis) in paulis.drain(..).enumerate() {
-            let mut phase = 0u8;
-            for (j, pauli) in paulis.into_iter().rev().enumerate() {
-                match pauli {
-                    Pauli::I => {
-                        z[[i, j]].write(false);
-                        x[[i, j]].write(false);
-                    }
-                    Pauli::X => {
-                        z[[i, j]].write(false);
-                        x[[i, j]].write(true);
-                    }
-                    Pauli::Y => {
-                        z[[i, j]].write(true);
-                        x[[i, j]].write(true);
-                        phase = phase.wrapping_add(1);
-                    }
-                    Pauli::Z => {
-                        z[[i, j]].write(true);
-                        x[[i, j]].write(false);
-                    }
+    let (stack, mut out_list, mut scratch) = decompose_first_level(operator.as_array(), num_qubits);
+    decompose_middle_levels(stack, &mut out_list, &mut scratch, num_qubits);
+    let out = decompose_last_level(&mut out_list, &scratch, num_qubits, tolerance)?;
+    Ok(ZXPaulis {
+        z: PyArray1::from_vec_bound(py, out.z)
+            .reshape([out.phases.len(), num_qubits])?
+            .into(),
+        x: PyArray1::from_vec_bound(py, out.x)
+            .reshape([out.phases.len(), num_qubits])?
+            .into(),
+        phases: PyArray1::from_vec_bound(py, out.phases).into(),
+        coeffs: PyArray1::from_vec_bound(py, out.coeffs).into(),
+    })
+}
+
+/// Apply the matrix-addition decomposition at the first level. This is split out because it acts
+/// on an `ArrayView2`, and is responsible for populating the initial scratch space. We can't
+/// write over the operator the user gave us (it's not ours to do that to), and anyway, we want to
+/// drop to a chunk of memory that we can 100% guarantee is contiguous, so we can elide all the
+/// stride checking.
+fn decompose_first_level(
+    in_op: ArrayView2<Complex64>,
+    num_qubits: usize,
+) -> (Vec<PauliLocation>, Vec<PauliLocation>, Vec<Complex64>) {
+    let side = 1 << num_qubits;
+    let mut stack = Vec::<PauliLocation>::with_capacity(4);
+    let mut out_list = Vec::<PauliLocation>::new();
+    let mut scratch = Vec::<Complex64>::with_capacity(side * side);
+    match num_qubits {
+        0 => {}
+        1 => {
+            // If we've only got one qubit, we just want to copy the data over in the correct
+            // continuity and let the base case of the iteration take care of outputting it.
+ scratch.extend(in_op.iter()); + out_list.push(PauliLocation::begin(num_qubits)); + } + _ => { + unsafe { scratch.set_len(side * side) }; + let mut ptr = 0usize; + + let cur_qubit = num_qubits - 1; + let mid = 1 << cur_qubit; + let loc = PauliLocation::begin(num_qubits); + let mut i_nonzero = false; + let mut x_nonzero = false; + let mut y_nonzero = false; + let mut z_nonzero = false; + + let i_row_0 = loc.row(); + let i_col_0 = loc.col(); + + let x_row_0 = loc.row(); + let x_col_0 = loc.col() + mid; + + let y_row_0 = loc.row() + mid; + let y_col_0 = loc.col(); + + let z_row_0 = loc.row() + mid; + let z_col_0 = loc.col() + mid; + + for off_row in 0..mid { + let i_row = i_row_0 + off_row; + let z_row = z_row_0 + off_row; + for off_col in 0..mid { + let i_col = i_col_0 + off_col; + let z_col = z_col_0 + off_col; + let value = in_op[[i_row, i_col]] + in_op[[z_row, z_col]]; + scratch[ptr] = value; + ptr += 1; + i_nonzero = i_nonzero || (value != C_ZERO); } + + let x_row = x_row_0 + off_row; + let y_row = y_row_0 + off_row; + for off_col in 0..mid { + let x_col = x_col_0 + off_col; + let y_col = y_col_0 + off_col; + let value = in_op[[x_row, x_col]] + in_op[[y_row, y_col]]; + scratch[ptr] = value; + ptr += 1; + x_nonzero = x_nonzero || (value != C_ZERO); + } + } + for off_row in 0..mid { + let x_row = x_row_0 + off_row; + let y_row = y_row_0 + off_row; + for off_col in 0..mid { + let x_col = x_col_0 + off_col; + let y_col = y_col_0 + off_col; + let value = in_op[[x_row, x_col]] - in_op[[y_row, y_col]]; + scratch[ptr] = value; + ptr += 1; + y_nonzero = y_nonzero || (value != C_ZERO); + } + let i_row = i_row_0 + off_row; + let z_row = z_row_0 + off_row; + for off_col in 0..mid { + let i_col = i_col_0 + off_col; + let z_col = z_col_0 + off_col; + let value = in_op[[i_row, i_col]] - in_op[[z_row, z_col]]; + scratch[ptr] = value; + ptr += 1; + z_nonzero = z_nonzero || (value != C_ZERO); + } + } + // The middle-levels `stack` is a LIFO, so if we push in this order, we'll consider the + // Pauli terms in lexicographical order, which is the canonical order from + // `SparsePauliOp.sort`. Populating the `out_list` (an initially empty `Vec`) + // effectively reverses the stack, so we want to push its elements in the IXYZ order. + if loc.qubit() == 1 { + i_nonzero.then(|| out_list.push(loc.push_i())); + x_nonzero.then(|| out_list.push(loc.push_x())); + y_nonzero.then(|| out_list.push(loc.push_y())); + z_nonzero.then(|| out_list.push(loc.push_z())); + } else { + z_nonzero.then(|| stack.push(loc.push_z())); + y_nonzero.then(|| stack.push(loc.push_y())); + x_nonzero.then(|| stack.push(loc.push_x())); + i_nonzero.then(|| stack.push(loc.push_i())); } - phases[i].write(phase % 4); } - // These are safe because the above loops write into every element. It's guaranteed that - // each of the elements of the `paulis` vec will have `num_qubits` because they're all - // reading from the same base array. - let z = unsafe { z.assume_init() }; - let x = unsafe { x.assume_init() }; - let phases = unsafe { phases.assume_init() }; - Ok(ZXPaulis { - z: z.into_pyarray_bound(py).into(), - x: x.into_pyarray_bound(py).into(), - phases: phases.into_pyarray_bound(py).into(), - coeffs: PyArray1::from_vec_bound(py, coeffs).into(), - }) } + (stack, out_list, scratch) } -/// Recurse worker routine of `decompose_dense`. Should be called with at least one qubit. -fn decompose_dense_inner( - factor: Complex64, +/// Iteratively decompose the matrix at all levels other than the first and last. 
This populates
+/// the `out_list` with locations.
+fn decompose_middle_levels(
+    mut stack: Vec<PauliLocation>,
+    out_list: &mut Vec<PauliLocation>,
+    scratch: &mut [Complex64],
     num_qubits: usize,
-    paulis: &[Pauli],
-    block: ArrayView2<Complex64>,
-    out_paulis: &mut Vec<Vec<Pauli>>,
-    out_coeffs: &mut Vec<Complex64>,
-    square_tolerance: f64,
 ) {
-    if num_qubits == 0 {
-        // It would be safe to `return` here, but if it's unreachable then LLVM is allowed to
-        // optimize out this branch entirely in release mode, which is good for a ~2% speedup.
-        unreachable!("should not call this with an empty operator")
-    }
-    // Base recursion case.
-    if num_qubits == 1 {
-        let mut push_if_nonzero = |extra: Pauli, value: Complex64| {
-            if value.norm_sqr() <= square_tolerance {
-                return;
+    let side = 1 << num_qubits;
+    // The stack is a LIFO, which is how we implement the depth-first iteration. Depth-first
+    // means `stack` never grows very large; it reaches at most `3*num_qubits - 2` elements (if all
+    // terms are non-zero all the way through the first subblock decomposition). `out_list`, on the
+    // other hand, can be `4 ** (num_qubits - 1)` entries in the worst-case scenario of a
+    // completely dense (in Pauli terms) operator.
+    while let Some(loc) = stack.pop() {
+        // Here we work pairwise, writing out the new values into both I and Z simultaneously (etc
+        // for X and Y) so we can re-use their scratch space and avoid re-allocating. We're doing
+        // the multiple assignment `(I, Z) = (I + Z, I - Z)`.
+        let mid = 1 << loc.qubit();
+        let mut i_nonzero = false;
+        let mut z_nonzero = false;
+        let i_row_0 = loc.row();
+        let i_col_0 = loc.col();
+        let z_row_0 = loc.row() + mid;
+        let z_col_0 = loc.col() + mid;
+        for off_row in 0..mid {
+            let i_loc_0 = (i_row_0 + off_row) * side + i_col_0;
+            let z_loc_0 = (z_row_0 + off_row) * side + z_col_0;
+            for off_col in 0..mid {
+                let i_loc = i_loc_0 + off_col;
+                let z_loc = z_loc_0 + off_col;
+                let add = scratch[i_loc] + scratch[z_loc];
+                let sub = scratch[i_loc] - scratch[z_loc];
+                scratch[i_loc] = add;
+                scratch[z_loc] = sub;
+                i_nonzero = i_nonzero || (add != C_ZERO);
+                z_nonzero = z_nonzero || (sub != C_ZERO);
             }
-            let paulis = {
-                let mut vec = Vec::with_capacity(paulis.len() + 1);
-                vec.extend_from_slice(paulis);
-                vec.push(extra);
-                vec
-            };
-            out_paulis.push(paulis);
-            out_coeffs.push(value);
-        };
-        push_if_nonzero(Pauli::I, 0.5 * factor * (block[[0, 0]] + block[[1, 1]]));
-        push_if_nonzero(Pauli::X, 0.5 * factor * (block[[0, 1]] + block[[1, 0]]));
-        push_if_nonzero(
-            Pauli::Y,
-            0.5 * Complex64::i() * factor * (block[[0, 1]] - block[[1, 0]]),
-        );
-        push_if_nonzero(Pauli::Z, 0.5 * factor * (block[[0, 0]] - block[[1, 1]]));
-        return;
-    }
-    let mut recurse_if_nonzero = |extra: Pauli, factor: Complex64, values: Array2<Complex64>| {
-        let mut is_zero = true;
-        for value in values.iter() {
-            if !value.is_zero() {
-                is_zero = false;
-                break;
+        }
+
+        let mut x_nonzero = false;
+        let mut y_nonzero = false;
+        let x_row_0 = loc.row();
+        let x_col_0 = loc.col() + mid;
+        let y_row_0 = loc.row() + mid;
+        let y_col_0 = loc.col();
+        for off_row in 0..mid {
+            let x_loc_0 = (x_row_0 + off_row) * side + x_col_0;
+            let y_loc_0 = (y_row_0 + off_row) * side + y_col_0;
+            for off_col in 0..mid {
+                let x_loc = x_loc_0 + off_col;
+                let y_loc = y_loc_0 + off_col;
+                let add = scratch[x_loc] + scratch[y_loc];
+                let sub = scratch[x_loc] - scratch[y_loc];
+                scratch[x_loc] = add;
+                scratch[y_loc] = sub;
+                x_nonzero = x_nonzero || (add != C_ZERO);
+                y_nonzero = y_nonzero || (sub != C_ZERO);
             }
         }
-        if is_zero {
-            return;
+        // The middle-levels `stack` is a LIFO, so if we push in this order, we'll consider the
+        // Pauli terms in lexicographical order, which is the canonical order from
+        // `SparsePauliOp.sort`. Populating the `out_list` (an initially empty `Vec`) effectively
+        // reverses the stack, so we want to push its elements in the IXYZ order.
+        if loc.qubit() == 1 {
+            i_nonzero.then(|| out_list.push(loc.push_i()));
+            x_nonzero.then(|| out_list.push(loc.push_x()));
+            y_nonzero.then(|| out_list.push(loc.push_y()));
+            z_nonzero.then(|| out_list.push(loc.push_z()));
+        } else {
+            z_nonzero.then(|| stack.push(loc.push_z()));
+            y_nonzero.then(|| stack.push(loc.push_y()));
+            x_nonzero.then(|| stack.push(loc.push_x()));
+            i_nonzero.then(|| stack.push(loc.push_i()));
         }
-        let mut new_paulis = Vec::with_capacity(paulis.len() + 1);
-        new_paulis.extend_from_slice(paulis);
-        new_paulis.push(extra);
-        decompose_dense_inner(
-            factor,
-            num_qubits - 1,
-            &new_paulis,
-            values.view(),
-            out_paulis,
-            out_coeffs,
-            square_tolerance,
-        );
+    }
+}
+
+fn decompose_last_level(
+    out_list: &mut Vec<PauliLocation>,
+    scratch: &[Complex64],
+    num_qubits: usize,
+    tolerance: f64,
+) -> PyResult<DecomposeOut> {
+    let side = 1 << num_qubits;
+    let scale = 0.5f64.powi(num_qubits.try_into()?);
+    // Pessimistically allocate assuming that there will be no zero terms in the out list. We
+    // don't really pay much cost if we overallocate, but underallocating means that all four
+    // outputs have to copy their data across to a new allocation.
+    let mut out = DecomposeOut {
+        z: Vec::with_capacity(4 * num_qubits * out_list.len()),
+        x: Vec::with_capacity(4 * num_qubits * out_list.len()),
+        phases: Vec::with_capacity(4 * out_list.len()),
+        coeffs: Vec::with_capacity(4 * out_list.len()),
+        scale,
+        tol: (tolerance * tolerance) / (scale * scale),
+        num_qubits,
+    };
+
+    for loc in out_list.drain(..) {
+        let row = loc.row();
+        let col = loc.col();
+        let base = row * side + col;
+        let i_value = scratch[base] + scratch[base + side + 1];
+        let z_value = scratch[base] - scratch[base + side + 1];
+        let x_value = scratch[base + 1] + scratch[base + side];
+        let y_value = scratch[base + 1] - scratch[base + side];
+
+        let x = row ^ col;
+        let z = row;
+        let phase = (x & z).count_ones() as u8;
+        // Pushing the last Pauli onto the `loc` happens "forwards" to maintain lexicographical
+        // ordering in `out`, since this is the construction of the final object.
+        push_pauli_if_nonzero(x, z, phase, i_value, &mut out);
+        push_pauli_if_nonzero(x | 1, z, phase, x_value, &mut out);
+        push_pauli_if_nonzero(x | 1, z | 1, phase + 1, y_value, &mut out);
+        push_pauli_if_nonzero(x, z | 1, phase, z_value, &mut out);
+    }
+    // If we _wildly_ overallocated, then shrink back to a sensible size to avoid tying up too much
+    // memory as we return to Python space.
+    if out.z.capacity() / 4 > out.z.len() {
+        out.z.shrink_to_fit();
+        out.x.shrink_to_fit();
+        out.phases.shrink_to_fit();
+        out.coeffs.shrink_to_fit();
+    }
+    Ok(out)
+}
+
+// This generates lookup tables of the form
+//     static LOOKUP: [[bool; 2]; 4] = [[false, false], [true, false], [false, true], [true, true]];
+// when called `pauli_lookup!(LOOKUP, 2, [_, _])`. The last argument is like a dummy version of
+// an individual lookup rule, which is consumed to make an inner "loop" with a declarative macro.
+macro_rules!
pauli_lookup { + ($name:ident, $n:literal, [$head:expr$ (, $($tail:expr),*)?]) => { + static $name: [[bool; $n]; 1<<$n] = pauli_lookup!(@acc, [$($($tail),*)?], [[false], [true]]); + }; + (@acc, [$head:expr $(, $($tail:expr),*)?], [$([$($bools:tt),*]),+]) => { + pauli_lookup!(@acc, [$($($tail),*)?], [$([$($bools),*, false]),+, $([$($bools),*, true]),+]) + }; + (@acc, [], $init:expr) => { $init }; +} +pauli_lookup!(PAULI_LOOKUP_2, 2, [(), ()]); +pauli_lookup!(PAULI_LOOKUP_4, 4, [(), (), (), ()]); +pauli_lookup!(PAULI_LOOKUP_8, 8, [(), (), (), (), (), (), (), ()]); + +/// Push a complete Pauli chain into the output (`out`), if the corresponding entry is non-zero. +/// `x` and `z` represent the symplectic X and Z bitvectors, packed into `usize`, where LSB n +/// corresponds to qubit `n`. +fn push_pauli_if_nonzero( + mut x: usize, + mut z: usize, + phase: u8, + value: Complex64, + out: &mut DecomposeOut, +) { + if value.norm_sqr() <= out.tol { + return; + } + + // This set of `extend` calls is effectively an 8-fold unrolling of the "natural" loop through + // each bit, where the initial `if` statements are handling the remainder (the up-to 7 + // least-significant bits). In practice, it's probably unlikely that people are decomposing + // 16q+ operators, since that's a pretty huge matrix already. + // + // The 8-fold loop unrolling is because going bit-by-bit all the way would be dominated by loop + // and bitwise-operation overhead. + + if out.num_qubits & 1 == 1 { + out.x.push(x & 1 == 1); + out.z.push(z & 1 == 1); + x >>= 1; + z >>= 1; + } + if out.num_qubits & 2 == 2 { + out.x.extend(&PAULI_LOOKUP_2[x & 0b11]); + out.z.extend(&PAULI_LOOKUP_2[z & 0b11]); + x >>= 2; + z >>= 2; + } + if out.num_qubits & 4 == 4 { + out.x.extend(&PAULI_LOOKUP_4[x & 0b1111]); + out.z.extend(&PAULI_LOOKUP_4[z & 0b1111]); + x >>= 4; + z >>= 4; + } + for _ in 0..(out.num_qubits / 8) { + out.x.extend(&PAULI_LOOKUP_8[x & 0b1111_1111]); + out.z.extend(&PAULI_LOOKUP_8[z & 0b1111_1111]); + x >>= 8; + z >>= 8; + } + + let phase = phase % 4; + let value = match phase { + 0 => Complex64::new(out.scale, 0.0) * value, + 1 => Complex64::new(0.0, out.scale) * value, + 2 => Complex64::new(-out.scale, 0.0) * value, + 3 => Complex64::new(0.0, -out.scale) * value, + _ => unreachable!("'x % 4' has only four possible values"), }; - let mid = 1usize << (num_qubits - 1); - recurse_if_nonzero( - Pauli::I, - 0.5 * factor, - &block.slice(s![..mid, ..mid]) + &block.slice(s![mid.., mid..]), - ); - recurse_if_nonzero( - Pauli::X, - 0.5 * factor, - &block.slice(s![..mid, mid..]) + &block.slice(s![mid.., ..mid]), - ); - recurse_if_nonzero( - Pauli::Y, - 0.5 * Complex64::i() * factor, - &block.slice(s![..mid, mid..]) - &block.slice(s![mid.., ..mid]), - ); - recurse_if_nonzero( - Pauli::Z, - 0.5 * factor, - &block.slice(s![..mid, ..mid]) - &block.slice(s![mid.., mid..]), - ); + out.phases.push(phase); + out.coeffs.push(value); +} + +/// Pack the information about which row, column and qubit we're considering into a single `usize`. +/// Complex64 data is 16 bytes long and the operators are square and must be addressable in memory, +/// so the row and column are hardware limited to be of width `usize::BITS / 2 - 2` each. However, +/// we don't need to store at a granularity of 1, because the last 2x2 block we handle manually, so +/// we can remove an extra least significant bit from the row and column. 
Regardless of the width
+/// of `usize`, we can therefore track the state for up to 30 qubits losslessly, and a 30-qubit
+/// dense operator is already larger than the maximum addressable memory on a 64-bit system.
+///
+/// For a 64-bit usize, the bit pattern is stored like this:
+///
+///    0b__000101__11111111111111111111111110000__11111111111111111111111110000
+///        <-6--> <------------29-------------> <------------29------------->
+///           |                  |                              |
+///           |       uint of the input row        uint of the input column
+///           |       (once a 0 is appended)       (once a 0 is appended)
+///           |
+///         current qubit under consideration
+///
+/// The `qubit` field encodes the depth in the call stack that the user of the `PauliLocation`
+/// should consider. When the stack is initialised (before any calculation is done), it starts at
+/// the highest qubit index (`num_qubits - 1`) and decreases from there until 0.
+///
+/// The `row` and `col` methods form the top-left corner of a `(2**(qubit + 1), 2**(qubit + 1))`
+/// submatrix (where the top row and leftmost column are 0). The least significant `qubit + 1`
+/// bits of the row and column are therefore always zero; the 0-indexed qubit still corresponds
+/// to a 2x2 block. This is why we needn't store it.
+#[derive(Debug, Clone, Copy)]
+struct PauliLocation(usize);
+
+impl PauliLocation {
+    const QUBIT_SHIFT: u32 = usize::BITS - 6;
+    const QUBIT_MASK: usize = (usize::MAX >> Self::QUBIT_SHIFT) << Self::QUBIT_SHIFT;
+    const ROW_SHIFT: u32 = usize::BITS / 2 - 3;
+    const ROW_MASK: usize =
+        ((usize::MAX >> Self::ROW_SHIFT) << Self::ROW_SHIFT) & !Self::QUBIT_MASK;
+    const COL_SHIFT: u32 = 0;
+    const COL_MASK: usize = usize::MAX & !Self::ROW_MASK & !Self::QUBIT_MASK;
+
+    /// Create the base `PauliLocation` for an entire matrix with `num_qubits` qubits. The initial
+    /// Pauli chain is empty.
+    #[inline(always)]
+    fn begin(num_qubits: usize) -> Self {
+        Self::new(0, 0, num_qubits - 1)
+    }
+
+    /// Manually create a new `PauliLocation` with the given information. The logic in the rest of
+    /// the class assumes that `row` and `col` will end with at least `qubit + 1` zeros, since
+    /// these are the only valid locations.
+    #[inline(always)]
+    fn new(row: usize, col: usize, qubit: usize) -> Self {
+        debug_assert!(row & 1 == 0);
+        debug_assert!(col & 1 == 0);
+        debug_assert!(row < 2 << Self::ROW_SHIFT as usize);
+        debug_assert!(col < 2 << Self::ROW_SHIFT as usize);
+        debug_assert!(qubit < 64);
+        Self(
+            (qubit << Self::QUBIT_SHIFT)
+                | (row << Self::ROW_SHIFT >> 1)
+                | (col << Self::COL_SHIFT >> 1),
+        )
+    }
+
+    /// The row in the dense matrix that this location corresponds to.
+    #[inline(always)]
+    fn row(&self) -> usize {
+        ((self.0 & Self::ROW_MASK) >> Self::ROW_SHIFT) << 1
+    }
+
+    /// The column in the dense matrix that this location corresponds to.
+    #[inline(always)]
+    fn col(&self) -> usize {
+        ((self.0 & Self::COL_MASK) >> Self::COL_SHIFT) << 1
+    }
+
+    /// Which qubit in the Pauli chain we're currently considering.
+    #[inline(always)]
+    fn qubit(&self) -> usize {
+        (self.0 & Self::QUBIT_MASK) >> Self::QUBIT_SHIFT
+    }
+
+    /// Create a new location corresponding to the Pauli chain so far, plus an identity on the
+    /// currently considered qubit.
+    #[inline(always)]
+    fn push_i(&self) -> Self {
+        Self::new(self.row(), self.col(), self.qubit() - 1)
+    }
+
+    /// Create a new location corresponding to the Pauli chain so far, plus an X on the currently
+    /// considered qubit.
+    #[inline(always)]
+    fn push_x(&self) -> Self {
+        Self::new(
+            self.row(),
+            self.col() | (1 << self.qubit()),
+            self.qubit() - 1,
+        )
+    }
+
+    /// Create a new location corresponding to the Pauli chain so far, plus a Y on the currently
+    /// considered qubit.
+    #[inline(always)]
+    fn push_y(&self) -> Self {
+        Self::new(
+            self.row() | (1 << self.qubit()),
+            self.col(),
+            self.qubit() - 1,
+        )
+    }
+
+    /// Create a new location corresponding to the Pauli chain so far, plus a Z on the currently
+    /// considered qubit.
+    #[inline(always)]
+    fn push_z(&self) -> Self {
+        Self::new(
+            self.row() | (1 << self.qubit()),
+            self.col() | (1 << self.qubit()),
+            self.qubit() - 1,
+        )
+    }
 }
 
 /// Convert the given [ZXPaulis] object to a dense 2D Numpy matrix.
diff --git a/releasenotes/notes/faster-pauli-decomposition-faf2be01a6e75fff.yaml b/releasenotes/notes/faster-pauli-decomposition-faf2be01a6e75fff.yaml
new file mode 100644
index 000000000000..56ad1a725f9a
--- /dev/null
+++ b/releasenotes/notes/faster-pauli-decomposition-faf2be01a6e75fff.yaml
@@ -0,0 +1,7 @@
+---
+features_quantum_info:
+  - |
+    The performance of :meth:`.SparsePauliOp.from_operator` has been optimized, on top of the
+    algorithmic improvements introduced in Qiskit 1.0. It is now approximately five times faster
+    than before for fully dense matrices, taking approximately 40ms to decompose a 10-qubit
+    operator involving all Pauli terms.
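
For reference, the optimised code path is only reachable through the existing Python-space API, so
a minimal usage sketch of the call the release note describes is given below. This is illustrative
only and not part of the patch; the 4-qubit size and the seed are arbitrary example values.

    # Illustrative only: decompose a dense operator into Pauli terms with the
    # method this patch speeds up.  The operator size and seed are arbitrary.
    import numpy as np
    from qiskit.quantum_info import SparsePauliOp, random_unitary

    op = random_unitary(2**4, seed=2023)          # a dense 4-qubit operator
    decomposed = SparsePauliOp.from_operator(op)  # Pauli decomposition (the optimised path)
    assert np.allclose(decomposed.to_matrix(), op.data)  # round-trips to the same matrix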