Skip to content

Commit

Permalink
Add exactn cpp binding (#1014)
Browse files Browse the repository at this point in the history
**Context:**
Adding the backend **Exact Tensor Network** from `lightning.tensor` to
the Python layer

**Description of the Change:**
* Add pybind layer for the `ExaTNCuda` class
* Update the python layer unit tests.
* Python layer refactoring to allow runtime selection of MPS and ExaTN
* Python layer unit tests update (gates, analytical measurement)

**Benefits:**
1. Refactor MPSTNCuda class to TNCuda class
* Both MPS and Exact TensorNetwork backends will be handled by the
TNCuda class
* User can select either MPS or Exact TensorNetwork at runtime by
passing a string (`mps` or `exatn`) to the constructor of the TNCuda class.

2. Measurement class
* `expval()` support can be obtained without changing the current code base for
the MPS backend.

**Possible Drawbacks:**
* `qml.StatePrep()` won't be supported for 'exatn'

**Related GitHub Issues:**

[sc-77837][sc-77840]

---------

Co-authored-by: Shuli Shu <[email protected]>
Co-authored-by: ringo-but-quantum <[email protected]>
Co-authored-by: Shuli Shu <[email protected]>
Co-authored-by: Ali Asadi <[email protected]>
  • Loading branch information
5 people authored Dec 11, 2024
1 parent e6827b2 commit 182b9cd
Show file tree
Hide file tree
Showing 20 changed files with 1,635 additions and 509 deletions.
3 changes: 3 additions & 0 deletions .github/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@

### Improvements

* Add Exact Tensor Network cpp binding.
[(#1014)](https://github.com/PennyLaneAI/pennylane-lightning/pull/1014/)

* Catalyst device interfaces support dynamic shots, and no longer parses the device init op's attribute dictionary for a static shots literal.
[(#1017)](https://github.com/PennyLaneAI/pennylane-lightning/pull/1017)

Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/tests_gpu_python.yml
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ jobs:
run: |
rm -rf build
PL_BACKEND=lightning_qubit python scripts/configure_pyproject_toml.py || true
PL_BACKEND=lightning_qubit SKIP_COMPILATION=True python -m pip install . -vv
PL_BACKEND=lightning_qubit python -m pip install . -vv
rm -rf build
PL_BACKEND=${{ matrix.pl_backend }} python scripts/configure_pyproject_toml.py || true
Expand Down
120 changes: 83 additions & 37 deletions pennylane_lightning/core/_serialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,18 @@ class QuantumScriptSerializer:
use_csingle (bool): whether to use np.complex64 instead of np.complex128
use_mpi (bool, optional): If using MPI to accelerate calculation. Defaults to False.
split_obs (Union[bool, int], optional): If splitting the observables in a list. Defaults to False.
tensor_backend (str): Only for `lightning.tensor`; selects the TensorNetwork backend, ``mps`` or ``tn``. Defaults to ''
"""

# pylint: disable=import-outside-toplevel, too-many-instance-attributes, c-extension-no-member, too-many-branches, too-many-statements
# pylint: disable=import-outside-toplevel, too-many-instance-attributes, c-extension-no-member, too-many-branches, too-many-statements too-many-positional-arguments too-many-arguments
def __init__(
self, device_name, use_csingle: bool = False, use_mpi: bool = False, split_obs: bool = False
self,
device_name,
use_csingle: bool = False,
use_mpi: bool = False,
split_obs: bool = False,
tensor_backend: str = str(),
):
self.use_csingle = use_csingle
self.device_name = device_name
Expand Down Expand Up @@ -95,43 +101,14 @@ def __init__(
else:
raise DeviceError(f'The device name "{device_name}" is not a valid option.')

if device_name == "lightning.tensor":
self.tensornetwork_c64 = lightning_ops.TensorNetC64
self.tensornetwork_c128 = lightning_ops.TensorNetC128
else:
self.statevector_c64 = lightning_ops.StateVectorC64
self.statevector_c128 = lightning_ops.StateVectorC128

self.named_obs_c64 = lightning_ops.observables.NamedObsC64
self.named_obs_c128 = lightning_ops.observables.NamedObsC128
self.hermitian_obs_c64 = lightning_ops.observables.HermitianObsC64
self.hermitian_obs_c128 = lightning_ops.observables.HermitianObsC128
self.tensor_prod_obs_c64 = lightning_ops.observables.TensorProdObsC64
self.tensor_prod_obs_c128 = lightning_ops.observables.TensorProdObsC128
self.hamiltonian_c64 = lightning_ops.observables.HamiltonianC64
self.hamiltonian_c128 = lightning_ops.observables.HamiltonianC128

if device_name != "lightning.tensor":
self.sparse_hamiltonian_c64 = lightning_ops.observables.SparseHamiltonianC64
self.sparse_hamiltonian_c128 = lightning_ops.observables.SparseHamiltonianC128

self._use_mpi = use_mpi

if self._use_mpi:
self.statevector_mpi_c64 = lightning_ops.StateVectorMPIC64
self.statevector_mpi_c128 = lightning_ops.StateVectorMPIC128
self.named_obs_mpi_c64 = lightning_ops.observablesMPI.NamedObsMPIC64
self.named_obs_mpi_c128 = lightning_ops.observablesMPI.NamedObsMPIC128
self.hermitian_obs_mpi_c64 = lightning_ops.observablesMPI.HermitianObsMPIC64
self.hermitian_obs_mpi_c128 = lightning_ops.observablesMPI.HermitianObsMPIC128
self.tensor_prod_obs_mpi_c64 = lightning_ops.observablesMPI.TensorProdObsMPIC64
self.tensor_prod_obs_mpi_c128 = lightning_ops.observablesMPI.TensorProdObsMPIC128
self.hamiltonian_mpi_c64 = lightning_ops.observablesMPI.HamiltonianMPIC64
self.hamiltonian_mpi_c128 = lightning_ops.observablesMPI.HamiltonianMPIC128
self.sparse_hamiltonian_mpi_c64 = lightning_ops.observablesMPI.SparseHamiltonianMPIC64
self.sparse_hamiltonian_mpi_c128 = lightning_ops.observablesMPI.SparseHamiltonianMPIC128

self._mpi_manager = lightning_ops.MPIManager
if device_name in ["lightning.qubit", "lightning.kokkos", "lightning.gpu"]:
assert tensor_backend == str()
self._set_lightning_state_bindings(lightning_ops)
else:
self._tensor_backend = tensor_backend
self._set_lightning_tensor_bindings(tensor_backend, lightning_ops)

@property
def ctype(self):
Expand Down Expand Up @@ -193,6 +170,75 @@ def sparse_hamiltonian_obs(self):
)
return self.sparse_hamiltonian_c64 if self.use_csingle else self.sparse_hamiltonian_c128

def _set_lightning_state_bindings(self, lightning_ops):
"""Define the variables needed to access the modules from the C++ bindings for state vector."""

self.statevector_c64 = lightning_ops.StateVectorC64
self.statevector_c128 = lightning_ops.StateVectorC128

self.named_obs_c64 = lightning_ops.observables.NamedObsC64
self.named_obs_c128 = lightning_ops.observables.NamedObsC128
self.hermitian_obs_c64 = lightning_ops.observables.HermitianObsC64
self.hermitian_obs_c128 = lightning_ops.observables.HermitianObsC128
self.tensor_prod_obs_c64 = lightning_ops.observables.TensorProdObsC64
self.tensor_prod_obs_c128 = lightning_ops.observables.TensorProdObsC128
self.hamiltonian_c64 = lightning_ops.observables.HamiltonianC64
self.hamiltonian_c128 = lightning_ops.observables.HamiltonianC128

self.sparse_hamiltonian_c64 = lightning_ops.observables.SparseHamiltonianC64
self.sparse_hamiltonian_c128 = lightning_ops.observables.SparseHamiltonianC128

if self._use_mpi:
self.statevector_mpi_c64 = lightning_ops.StateVectorMPIC64
self.statevector_mpi_c128 = lightning_ops.StateVectorMPIC128

self.named_obs_mpi_c64 = lightning_ops.observablesMPI.NamedObsMPIC64
self.named_obs_mpi_c128 = lightning_ops.observablesMPI.NamedObsMPIC128
self.hermitian_obs_mpi_c64 = lightning_ops.observablesMPI.HermitianObsMPIC64
self.hermitian_obs_mpi_c128 = lightning_ops.observablesMPI.HermitianObsMPIC128
self.tensor_prod_obs_mpi_c64 = lightning_ops.observablesMPI.TensorProdObsMPIC64
self.tensor_prod_obs_mpi_c128 = lightning_ops.observablesMPI.TensorProdObsMPIC128
self.hamiltonian_mpi_c64 = lightning_ops.observablesMPI.HamiltonianMPIC64
self.hamiltonian_mpi_c128 = lightning_ops.observablesMPI.HamiltonianMPIC128

self.sparse_hamiltonian_mpi_c64 = lightning_ops.observablesMPI.SparseHamiltonianMPIC64
self.sparse_hamiltonian_mpi_c128 = lightning_ops.observablesMPI.SparseHamiltonianMPIC128

self._mpi_manager = lightning_ops.MPIManager

def _set_lightning_tensor_bindings(self, tensor_backend, lightning_ops):
"""Define the variables needed to access the modules from the C++ bindings for tensor network."""
if tensor_backend == "mps":
self.tensornetwork_c64 = lightning_ops.mpsTensorNetC64
self.tensornetwork_c128 = lightning_ops.mpsTensorNetC128

self.named_obs_c64 = lightning_ops.observables.mpsNamedObsC64
self.named_obs_c128 = lightning_ops.observables.mpsNamedObsC128
self.hermitian_obs_c64 = lightning_ops.observables.mpsHermitianObsC64
self.hermitian_obs_c128 = lightning_ops.observables.mpsHermitianObsC128
self.tensor_prod_obs_c64 = lightning_ops.observables.mpsTensorProdObsC64
self.tensor_prod_obs_c128 = lightning_ops.observables.mpsTensorProdObsC128
self.hamiltonian_c64 = lightning_ops.observables.mpsHamiltonianC64
self.hamiltonian_c128 = lightning_ops.observables.mpsHamiltonianC128

elif tensor_backend == "tn":
self.tensornetwork_c64 = lightning_ops.exactTensorNetC64
self.tensornetwork_c128 = lightning_ops.exactTensorNetC128

self.named_obs_c64 = lightning_ops.observables.exactNamedObsC64
self.named_obs_c128 = lightning_ops.observables.exactNamedObsC128
self.hermitian_obs_c64 = lightning_ops.observables.exactHermitianObsC64
self.hermitian_obs_c128 = lightning_ops.observables.exactHermitianObsC128
self.tensor_prod_obs_c64 = lightning_ops.observables.exactTensorProdObsC64
self.tensor_prod_obs_c128 = lightning_ops.observables.exactTensorProdObsC128
self.hamiltonian_c64 = lightning_ops.observables.exactHamiltonianC64
self.hamiltonian_c128 = lightning_ops.observables.exactHamiltonianC128

else:
raise ValueError(
f"Unsupported method: {tensor_backend}. Supported methods are 'mps' (Matrix Product State) and 'tn' (Exact Tensor Network)."
)

def _named_obs(self, observable, wires_map: dict = None):
"""Serializes a Named observable"""
wires = [wires_map[w] for w in observable.wires] if wires_map else observable.wires.tolist()
Expand Down
2 changes: 1 addition & 1 deletion pennylane_lightning/core/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.40.0-dev33"
__version__ = "0.40.0-dev34"
2 changes: 1 addition & 1 deletion pennylane_lightning/core/src/bindings/Bindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,6 @@ PYBIND11_MODULE(
// Register bindings for backend-specific info:
registerBackendSpecificInfo(m);

registerLightningTensorClassBindings<TensorNetBackends>(m);
registerLightningTensorClassBindings<TensorNetworkBackends>(m);
}
#endif
141 changes: 136 additions & 5 deletions pennylane_lightning/core/src/bindings/Bindings.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -749,7 +749,7 @@ void registerLightningTensorBackendAgnosticMeasurements(PyClass &pyclass) {
"Variance of an observable object.")
.def("generate_samples", [](MeasurementsT &M,
const std::vector<std::size_t> &wires,
const std::size_t num_shots) {
std::size_t num_shots) {
constexpr auto sz = sizeof(std::size_t);
const std::size_t num_wires = wires.size();
const std::size_t ndim = 2;
Expand All @@ -769,23 +769,154 @@ void registerLightningTensorBackendAgnosticMeasurements(PyClass &pyclass) {
});
}

/**
 * @brief Register observable classes for TensorNetwork.
 *
 * Each pybind11 class name is prefixed with `name` so that the MPS and
 * exact tensor-network backends can expose their observable classes side
 * by side in the same module (e.g. `mpsNamedObsC128`, `exactNamedObsC128`).
 *
 * @tparam LightningBackendT Tensor-network backend type.
 * @param m Pybind module
 * @param name Backend name prefix of the TN — "mps" or "exact" (the
 * backend's `method` constant); see the `TensorNetT::method` usage in
 * `lightningTensorClassBindings`.
 */
template <class LightningBackendT>
void registerBackendAgnosticObservablesTensor(py::module_ &m,
                                              const std::string &name) {
    using PrecisionT =
        typename LightningBackendT::PrecisionT; // LightningBackendT's
                                                // precision.
    using ComplexT =
        typename LightningBackendT::ComplexT; // LightningBackendT's
                                              // complex type.
    using ParamT = PrecisionT; // Parameter's data precision

    // Bit width of the complex scalar (64/128), appended to class names.
    const std::string bitsize =
        std::to_string(sizeof(std::complex<PrecisionT>) * 8);

    using np_arr_c = py::array_t<std::complex<ParamT>, py::array::c_style>;
    using np_arr_r = py::array_t<ParamT, py::array::c_style>;

    using ObservableT = ObservableTNCuda<LightningBackendT>;
    using NamedObsT = NamedObsTNCuda<LightningBackendT>;
    using HermitianObsT = HermitianObsTNCuda<LightningBackendT>;
    using TensorProdObsT = TensorProdObsTNCuda<LightningBackendT>;
    using HamiltonianT = HamiltonianTNCuda<LightningBackendT>;

    std::string class_name;

    // Observable base class: registered (without a constructor) so the
    // derived observables below can be passed around as
    // shared_ptr<ObservableT> from Python.
    class_name = std::string(name) + "ObservableC" + bitsize;
    py::class_<ObservableT, std::shared_ptr<ObservableT>>(m, class_name.c_str(),
                                                          py::module_local());

    // Named observable: constructed from an operator name and its wires.
    class_name = std::string(name) + "NamedObsC" + bitsize;
    py::class_<NamedObsT, std::shared_ptr<NamedObsT>, ObservableT>(
        m, class_name.c_str(), py::module_local())
        .def(py::init(
            [](const std::string &name, const std::vector<std::size_t> &wires) {
                return NamedObsT(name, wires);
            }))
        .def("__repr__", &NamedObsT::getObsName)
        .def("get_wires", &NamedObsT::getWires, "Get wires of observables")
        .def(
            "__eq__",
            // Type check first: comparing against a non-NamedObsT Python
            // object is False, not an error.
            [](const NamedObsT &self, py::handle other) -> bool {
                if (!py::isinstance<NamedObsT>(other)) {
                    return false;
                }
                auto &&other_cast = other.cast<NamedObsT>();
                return self == other_cast;
            },
            "Compare two observables");

    // Hermitian observable: dense matrix (NumPy complex array, C order)
    // applied to the given wires.
    class_name = std::string(name) + "HermitianObsC" + bitsize;
    py::class_<HermitianObsT, std::shared_ptr<HermitianObsT>, ObservableT>(
        m, class_name.c_str(), py::module_local())
        .def(py::init([](const np_arr_c &matrix,
                         const std::vector<std::size_t> &wires) {
            // Copy the NumPy buffer into a std::vector owned by the C++ side.
            auto const &buffer = matrix.request();
            const auto ptr = static_cast<ComplexT *>(buffer.ptr);
            return HermitianObsT(std::vector<ComplexT>(ptr, ptr + buffer.size),
                                 wires);
        }))
        .def("__repr__", &HermitianObsT::getObsName)
        .def("get_wires", &HermitianObsT::getWires, "Get wires of observables")
        .def("get_matrix", &HermitianObsT::getMatrix,
             "Get matrix representation of Hermitian operator")
        .def(
            "__eq__",
            [](const HermitianObsT &self, py::handle other) -> bool {
                if (!py::isinstance<HermitianObsT>(other)) {
                    return false;
                }
                auto &&other_cast = other.cast<HermitianObsT>();
                return self == other_cast;
            },
            "Compare two observables");

    // Tensor product observable: product of previously-built observables.
    class_name = std::string(name) + "TensorProdObsC" + bitsize;
    py::class_<TensorProdObsT, std::shared_ptr<TensorProdObsT>, ObservableT>(
        m, class_name.c_str(), py::module_local())
        .def(py::init([](const std::vector<std::shared_ptr<ObservableT>> &obs) {
            return TensorProdObsT(obs);
        }))
        .def("__repr__", &TensorProdObsT::getObsName)
        .def("get_wires", &TensorProdObsT::getWires, "Get wires of observables")
        .def("get_ops", &TensorProdObsT::getObs, "Get operations list")
        .def(
            "__eq__",
            [](const TensorProdObsT &self, py::handle other) -> bool {
                if (!py::isinstance<TensorProdObsT>(other)) {
                    return false;
                }
                auto &&other_cast = other.cast<TensorProdObsT>();
                return self == other_cast;
            },
            "Compare two observables");

    // Hamiltonian observable: linear combination — real coefficients
    // (NumPy array) paired with a list of observable terms.
    class_name = std::string(name) + "HamiltonianC" + bitsize;
    using ObsPtr = std::shared_ptr<ObservableT>;
    py::class_<HamiltonianT, std::shared_ptr<HamiltonianT>, ObservableT>(
        m, class_name.c_str(), py::module_local())
        .def(py::init(
            [](const np_arr_r &coeffs, const std::vector<ObsPtr> &obs) {
                // Copy coefficient buffer; terms are shared via ObsPtr.
                auto const &buffer = coeffs.request();
                const auto ptr = static_cast<ParamT *>(buffer.ptr);
                return HamiltonianT{std::vector<ParamT>(ptr, ptr + buffer.size),
                                    obs};
            }))
        .def("__repr__", &HamiltonianT::getObsName)
        .def("get_wires", &HamiltonianT::getWires, "Get wires of observables")
        .def("get_ops", &HamiltonianT::getObs,
             "Get operations contained by Hamiltonian")
        .def("get_coeffs", &HamiltonianT::getCoeffs,
             "Get Hamiltonian coefficients")
        .def(
            "__eq__",
            [](const HamiltonianT &self, py::handle other) -> bool {
                if (!py::isinstance<HamiltonianT>(other)) {
                    return false;
                }
                auto &&other_cast = other.cast<HamiltonianT>();
                return self == other_cast;
            },
            "Compare two observables");
}

/**
* @brief Templated class to build lightning.tensor class bindings.
*
* @tparam TensorNetT Tensor network type
* @tparam TensorNetT Tensor network type.
* @param m Pybind11 module.
*/
template <class TensorNetT> void lightningTensorClassBindings(py::module_ &m) {
using PrecisionT =
typename TensorNetT::PrecisionT; // TensorNet's precision.
// Enable module name to be based on size of complex datatype
auto name = TensorNetT::method; // TensorNet's backend name [mps, exact].
const std::string bitsize =
std::to_string(sizeof(std::complex<PrecisionT>) * 8);

//***********************************************************************//
// TensorNet
//***********************************************************************//
std::string class_name = "TensorNetC" + bitsize;
std::string class_name = std::string(name) + "TensorNetC" + bitsize;
auto pyclass =
py::class_<TensorNetT>(m, class_name.c_str(), py::module_local());

Expand All @@ -797,12 +928,12 @@ template <class TensorNetT> void lightningTensorClassBindings(py::module_ &m) {
/* Observables submodule */
py::module_ obs_submodule =
m.def_submodule("observables", "Submodule for observables classes.");
registerBackendAgnosticObservables<TensorNetT>(obs_submodule);
registerBackendAgnosticObservablesTensor<TensorNetT>(obs_submodule, name);

//***********************************************************************//
// Measurements
//***********************************************************************//
class_name = "MeasurementsC" + bitsize;
class_name = std::string(name) + "MeasurementsC" + bitsize;
auto pyclass_measurements = py::class_<MeasurementsTNCuda<TensorNetT>>(
m, class_name.c_str(), py::module_local());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ class ExactTNCuda final : public TNCuda<Precision, ExactTNCuda<Precision>> {
using BaseType = TNCuda<Precision, ExactTNCuda>;

public:
constexpr static auto method = "exacttn";
constexpr static auto method = "exact";

using CFP_t = decltype(cuUtil::getCudaType(Precision{}));
using ComplexT = std::complex<Precision>;
Expand Down
Loading

0 comments on commit 182b9cd

Please sign in to comment.