Generalize seeding for measurements #1003

Merged
merged 44 commits on Dec 3, 2024
Changes from 39 commits
Commits
44 commits
b37faa2
Generalize seeding for measurements on LQ
rauletorresc Nov 22, 2024
11398e4
Auto update version from '0.40.0-dev13' to '0.40.0-dev14'
ringo-but-quantum Nov 22, 2024
45dedb1
Generalize seeding in LK
rauletorresc Nov 22, 2024
09d4cf7
Merge branch 'master' into raultorres/generalize_measures_seeding
rauletorresc Nov 22, 2024
fe56e00
Auto update version from '0.40.0-dev14' to '0.40.0-dev15'
ringo-but-quantum Nov 22, 2024
e53d51e
Generalize seeding for LG
rauletorresc Nov 22, 2024
c8b7f4b
Reuse original measure info for new measure
rauletorresc Nov 25, 2024
dd848a3
Prepend 'this->' to gen property
rauletorresc Nov 25, 2024
be61b6c
Add optional argument to 'generate_samples' in LK and LG
rauletorresc Nov 25, 2024
373ff8a
Merge branch 'master' into raultorres/generalize_measures_seeding
rauletorresc Nov 26, 2024
1809ba1
Use a seed instead of PRNG in the derived measure class of LQ
rauletorresc Nov 26, 2024
4edd498
Use seed instead of PRNG for LK and LG
rauletorresc Nov 26, 2024
56878a7
Move seeding logic to the base class
rauletorresc Nov 27, 2024
9432fb1
Remove unnecessary get method
rauletorresc Nov 27, 2024
f718fb6
Remove unnecessary header
rauletorresc Nov 27, 2024
d00f168
Change var to a const reference
rauletorresc Nov 27, 2024
24c4a3b
Restore hard-coded seed in test
rauletorresc Nov 27, 2024
2c49df3
Remove const type from member var
rauletorresc Nov 27, 2024
d56da2f
Remove unnecessary header
rauletorresc Nov 27, 2024
541e9cb
Merge branch 'master' into raultorres/generalize_measures_seeding
rauletorresc Nov 27, 2024
858e960
Fix method naming
rauletorresc Nov 27, 2024
3e9db45
Remove unnecessary set methods
rauletorresc Nov 27, 2024
6fd513e
Add plugins tests for probs
rauletorresc Nov 28, 2024
77501e9
Auto update version from '0.40.0-dev20' to '0.40.0-dev21'
ringo-but-quantum Nov 28, 2024
86eddf5
Merge branch 'master' into raultorres/generalize_measures_seeding
rauletorresc Nov 28, 2024
4f51f28
Remove unnecessary header
rauletorresc Nov 28, 2024
a448c57
Remove unnecessary capture in lambda
rauletorresc Nov 28, 2024
844bf92
Add setDeviceShots to probs tests
rauletorresc Nov 28, 2024
cd2166f
Merge branch 'master' into raultorres/generalize_measures_seeding
rauletorresc Nov 28, 2024
ea9bb12
Auto update version from '0.40.0-dev21' to '0.40.0-dev22'
ringo-but-quantum Nov 28, 2024
f419d37
Capture shots in lambda
rauletorresc Nov 28, 2024
3d1f005
Apply code review suggestions
rauletorresc Nov 28, 2024
753dd05
Apply more code review suggestions
rauletorresc Nov 28, 2024
8684357
Auto update version from '0.40.0-dev22' to '0.40.0-dev23'
ringo-but-quantum Nov 28, 2024
59f63af
Merge branch 'master' into raultorres/generalize_measures_seeding
rauletorresc Nov 28, 2024
0c7cba3
Auto update version from '0.40.0-dev23' to '0.40.0-dev24'
ringo-but-quantum Nov 28, 2024
604982d
Set seed when generating samples
rauletorresc Nov 29, 2024
d1cb4f2
Add test for Var in LQ, LK and LGPU
rauletorresc Nov 29, 2024
cbb44dd
Add tests for Expval in LQ, LK and LG
rauletorresc Nov 29, 2024
cdb8895
Update changelog
rauletorresc Nov 29, 2024
6c8f11e
Merge branch 'master' into raultorres/generalize_measures_seeding
rauletorresc Nov 28, 2024
95b7f51
Add constexpr qualifier
rauletorresc Dec 3, 2024
56d28fc
Add constexpr
rauletorresc Dec 3, 2024
b46a8de
Remove shots from capture
rauletorresc Dec 3, 2024
3 changes: 3 additions & 0 deletions .github/CHANGELOG.md
@@ -47,6 +47,9 @@
* Update the `lightning.kokkos` CUDA backend for compatibility with Catalyst.
[(#942)](https://github.com/PennyLaneAI/pennylane-lightning/pull/942)

* Generalize seeding mechanism for all measurements.
[(#1003)](https://github.com/PennyLaneAI/pennylane-lightning/pull/1003)

### Documentation

* Update Lightning-Tensor installation docs and usage suggestions.
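For context, a minimal usage sketch of the generalized seeding this changelog entry refers to. The Measurements, setSeed, generate_samples and probs names follow the diffs below; constructing the backend state vector is elided, so the snippet is indicative rather than a verbatim excerpt:

// Hedged sketch: seeding the measurements object once makes every
// shot-based result (samples, probs, expval, var) reproducible for a
// given seed, across all Lightning backends.
Measurements<StateVectorT> measurer(statevector);
measurer.setSeed(42);          // explicit seed; setSeed() alone falls back to random seeding
auto samples = measurer.generate_samples(100000);
std::vector<std::size_t> wires{0, 1};
auto probabilities = measurer.probs(wires, 100000);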
2 changes: 1 addition & 1 deletion pennylane_lightning/core/_version.py
@@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.40.0-dev22"
__version__ = "0.40.0-dev23"
21 changes: 18 additions & 3 deletions pennylane_lightning/core/src/measurements/MeasurementsBase.hpp
@@ -17,6 +17,7 @@
*/
#pragma once

#include <optional>
#include <random>
#include <string>
#include <vector>
@@ -56,7 +57,9 @@ template <class StateVectorT, class Derived> class MeasurementsBase {
#else
const StateVectorT &_statevector;
#endif
std::mt19937 rng;
std::optional<std::size_t> _deviceseed{std::nullopt};

std::mt19937 _rng;

public:
#ifdef _ENABLE_PLGPU
@@ -72,15 +75,22 @@ template <class StateVectorT, class Derived> class MeasurementsBase {
*
* @param seed Seed
*/
void setSeed(const std::size_t seed) { rng.seed(seed); }
void setSeed(const std::optional<std::size_t> &seed = std::nullopt) {
if (seed.has_value()) {
_rng.seed(seed.value());
this->_deviceseed = seed;
} else {
setRandomSeed();
}
}

/**
* @brief Randomly set the seed of the internal random generator
*
*/
void setRandomSeed() {
std::random_device rd;
setSeed(rd());
_rng.seed(rd());
}

/**
@@ -288,6 +298,7 @@ template <class StateVectorT, class Derived> class MeasurementsBase {
sv.updateData(data_storage.data(), data_storage.size());
obs.applyInPlaceShots(sv, eigenvalues, obs_wires);
Derived measure(sv);
measure.setSeed(this->_deviceseed);
if (num_shots > std::size_t{0}) {
return measure.probs(obs_wires, num_shots);
}
@@ -296,6 +307,7 @@ template <class StateVectorT, class Derived> class MeasurementsBase {
StateVectorT sv(_statevector);
obs.applyInPlaceShots(sv, eigenvalues, obs_wires);
Derived measure(sv);
measure.setSeed(this->_deviceseed);
if (num_shots > std::size_t{0}) {
return measure.probs(obs_wires, num_shots);
}
@@ -388,6 +400,7 @@ template <class StateVectorT, class Derived> class MeasurementsBase {
*/
auto sample(const std::size_t &num_shots) -> std::vector<std::size_t> {
Derived measure(_statevector);
measure.setSeed(this->_deviceseed);
return measure.generate_samples(num_shots);
}

@@ -479,11 +492,13 @@ template <class StateVectorT, class Derived> class MeasurementsBase {
StateVectorT sv(data_storage.data(), data_storage.size());
obs.applyInPlaceShots(sv, eigenValues, obs_wires);
Derived measure(sv);
measure.setSeed(this->_deviceseed);
samples = measure.generate_samples(num_shots);
} else {
StateVectorT sv(_statevector);
obs.applyInPlaceShots(sv, eigenValues, obs_wires);
Derived measure(sv);
measure.setSeed(this->_deviceseed);
samples = measure.generate_samples(num_shots);
}

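The hunks above all follow one pattern: an optional seed stored on the base class either seeds the member engine directly or, when absent, falls back to random seeding, and it is forwarded to every internally constructed Derived measurement via measure.setSeed(this->_deviceseed). A self-contained sketch of that pattern, as an illustration only and not the library classes themselves:

#include <cassert>
#include <optional>
#include <random>

// Illustration of the optional-seed pattern used in MeasurementsBase above.
class SeededSampler {
  public:
    void setSeed(const std::optional<std::size_t> &seed = std::nullopt) {
        if (seed.has_value()) {
            rng_.seed(seed.value());
            seed_ = seed;
        } else {
            std::random_device rd;
            rng_.seed(rd());
        }
    }

    double draw() {
        std::uniform_real_distribution<double> dis(0.0, 1.0);
        return dis(rng_);
    }

  private:
    std::optional<std::size_t> seed_{std::nullopt};
    std::mt19937 rng_;
};

int main() {
    SeededSampler a, b;
    a.setSeed(42);
    b.setSeed(42);
    assert(a.draw() == b.draw()); // same seed, same stream
    a.setSeed();                  // no seed: non-deterministic fallback
    return 0;
}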
@@ -1257,6 +1257,7 @@ void testSamples(const std::optional<std::size_t> &seed = std::nullopt) {
// This object attaches to the statevector allowing several
// measurements.
Measurements<StateVectorT> Measurer(statevector);
Measurer.setSeed(seed);

std::vector<PrecisionT> expected_probabilities = {
0.67078706, 0.03062806, 0.0870997, 0.00397696,
@@ -1265,10 +1266,7 @@
std::size_t num_qubits = 3;
std::size_t N = std::pow(2, num_qubits);
std::size_t num_samples = 100000;
auto &&samples =
seed.has_value()
? Measurer.generate_samples(num_samples, seed.value())
: Measurer.generate_samples(num_samples);
auto &&samples = Measurer.generate_samples(num_samples);

std::vector<std::size_t> counts(N, 0);
std::vector<std::size_t> samples_decimal(num_samples, 0);
@@ -256,6 +256,8 @@ auto LightningGPUSimulator::Expval(ObsIdType obsKey) -> double {
Pennylane::LightningGPU::Measures::Measurements<StateVectorT> m{
*(this->device_sv)};

m.setSeed(this->generateSeed());

return device_shots ? m.expval(*obs, device_shots, {}) : m.expval(*obs);
}

@@ -273,6 +275,8 @@ auto LightningGPUSimulator::Var(ObsIdType obsKey) -> double {
Pennylane::LightningGPU::Measures::Measurements<StateVectorT> m{
*(this->device_sv)};

m.setSeed(this->generateSeed());

return device_shots ? m.var(*obs, device_shots) : m.var(*obs);
}

@@ -294,6 +298,9 @@ void LightningGPUSimulator::State(DataView<std::complex<double>, 1> &state) {
void LightningGPUSimulator::Probs(DataView<double, 1> &probs) {
Pennylane::LightningGPU::Measures::Measurements<StateVectorT> m{
*(this->device_sv)};

m.setSeed(this->generateSeed());

auto &&dv_probs = device_shots ? m.probs(device_shots) : m.probs();

RT_FAIL_IF(probs.size() != dv_probs.size(),
@@ -313,6 +320,9 @@ void LightningGPUSimulator::PartialProbs(
auto dev_wires = getDeviceWires(wires);
Pennylane::LightningGPU::Measures::Measurements<StateVectorT> m{
*(this->device_sv)};

m.setSeed(this->generateSeed());

auto &&dv_probs =
device_shots ? m.probs(dev_wires, device_shots) : m.probs(dev_wires);

@@ -327,9 +337,8 @@ std::vector<size_t> LightningGPUSimulator::GenerateSamples(size_t shots) {
Pennylane::LightningGPU::Measures::Measurements<StateVectorT> m{
*(this->device_sv)};

if (this->gen) {
return m.generate_samples(shots, (*(this->gen))());
}
m.setSeed(this->generateSeed());

return m.generate_samples(shots);
}

@@ -95,6 +95,14 @@ class LightningGPUSimulator final : public Catalyst::Runtime::QuantumDevice {
return res;
}

inline auto generateSeed() -> std::optional<std::size_t> {
if (this->gen != nullptr) {
return (*(this->gen))();
}

return std::nullopt;
}

auto GenerateSamples(size_t shots) -> std::vector<size_t>;

public:
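generateSeed is the bridge between the device-level PRNG (attached by the caller through SetDevicePRNG, as in the tests below) and the per-measurement seed passed to setSeed. A self-contained sketch of its contract, with a free function standing in for the member shown above, as an illustration only:

#include <optional>
#include <random>

// Mirrors the helper above: draw the next seed from a caller-provided PRNG
// if one is attached, otherwise return std::nullopt so the measurement
// falls back to random seeding.
std::optional<std::size_t> generateSeed(std::mt19937 *gen) {
    if (gen != nullptr) {
        return (*gen)();
    }
    return std::nullopt;
}

int main() {
    std::mt19937 gen{37};
    auto seeded = generateSeed(&gen);       // deterministic: next value drawn from `gen`
    auto unseeded = generateSeed(nullptr);  // no PRNG attached -> std::nullopt
    return (seeded.has_value() && !unseeded.has_value()) ? 0 : 1;
}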
@@ -1823,3 +1823,137 @@ TEST_CASE("Sample with a seeded device", "[Measures]") {
}
}
}

TEST_CASE("Probs with a seeded device", "[Measures]") {
std::size_t shots = 1000;
std::array<std::unique_ptr<LGPUSimulator>, 2> sims;
std::vector<std::vector<double>> probs(2, std::vector<double>(16));

std::vector<DataView<double, 1>> views{DataView<double, 1>(probs[0]),
DataView<double, 1>(probs[1])};

std::vector<std::mt19937> gens{std::mt19937{37}, std::mt19937{37}};

auto circuit = [shots](LGPUSimulator &sim, DataView<double, 1> &view,
std::mt19937 &gen) {
sim.SetDevicePRNG(&gen);
sim.SetDeviceShots(shots);
// state-vector with #qubits = n
constexpr std::size_t n = 4;
std::vector<intptr_t> Qs;
Qs.reserve(n);
for (std::size_t i = 0; i < n; i++) {
Qs.push_back(sim.AllocateQubit());
}
sim.NamedOperation("Hadamard", {}, {Qs[0]}, false);
sim.NamedOperation("PauliY", {}, {Qs[1]}, false);
sim.NamedOperation("Hadamard", {}, {Qs[2]}, false);
sim.NamedOperation("PauliZ", {}, {Qs[3]}, false);
sim.Probs(view);
};

for (std::size_t trial = 0; trial < 5; trial++) {
sims[0] = std::make_unique<LGPUSimulator>();
sims[1] = std::make_unique<LGPUSimulator>();

for (std::size_t sim_idx = 0; sim_idx < sims.size(); sim_idx++) {
circuit(*(sims[sim_idx]), views[sim_idx], gens[sim_idx]);
}

for (std::size_t i = 0; i < probs[0].size(); i++) {
CHECK((probs[0][i] == probs[1][i]));
}
}
}

TEST_CASE("Var with a seeded device", "[Measures]") {
std::size_t shots = 1000;
std::array<std::unique_ptr<LGPUSimulator>, 2> sims;
std::array<std::vector<double>, 2> vars;

std::vector<std::mt19937> gens{std::mt19937{37}, std::mt19937{37}};

auto circuit = [shots](LGPUSimulator &sim, std::vector<double> &var,
std::mt19937 &gen) {
sim.SetDevicePRNG(&gen);
sim.SetDeviceShots(shots);
// state-vector with #qubits = n
constexpr std::size_t n = 4;
std::vector<intptr_t> Qs;
Qs.reserve(n);
for (std::size_t i = 0; i < n; i++) {
Qs.push_back(sim.AllocateQubit());
}
sim.NamedOperation("Hadamard", {}, {Qs[0]}, false);
sim.NamedOperation("PauliY", {}, {Qs[1]}, false);
sim.NamedOperation("Hadamard", {}, {Qs[2]}, false);
sim.NamedOperation("PauliZ", {}, {Qs[3]}, false);

ObsIdType px = sim.Observable(ObsId::PauliX, {}, {Qs[2]});
ObsIdType py = sim.Observable(ObsId::PauliY, {}, {Qs[0]});
ObsIdType pz = sim.Observable(ObsId::PauliZ, {}, {Qs[3]});

var.push_back(sim.Var(px));
var.push_back(sim.Var(py));
var.push_back(sim.Var(pz));
};

for (std::size_t trial = 0; trial < 5; trial++) {
sims[0] = std::make_unique<LGPUSimulator>();
sims[1] = std::make_unique<LGPUSimulator>();

for (std::size_t sim_idx = 0; sim_idx < sims.size(); sim_idx++) {
circuit(*(sims[sim_idx]), vars[sim_idx], gens[sim_idx]);
}

for (std::size_t i = 0; i < vars[0].size(); i++) {
CHECK((vars[0][i] == vars[1][i]));
}
}
}

TEST_CASE("Expval with a seeded device", "[Measures]") {
std::size_t shots = 1000;
std::array<std::unique_ptr<LGPUSimulator>, 2> sims;
std::array<std::vector<double>, 2> expvals;

std::vector<std::mt19937> gens{std::mt19937{37}, std::mt19937{37}};

auto circuit = [shots](LGPUSimulator &sim, std::vector<double> &expval,
std::mt19937 &gen) {
sim.SetDevicePRNG(&gen);
sim.SetDeviceShots(shots);
// state-vector with #qubits = n
constexpr std::size_t n = 4;
std::vector<intptr_t> Qs;
Qs.reserve(n);
for (std::size_t i = 0; i < n; i++) {
Qs.push_back(sim.AllocateQubit());
}
sim.NamedOperation("Hadamard", {}, {Qs[0]}, false);
sim.NamedOperation("PauliY", {}, {Qs[1]}, false);
sim.NamedOperation("Hadamard", {}, {Qs[2]}, false);
sim.NamedOperation("PauliZ", {}, {Qs[3]}, false);

ObsIdType px = sim.Observable(ObsId::PauliX, {}, {Qs[2]});
ObsIdType py = sim.Observable(ObsId::PauliY, {}, {Qs[0]});
ObsIdType pz = sim.Observable(ObsId::PauliZ, {}, {Qs[3]});

expval.push_back(sim.Expval(px));
expval.push_back(sim.Expval(py));
expval.push_back(sim.Expval(pz));
};

for (std::size_t trial = 0; trial < 5; trial++) {
sims[0] = std::make_unique<LGPUSimulator>();
sims[1] = std::make_unique<LGPUSimulator>();

for (std::size_t sim_idx = 0; sim_idx < sims.size(); sim_idx++) {
circuit(*(sims[sim_idx]), expvals[sim_idx], gens[sim_idx]);
}

for (std::size_t i = 0; i < expvals[0].size(); i++) {
CHECK((expvals[0][i] == expvals[1][i]));
}
}
}
@@ -25,7 +25,6 @@
#include <cuda.h>
#include <cusparse.h>
#include <custatevec.h> // custatevecApplyMatrix
#include <optional>
#include <random>
#include <type_traits>
#include <unordered_map>
@@ -215,9 +214,7 @@ class Measurements final
* be accessed using the stride sample_id*num_qubits, where sample_id is a
* number between 0 and num_samples-1.
*/
auto generate_samples(std::size_t num_samples,
const std::optional<std::size_t> &seed = std::nullopt)
-> std::vector<std::size_t> {
auto generate_samples(std::size_t num_samples) -> std::vector<std::size_t> {
std::vector<double> rand_nums(num_samples);
custatevecSamplerDescriptor_t sampler;

@@ -236,15 +233,11 @@
} else {
data_type = CUDA_C_32F;
}
this->setSeed(this->_deviceseed);

if (seed.has_value()) {
this->setSeed(seed.value());
} else {
this->setRandomSeed();
}
std::uniform_real_distribution<PrecisionT> dis(0.0, 1.0);
for (std::size_t n = 0; n < num_samples; n++) {
rand_nums[n] = dis(this->rng);
rand_nums[n] = dis(this->_rng);
}
std::vector<std::size_t> samples(num_samples * num_qubits, 0);
std::unordered_map<std::size_t, std::size_t> cache;
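With the per-call seed argument removed, every random draw in generate_samples now comes from the base class's member engine, which setSeed(this->_deviceseed) re-seeds from the stored optional seed (or randomly when it is absent). That single seeded stream is what makes the seeded-device tests above deterministic across Probs, Expval, Var and Sample. A small sketch of the draw loop in isolation, as an assumed illustration with the cuStateVec sampler calls omitted:

#include <random>
#include <vector>

// One uniform number per shot, all drawn from the already-seeded engine.
std::vector<double> drawShotRandoms(std::mt19937 &rng, std::size_t num_samples) {
    std::uniform_real_distribution<double> dis(0.0, 1.0);
    std::vector<double> rand_nums(num_samples);
    for (std::size_t n = 0; n < num_samples; n++) {
        rand_nums[n] = dis(rng);
    }
    return rand_nums;
}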