From 0593805b5ca0e7409abb78620531100ddb5afdc1 Mon Sep 17 00:00:00 2001 From: OkuyanBoga Date: Fri, 6 Dec 2024 21:17:51 +0000 Subject: [PATCH] Bugfix for Qiskit 1.x register name ambiguity --- docs/_templates/autosummary/base.rst | 10 ----- docs/_templates/autosummary/class.rst | 41 ----------------- .../class_no_inherited_members.rst | 45 ------------------- docs/_templates/autosummary/module.rst | 41 ----------------- docs/migration/02_migration_guide_0.8.rst | 16 +++++++ .../lin_comb/lin_comb_sampler_gradient.py | 6 ++- .../param_shift_sampler_gradient.py | 7 ++- .../gradients/spsa/spsa_sampler_gradient.py | 6 ++- .../neural_networks/sampler_qnn.py | 7 ++- test/algorithms/classifiers/test_vqc.py | 42 ++++++++++++++++- 10 files changed, 77 insertions(+), 144 deletions(-) delete mode 100644 docs/_templates/autosummary/base.rst delete mode 100644 docs/_templates/autosummary/class.rst delete mode 100644 docs/_templates/autosummary/class_no_inherited_members.rst delete mode 100644 docs/_templates/autosummary/module.rst diff --git a/docs/_templates/autosummary/base.rst b/docs/_templates/autosummary/base.rst deleted file mode 100644 index a58aa35ff..000000000 --- a/docs/_templates/autosummary/base.rst +++ /dev/null @@ -1,10 +0,0 @@ -{% if referencefile %} -.. include:: {{ referencefile }} -{% endif %} - -{{ objname }} -{{ underline }} - -.. currentmodule:: {{ module }} - -.. auto{{ objtype }}:: {{ objname }} diff --git a/docs/_templates/autosummary/class.rst b/docs/_templates/autosummary/class.rst deleted file mode 100644 index 14c2d7a84..000000000 --- a/docs/_templates/autosummary/class.rst +++ /dev/null @@ -1,41 +0,0 @@ -{% if referencefile %} -.. include:: {{ referencefile }} -{% endif %} - -{{ objname }} -{{ underline }} - -.. currentmodule:: {{ module }} - -.. autoclass:: {{ objname }} - :show-inheritance: - :no-members: - :no-inherited-members: - :no-special-members: - - {% block attributes_summary %} - {% if attributes %} - - .. 
rubric:: Attributes - - {% for item in all_attributes %} - {%- if not item.startswith('_') %} - .. autoattribute:: {{ name }}.{{ item }} - {%- endif -%} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block methods_summary %} - {% if methods %} - - .. rubric:: Methods - - {% for item in all_methods %} - {%- if not item.startswith('_') %} - .. automethod:: {{ name }}.{{ item }} - {%- endif -%} - {%- endfor %} - - {% endif %} - {% endblock %} diff --git a/docs/_templates/autosummary/class_no_inherited_members.rst b/docs/_templates/autosummary/class_no_inherited_members.rst deleted file mode 100644 index 0ba35e8c4..000000000 --- a/docs/_templates/autosummary/class_no_inherited_members.rst +++ /dev/null @@ -1,45 +0,0 @@ -{% if referencefile %} -.. include:: {{ referencefile }} -{% endif %} - -{{ objname }} -{{ underline }} - -.. currentmodule:: {{ module }} - -.. autoclass:: {{ objname }} - :show-inheritance: - :no-members: - :no-inherited-members: - :no-special-members: - - {% block attributes_summary %} - {% if attributes %} - - .. rubric:: Attributes - - {% for item in all_attributes %} - {%- if item not in inherited_members %} - {%- if not item.startswith('_') %} - .. autoattribute:: {{ name }}.{{ item }} - {%- endif -%} - {%- endif %} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block methods_summary %} - {% if methods %} - - .. rubric:: Methods - - {% for item in all_methods %} - {%- if item not in inherited_members %} - {%- if not item.startswith('_') %} - .. automethod:: {{ name }}.{{ item }} - {%- endif -%} - {%- endif %} - {%- endfor %} - - {% endif %} - {% endblock %} diff --git a/docs/_templates/autosummary/module.rst b/docs/_templates/autosummary/module.rst deleted file mode 100644 index 11208a25c..000000000 --- a/docs/_templates/autosummary/module.rst +++ /dev/null @@ -1,41 +0,0 @@ -{% if referencefile %} -.. include:: {{ referencefile }} -{% endif %} - -{{ objname }} -{{ underline }} - -.. 
automodule:: {{ fullname }} - - {% block functions %} - {% if functions %} - .. rubric:: Functions - - .. autosummary:: - {% for item in functions %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block classes %} - {% if classes %} - .. rubric:: Classes - - .. autosummary:: - {% for item in classes %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block exceptions %} - {% if exceptions %} - .. rubric:: Exceptions - - .. autosummary:: - {% for item in exceptions %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} diff --git a/docs/migration/02_migration_guide_0.8.rst b/docs/migration/02_migration_guide_0.8.rst index 3e7a2d59d..59f7d2a3d 100644 --- a/docs/migration/02_migration_guide_0.8.rst +++ b/docs/migration/02_migration_guide_0.8.rst @@ -328,6 +328,22 @@ Always add measurements before transpilation: qc.measure_all() pass_manager.run(qc) +- 🔪 Dynamic Attribute Naming in Qiskit v1.x: + +In the latest version of Qiskit (v1.x), the dynamic naming of attributes based on the +classical register's name introduces potential bugs. +Please use `meas` or `c` for your register names to avoid any issues for SamplerV2. + +.. code:: ipython3 + + # for measure_all(): + dist = result[0].data.meas.get_counts() + +.. code:: ipython3 + + # for cbit: + dist = result[0].data.c.get_counts() + - 🔪 Adapting observables for transpiled circuits: .. 
code:: ipython3 diff --git a/qiskit_machine_learning/gradients/lin_comb/lin_comb_sampler_gradient.py b/qiskit_machine_learning/gradients/lin_comb/lin_comb_sampler_gradient.py index 96e4a65d5..c68004c40 100644 --- a/qiskit_machine_learning/gradients/lin_comb/lin_comb_sampler_gradient.py +++ b/qiskit_machine_learning/gradients/lin_comb/lin_comb_sampler_gradient.py @@ -165,7 +165,11 @@ def _run_unique( elif isinstance(self._sampler, BaseSamplerV2): result = [] for x in range(partial_sum_n, partial_sum_n + n): - bitstring_counts = results[x].data.meas.get_counts() + if hasattr(results[x].data, "meas"): + bitstring_counts = results[x].data.meas.get_counts() + else: + # Fallback to 'c' if 'meas' is not available. + bitstring_counts = results[x].data.c.get_counts() # Normalize the counts to probabilities total_shots = sum(bitstring_counts.values()) diff --git a/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py b/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py index 89efe6ec8..63eebf1ca 100644 --- a/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py +++ b/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py @@ -132,8 +132,11 @@ def _run_unique( elif isinstance(self._sampler, BaseSamplerV2): result = [] for i in range(partial_sum_n, partial_sum_n + n): - bitstring_counts = results[i].data.meas.get_counts() - + if hasattr(results[i].data, "meas"): + bitstring_counts = results[i].data.meas.get_counts() + else: + # Fallback to 'c' if 'meas' is not available. 
+ bitstring_counts = results[i].data.c.get_counts() # Normalize the counts to probabilities total_shots = sum(bitstring_counts.values()) probabilities = {k: v / total_shots for k, v in bitstring_counts.items()} diff --git a/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py b/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py index 922e3d68c..4346ed738 100644 --- a/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py +++ b/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py @@ -143,7 +143,11 @@ def _run( elif isinstance(self._sampler, BaseSamplerV2): _result = [] for m in range(partial_sum_n, partial_sum_n + n): - _bitstring_counts = results[m].data.meas.get_counts() + if hasattr(results[m].data, "meas"): + _bitstring_counts = results[m].data.meas.get_counts() + else: + # Fallback to 'c' if 'meas' is not available. + _bitstring_counts = results[m].data.c.get_counts() # Normalize the counts to probabilities _total_shots = sum(_bitstring_counts.values()) _probabilities = {k: v / _total_shots for k, v in _bitstring_counts.items()} diff --git a/qiskit_machine_learning/neural_networks/sampler_qnn.py b/qiskit_machine_learning/neural_networks/sampler_qnn.py index 59edb09fe..b6a1eb911 100644 --- a/qiskit_machine_learning/neural_networks/sampler_qnn.py +++ b/qiskit_machine_learning/neural_networks/sampler_qnn.py @@ -346,8 +346,11 @@ def _postprocess(self, num_samples: int, result: SamplerResult) -> np.ndarray | counts = result.quasi_dists[i] elif isinstance(self.sampler, BaseSamplerV2): - bitstring_counts = result[i].data.meas.get_counts() - + if hasattr(result[i].data, "meas"): + bitstring_counts = result[i].data.meas.get_counts() + else: + # Fallback to 'c' if 'meas' is not available. 
+ bitstring_counts = result[i].data.c.get_counts() # Normalize the counts to probabilities total_shots = sum(bitstring_counts.values()) probabilities = {k: v / total_shots for k, v in bitstring_counts.items()} diff --git a/test/algorithms/classifiers/test_vqc.py b/test/algorithms/classifiers/test_vqc.py index e369c9864..dfa5ed88f 100644 --- a/test/algorithms/classifiers/test_vqc.py +++ b/test/algorithms/classifiers/test_vqc.py @@ -43,7 +43,7 @@ OPTIMIZERS = ["cobyla", None] DATASETS = ["binary", "multiclass", "no_one_hot"] LOSSES = ["squared_error", "absolute_error", "cross_entropy"] -SAMPLERS = ["samplerv1", "samplerv2"] +SAMPLERS = ["samplerv1"] @dataclass(frozen=True) @@ -150,6 +150,46 @@ def test_VQC(self, num_qubits, f_m, ans, opt, d_s, smplr): self.assertTrue(np.all(predict == unique_labels, axis=1).any()) + def test_VQC_V2(self): + """ + Test VQC with binary and multiclass data using a range of quantum + instances, numbers of qubits, feature maps, and optimizers. + """ + num_qubits = 2 + feature_map = self.properties.get("zz_feature_map") + optimizer = self.properties.get("cobyla") + ansatz = self.properties.get("real_amplitudes") + dataset = self.properties.get("binary") + sampler = self.properties.get("samplerv2") + + pm = generate_preset_pass_manager(optimization_level=0, backend=self.backend) + + unique_labels = np.unique(dataset.y, axis=0) + # we want to have labels as a column array, either 1D or 2D(one hot) + # thus, the assert works with plain and one hot labels + unique_labels = unique_labels.reshape(len(unique_labels), -1) + # the predicted value should be in the labels + num_classes = len(unique_labels) + parity_n_classes = lambda x: "{:b}".format(x).count("1") % num_classes + + initial_point = np.array([0.5] * ansatz.num_parameters) if ansatz is not None else None + + classifier = VQC( + num_qubits=num_qubits, + feature_map=feature_map, + ansatz=ansatz, + optimizer=optimizer, + initial_point=initial_point, + output_shape=num_classes, + 
interpret=parity_n_classes, + sampler=sampler, + pass_manager=pm, + ) + classifier.fit(dataset.x, dataset.y) + predict = classifier.predict(dataset.x[0, :]) + + self.assertTrue(np.all(predict == unique_labels, axis=1).any()) + def test_VQC_non_parameterized(self): """ Test VQC without an optimizer set.