diff --git a/qiskit_machine_learning/kernels/algorithms/quantum_kernel_trainer.py b/qiskit_machine_learning/kernels/algorithms/quantum_kernel_trainer.py
index 8dc172ba4..4d272b2ac 100644
--- a/qiskit_machine_learning/kernels/algorithms/quantum_kernel_trainer.py
+++ b/qiskit_machine_learning/kernels/algorithms/quantum_kernel_trainer.py
@@ -1,6 +1,6 @@
 # This code is part of a Qiskit project.
 #
-# (C) Copyright IBM 2021, 2023.
+# (C) Copyright IBM 2021, 2024.
 #
 # This code is licensed under the Apache License, Version 2.0. You may
 # obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -13,7 +13,6 @@
 """Quantum Kernel Trainer"""
 from __future__ import annotations

-import copy
 from functools import partial
 from typing import Sequence

@@ -96,7 +95,8 @@ def __init__(
     ):
         """
         Args:
-            quantum_kernel: a trainable quantum kernel to be trained.
+            quantum_kernel: a trainable quantum kernel to be trained. The
+                :attr:`~.TrainableKernel.parameter_values` will be modified in place after training.
             loss: A loss function available via string is "svc_loss" which is the same as
                 :class:`~qiskit_machine_learning.utils.loss_functions.SVCLoss`.
                 If a string is passed as the loss function, then the underlying
@@ -179,7 +179,7 @@ def fit(
     ) -> QuantumKernelTrainerResult:
         """
         Train the QuantumKernel by minimizing loss over the kernel parameters. The input
-        quantum kernel will not be altered, and an optimized quantum kernel will be returned.
+        quantum kernel will be altered in place.

         Args:
             data (numpy.ndarray): ``(N, D)`` array of training data, where ``N`` is the
@@ -198,9 +198,6 @@
             msg = "Quantum kernel cannot be fit because there are no user parameters specified."
             raise ValueError(msg)

-        # Bind inputs to objective function
-        output_kernel = copy.deepcopy(self._quantum_kernel)
-
         # Randomly initialize the initial point if one was not passed
         if self._initial_point is None:
             self._initial_point = algorithm_globals.random.random(num_params)
@@ -222,11 +219,13 @@
         result.optimizer_evals = opt_results.nfev
         result.optimal_value = opt_results.fun
         result.optimal_point = opt_results.x
-        result.optimal_parameters = dict(zip(output_kernel.training_parameters, opt_results.x))
+        result.optimal_parameters = dict(
+            zip(self.quantum_kernel.training_parameters, opt_results.x)
+        )

         # Return the QuantumKernel in optimized state
-        output_kernel.assign_training_parameters(result.optimal_parameters)
-        result.quantum_kernel = output_kernel
+        self.quantum_kernel.assign_training_parameters(result.optimal_parameters)
+        result.quantum_kernel = self.quantum_kernel

         return result

diff --git a/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py b/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py
index 24a373b09..fc72b9635 100644
--- a/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py
+++ b/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py
@@ -1,6 +1,6 @@
 # This code is part of a Qiskit project.
 #
-# (C) Copyright IBM 2022, 2023.
+# (C) Copyright IBM 2022, 2024.
 #
 # This code is licensed under the Apache License, Version 2.0. You may
 # obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -46,6 +46,7 @@ def __init__(
         fidelity: BaseStateFidelity | None = None,
         enforce_psd: bool = True,
         evaluate_duplicates: str = "off_diagonal",
+        max_circuits_per_job: int | None = None,
     ) -> None:
         """
         Args:
@@ -73,6 +74,8 @@
                     - ``none`` when training the diagonal is set to `1` and if two identical samples
                       are found in the dataset the corresponding matrix element is set to `1`.
                       When inferring, matrix elements for identical samples are set to `1`.
+            max_circuits_per_job: Maximum number of circuits per job for the backend. Please
+                check the backend specifications. If ``None``, all circuits are run in a single job (default).
         Raises:
             ValueError: When unsupported value is passed to `evaluate_duplicates`.
         """
@@ -84,10 +87,15 @@
                 f"Unsupported value passed as evaluate_duplicates: {evaluate_duplicates}"
             )
         self._evaluate_duplicates = eval_duplicates
-
         if fidelity is None:
             fidelity = ComputeUncompute(sampler=Sampler())
         self._fidelity = fidelity
+        if max_circuits_per_job is not None:
+            if max_circuits_per_job < 1:
+                raise ValueError(
+                    f"Unsupported value passed as max_circuits_per_job: {max_circuits_per_job}"
+                )
+        self.max_circuits_per_job = max_circuits_per_job

     def evaluate(self, x_vec: np.ndarray, y_vec: np.ndarray | None = None) -> np.ndarray:
         x_vec, y_vec = self._validate_input(x_vec, y_vec)
@@ -214,17 +222,38 @@ def _get_kernel_entries(
         back from the async job.
         """
         num_circuits = left_parameters.shape[0]
+        kernel_entries = []
+        # Check if it is a trivial case, i.e., only identical samples
         if num_circuits != 0:
-            job = self._fidelity.run(
-                [self._feature_map] * num_circuits,
-                [self._feature_map] * num_circuits,
-                left_parameters,
-                right_parameters,
-            )
-            kernel_entries = job.result().fidelities
-        else:
-            # trivial case, only identical samples
-            kernel_entries = []
+            if self.max_circuits_per_job is None:
+                job = self._fidelity.run(
+                    [self._feature_map] * num_circuits,
+                    [self._feature_map] * num_circuits,
+                    left_parameters,
+                    right_parameters,
+                )
+                kernel_entries = job.result().fidelities
+            else:
+                # Determine the number of chunks needed
+                num_chunks = (
+                    num_circuits + self.max_circuits_per_job - 1
+                ) // self.max_circuits_per_job
+                for i in range(num_chunks):
+                    # Determine the range of indices for this chunk
+                    start_idx = i * self.max_circuits_per_job
+                    end_idx = min((i + 1) * self.max_circuits_per_job, num_circuits)
+                    # Extract the parameters for this chunk
+                    chunk_left_parameters = left_parameters[start_idx:end_idx]
+                    chunk_right_parameters = right_parameters[start_idx:end_idx]
+                    # Execute this chunk
+                    job = self._fidelity.run(
+                        [self._feature_map] * (end_idx - start_idx),
+                        [self._feature_map] * (end_idx - start_idx),
+                        chunk_left_parameters,
+                        chunk_right_parameters,
+                    )
+                    # Extend the kernel_entries list with the results from this chunk
+                    kernel_entries.extend(job.result().fidelities)
         return kernel_entries

     def _is_trivial(
diff --git a/releasenotes/notes/fix-701-max_circuits_per_job-and-600-deepcopy-dependency-e6eda2e5b986c1be.yaml b/releasenotes/notes/fix-701-max_circuits_per_job-and-600-deepcopy-dependency-e6eda2e5b986c1be.yaml
new file mode 100644
index 000000000..a9b504fdf
--- /dev/null
+++ b/releasenotes/notes/fix-701-max_circuits_per_job-and-600-deepcopy-dependency-e6eda2e5b986c1be.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+  - |
+    Added a ``max_circuits_per_job`` parameter to :class:`.FidelityQuantumKernel`. If more
+    circuits are submitted than the job limit for the backend, the circuits are split up
+    and run through separate jobs.
+  - |
+    Removed the :class:`.QuantumKernelTrainer` dependency on ``copy.deepcopy`` that was
+    throwing an error with real backends. Now the trainer modifies the :class:`.TrainableKernel`
+    in place. If you would like to use the initial kernel, please call
+    :meth:`~.TrainableKernel.assign_training_parameters` of the :class:`~.TrainableKernel`
+    using the :attr:`~.QuantumKernelTrainer.initial_point` attribute of
+    :class:`~.QuantumKernelTrainer`.
+
diff --git a/test/kernels/test_fidelity_qkernel.py b/test/kernels/test_fidelity_qkernel.py
index ffe0f56a5..6a132f14e 100644
--- a/test/kernels/test_fidelity_qkernel.py
+++ b/test/kernels/test_fidelity_qkernel.py
@@ -106,10 +106,27 @@ def test_defaults(self):

         self.assertGreaterEqual(score, 0.5)

+    def test_max_circuits_per_job(self):
+        """Test the max_circuits_per_job parameter."""
+        kernel_all = FidelityQuantumKernel(feature_map=self.feature_map, max_circuits_per_job=None)
+        kernel_matrix_all = kernel_all.evaluate(x_vec=self.sample_train)
+        with self.subTest("Check when max_circuits_per_job > left_parameters"):
+            kernel_more = FidelityQuantumKernel(
+                feature_map=self.feature_map, max_circuits_per_job=20
+            )
+            kernel_matrix_more = kernel_more.evaluate(x_vec=self.sample_train)
+            np.testing.assert_equal(kernel_matrix_all, kernel_matrix_more)
+        with self.subTest("Check when max_circuits_per_job = 1"):
+            kernel_1 = FidelityQuantumKernel(feature_map=self.feature_map, max_circuits_per_job=1)
+            kernel_matrix_1 = kernel_1.evaluate(x_vec=self.sample_train)
+            np.testing.assert_equal(kernel_matrix_all, kernel_matrix_1)
+
     def test_exceptions(self):
         """Test quantum kernel raises exceptions and warnings."""
         with self.assertRaises(ValueError, msg="Unsupported value of 'evaluate_duplicates'."):
             _ = FidelityQuantumKernel(evaluate_duplicates="wrong")
+        with self.assertRaises(ValueError, msg="Unsupported value of 'max_circuits_per_job'."):
+            _ = FidelityQuantumKernel(max_circuits_per_job=-1)

     @idata(
         # params, fidelity, feature map, enforce_psd, duplicate
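For reference, the snippet below is a minimal usage sketch of the two behaviours changed above. It is not part of the diff: the feature map, toy dataset, and initial point are illustrative placeholders, and the trainable-kernel construction follows the usual Qiskit Machine Learning pattern rather than anything prescribed by this patch.

import numpy as np
from qiskit.circuit import ParameterVector, QuantumCircuit
from qiskit.circuit.library import ZZFeatureMap
from qiskit_machine_learning.kernels import FidelityQuantumKernel, TrainableFidelityQuantumKernel
from qiskit_machine_learning.kernels.algorithms import QuantumKernelTrainer

X = np.array([[0.1, 0.4], [0.5, 0.9], [0.2, 0.7], [0.8, 0.3]])  # toy (N, D) samples
y = np.array([0, 1, 0, 1])  # toy labels

# New parameter: with max_circuits_per_job=None (the default) all fidelity circuits are
# submitted in one job; with a positive integer they are split into chunks of that size.
kernel = FidelityQuantumKernel(feature_map=ZZFeatureMap(2), max_circuits_per_job=2)
print(kernel.evaluate(x_vec=X))  # same kernel matrix as without chunking, just more jobs

# Changed behaviour: QuantumKernelTrainer.fit now optimizes the passed kernel in place.
training_params = ParameterVector("theta", 1)
rotation_layer = QuantumCircuit(2)
rotation_layer.ry(training_params[0], 0)
rotation_layer.ry(training_params[0], 1)
trainable_kernel = TrainableFidelityQuantumKernel(
    feature_map=rotation_layer.compose(ZZFeatureMap(2)),
    training_parameters=training_params,
)
trainer = QuantumKernelTrainer(quantum_kernel=trainable_kernel, initial_point=[0.1])
result = trainer.fit(X, y)
assert result.quantum_kernel is trainable_kernel  # no deep copy is returned anymore

# To recover the pre-training state, re-assign the initial point, as the release note suggests.
trainable_kernel.assign_training_parameters(trainer.initial_point)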