diff --git a/.pylintdict b/.pylintdict
index cf18023a..708c67c5 100644
--- a/.pylintdict
+++ b/.pylintdict
@@ -31,6 +31,7 @@ bitstrings
 bloch
 boltzmann
 bool
+boolean
 boyer
 brassard
 broyden
@@ -238,6 +239,8 @@ optimizer's
 optimizers
 otimes
 o'brien
+parallelization
+parallelized
 param
 parameterizations
 parametrization
@@ -255,6 +258,7 @@ postprocess
 powell
 pre
 preconditioner
+prepend
 preprint
 preprocess
 preprocesses
@@ -297,6 +301,7 @@ rightarrow
 robert
 rosen
 runarsson
+runtime
 rz
 sanjiv
 sashank
@@ -310,6 +315,7 @@ scikit
 scipy
 sdg
 seealso
+serializable
 shanno
 skquant
 sle
diff --git a/qiskit_algorithms/optimizers/aqgd.py b/qiskit_algorithms/optimizers/aqgd.py
index cf5695c8..0592a19b 100644
--- a/qiskit_algorithms/optimizers/aqgd.py
+++ b/qiskit_algorithms/optimizers/aqgd.py
@@ -57,6 +57,7 @@ def __init__(
         momentum: float | list[float] = 0.25,
         param_tol: float = 1e-6,
         averaging: int = 10,
+        max_evals_grouped: int = 1,
     ) -> None:
         """
         Performs Analytical Quantum Gradient Descent (AQGD) with Epochs.
@@ -73,6 +74,7 @@ def __init__(
             param_tol: Tolerance for change in norm of parameters.
             averaging: Length of window over which to average objective values for objective
                 convergence criterion
+            max_evals_grouped: Max number of default gradient evaluations performed simultaneously.
 
         Raises:
             AlgorithmError: If the length of ``maxiter``, `momentum``, and ``eta`` is not the same.
@@ -98,6 +100,7 @@ def __init__(
         self._param_tol = param_tol
         self._tol = tol
         self._averaging = averaging
+        self.set_max_evals_grouped(max_evals_grouped)
 
         # state
         self._avg_objval: float | None = None
@@ -156,7 +159,15 @@ def _compute_objective_fn_and_gradient(
         )
         # Evaluate,
         # reshaping to flatten, as expected by objective function
-        values = np.array(obj(param_sets_to_eval.reshape(-1)))
+        if self._max_evals_grouped > 1:
+            batches = [
+                param_sets_to_eval[i : i + self._max_evals_grouped]
+                for i in range(0, len(param_sets_to_eval), self._max_evals_grouped)
+            ]
+            values = np.array(np.concatenate([obj(b) for b in batches]))
+        else:
+            batches = param_sets_to_eval
+            values = np.array([obj(b) for b in batches])
 
         # Update number of objective function evaluations
         self._eval_count += 2 * num_params + 1
@@ -312,7 +323,6 @@ def minimize(
 
         iter_count = 0
         logger.info("Initial Params: %s", params)
 
-        epoch = 0
         converged = False
         for (eta, mom_coeff) in zip(self._eta, self._momenta_coeff):
@@ -327,7 +337,6 @@ def minimize(
                 converged = self._converged_parameter(params, self._param_tol)
                 if converged:
                     break
-
                 # Calculate objective function and estimate of analytical gradient
                 if jac is None:
                     objval, gradient = self._compute_objective_fn_and_gradient(params, fun)
diff --git a/releasenotes/notes/fix_aqgd_max_grouped_evals-fbe108c005a9b7ac.yaml b/releasenotes/notes/fix_aqgd_max_grouped_evals-fbe108c005a9b7ac.yaml
new file mode 100644
index 00000000..9a0bcc92
--- /dev/null
+++ b/releasenotes/notes/fix_aqgd_max_grouped_evals-fbe108c005a9b7ac.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+  - |
+    Fixed the AQGD optimizer grouping objective function calls by default, so that a single point is now passed to
+    the objective function. For algorithms whose objective function can handle more than one gradient evaluation at
+    a time, such as the VQE implementation in this package, the number of grouped evaluations can be controlled via
+    the max_evals_grouped parameter. Grouped evaluations allow a list of points to be handed over so that they can
+    potentially be assessed more efficiently in a single job.
+
diff --git a/test/optimizers/test_optimizer_aqgd.py b/test/optimizers/test_optimizer_aqgd.py
index c2136a95..df500648 100644
--- a/test/optimizers/test_optimizer_aqgd.py
+++ b/test/optimizers/test_optimizer_aqgd.py
@@ -14,6 +14,8 @@
 import unittest
 from test import QiskitAlgorithmsTestCase
 
+import numpy as np
+from ddt import ddt, data
 from qiskit.circuit.library import RealAmplitudes
 from qiskit.primitives import Estimator
 from qiskit.quantum_info import SparsePauliOp
@@ -26,6 +28,7 @@
 from qiskit_algorithms.utils import algorithm_globals
 
 
+@ddt
 class TestOptimizerAQGD(QiskitAlgorithmsTestCase):
     """Test AQGD optimizer using RY for analytic gradient with VQE"""
 
@@ -93,6 +96,44 @@ def test_int_values(self):
         self.assertAlmostEqual(result.eigenvalue.real, -1.857, places=3)
 
+    @data(1, 2, 3)  # Values for max_grouped_evals
+    def test_max_grouped_evals_parallelizable(self, max_grouped_evals):
+        """Tests max_grouped_evals for an objective function that can be parallelized"""
+        aqgd = AQGD(momentum=0.0, max_evals_grouped=2)
+
+        vqe = VQE(
+            self.estimator,
+            ansatz=RealAmplitudes(),
+            optimizer=aqgd,
+            gradient=self.gradient,
+        )
+
+        with self.subTest(max_grouped_evals=max_grouped_evals):
+            aqgd.set_max_evals_grouped(max_grouped_evals)
+            result = vqe.compute_minimum_eigenvalue(operator=self.qubit_op)
+            self.assertAlmostEqual(result.eigenvalue.real, -1.857, places=3)
+
+    def test_max_grouped_evals_non_parallelizable(self):
+        """Tests max_grouped_evals for an objective function that cannot be parallelized"""
+        # Define the objective function (toy example for functionality)
+        def quadratic_objective(x: np.ndarray) -> float:
+            # Check if only a single point as parameters is passed
+            if np.array(x).ndim != 1:
+                raise ValueError("The function expects a vector.")
+
+            return x[0] ** 2 + x[1] ** 2 - 2 * x[0] * x[1]
+
+        # Define initial point
+        x0 = np.array([1, 2.23])
+        # Test max_evals_grouped raises no error for max_evals_grouped=1
+        aqgd = AQGD(maxiter=100, max_evals_grouped=1)
+        x_new = aqgd.minimize(quadratic_objective, x0).x
+        self.assertAlmostEqual(sum(np.round(x_new / max(x_new), 7)), 0)
+        # Test max_evals_grouped raises an error for max_evals_grouped=2
+        aqgd.set_max_evals_grouped(2)
+        with self.assertRaises(ValueError):
+            aqgd.minimize(quadratic_objective, x0)
+
 
 if __name__ == "__main__":
     unittest.main()
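Note: the sketch below is an illustrative usage example of the behaviour changed in this patch, outside of VQE. It mirrors the toy quadratic objective used in the tests above; the function and variable names (single_point_objective, batched_objective, x0) are made up for this example and are not part of the change itself.

    import numpy as np

    from qiskit_algorithms.optimizers import AQGD

    def single_point_objective(x: np.ndarray) -> float:
        # Default behaviour (max_evals_grouped=1): the optimizer passes one
        # parameter vector per call and expects a scalar value back.
        return float(x[0] ** 2 + x[1] ** 2 - 2 * x[0] * x[1])

    def batched_objective(points: np.ndarray) -> np.ndarray:
        # With max_evals_grouped > 1 the optimizer hands over a 2-D batch of
        # points, so the objective must return one value per row.
        points = np.atleast_2d(points)
        return points[:, 0] ** 2 + points[:, 1] ** 2 - 2 * points[:, 0] * points[:, 1]

    x0 = np.array([1.0, 2.23])

    # One point per objective call (the fixed default).
    print(AQGD(maxiter=100).minimize(single_point_objective, x0).x)

    # Opt in to grouped evaluations when the objective can handle batches,
    # e.g. to evaluate several points in a single job.
    print(AQGD(maxiter=100, max_evals_grouped=2).minimize(batched_objective, x0).x)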