From 2ff792e664d0c15b63017f356dd0f8d69891e57c Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 9 Oct 2023 11:58:57 +0000 Subject: [PATCH 01/47] initial support for backend.run() --- qiskit_ibm_runtime/ibm_backend.py | 298 +++++++++++++++++++++++++++++- 1 file changed, 293 insertions(+), 5 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 7c75c2a79..088ed9b63 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -14,12 +14,15 @@ import logging -from typing import Iterable, Union, Optional, Any, List +from typing import Iterable, Union, Optional, Any, List, Sequence, Dict from datetime import datetime as python_datetime from copy import deepcopy +from dataclasses import asdict +import warnings from qiskit import QuantumCircuit from qiskit.qobj.utils import MeasLevel, MeasReturnType +from qiskit.tools.events.pubsub import Publisher from qiskit.providers.backend import BackendV2 as Backend from qiskit.providers.options import Options from qiskit.providers.models import ( @@ -43,10 +46,16 @@ properties_from_server_data, ) from qiskit_ibm_provider.utils import local_to_utc +from qiskit_ibm_provider.utils import validate_job_tags, are_circuits_dynamic +from qiskit_ibm_provider.utils.options import QASM2Options, QASM3Options +from qiskit_ibm_provider.exceptions import IBMBackendValueError, IBMBackendApiError +from qiskit_ibm_provider.api.exceptions import RequestsApiError + from qiskit_ibm_runtime import ( # pylint: disable=unused-import,cyclic-import qiskit_runtime_service, ) +from .runtime_job import RuntimeJob from .api.clients import RuntimeClient from .api.clients.backend import BaseBackendClient @@ -57,6 +66,8 @@ logger = logging.getLogger(__name__) +QOBJRUNNERPROGRAMID = "circuit-runner" +QASM3RUNNERPROGRAMID = "qasm3-runner" class IBMBackend(Backend): """Backend class interfacing with an IBM Quantum backend. 
@@ -180,6 +191,8 @@ def __init__( self._defaults = None self._target = None self._max_circuits = configuration.max_experiments + self._session = None # temporarily + self._client_params = None # temporarily if ( not self._configuration.simulator and hasattr(self.options, "noise_model") @@ -492,10 +505,25 @@ def __call__(self) -> "IBMBackend": # For backward compatibility only, can be removed later. return self - def run(self, *args: Any, **kwargs: Any) -> None: - """Not supported method""" - # pylint: disable=arguments-differ - raise RuntimeError("IBMBackend.run() is not supported in the Qiskit Runtime environment.") + def _check_circuits_attributes(self, circuits: List[QuantumCircuit]) -> None: + """Check that circuits can be executed on backend. + Raises: + IBMBackendValueError: + - If one of the circuits contains more qubits than on the backend.""" + + if len(circuits) > self._max_circuits: + raise IBMBackendValueError( + f"Number of circuits, {len(circuits)} exceeds the " + f"maximum for this backend, {self._max_circuits})" + ) + for circ in circuits: + if isinstance(circ, QuantumCircuit): + if circ.num_qubits > self._configuration.num_qubits: + raise IBMBackendValueError( + f"Circuit contains {circ.num_qubits} qubits, " + f"but backend has only {self.num_qubits}." + ) + self.check_faulty(circ) def check_faulty(self, circuit: QuantumCircuit) -> None: """Check if the input circuit uses faulty qubits or edges. 
@@ -549,6 +577,266 @@ def __deepcopy__(self, _memo: dict = None) -> "IBMBackend": cpy._options = deepcopy(self._options, _memo) return cpy + def run( + self, + circuits: Union[QuantumCircuit, str, List[Union[QuantumCircuit, str]]], + dynamic: bool = None, + job_tags: Optional[List[str]] = None, + init_circuit: Optional[QuantumCircuit] = None, + init_num_resets: Optional[int] = None, + header: Optional[Dict] = None, + shots: Optional[Union[int, float]] = None, + memory: Optional[bool] = None, + meas_level: Optional[Union[int, MeasLevel]] = None, + meas_return: Optional[Union[str, MeasReturnType]] = None, + rep_delay: Optional[float] = None, + init_qubits: Optional[bool] = None, + use_measure_esp: Optional[bool] = None, + noise_model: Optional[Any] = None, + seed_simulator: Optional[int] = None, + **run_config: Dict, + ) -> RuntimeJob: + """Run on the backend. + If a keyword specified here is also present in the ``options`` attribute/object, + the value specified here will be used for this run. + + Args: + circuits: An individual or a + list of :class:`~qiskit.circuits.QuantumCircuit`. + dynamic: Whether the circuit is dynamic (uses in-circuit conditionals) + job_tags: Tags to be assigned to the job. The tags can subsequently be used + as a filter in the :meth:`jobs()` function call. + init_circuit: A quantum circuit to execute for initializing qubits before each circuit. + If specified, ``init_num_resets`` is ignored. Applicable only if ``dynamic=True`` + is specified. + init_num_resets: The number of qubit resets to insert before each circuit execution. + + The following parameters are applicable only if ``dynamic=False`` is specified or + defaulted to. + + header: User input that will be attached to the job and will be + copied to the corresponding result header. Headers do not affect the run. + This replaces the old ``Qobj`` header. + shots: Number of repetitions of each circuit, for sampling. 
Default: 4000 + or ``max_shots`` from the backend configuration, whichever is smaller. + memory: If ``True``, per-shot measurement bitstrings are returned as well + (provided the backend supports it). For OpenPulse jobs, only + measurement level 2 supports this option. + meas_level: Level of the measurement output for pulse experiments. See + `OpenPulse specification `_ for details: + + * ``0``, measurements of the raw signal (the measurement output pulse envelope) + * ``1``, measurement kernel is selected (a complex number obtained after applying the + measurement kernel to the measurement output signal) + * ``2`` (default), a discriminator is selected and the qubit state is stored (0 or 1) + + meas_return: Level of measurement data for the backend to return. For ``meas_level`` 0 and 1: + + * ``single`` returns information from every shot. + * ``avg`` returns average measurement output (averaged over number of shots). + + rep_delay: Delay between programs in seconds. Only supported on certain + backends (if ``backend.configuration().dynamic_reprate_enabled=True``). + If supported, ``rep_delay`` must be from the range supplied + by the backend (``backend.configuration().rep_delay_range``). Default is given by + ``backend.configuration().default_rep_delay``. + init_qubits: Whether to reset the qubits to the ground state for each shot. + Default: ``True``. + use_measure_esp: Whether to use excited state promoted (ESP) readout for measurements + which are the terminal instruction to a qubit. ESP readout can offer higher fidelity + than standard measurement sequences. See + `here `_. + Default: ``True`` if backend supports ESP readout, else ``False``. Backend support + for ESP readout is determined by the flag ``measure_esp_enabled`` in + ``backend.configuration()``. + noise_model: Noise model. (Simulators only) + seed_simulator: Random seed to control sampling. (Simulators only) + **run_config: Extra arguments used to configure the run. 
+ + Returns: + The job to be executed. + + Raises: + IBMBackendApiError: If an unexpected error occurred while submitting + the job. + IBMBackendApiProtocolError: If an unexpected value received from + the server. + IBMBackendValueError: + - If an input parameter value is not valid. + - If ESP readout is used and the backend does not support this. + """ + # pylint: disable=arguments-differ + validate_job_tags(job_tags, IBMBackendValueError) + if not isinstance(circuits, List): + circuits = [circuits] + self._check_circuits_attributes(circuits) + + if ( + use_measure_esp + and getattr(self.configuration(), "measure_esp_enabled", False) is False + ): + raise IBMBackendValueError( + "ESP readout not supported on this device. Please make sure the flag " + "'use_measure_esp' is unset or set to 'False'." + ) + actually_dynamic = are_circuits_dynamic(circuits) + if dynamic is False and actually_dynamic: + warnings.warn( + "Parameter 'dynamic' is False, but the circuit contains dynamic constructs." + ) + dynamic = dynamic or actually_dynamic + + if dynamic and "qasm3" not in getattr( + self.configuration(), "supported_features", [] + ): + warnings.warn(f"The backend {self.name} does not support dynamic circuits.") + + status = self.status() + if status.operational is True and status.status_msg != "active": + warnings.warn(f"The backend {self.name} is currently paused.") + + program_id = str(run_config.get("program_id", "")) + if not program_id: + if dynamic: + program_id = QASM3RUNNERPROGRAMID + else: + program_id = QOBJRUNNERPROGRAMID + else: + run_config.pop("program_id", None) + + image: Optional[str] = run_config.get("image", None) # type: ignore + if image is not None: + image = str(image) + + if isinstance(init_circuit, bool): + warnings.warn( + "init_circuit does not accept boolean values. " + "A quantum circuit should be passed in instead." 
+ ) + + if isinstance(shots, float): + shots = int(shots) + if not self.configuration().simulator: + circuits = self._deprecate_id_instruction(circuits) + + run_config_dict = self._get_run_config( + program_id=program_id, + init_circuit=init_circuit, + init_num_resets=init_num_resets, + header=header, + shots=shots, + memory=memory, + meas_level=meas_level, + meas_return=meas_return, + rep_delay=rep_delay, + init_qubits=init_qubits, + use_measure_esp=use_measure_esp, + noise_model=noise_model, + seed_simulator=seed_simulator, + **run_config, + ) + print("run_config = ") + print(run_config_dict) + + run_config_dict["circuits"] = circuits + if not program_id.startswith(QASM3RUNNERPROGRAMID): + # Transpiling in circuit-runner is deprecated. + run_config_dict["skip_transpilation"] = True + + return self._runtime_run( + program_id=program_id, + inputs=run_config_dict, + backend_name=self.name, + job_tags=job_tags, + image=image, + ) + + def _runtime_run( + self, + program_id: str, + inputs: Dict, + backend_name: str, + job_tags: Optional[List[str]] = None, + image: Optional[str] = None, + ) -> RuntimeJob: + """Runs the runtime program and returns the corresponding job object""" + hgp_name = self._instance or self._service._get_hgp().name + + session = self._session + + if session: + if not session.active: + raise RuntimeError(f"The session {session.session_id} is closed.") + session_id = session.session_id + max_execution_time = session._max_time + start_session = session_id is None + else: + session_id = None + max_execution_time = None + start_session = False + + log_level = getattr(self.options, "log_level", None) #temporary + try: + response = self._api_client.program_run( + program_id=program_id, + backend_name=backend_name, + params=inputs, + hgp=hgp_name, + log_level = log_level, + job_tags=job_tags, + session_id=session_id, + start_session=start_session, + max_execution_time=max_execution_time, + image=image, + ) + except RequestsApiError as ex: + raise 
IBMBackendApiError("Error submitting job: {}".format(str(ex))) from ex + session_id = response.get("session_id") + if self._session: + self._session._session_id = session_id + try: + job = RuntimeJob( + backend=self, + api_client=self._api_client, + client_params=self._service._client_params, + job_id=response["id"], + program_id=program_id, + session_id=session_id, + service=self.service + ) + logger.debug("Job %s was successfully submitted.", job.job_id()) + except TypeError as err: + logger.debug("Invalid job data received: %s", response) + raise IBMBackendApiProtocolError( + "Unexpected return value received from the server " + "when submitting job: {}".format(str(err)) + ) from err + Publisher().publish("ibm.job.start", job) + return job + + def _get_run_config(self, program_id: str, **kwargs: Any) -> Dict: + """Return the consolidated runtime configuration.""" + # Check if is a QASM3 like program id. + if program_id.startswith(QASM3RUNNERPROGRAMID): + fields = asdict(QASM3Options()).keys() + run_config_dict = QASM3Options().to_transport_dict() + else: + fields = asdict(QASM2Options()).keys() + run_config_dict = QASM2Options().to_transport_dict() + + backend_options = self._options.__dict__ + for key, val in kwargs.items(): + if val is not None: + run_config_dict[key] = val + if key not in fields and not self.configuration().simulator: + warnings.warn( # type: ignore[unreachable] + f"{key} is not a recognized runtime option and may be ignored by the backend.", + stacklevel=4, + ) + elif backend_options.get(key) is not None and key in fields: + run_config_dict[key] = backend_options[key] + return run_config_dict + class IBMRetiredBackend(IBMBackend): """Backend class interfacing with an IBM Quantum device no longer available.""" From c877f1ee54091919b45dc0f692efeff7a47b0aa4 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 9 Oct 2023 14:35:35 +0000 Subject: [PATCH 02/47] Added temporary session support --- qiskit_ibm_runtime/ibm_backend.py | 48 
++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 088ed9b63..68599b8fd 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -14,7 +14,7 @@ import logging -from typing import Iterable, Union, Optional, Any, List, Sequence, Dict +from typing import Iterable, Union, Optional, Any, List, Dict from datetime import datetime as python_datetime from copy import deepcopy from dataclasses import asdict @@ -51,6 +51,10 @@ from qiskit_ibm_provider.exceptions import IBMBackendValueError, IBMBackendApiError from qiskit_ibm_provider.api.exceptions import RequestsApiError +# temporary until we unite the 2 Session classes +from qiskit_ibm_provider.session import ( + Session as ProviderSession, +) # temporary until we unite the 2 Session classes from qiskit_ibm_runtime import ( # pylint: disable=unused-import,cyclic-import qiskit_runtime_service, @@ -69,6 +73,7 @@ QOBJRUNNERPROGRAMID = "circuit-runner" QASM3RUNNERPROGRAMID = "qasm3-runner" + class IBMBackend(Backend): """Backend class interfacing with an IBM Quantum backend. @@ -191,8 +196,7 @@ def __init__( self._defaults = None self._target = None self._max_circuits = configuration.max_experiments - self._session = None # temporarily - self._client_params = None # temporarily + self._session: ProviderSession = None if ( not self._configuration.simulator and hasattr(self.options, "noise_model") @@ -671,10 +675,7 @@ def run( circuits = [circuits] self._check_circuits_attributes(circuits) - if ( - use_measure_esp - and getattr(self.configuration(), "measure_esp_enabled", False) is False - ): + if use_measure_esp and getattr(self.configuration(), "measure_esp_enabled", False) is False: raise IBMBackendValueError( "ESP readout not supported on this device. Please make sure the flag " "'use_measure_esp' is unset or set to 'False'." 
@@ -686,9 +687,7 @@ def run( ) dynamic = dynamic or actually_dynamic - if dynamic and "qasm3" not in getattr( - self.configuration(), "supported_features", [] - ): + if dynamic and "qasm3" not in getattr(self.configuration(), "supported_features", []): warnings.warn(f"The backend {self.name} does not support dynamic circuits.") status = self.status() @@ -735,8 +734,6 @@ def run( seed_simulator=seed_simulator, **run_config, ) - print("run_config = ") - print(run_config_dict) run_config_dict["circuits"] = circuits if not program_id.startswith(QASM3RUNNERPROGRAMID): @@ -767,7 +764,7 @@ def _runtime_run( if session: if not session.active: raise RuntimeError(f"The session {session.session_id} is closed.") - session_id = session.session_id + session_id = session.session_id or None max_execution_time = session._max_time start_session = session_id is None else: @@ -775,14 +772,14 @@ def _runtime_run( max_execution_time = None start_session = False - log_level = getattr(self.options, "log_level", None) #temporary + log_level = getattr(self.options, "log_level", None) # temporary try: response = self._api_client.program_run( program_id=program_id, backend_name=backend_name, params=inputs, hgp=hgp_name, - log_level = log_level, + log_level=log_level, job_tags=job_tags, session_id=session_id, start_session=start_session, @@ -802,7 +799,7 @@ def _runtime_run( job_id=response["id"], program_id=program_id, session_id=session_id, - service=self.service + service=self.service, ) logger.debug("Job %s was successfully submitted.", job.job_id()) except TypeError as err: @@ -823,7 +820,6 @@ def _get_run_config(self, program_id: str, **kwargs: Any) -> Dict: else: fields = asdict(QASM2Options()).keys() run_config_dict = QASM2Options().to_transport_dict() - backend_options = self._options.__dict__ for key, val in kwargs.items(): if val is not None: @@ -837,6 +833,24 @@ def _get_run_config(self, program_id: str, **kwargs: Any) -> Dict: run_config_dict[key] = backend_options[key] return 
run_config_dict + def open_session(self, max_time: Optional[Union[int, str]] = None) -> ProviderSession: + """Open session""" + self._session = ProviderSession(max_time) + return self._session + + @property + def session(self) -> ProviderSession: + """Return session""" + return self._session + + def cancel_session(self) -> None: + """Cancel session. All pending jobs will be cancelled.""" + if self._session: + self._session.cancel() + if self._session.session_id: + self.provider._runtime_client.close_session(self._session.session_id) + self._session = None + class IBMRetiredBackend(IBMBackend): """Backend class interfacing with an IBM Quantum device no longer available.""" From af9f907541b68b09b4dc4effb2fa57b19ccf5f3b Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 10 Oct 2023 09:32:17 +0000 Subject: [PATCH 03/47] Copied test_backend.py from the provider --- qiskit_ibm_runtime/ibm_backend.py | 72 ++++++- test/unit/test_backend.py | 319 ++++++++++++++++++++++++++++++ 2 files changed, 390 insertions(+), 1 deletion(-) create mode 100644 test/unit/test_backend.py diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 68599b8fd..9ee212534 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -12,8 +12,8 @@ """Module for interfacing with an IBM Quantum Backend.""" +import copy import logging - from typing import Iterable, Union, Optional, Any, List, Dict from datetime import datetime as python_datetime from copy import deepcopy @@ -23,6 +23,10 @@ from qiskit import QuantumCircuit from qiskit.qobj.utils import MeasLevel, MeasReturnType from qiskit.tools.events.pubsub import Publisher +from qiskit.transpiler.passmanager import PassManager +from .transpiler.passes.basis.convert_id_to_delay import ( + ConvertIdToDelay, +) from qiskit.providers.backend import BackendV2 as Backend from qiskit.providers.options import Options from qiskit.providers.models import ( @@ -851,6 +855,72 @@ def 
cancel_session(self) -> None: self.provider._runtime_client.close_session(self._session.session_id) self._session = None + def _deprecate_id_instruction( + self, circuits: List[QuantumCircuit] + ) -> List[QuantumCircuit]: + """Raise a DeprecationWarning if any circuit contains an 'id' instruction. + + Additionally, if 'delay' is a 'supported_instruction', replace each 'id' + instruction (in-place) with the equivalent ('sx'-length) 'delay' instruction. + + Args: + circuits: The individual or list of :class:`~qiskit.circuits.QuantumCircuit` + passed to :meth:`IBMBackend.run()`. Modified in-place. + + Returns: + A modified copy of the original circuit where 'id' instructions are replaced with + 'delay' instructions. A copy is used so the original circuit is not modified. + If there are no 'id' instructions or 'delay' is not supported, return the original circuit. + """ + + id_support = "id" in getattr(self.configuration(), "basis_gates", []) + delay_support = "delay" in getattr( + self.configuration(), "supported_instructions", [] + ) + + if not delay_support: + return circuits + + circuit_has_id = any( + instr.name == "id" + for circuit in circuits + if isinstance(circuit, QuantumCircuit) + for instr, qargs, cargs in circuit.data + ) + if not circuit_has_id: + return circuits + if not self.id_warning_issued: + if id_support and delay_support: + warnings.warn( + "Support for the 'id' instruction has been deprecated " + "from IBM hardware backends. Any 'id' instructions " + "will be replaced with their equivalent 'delay' instruction. " + "Please use the 'delay' instruction instead.", + DeprecationWarning, + stacklevel=4, + ) + else: + warnings.warn( + "Support for the 'id' instruction has been removed " + "from IBM hardware backends. Any 'id' instructions " + "will be replaced with their equivalent 'delay' instruction. 
" + "Please use the 'delay' instruction instead.", + DeprecationWarning, + stacklevel=4, + ) + + self.id_warning_issued = True + + # Make sure we don't mutate user's input circuits + circuits = copy.deepcopy(circuits) + # Convert id gates to delays. + pm = PassManager( # pylint: disable=invalid-name + ConvertIdToDelay(self.target.durations()) + ) + circuits = pm.run(circuits) + + return circuits + class IBMRetiredBackend(IBMBackend): """Backend class interfacing with an IBM Quantum device no longer available.""" diff --git a/test/unit/test_backend.py b/test/unit/test_backend.py new file mode 100644 index 000000000..472ff2ded --- /dev/null +++ b/test/unit/test_backend.py @@ -0,0 +1,319 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Tests for the backend functions.""" +import copy +from datetime import datetime +from unittest import mock +import warnings + +from qiskit import transpile, qasm3, QuantumCircuit +from qiskit.providers.fake_provider import FakeManila +from qiskit.providers.models import BackendStatus, BackendProperties + +from qiskit_ibm_provider.exceptions import IBMBackendValueError + +from qiskit_ibm_runtime.ibm_backend import IBMBackend + +from ..ibm_test_case import IBMTestCase +from ..utils import ( + create_faulty_backend, +) + +class TestBackend(IBMTestCase): + """Tests for IBMBackend class.""" + + def test_raise_faulty_qubits(self): + """Test faulty qubits is raised.""" + fake_backend = FakeManila() + num_qubits = fake_backend.configuration().num_qubits + circ = QuantumCircuit(num_qubits, num_qubits) + for i in range(num_qubits): + circ.x(i) + + transpiled = transpile(circ, backend=fake_backend) + faulty_qubit = 4 + ibm_backend = create_faulty_backend( + fake_backend, faulty_qubit=faulty_qubit + ) + + with self.assertRaises(ValueError) as err: + ibm_backend.run(transpiled) + + self.assertIn(f"faulty qubit {faulty_qubit}", str(err.exception)) + + def test_raise_faulty_qubits_many(self): + """Test faulty qubits is raised if one circuit uses it.""" + fake_backend = FakeManila() + num_qubits = fake_backend.configuration().num_qubits + + circ1 = QuantumCircuit(1, 1) + circ1.x(0) + circ2 = QuantumCircuit(num_qubits, num_qubits) + for i in range(num_qubits): + circ2.x(i) + + transpiled = transpile([circ1, circ2], backend=fake_backend) + faulty_qubit = 4 + ibm_backend = create_faulty_backend( + fake_backend, faulty_qubit=faulty_qubit + ) + + with self.assertRaises(ValueError) as err: + ibm_backend.run(transpiled) + + self.assertIn(f"faulty qubit {faulty_qubit}", str(err.exception)) + + def test_raise_faulty_edge(self): + """Test faulty edge is raised.""" + fake_backend = FakeManila() + num_qubits = fake_backend.configuration().num_qubits + circ = QuantumCircuit(num_qubits, 
num_qubits) + for i in range(num_qubits - 2): + circ.cx(i, i + 1) + + transpiled = transpile(circ, backend=fake_backend) + edge_qubits = [0, 1] + ibm_backend = create_faulty_backend( + fake_backend, faulty_edge=("cx", edge_qubits) + ) + + with self.assertRaises(ValueError) as err: + ibm_backend.run(transpiled) + + self.assertIn("cx", str(err.exception)) + self.assertIn(f"faulty edge {tuple(edge_qubits)}", str(err.exception)) + + def test_faulty_qubit_not_used(self): + """Test faulty qubit is not raise if not used.""" + fake_backend = FakeManila() + circ = QuantumCircuit(2, 2) + for i in range(2): + circ.x(i) + + transpiled = transpile(circ, backend=fake_backend, initial_layout=[0, 1]) + faulty_qubit = 4 + ibm_backend = create_faulty_backend( + fake_backend, faulty_qubit=faulty_qubit + ) + print(ibm_backend) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + ibm_backend.run(circuits=transpiled) + + mock_run.assert_called_once() + + def test_faulty_edge_not_used(self): + """Test faulty edge is not raised if not used.""" + + fake_backend = FakeManila() + coupling_map = fake_backend.configuration().coupling_map + + circ = QuantumCircuit(2, 2) + circ.cx(0, 1) + + transpiled = transpile( + circ, backend=fake_backend, initial_layout=coupling_map[0] + ) + edge_qubits = coupling_map[-1] + ibm_backend = create_faulty_backend( + fake_backend, faulty_edge=("cx", edge_qubits) + ) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + ibm_backend.run(circuits=transpiled) + + mock_run.assert_called_once() + + def test_dynamic_circuits_warning(self): + """Test warning when user defines dynamic==False and circuits are dynamic""" + # pylint: disable=not-context-manager + + # backend is not faulty because no faulty parameters given + backend = create_faulty_backend(model_backend=FakeManila()) + + circuits = [] + circ = QuantumCircuit(2, 2) + circ.h(0) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + circuits.append(circ) + + circ = 
QuantumCircuit(3, 2) + with circ.for_loop(range(4)): + circ.h(0) + circuits.append(circ) + + circ = QuantumCircuit(2, 2) + circ.h(0) + circ.measure([0], [0]) + with circ.switch(target=0) as case: + with case(0): + circ.x(0) + with case(case.DEFAULT): + circ.cx(0, 1) + circuits.append(circ) + + for circuit in circuits: + # using warnings to catch multiple warnings + with warnings.catch_warnings(record=True) as warn: + with mock.patch.object(IBMBackend, "_runtime_run"): + backend.run(circuits=circuit, dynamic=False) + self.assertIn( + "Parameter 'dynamic' is False, but the circuit " + "contains dynamic constructs.", + str(warn[0].message), + ) + self.assertIn( + f"The backend {backend.name} does not support dynamic circuits.", + str(warn[1].message), + ) + + def _create_dc_test_backend(self): + """Create a test backend with an IfElseOp enables.""" + model_backend = FakeManila() + properties = model_backend.properties() + + out_backend = IBMBackend( + configuration=model_backend.configuration(), + service=mock.MagicMock(), + api_client=None, + instance=None, + ) + + out_backend.status = lambda: BackendStatus( + backend_name="foo", + backend_version="1.0", + operational=True, + pending_jobs=0, + status_msg="", + ) + out_backend.properties = lambda: properties + + return out_backend + + def test_single_dynamic_circuit_submission(self): + """Test submitting single circuit with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circ, dynamic=True) + + mock_run.assert_called_once() + + def test_multi_dynamic_circuit_submission(self): + """Test submitting multiple circuits with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with 
circ.if_test((0, False)): + circ.x(1) + + circuits = [circ, circ] + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circuits, dynamic=True) + + mock_run.assert_called_once() + + def test_single_openqasm3_submission(self): + """Test submitting a single openqasm3 strings with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + qasm3_circ = qasm3.dumps(circ, disable_constants=True) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=qasm3_circ, dynamic=True) + + mock_run.assert_called_once() + + def test_runtime_image_selection_submission(self): + """Test image selection from runtime""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circ, dynamic=True) + + mock_run.assert_called_once() + + def test_multi_openqasm3_submission(self): + """Test submitting multiple openqasm3 strings with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + image = "test-image" + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circ, dynamic=True, image=image) + + mock_run.assert_called_once() + self.assertEqual(mock_run.call_args.kwargs["image"], image) + + def test_deepcopy(self): + """Test that deepcopy of a backend works properly""" + backend = self._create_dc_test_backend() + backend_copy = copy.deepcopy(backend) + self.assertEqual(backend_copy.name, backend.name) + + def test_too_many_circuits(self): + """Test exception when number of circuits 
exceeds backend._max_circuits""" + model_backend = FakeManila() + backend = IBMBackend( + configuration=model_backend.configuration(), + service=mock.MagicMock(), + api_client=None, + instance=None, + ) + max_circs = backend.configuration().max_experiments + + circs = [] + for _ in range(max_circs + 1): + circ = QuantumCircuit(1) + circ.x(0) + circs.append(circ) + with self.assertRaises(IBMBackendValueError) as err: + backend.run(circs) + self.assertIn( + f"Number of circuits, {max_circs+1} exceeds the maximum for this backend, {max_circs}", + str(err.exception), + ) From 7867c17da65dc8b524ce874e6ce79530dfd0d10f Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 10 Oct 2023 13:10:57 +0000 Subject: [PATCH 04/47] Added all status types from the provider --- qiskit_ibm_runtime/constants.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/qiskit_ibm_runtime/constants.py b/qiskit_ibm_runtime/constants.py index 3a5568cab..ed69c468c 100644 --- a/qiskit_ibm_runtime/constants.py +++ b/qiskit_ibm_runtime/constants.py @@ -23,10 +23,21 @@ QISKIT_IBM_RUNTIME_API_URL = "https://auth.quantum-computing.ibm.com/api" API_TO_JOB_STATUS = { + "CREATING": JobStatus.INITIALIZING, + "CREATED": JobStatus.INITIALIZING, + "TRANSPILING": JobStatus.INITIALIZING, + "TRANSPILED": JobStatus.INITIALIZING, + "VALIDATING": JobStatus.VALIDATING, + "VALIDATED": JobStatus.VALIDATING, "QUEUED": JobStatus.QUEUED, + "PENDING_IN_QUEUE": JobStatus.QUEUED, "RUNNING": JobStatus.RUNNING, "COMPLETED": JobStatus.DONE, "FAILED": JobStatus.ERROR, + "ERROR_CREATING_JOB": JobStatus.ERROR, + "ERROR_VALIDATING_JOB": JobStatus.ERROR, + "ERROR_RUNNING_JOB": JobStatus.ERROR, + "ERROR_TRANSPILING_JOB": JobStatus.ERROR, "CANCELLED": JobStatus.CANCELLED, } From 0c4e0cb9c21e9b336c5808619cf92614567ffa9e Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 10 Oct 2023 13:29:56 +0000 Subject: [PATCH 05/47] Added test_ibm_job_states.py from provider. 
Added 'transpiler' directory to support convert_id_to_delay --- qiskit_ibm_runtime/ibm_backend.py | 12 +- qiskit_ibm_runtime/transpiler/__init__.py | 31 + .../transpiler/passes/__init__.py | 36 + .../transpiler/passes/basis/__init__.py | 23 + .../passes/basis/convert_id_to_delay.py | 87 +++ .../transpiler/passes/scheduling/__init__.py | 397 +++++++++++ .../passes/scheduling/block_base_padder.py | 620 +++++++++++++++++ .../passes/scheduling/dynamical_decoupling.py | 553 +++++++++++++++ .../transpiler/passes/scheduling/pad_delay.py | 78 +++ .../transpiler/passes/scheduling/scheduler.py | 643 ++++++++++++++++++ .../transpiler/passes/scheduling/utils.py | 287 ++++++++ qiskit_ibm_runtime/transpiler/plugin.py | 98 +++ test/jobtestcase.py | 33 + test/unit/test_backend.py | 29 +- test/unit/test_ibm_job_states.py | 597 ++++++++++++++++ 15 files changed, 3494 insertions(+), 30 deletions(-) create mode 100644 qiskit_ibm_runtime/transpiler/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py create mode 100644 qiskit_ibm_runtime/transpiler/plugin.py create mode 100644 test/jobtestcase.py create mode 100644 test/unit/test_ibm_job_states.py diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 9ee212534..d83c47344 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ 
b/qiskit_ibm_runtime/ibm_backend.py @@ -855,9 +855,7 @@ def cancel_session(self) -> None: self.provider._runtime_client.close_session(self._session.session_id) self._session = None - def _deprecate_id_instruction( - self, circuits: List[QuantumCircuit] - ) -> List[QuantumCircuit]: + def _deprecate_id_instruction(self, circuits: List[QuantumCircuit]) -> List[QuantumCircuit]: """Raise a DeprecationWarning if any circuit contains an 'id' instruction. Additionally, if 'delay' is a 'supported_instruction', replace each 'id' @@ -874,9 +872,7 @@ def _deprecate_id_instruction( """ id_support = "id" in getattr(self.configuration(), "basis_gates", []) - delay_support = "delay" in getattr( - self.configuration(), "supported_instructions", [] - ) + delay_support = "delay" in getattr(self.configuration(), "supported_instructions", []) if not delay_support: return circuits @@ -914,9 +910,7 @@ def _deprecate_id_instruction( # Make sure we don't mutate user's input circuits circuits = copy.deepcopy(circuits) # Convert id gates to delays. - pm = PassManager( # pylint: disable=invalid-name - ConvertIdToDelay(self.target.durations()) - ) + pm = PassManager(ConvertIdToDelay(self.target.durations())) # pylint: disable=invalid-name circuits = pm.run(circuits) return circuits diff --git a/qiskit_ibm_runtime/transpiler/__init__.py b/qiskit_ibm_runtime/transpiler/__init__.py new file mode 100644 index 000000000..d6e62daa4 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/__init__.py @@ -0,0 +1,31 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +""" +==================================================================== +IBM Backend Transpiler Tools (:mod:`qiskit_ibm_provider.transpiler`) +==================================================================== + +A collection of transpiler tools for working with IBM Quantum's +next-generation backends that support advanced "dynamic circuit" +capabilities. Ie., circuits with support for classical +compute and control-flow/feedback based off of measurement results. + +Transpiler Passes +================== + +.. autosummary:: + :toctree: ../stubs/ + + passes + +""" diff --git a/qiskit_ibm_runtime/transpiler/passes/__init__.py b/qiskit_ibm_runtime/transpiler/passes/__init__.py new file mode 100644 index 000000000..2fe16514c --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/__init__.py @@ -0,0 +1,36 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +================================================================ +Transpiler Passes (:mod:`qiskit_ibm_provider.transpiler.passes`) +================================================================ + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes + +A collection of transpiler passes for IBM backends. + +.. 
autosummary:: + :toctree: ../stubs/ + + basis + scheduling + + +""" + +from .basis import ConvertIdToDelay + +# circuit scheduling +from .scheduling import ASAPScheduleAnalysis +from .scheduling import PadDynamicalDecoupling +from .scheduling import PadDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py b/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py new file mode 100644 index 000000000..0a71af010 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py @@ -0,0 +1,23 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +========================================================== +Basis (:mod:`qiskit_ibm_provider.transpiler.passes.basis`) +========================================================== + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes.basis + +Passes to layout circuits to IBM backend's instruction sets. +""" + +from .convert_id_to_delay import ConvertIdToDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py b/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py new file mode 100644 index 000000000..3906d9046 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py @@ -0,0 +1,87 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Pass to convert Id gate operations to a delay instruction.""" + +from typing import Dict + +from qiskit.converters import dag_to_circuit, circuit_to_dag + +from qiskit.circuit import ControlFlowOp +from qiskit.circuit import Delay +from qiskit.circuit.library import IGate +from qiskit.dagcircuit import DAGCircuit +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.instruction_durations import InstructionDurations + + +class ConvertIdToDelay(TransformationPass): + """Convert :class:`qiskit.circuit.library.standard_gates.IGate` to + a delay of the corresponding length. + """ + + def __init__(self, durations: InstructionDurations, gate: str = "sx"): + """Convert :class:`qiskit.circuit.library.IGate` to a + Convert :class:`qiskit.circuit.Delay`. + + Args: + duration: Duration of the delay to replace the identity gate with. + gate: Single qubit gate to extract duration from. + """ + self.durations = durations + self.gate = gate + self._cached_durations: Dict[int, int] = {} + + super().__init__() + + def run(self, dag: DAGCircuit) -> DAGCircuit: + self._run_inner(dag) + return dag + + def _run_inner(self, dag: DAGCircuit) -> bool: + """Run the pass on one :class:`.DAGCircuit`, mutating it. 
Returns ``True`` if the circuit + was modified and ``False`` if not.""" + modified = False + qubit_index_map = {bit: index for index, bit in enumerate(dag.qubits)} + for node in dag.op_nodes(): + if isinstance(node.op, ControlFlowOp): + modified_blocks = False + new_dags = [] + for block in node.op.blocks: + new_dag = circuit_to_dag(block) + modified_blocks |= self._run_inner(new_dag) + new_dags.append(new_dag) + if not modified_blocks: + continue + dag.substitute_node( + node, + node.op.replace_blocks(dag_to_circuit(block) for block in new_dags), + inplace=True, + ) + elif isinstance(node.op, IGate): + delay_op = Delay(self._get_duration(qubit_index_map[node.qargs[0]])) + dag.substitute_node(node, delay_op, inplace=True) + + modified = True + + return modified + + def _get_duration(self, qubit: int) -> int: + """Get the duration of a gate in dt.""" + duration = self._cached_durations.get(qubit, None) + if duration: + return duration + + duration = self.durations.get(self.gate, qubit) + self._cached_durations[qubit] = duration + + return duration diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py new file mode 100644 index 000000000..c3017e9bc --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py @@ -0,0 +1,397 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +""" +==================================================================== +Scheduling (:mod:`qiskit_ibm_provider.transpiler.passes.scheduling`) +==================================================================== + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes.scheduling + +A collection of scheduling passes for working with IBM Quantum's next-generation +backends that support advanced "dynamic circuit" capabilities. Ie., +circuits with support for classical control-flow/feedback based off +of measurement results. + +.. warning:: + You should not mix these scheduling passes with Qiskit's builtin scheduling + passes as they will negatively interact with the scheduling routines for + dynamic circuits. This includes setting ``scheduling_method`` in + :func:`~qiskit.compiler.transpile` or + :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. + +Below we demonstrate how to schedule and pad a teleportation circuit with delays +for a dynamic circuit backend's execution model: + +.. jupyter-execute:: + + from qiskit.circuit import ClassicalRegister, QuantumCircuit, QuantumRegister + from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager + from qiskit.transpiler.passmanager import PassManager + + from qiskit_ibm_provider.transpiler.passes.scheduling import DynamicCircuitInstructionDurations + from qiskit_ibm_provider.transpiler.passes.scheduling import ALAPScheduleAnalysis + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDelay + from qiskit.providers.fake_provider import FakeJakarta + + + backend = FakeJakarta() + + # Temporary workaround for mock backends. For real backends this is not required. + backend.configuration().basis_gates.append("if_else") + + + # Use this duration class to get appropriate durations for dynamic + # circuit backend scheduling + durations = DynamicCircuitInstructionDurations.from_backend(backend) + # Generate the main Qiskit transpile passes. 
+ pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + # Configure the as-late-as-possible scheduling pass + pm.scheduling = PassManager([ALAPScheduleAnalysis(durations), PadDelay()]) + + qr = QuantumRegister(3) + crz = ClassicalRegister(1, name="crz") + crx = ClassicalRegister(1, name="crx") + result = ClassicalRegister(1, name="result") + + teleport = QuantumCircuit(qr, crz, crx, result, name="Teleport") + + teleport.h(qr[1]) + teleport.cx(qr[1], qr[2]) + teleport.cx(qr[0], qr[1]) + teleport.h(qr[0]) + teleport.measure(qr[0], crz) + teleport.measure(qr[1], crx) + with teleport.if_test((crz, 1)): + teleport.z(qr[2]) + with teleport.if_test((crx, 1)): + teleport.x(qr[2]) + teleport.measure(qr[2], result) + + # Transpile. + scheduled_teleport = pm.run(teleport) + + scheduled_teleport.draw(output="mpl") + + +Instead of padding with delays we may also insert a dynamical decoupling sequence +using the :class:`PadDynamicalDecoupling` pass as shown below: + +.. jupyter-execute:: + + from qiskit.circuit.library import XGate + + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDynamicalDecoupling + + + dd_sequence = [XGate(), XGate()] + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + dd_teleport = pm.run(teleport) + + dd_teleport.draw(output="mpl") + +When compiling a circuit with Qiskit, it is more efficient and more robust to perform all the +transformations in a single transpilation. This has been done above by extending Qiskit's preset +pass managers. For example, Qiskit's :func:`~qiskit.compiler.transpile` function internally builds +its pass set by using :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. +This returns instances of :class:`~qiskit.transpiler.StagedPassManager`, which can be extended. 
+ + +Scheduling old format ``c_if`` conditioned gates +------------------------------------------------ + +Scheduling with old format ``c_if`` conditioned gates is not supported. + +.. jupyter-execute:: + + qc_c_if = QuantumCircuit(1, 1) + qc_c_if.x(0).c_if(0, 1) + qc_c_if.draw(output="mpl") + +The :class:`.IBMBackend` configures a translation plugin +:class:`.IBMTranslationPlugin` to automatically +apply transformations and optimizations for IBM hardware backends when invoking +:func:`~qiskit.compiler.transpile`. This will automatically convert all old style ``c_if`` +conditioned gates to new-style control-flow. +We may then schedule the transpiled circuit without further modification. + +.. jupyter-execute:: + + # Temporary workaround for mock backends. For real backends this is not required. + backend.get_translation_stage_plugin = lambda: "ibm_dynamic_circuits" + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + qc_if_dd = pm.run(qc_c_if, backend) + qc_if_dd.draw(output="mpl") + + +If you are not using the transpiler plugin stages to +work around this please manually run the pass +:class:`qiskit.transpiler.passes.ConvertConditionsToIfOps` +prior to your scheduling pass. + +.. 
jupyter-execute:: + + from qiskit.transpiler.passes import ConvertConditionsToIfOps + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ConvertConditionsToIfOps(), + ALAPScheduleAnalysis(durations), + PadDelay(), + ] + ) + + qc_if_dd = pm.run(qc_c_if) + qc_if_dd.draw(output="mpl") + + +Exploiting IBM backend's local parallel "fast-path" +--------------------------------------------------- + +IBM quantum hardware supports a localized "fast-path" which enables a block of gates +applied to a *single qubit* that are conditional on an immediately predecessor measurement +*of the same qubit* to be completed with lower latency. The hardware is also +able to do this in *parallel* on disjoint qubits that satisfy this condition. + +For example, the conditional gates below are performed in parallel with lower latency +as the measurements flow directly into the conditional blocks which in turn only apply +gates to the same measurement qubit. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + qc.x(0) + with qc.if_test((1, 1)): + qc.x(1) + + qc.draw(output="mpl") + + +The circuit below will not use the fast-path as the conditional gate is +on a different qubit than the measurement qubit. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(1) + + qc.draw(output="mpl") + +Similarly, the circuit below contains gates on multiple qubits +and will not be performed using the fast-path. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(0) + qc.x(1) + + qc.draw(output="mpl") + +A fast-path block may contain multiple gates as long as they are on the fast-path qubit. 
+If there are multiple fast-path blocks being performed in parallel each block will be +padded out to the duration of the longest block. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + qc.x(0) + # Will be padded out to a duration of 1600 on the backend. + with qc.if_test((1, 1)): + qc.delay(1600, 1) + + qc.draw(output="mpl") + +This behavior is also applied to the else condition of a fast-path eligible branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 1) + qc.measure(0, 0) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)) as else_: + qc.x(0) + # Will be padded out to a duration of 1600 on the backend. + with else_: + qc.delay(1600, 0) + + qc.draw(output="mpl") + + +If a single measurement result is used with several conditional blocks, if there is a fast-path +eligible block it will be applied followed by the non-fast-path blocks which will execute with +the standard higher latency conditional branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + # Uses fast-path + qc.x(0) + with qc.if_test((0, 1)): + # Does not use fast-path + qc.x(1) + + qc.draw(output="mpl") + +If you wish to prevent the usage of the fast-path you may insert a barrier between the measurement and +the conditional branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 2) + qc.measure(0, 0) + # Barrier prevents the fast-path. + qc.barrier() + with qc.if_test((0, 1)): + qc.x(0) + + qc.draw(output="mpl") + +Conditional measurements are not eligible for the fast-path. + +.. 
jupyter-execute:: + + qc = QuantumCircuit(1, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + # Does not use the fast-path + qc.measure(0, 1) + + qc.draw(output="mpl") + +Similarly nested control-flow is not eligible. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 1) + qc.measure(0, 0) + with qc.if_test((0, 1)): + # Does not use the fast-path + qc.x(0) + with qc.if_test((0, 1)): + qc.x(0) + + qc.draw(output="mpl") + + +The scheduler is aware of the fast-path behavior and will not insert delays on idle qubits +in blocks that satisfy the fast-path conditions so as to avoid preventing the backend +compiler from performing the necessary optimizations to utilize the fast-path. If +there are fast-path blocks that will be performed in parallel they currently *will not* +be padded out by the scheduler to ensure they are of the same duration in Qiskit + +.. jupyter-execute:: + + dd_sequence = [XGate(), XGate()] + + pm = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + with qc.if_test((0, 1)): + qc.x(0) + # Is currently not padded to ensure + # a duration of 1000. If you desire + # this you would need to manually add + # qc.delay(840, 0) + with qc.if_test((1, 1)): + qc.delay(1000, 0) + + + qc.draw(output="mpl") + + qc_dd = pm.run(qc) + + qc_dd.draw(output="mpl") + +.. note:: + If there are qubits that are *not* involved in a fast-path decision it is not + currently possible to use them in a fast-path branch in parallel with the fast-path + qubits resulting from a measurement. This will be revised in the future as we + further improve these capabilities. + + For example: + + .. 
jupyter-execute:: + + qc = QuantumCircuit(3, 2) + qc.x(1) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(0) + # Qubit 1 sits idle throughout the fast-path decision + with qc.if_test((1, 0)): + # Qubit 2 is idle but there is no measurement + # to make it fast-path eligible. This will + # however avoid a communication event in the hardware + # since the condition is compile time evaluated. + qc.x(2) + + qc.draw(output="mpl") + + +Scheduling & Dynamical Decoupling +================================= +.. autosummary:: + :toctree: ../stubs/ + + BlockBasePadder + ALAPScheduleAnalysis + ASAPScheduleAnalysis + DynamicCircuitInstructionDurations + PadDelay + PadDynamicalDecoupling +""" + +from .block_base_padder import BlockBasePadder +from .dynamical_decoupling import PadDynamicalDecoupling +from .pad_delay import PadDelay +from .scheduler import ALAPScheduleAnalysis, ASAPScheduleAnalysis +from .utils import DynamicCircuitInstructionDurations diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py new file mode 100644 index 000000000..1232750a5 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py @@ -0,0 +1,620 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Padding pass to fill timeslots for IBM (dynamic circuit) backends.""" + +from typing import Dict, Iterable, List, Optional, Union, Set + +from qiskit.circuit import ( + Qubit, + Clbit, + ControlFlowOp, + Gate, + IfElseOp, + Instruction, + Measure, +) +from qiskit.circuit.bit import Bit +from qiskit.circuit.library import Barrier +from qiskit.circuit.delay import Delay +from qiskit.circuit.parameterexpression import ParameterExpression +from qiskit.converters import dag_to_circuit +from qiskit.dagcircuit import DAGCircuit, DAGNode +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.exceptions import TranspilerError + +from .utils import block_order_op_nodes + + +class BlockBasePadder(TransformationPass): + """The base class of padding pass. + + This pass requires one of scheduling passes to be executed before itself. + Since there are multiple scheduling strategies, the selection of scheduling + pass is left in the hands of the pass manager designer. + Once a scheduling analysis pass is run, ``node_start_time`` is generated + in the :attr:`property_set`. This information is represented by a python dictionary of + the expected instruction execution times keyed on the node instances. + The padding pass expects all ``DAGOpNode`` in the circuit to be scheduled. + + This base class doesn't define any sequence to interleave, but it manages + the location where the sequence is inserted, and provides a set of information necessary + to construct the proper sequence. Thus, a subclass of this pass just needs to implement + :meth:`_pad` method, in which the subclass constructs a circuit block to insert. + This mechanism removes lots of boilerplate logic to manage whole DAG circuits. + + Note that padding pass subclasses should define interleaving sequences satisfying: + + - Interleaved sequence does not change start time of other nodes + - Interleaved sequence should have total duration of the provided ``time_interval``. 
+ + Any manipulation violating these constraints may prevent this base pass from correctly + tracking the start time of each instruction, + which may result in violation of hardware alignment constraints. + """ + + def __init__(self, schedule_idle_qubits: bool = False) -> None: + self._node_start_time = None + self._node_block_dags = None + self._idle_after: Optional[Dict[Qubit, int]] = None + self._root_dag = None + self._dag = None + self._block_dag = None + self._prev_node: Optional[DAGNode] = None + self._wire_map: Optional[Dict[Bit, Bit]] = None + self._block_duration = 0 + self._current_block_idx = 0 + self._conditional_block = False + self._bit_indices: Optional[Dict[Qubit, int]] = None + # Nodes that the scheduling of this node is tied to. + + self._last_node_to_touch: Optional[Dict[Qubit, DAGNode]] = None + # Last node to touch a bit + + self._fast_path_nodes: Set[DAGNode] = set() + + self._dirty_qubits: Set[Qubit] = set() + # Qubits that are dirty in the circuit. + self._schedule_idle_qubits = schedule_idle_qubits + self._idle_qubits: Set[Qubit] = set() + super().__init__() + + def run(self, dag: DAGCircuit) -> DAGCircuit: + """Run the padding pass on ``dag``. + + Args: + dag: DAG to be checked. + + Returns: + DAGCircuit: DAG with idle time filled with instructions. + + Raises: + TranspilerError: When a particular node is not scheduled, likely some transform pass + is inserted before this node is called. 
+ """ + if not self._schedule_idle_qubits: + self._idle_qubits = set(wire for wire in dag.idle_wires() if isinstance(wire, Qubit)) + self._pre_runhook(dag) + + self._init_run(dag) + + # Trivial wire map at the top-level + wire_map = {wire: wire for wire in dag.wires} + # Top-level dag is the entry block + new_dag = self._visit_block(dag, wire_map) + + return new_dag + + def _init_run(self, dag: DAGCircuit) -> None: + """Setup for initial run.""" + self._node_start_time = self.property_set["node_start_time"].copy() + self._node_block_dags = self.property_set["node_block_dags"] + self._idle_after = {bit: 0 for bit in dag.qubits} + self._current_block_idx = 0 + self._conditional_block = False + self._block_duration = 0 + + # Prepare DAG to pad + self._root_dag = dag + self._dag = self._empty_dag_like(dag) + self._block_dag = self._dag + self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} + self._last_node_to_touch = {} + self._fast_path_nodes = set() + self._dirty_qubits = set() + + self.property_set["node_start_time"].clear() + self._prev_node = None + self._wire_map = {} + + def _empty_dag_like( + self, + dag: DAGCircuit, + pad_wires: bool = True, + wire_map: Optional[Dict[Qubit, Qubit]] = None, + ignore_idle: bool = False, + ) -> DAGCircuit: + """Create an empty dag like the input dag.""" + new_dag = DAGCircuit() + + # Ensure *all* registers are included from the input circuit + # so that they are scheduled in sub-blocks + + # The top-level QuantumCircuit has the full registers available + # Control flow blocks do not get the full register added to the + # block but just the bits. When testing for equivalency the register + # information is taken into account. To work around this we try to + # while enabling generic handling of QuantumCircuits we + # add the register if available and otherwise add the bits directly. 
+ # We need this work around as otherwise the padded circuit will + # not be equivalent to one written manually as bits will not + # be defined on registers like in the test case. + + source_wire_dag = self._root_dag if pad_wires else dag + + # trivial wire map if not provided, or if the top-level dag is used + if not wire_map or pad_wires: + wire_map = {wire: wire for wire in source_wire_dag.wires} + if dag.qregs and self._schedule_idle_qubits or not ignore_idle: + for qreg in source_wire_dag.qregs.values(): + new_dag.add_qreg(qreg) + else: + new_dag.add_qubits( + [ + wire_map[qubit] + for qubit in source_wire_dag.qubits + if qubit not in self._idle_qubits or not ignore_idle + ] + ) + + # Don't add root cargs as these will not be padded. + # Just focus on current block dag. + if dag.cregs: + for creg in dag.cregs.values(): + new_dag.add_creg(creg) + else: + new_dag.add_clbits(dag.clbits) + + new_dag.name = dag.name + new_dag.metadata = dag.metadata + new_dag.unit = self.property_set["time_unit"] or "dt" + if new_dag.unit != "dt": + raise TranspilerError( + 'All blocks must have time units of "dt". ' + "Please run TimeUnitConversion pass prior to padding." + ) + + new_dag.calibrations = dag.calibrations + new_dag.global_phase = dag.global_phase + return new_dag + + def _pre_runhook(self, dag: DAGCircuit) -> None: + """Extra routine inserted before running the padding pass. + + Args: + dag: DAG circuit on which the sequence is applied. + + Raises: + TranspilerError: If the whole circuit or instruction is not scheduled. + """ + if "node_start_time" not in self.property_set: + raise TranspilerError( + f"The input circuit {dag.name} is not scheduled. Call one of scheduling passes " + f"before running the {self.__class__.__name__} pass." + ) + + def _pad( + self, + block_idx: int, + qubit: Qubit, + t_start: int, + t_end: int, + next_node: DAGNode, + prev_node: DAGNode, + ) -> None: + """Interleave instruction sequence in between two nodes. + + .. 
note:: + If a DAGOpNode is added here, it should update node_start_time property + in the property set so that the added node is also scheduled. + This is achieved by adding operation via :meth:`_apply_scheduled_op`. + + .. note:: + + This method doesn't check if the total duration of new DAGOpNode added here + is identical to the interval (``t_end - t_start``). + A developer of the pass must guarantee this is satisfied. + If the duration is greater than the interval, your circuit may be + compiled down to the target code with extra duration on the backend compiler, + which is then played normally without error. However, the outcome of your circuit + might be unexpected due to erroneous scheduling. + + Args: + block_idx: Execution block index for this node. + qubit: The wire that the sequence is applied on. + t_start: Absolute start time of this interval. + t_end: Absolute end time of this interval. + next_node: Node that follows the sequence. + prev_node: Node ahead of the sequence. + """ + raise NotImplementedError + + def _get_node_duration(self, node: DAGNode) -> int: + """Get the duration of a node.""" + if node.op.condition_bits or isinstance(node.op, ControlFlowOp): + # As we cannot currently schedule through conditionals model + # as zero duration to avoid padding. + return 0 + + indices = [self._bit_indices[qarg] for qarg in self._map_wires(node.qargs)] + + if self._block_dag.has_calibration_for(node): + # If node has calibration, this value should be the highest priority + cal_key = tuple(indices), tuple(float(p) for p in node.op.params) + duration = self._block_dag.calibrations[node.op.name][cal_key].duration + else: + duration = node.op.duration + + if isinstance(duration, ParameterExpression): + raise TranspilerError( + f"Parameterized duration ({duration}) " + f"of {node.op.name} on qubits {indices} is not bounded." 
+ ) + if duration is None: + raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") + + return duration + + def _needs_block_terminating_barrier(self, prev_node: DAGNode, curr_node: DAGNode) -> bool: + # Only barrier if not in fast-path nodes + is_fast_path_node = curr_node in self._fast_path_nodes + + def _is_terminating_barrier(node: DAGNode) -> bool: + return ( + isinstance(node.op, (Barrier, ControlFlowOp)) + and len(node.qargs) == self._block_dag.num_qubits() + ) + + return not ( + prev_node is None + or (isinstance(prev_node.op, ControlFlowOp) and isinstance(curr_node.op, ControlFlowOp)) + or _is_terminating_barrier(prev_node) + or _is_terminating_barrier(curr_node) + or is_fast_path_node + ) + + def _add_block_terminating_barrier( + self, block_idx: int, time: int, current_node: DAGNode, force: bool = False + ) -> None: + """Add a block terminating barrier to prevent topological ordering slide by. + + TODO: Fix by ensuring control-flow is a block terminator in the core circuit IR. 
+ """ + # Only add a barrier to the end if a viable barrier is not already present on all qubits + # Only barrier if not in fast-path nodes + needs_terminating_barrier = True + if not force: + needs_terminating_barrier = self._needs_block_terminating_barrier( + self._prev_node, current_node + ) + + if needs_terminating_barrier: + # Terminate with a barrier to ensure topological ordering does not slide past + if self._schedule_idle_qubits: + barrier = Barrier(self._block_dag.num_qubits()) + qubits = self._block_dag.qubits + else: + barrier = Barrier(self._block_dag.num_qubits() - len(self._idle_qubits)) + qubits = [x for x in self._block_dag.qubits if x not in self._idle_qubits] + + barrier_node = self._apply_scheduled_op( + block_idx, + time, + barrier, + qubits, + [], + ) + barrier_node.op.duration = 0 + + def _visit_block( + self, + block: DAGCircuit, + wire_map: Dict[Qubit, Qubit], + pad_wires: bool = True, + ignore_idle: bool = False, + ) -> DAGCircuit: + # Push the previous block dag onto the stack + prev_node = self._prev_node + self._prev_node = None + prev_wire_map, self._wire_map = self._wire_map, wire_map + + prev_block_dag = self._block_dag + self._block_dag = new_block_dag = self._empty_dag_like( + block, pad_wires, wire_map=wire_map, ignore_idle=ignore_idle + ) + + self._block_duration = 0 + self._conditional_block = False + + for node in block_order_op_nodes(block): + self._visit_node(node) + + # Terminate the block to pad it after scheduling. 
+ prev_block_duration = self._block_duration
+ prev_block_idx = self._current_block_idx
+ self._terminate_block(self._block_duration, self._current_block_idx)
+
+ # Edge-case: Add a barrier if the final node is a fast-path
+ if self._prev_node in self._fast_path_nodes:
+ self._add_block_terminating_barrier(
+ prev_block_duration, prev_block_idx, self._prev_node, force=True
+ )
+
+ # Pop the previous block dag off the stack restoring it
+ self._block_dag = prev_block_dag
+ self._prev_node = prev_node
+ self._wire_map = prev_wire_map
+
+ return new_block_dag
+
+ def _visit_node(self, node: DAGNode) -> None:
+ if isinstance(node.op, ControlFlowOp):
+ if isinstance(node.op, IfElseOp):
+ self._visit_if_else_op(node)
+ else:
+ self._visit_control_flow_op(node)
+ elif node in self._node_start_time:
+ if isinstance(node.op, Delay):
+ self._visit_delay(node)
+ else:
+ self._visit_generic(node)
+ else:
+ raise TranspilerError(
+ f"Operation {repr(node)} is likely added after the circuit is scheduled. "
+ "Schedule the circuit again if you transformed it."
+ )
+ self._prev_node = node
+
+ def _visit_if_else_op(self, node: DAGNode) -> None:
+ """Check if the node is fast-path eligible; otherwise fall back
+ to standard ControlFlowOp handling."""
+
+ if self._will_use_fast_path(node):
+ self._fast_path_nodes.add(node)
+ self._visit_control_flow_op(node)
+
+ def _will_use_fast_path(self, node: DAGNode) -> bool:
+ """Check if this conditional operation will be scheduled on the fastpath.
+ This will happen if
+ 1. This operation is a direct descendant of a current measurement block to be flushed
+ 2. The operation only operates on the qubit that is measured.
+ """
+ # Verify IfElseOp has a direct measurement predecessor
+ condition_bits = node.op.condition_bits
+ # Fast-path valid only with a single bit. 
+ if not condition_bits or len(condition_bits) > 1: + return False + + bit = condition_bits[0] + last_node, last_node_dag = self._last_node_to_touch.get(bit, (None, None)) + + last_node_in_block = last_node_dag is self._block_dag + + if not ( + last_node_in_block + and isinstance(last_node.op, Measure) + and set(self._map_wires(node.qargs)) == set(self._map_wires(last_node.qargs)) + ): + return False + + # Fast path contents are limited to gates and delays + for block in node.op.blocks: + if not all(isinstance(inst.operation, (Gate, Delay)) for inst in block.data): + return False + return True + + def _visit_control_flow_op(self, node: DAGNode) -> None: + """Visit a control-flow node to pad.""" + + # Control-flow terminator ends scheduling of block currently + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + self._terminate_block(t0, block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + # Only pad non-fast path nodes + fast_path_node = node in self._fast_path_nodes + + # TODO: This is a hack required to tie nodes of control-flow + # blocks across the scheduler and block_base_padder. This is + # because the current control flow nodes store the block as a + # circuit which is not hashable. For processing we are currently + # required to convert each circuit block to a dag which is inefficient + # and causes node relationships stored in analysis to be lost between + # passes as we are constantly recreating the block dags. + # We resolve this here by extracting the cached dag blocks that were + # stored by the scheduling pass. 
+ new_node_block_dags = []
+ for block_idx, _ in enumerate(node.op.blocks):
+ block_dag = self._node_block_dags[node][block_idx]
+ inner_wire_map = {
+ inner: outer
+ for outer, inner in zip(
+ self._map_wires(node.qargs + node.cargs),
+ block_dag.qubits + block_dag.clbits,
+ )
+ }
+ new_node_block_dags.append(
+ self._visit_block(
+ block_dag,
+ pad_wires=not fast_path_node,
+ wire_map=inner_wire_map,
+ ignore_idle=True,
+ )
+ )
+
+ # Build new control-flow operation containing scheduled blocks
+ # and apply to the DAG.
+ new_control_flow_op = node.op.replace_blocks(
+ dag_to_circuit(block) for block in new_node_block_dags
+ )
+ # Enforce that this control-flow operation contains all wires since it has now been padded
+ # such that each qubit is scheduled within each block. Don't add all cargs as these will not
+ # be padded.
+ if fast_path_node:
+ padded_qubits = node.qargs
+ elif not self._schedule_idle_qubits:
+ padded_qubits = [q for q in self._block_dag.qubits if q not in self._idle_qubits]
+ else:
+ padded_qubits = self._block_dag.qubits
+ self._apply_scheduled_op(
+ block_idx,
+ t0,
+ new_control_flow_op,
+ padded_qubits,
+ self._map_wires(node.cargs),
+ )
+
+ def _visit_delay(self, node: DAGNode) -> None:
+ """The padding class considers a delay instruction as idle time
+ rather than instruction. Delay node is not added so that
+ we can extract non-delay predecessors. 
+ """ + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + # Trigger the end of a block + if block_idx > self._current_block_idx: + self._terminate_block(self._block_duration, self._current_block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + self._conditional_block = bool(node.op.condition_bits) + + self._current_block_idx = block_idx + + t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name + self._block_duration = max(self._block_duration, t1) + + def _visit_generic(self, node: DAGNode) -> None: + """Visit a generic node to pad.""" + # Note: t0 is the relative time with respect to the current block specified + # by block_idx. + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + + # Trigger the end of a block + if block_idx > self._current_block_idx: + self._terminate_block(self._block_duration, self._current_block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + # This block will not be padded as it is conditional. + # See TODO below. + self._conditional_block = bool(node.op.condition_bits) + + # Now set the current block index. + self._current_block_idx = block_idx + + t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name + self._block_duration = max(self._block_duration, t1) + + for bit in self._map_wires(node.qargs): + if bit in self._idle_qubits: + continue + # Fill idle time with some sequence + if t0 - self._idle_after.get(bit, 0) > 0: + # Find previous node on the wire, i.e. 
always the latest node on the wire
+ prev_node = next(self._block_dag.predecessors(self._block_dag.output_map[bit]))
+ self._pad(
+ block_idx=block_idx,
+ qubit=bit,
+ t_start=self._idle_after[bit],
+ t_end=t0,
+ next_node=node,
+ prev_node=prev_node,
+ )
+
+ self._idle_after[bit] = t1
+
+ if not isinstance(node.op, (Barrier, Delay)):
+ self._dirty_qubits |= set(self._map_wires(node.qargs))
+
+ new_node = self._apply_scheduled_op(
+ block_idx,
+ t0,
+ node.op,
+ self._map_wires(node.qargs),
+ self._map_wires(node.cargs),
+ )
+ self._last_node_to_touch.update(
+ {bit: (new_node, self._block_dag) for bit in new_node.qargs + new_node.cargs}
+ )
+
+ def _terminate_block(self, block_duration: int, block_idx: int) -> None:
+ """Terminate the end of a block scheduling region."""
+ # Update all other qubits as not idle so that delays are *not*
+ # inserted. This is because we need the delays to be inserted in
+ # the conditional circuit block.
+ self._block_duration = 0
+ self._pad_until_block_end(block_duration, block_idx)
+ self._idle_after = {bit: 0 for bit in self._block_dag.qubits}
+
+ def _pad_until_block_end(self, block_duration: int, block_idx: int) -> None:
+ # Add delays until the end of circuit.
+ for bit in self._block_dag.qubits:
+ if bit in self._idle_qubits:
+ continue
+ idle_after = self._idle_after.get(bit, 0)
+ if block_duration - idle_after > 0:
+ node = self._block_dag.output_map[bit]
+ prev_node = next(self._block_dag.predecessors(node))
+ self._pad(
+ block_idx=block_idx,
+ qubit=bit,
+ t_start=idle_after,
+ t_end=block_duration,
+ next_node=node,
+ prev_node=prev_node,
+ )
+
+ def _apply_scheduled_op(
+ self,
+ block_idx: int,
+ t_start: int,
+ oper: Instruction,
+ qubits: Union[Qubit, Iterable[Qubit]],
+ clbits: Union[Clbit, Iterable[Clbit]] = (),
+ ) -> DAGNode:
+ """Add new operation to DAG with scheduled information.
+
+ This is identical to apply_operation_back + updating the node_start_time property. 
+ + Args: + block_idx: Execution block index for this node. + t_start: Start time of new node. + oper: New operation that is added to the DAG circuit. + qubits: The list of qubits that the operation acts on. + clbits: The list of clbits that the operation acts on. + + Returns: + The DAGNode applied to. + """ + if isinstance(qubits, Qubit): + qubits = [qubits] + if isinstance(clbits, Clbit): + clbits = [clbits] + + new_node = self._block_dag.apply_operation_back(oper, qubits, clbits) + self.property_set["node_start_time"][new_node] = (block_idx, t_start) + return new_node + + def _map_wires(self, wires: Iterable[Bit]) -> List[Bit]: + """Map the wires from the current block to the top-level block's wires. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + return [self._wire_map[w] for w in wires] diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py new file mode 100644 index 000000000..006c53feb --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py @@ -0,0 +1,553 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Dynamical decoupling insertion pass for IBM (dynamic circuit) backends.""" + +import warnings +from typing import Dict, List, Optional, Union + +import numpy as np +import rustworkx as rx +from qiskit.circuit import Qubit, Gate +from qiskit.circuit.delay import Delay +from qiskit.circuit.library.standard_gates import IGate, UGate, U3Gate +from qiskit.circuit.reset import Reset +from qiskit.dagcircuit import DAGCircuit, DAGNode, DAGInNode, DAGOpNode +from qiskit.quantum_info.operators.predicates import matrix_equal +from qiskit.quantum_info.synthesis import OneQubitEulerDecomposer +from qiskit.transpiler.exceptions import TranspilerError +from qiskit.transpiler.instruction_durations import InstructionDurations +from qiskit.transpiler.passes.optimization import Optimize1qGates +from qiskit.transpiler import CouplingMap + +from .block_base_padder import BlockBasePadder + + +class PadDynamicalDecoupling(BlockBasePadder): + """Dynamical decoupling insertion pass for IBM dynamic circuit backends. + + This pass works on a scheduled, physical circuit. It scans the circuit for + idle periods of time (i.e. those containing delay instructions) and inserts + a DD sequence of gates in those spots. These gates amount to the identity, + so do not alter the logical action of the circuit, but have the effect of + mitigating decoherence in those idle periods. + As a special case, the pass allows a length-1 sequence (e.g. [XGate()]). + In this case the DD insertion happens only when the gate inverse can be + absorbed into a neighboring gate in the circuit (so we would still be + replacing Delay with something that is equivalent to the identity). + This can be used, for instance, as a Hahn echo. + This pass ensures that the inserted sequence preserves the circuit exactly + (including global phase). + + .. 
jupyter-execute::

+ import numpy as np
+ from qiskit.circuit import QuantumCircuit
+ from qiskit.circuit.library import XGate
+ from qiskit.transpiler import PassManager, InstructionDurations
+ from qiskit.visualization import timeline_drawer
+
+ from qiskit_ibm_runtime.transpiler.passes.scheduling import ALAPScheduleAnalysis
+ from qiskit_ibm_runtime.transpiler.passes.scheduling import PadDynamicalDecoupling
+
+ circ = QuantumCircuit(4)
+ circ.h(0)
+ circ.cx(0, 1)
+ circ.cx(1, 2)
+ circ.cx(2, 3)
+ circ.measure_all()
+ durations = InstructionDurations(
+ [("h", 0, 50), ("cx", [0, 1], 700), ("reset", None, 10),
+ ("cx", [1, 2], 200), ("cx", [2, 3], 300),
+ ("x", None, 50), ("measure", None, 1000)]
+ )
+
+ .. jupyter-execute::
+
+ # balanced X-X sequence on all qubits
+ dd_sequence = [XGate(), XGate()]
+ pm = PassManager([ALAPScheduleAnalysis(durations),
+ PadDynamicalDecoupling(durations, dd_sequence)])
+ circ_dd = pm.run(circ)
+ circ_dd.draw()
+
+ .. jupyter-execute::
+
+ # Uhrig sequence on qubit 0
+ n = 8
+ dd_sequence = [XGate()] * n
+ def uhrig_pulse_location(k):
+ return np.sin(np.pi * (k + 1) / (2 * n + 2)) ** 2
+ spacings = []
+ for k in range(n):
+ spacings.append(uhrig_pulse_location(k) - sum(spacings))
+ spacings.append(1 - sum(spacings))
+ pm = PassManager(
+ [
+ ALAPScheduleAnalysis(durations),
+ PadDynamicalDecoupling(durations, dd_sequence, qubits=[0], spacings=spacings),
+ ]
+ )
+ circ_dd = pm.run(circ)
+ circ_dd.draw()
+
+ .. note::
+
+ You need to call
+ :class:`~qiskit_ibm_runtime.transpiler.passes.scheduling.ALAPScheduleAnalysis`
+ before running dynamical decoupling to guarantee your circuit satisfies acquisition
+ alignment constraints for dynamic circuit backends. 
+ """ + + def __init__( + self, + durations: InstructionDurations, + dd_sequences: Union[List[Gate], List[List[Gate]]], + qubits: Optional[List[int]] = None, + spacings: Optional[Union[List[List[float]], List[float]]] = None, + skip_reset_qubits: bool = True, + pulse_alignment: int = 16, + extra_slack_distribution: str = "middle", + sequence_min_length_ratios: Optional[Union[int, List[int]]] = None, + insert_multiple_cycles: bool = False, + coupling_map: CouplingMap = None, + alt_spacings: Optional[Union[List[List[float]], List[float]]] = None, + schedule_idle_qubits: bool = False, + ): + """Dynamical decoupling initializer. + + Args: + durations: Durations of instructions to be used in scheduling. + dd_sequences: Sequence of gates to apply in idle spots. + Alternatively a list of gate sequences may be supplied that + will preferentially be inserted if there is a delay of sufficient + duration. This may be tuned by the optionally supplied + ``sequence_min_length_ratios``. + qubits: Physical qubits on which to apply DD. + If None, all qubits will undergo DD (when possible). + spacings: A list of lists of spacings between the DD gates. + The available slack will be divided according to this. + The list length must be one more than the length of dd_sequence, + and the elements must sum to 1. If None, a balanced spacing + will be used [d/2, d, d, ..., d, d, d/2]. This spacing only + applies to the first subcircuit, if a ``coupling_map`` is + specified + skip_reset_qubits: If True, does not insert DD on idle periods that + immediately follow initialized/reset qubits + (as qubits in the ground state are less susceptible to decoherence). + pulse_alignment: The hardware constraints for gate timing allocation. + This is usually provided from ``backend.configuration().timing_constraints``. + If provided, the delay length, i.e. ``spacing``, is implicitly adjusted to + satisfy this constraint. 
+ extra_slack_distribution: The option to control the behavior of DD sequence generation.
+ The duration of the DD sequence should be identical to an idle time in the
+ scheduled quantum circuit, however, the delay in between gates comprising the sequence
+ should be an integer number in units of dt, and it might be further truncated
+ when ``pulse_alignment`` is specified. This sometimes results in the duration of
+ the created sequence being shorter than the idle time
+ that you want to fill with the sequence, i.e. `extra slack`.
+ This option takes the following values.
+
+ * "middle": Put the extra slack to the interval at the middle of the sequence.
+ * "edges": Divide the extra slack as evenly as possible into
+ intervals at beginning and end of the sequence.
+ sequence_min_length_ratios: List of minimum delay length to DD sequence ratio to satisfy
+ in order to insert the DD sequence. For example if the X-X dynamical decoupling sequence
+ is 320dt samples long and the available delay is 384dt it has a ratio of 384dt/320dt=1.2.
+ From the perspective of dynamical decoupling this is likely to add more control noise
+ than decoupling error rate reductions. The default value is 2.0.
+ insert_multiple_cycles: If the available duration exceeds
+ 2*sequence_min_length_ratio*duration(dd_sequence) enable the insertion of multiple
+ rounds of the dynamical decoupling sequence in that delay.
+ coupling_map: directed graph representing the coupling map for the device. Specifying a
+ coupling map partitions the device into subcircuits, in order to apply DD sequences
+ with different pulse spacings within each. Currently supports 2 subcircuits.
+ alt_spacings: A list of lists of spacings between the DD gates, for the second subcircuit,
+ as determined by the coupling map. If None, a balanced spacing that is staggered with
+ respect to the first subcircuit will be used [d, d, d, ..., d, d, 0].
+ schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits. 
+ This is useful for timeline visualizations, but may cause issues + for execution on large backends. + Raises: + TranspilerError: When invalid DD sequence is specified. + TranspilerError: When pulse gate with the duration which is + non-multiple of the alignment constraint value is found. + TranspilerError: When the coupling map is not supported (i.e., if degree > 3) + """ + + super().__init__(schedule_idle_qubits=schedule_idle_qubits) + self._durations = durations + + # Enforce list of DD sequences + if dd_sequences: + try: + iter(dd_sequences[0]) + except TypeError: + dd_sequences = [dd_sequences] + self._dd_sequences = dd_sequences + self._qubits = qubits + self._skip_reset_qubits = skip_reset_qubits + self._alignment = pulse_alignment + self._coupling_map = coupling_map + self._coupling_coloring = None + + if spacings is not None: + try: + iter(spacings[0]) # type: ignore + except TypeError: + spacings = [spacings] # type: ignore + if alt_spacings is not None: + try: + iter(alt_spacings[0]) # type: ignore + except TypeError: + alt_spacings = [alt_spacings] # type: ignore + self._spacings = spacings + self._alt_spacings = alt_spacings + + if self._spacings and len(self._spacings) != len(self._dd_sequences): + raise TranspilerError("Number of sequence spacings must equal number of DD sequences.") + + if self._alt_spacings: + if not self._coupling_map: + warnings.warn( + "Alternate spacings are ignored because a coupling map was not provided" + ) + elif len(self._alt_spacings) != len(self._dd_sequences): + raise TranspilerError( + "Number of alternate sequence spacings must equal number of DD sequences." 
+ ) + + self._extra_slack_distribution = extra_slack_distribution + + self._dd_sequence_lengths: Dict[Qubit, List[List[Gate]]] = {} + self._sequence_phase = 0 + + if sequence_min_length_ratios is None: + # Use 2.0 as a sane default + self._sequence_min_length_ratios = [2.0 for _ in self._dd_sequences] + else: + try: + iter(sequence_min_length_ratios) # type: ignore + except TypeError: + sequence_min_length_ratios = [sequence_min_length_ratios] # type: ignore + self._sequence_min_length_ratios = sequence_min_length_ratios # type: ignore + + if len(self._sequence_min_length_ratios) != len(self._dd_sequences): + raise TranspilerError("Number of sequence lengths must equal number of DD sequences.") + + self._insert_multiple_cycles = insert_multiple_cycles + + def _pre_runhook(self, dag: DAGCircuit) -> None: + super()._pre_runhook(dag) + + if self._coupling_map: + physical_qubits = [dag.qubits.index(q) for q in dag.qubits] + subgraph = self._coupling_map.graph.subgraph(physical_qubits) + self._coupling_coloring = rx.graph_greedy_color(subgraph.to_undirected()) + if any(c > 1 for c in self._coupling_coloring.values()): + raise TranspilerError( + "This circuit topology is not supported for staggered dynamical decoupling." + "The maximum connectivity is 3 nearest neighbors per qubit." 
+ ) + + spacings_required = self._spacings is None + if spacings_required: + self._spacings = [] # type: ignore + alt_spacings_required = self._alt_spacings is None and self._coupling_map is not None + if alt_spacings_required: + self._alt_spacings = [] # type: ignore + + for seq_idx, seq in enumerate(self._dd_sequences): + num_pulses = len(self._dd_sequences[seq_idx]) + + # Check if physical circuit is given + if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: + raise TranspilerError("DD runs on physical circuits only.") + + # Set default spacing otherwise validate user input + if spacings_required: + mid = 1 / num_pulses + end = mid / 2 + self._spacings.append([end] + [mid] * (num_pulses - 1) + [end]) # type: ignore + else: + if sum(self._spacings[seq_idx]) != 1 or any( # type: ignore + a < 0 for a in self._spacings[seq_idx] # type: ignore + ): + raise TranspilerError( + "The spacings must be given in terms of fractions " + "of the slack period and sum to 1." + ) + + if self._coupling_map: + if alt_spacings_required: + mid = 1 / num_pulses + self._alt_spacings.append([mid] * num_pulses + [0]) # type: ignore + else: + if sum(self._alt_spacings[seq_idx]) != 1 or any( # type: ignore + a < 0 for a in self._alt_spacings[seq_idx] # type: ignore + ): + raise TranspilerError( + "The spacings must be given in terms of fractions " + "of the slack period and sum to 1." + ) + + # Check if DD sequence is identity + if num_pulses != 1: + if num_pulses % 2 != 0: + raise TranspilerError( + "DD sequence must contain an even number of gates (or 1)." + ) + # TODO: this check should use the quantum info package in Qiskit. 
+ noop = np.eye(2) + for gate in self._dd_sequences[seq_idx]: + noop = noop.dot(gate.to_matrix()) + if not matrix_equal(noop, IGate().to_matrix(), ignore_phase=True): + raise TranspilerError("The DD sequence does not make an identity operation.") + self._sequence_phase = np.angle(noop[0][0]) + + # Precompute qubit-wise DD sequence length for performance + for qubit in dag.qubits: + seq_length_ = [] + if qubit not in self._dd_sequence_lengths: + self._dd_sequence_lengths[qubit] = [] + + physical_index = dag.qubits.index(qubit) + if self._qubits and physical_index not in self._qubits: + continue + + for index, gate in enumerate(seq): + try: + # Check calibration. + gate_length = dag.calibrations[gate.name][(physical_index, gate.params)] + if gate_length % self._alignment != 0: + # This is necessary to implement lightweight scheduling logic for this pass. + # Usually the pulse alignment constraint and pulse data chunk size take + # the same value, however, we can intentionally violate this pattern + # at the gate level. For example, we can create a schedule consisting of + # a pi-pulse of 32 dt followed by a post buffer, i.e. delay, of 4 dt + # on the device with 16 dt constraint. Note that the pi-pulse length + # is multiple of 16 dt but the gate length of 36 is not multiple of it. + # Such pulse gate should be excluded. + raise TranspilerError( + f"Pulse gate {gate.name} with length non-multiple of {self._alignment} " + f"is not acceptable in {self.__class__.__name__} pass." + ) + except KeyError: + gate_length = self._durations.get(gate, physical_index) + seq_length_.append(gate_length) + # Update gate duration. + # This is necessary for current timeline drawer, i.e. scheduled. 
+
+ if hasattr(
+ gate, "to_mutable"
+ ): # TODO this check can be removed after Qiskit 1.0, as it is always True
+ gate = gate.to_mutable()
+ seq[index] = gate
+ gate.duration = gate_length
+ self._dd_sequence_lengths[qubit].append(seq_length_)
+
+ def _pad(
+ self,
+ block_idx: int,
+ qubit: Qubit,
+ t_start: int,
+ t_end: int,
+ next_node: DAGNode,
+ prev_node: DAGNode,
+ ) -> None:
+ # This routine takes care of the pulse alignment constraint for the DD sequence.
+ # Note that the alignment constraint acts on the t0 of the DAGOpNode.
+ # Now this constrained scheduling problem is simplified to the problem of
+ # finding a delay amount which is a multiple of the constraint value by assuming
+ # that the duration of every DAGOpNode is also a multiple of the constraint value.
+ #
+ # For example, given the constraint value of 16 and XY4 with 160 dt gates.
+ # Here we assume current interval is 992 dt.
+ #
+ # relative spacing := [0.125, 0.25, 0.25, 0.25, 0.125]
+ # slack = 992 dt - 4 x 160 dt = 352 dt
+ #
+ # unconstrained sequence: 44dt-X1-88dt-Y2-88dt-X3-88dt-Y4-44dt
+ # constrained sequence : 32dt-X1-80dt-Y2-80dt-X3-80dt-Y4-32dt + extra slack 48 dt
+ #
+ # Now we evenly split extra slack into start and end of the sequence.
+ # The distributed slack should be a multiple of 16.
+ # Start = +16, End += 32
+ #
+ # final sequence : 48dt-X1-80dt-Y2-80dt-X3-80dt-Y4-64dt / in total 992 dt
+ #
+ # Now we verify t0 of every node starts from a multiple of 16 dt.
+ #
+ # X1: 48 dt (3 x 16 dt)
+ # Y2: 48 dt + 160 dt + 80 dt = 288 dt (18 x 16 dt)
+ # X3: 288 dt + 160 dt + 80 dt = 528 dt (33 x 16 dt)
+ # Y4: 528 dt + 160 dt + 80 dt = 768 dt (48 x 16 dt)
+ #
+ # As you can see, constraints on t0 are all satisfied without explicit scheduling.
+ time_interval = t_end - t_start
+
+ if self._qubits and self._block_dag.qubits.index(qubit) not in self._qubits:
+ # Target physical qubit is not the target of this DD sequence. 
+ self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return + + if ( + not isinstance(prev_node, DAGInNode) + and self._skip_reset_qubits + and isinstance(prev_node.op, Reset) + and qubit in prev_node.qargs + ): + self._dirty_qubits.remove(qubit) + + if qubit not in self._dirty_qubits: + # Previous node is the start edge or reset, i.e. qubit is ground state. + self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return + + for sequence_idx, _ in enumerate(self._dd_sequences): + dd_sequence = self._dd_sequences[sequence_idx] + seq_lengths = self._dd_sequence_lengths[qubit][sequence_idx] + seq_length = np.sum(seq_lengths) + seq_ratio = self._sequence_min_length_ratios[sequence_idx] + spacings = self._spacings[sequence_idx] + alt_spacings = ( + np.asarray(self._alt_spacings[sequence_idx]) if self._coupling_map else None + ) + + # Verify the delay duration exceeds the minimum time to insert + if time_interval / seq_length <= seq_ratio: + continue + + if self._insert_multiple_cycles: + num_sequences = max(int(time_interval // (seq_length * seq_ratio)), 1) + if (num_sequences % 2 == 1) and len(dd_sequence) == 1: + warnings.warn( + "Sequence would result in an odd number of DD cycles with original DD " + "sequence of length 1. This may result in non-identity sequence insertion " + "and so are defaulting to 1 cycle insertion." 
+ ) + num_sequences = 1 + else: + num_sequences = 1 + + # multiple dd sequences may be inserted + if num_sequences > 1: + dd_sequence = list(dd_sequence) * num_sequences + seq_lengths = seq_lengths * num_sequences + seq_length = np.sum(seq_lengths) + spacings = spacings * num_sequences + + spacings = np.asarray(spacings) / num_sequences + slack = time_interval - seq_length + sequence_gphase = self._sequence_phase + + if slack <= 0: + continue + + if len(dd_sequence) == 1: + # Special case of using a single gate for DD + u_inv = dd_sequence[0].inverse().to_matrix() + theta, phi, lam, phase = OneQubitEulerDecomposer().angles_and_phase(u_inv) + if isinstance(next_node, DAGOpNode) and isinstance(next_node.op, (UGate, U3Gate)): + # Absorb the inverse into the successor (from left in circuit) + theta_r, phi_r, lam_r = next_node.op.params + next_node.op.params = Optimize1qGates.compose_u3( + theta_r, phi_r, lam_r, theta, phi, lam + ) + sequence_gphase += phase + elif isinstance(prev_node, DAGOpNode) and isinstance(prev_node.op, (UGate, U3Gate)): + # Absorb the inverse into the predecessor (from right in circuit) + theta_l, phi_l, lam_l = prev_node.op.params + prev_node.op.params = Optimize1qGates.compose_u3( + theta, phi, lam, theta_l, phi_l, lam_l + ) + sequence_gphase += phase + else: + # Don't do anything if there's no single-qubit gate to absorb the inverse + self._apply_scheduled_op( + block_idx, + t_start, + Delay(time_interval, self._block_dag.unit), + qubit, + ) + return + + def _constrained_length(values: np.array) -> np.array: + return self._alignment * np.floor(values / self._alignment) + + if self._coupling_map: + if self._coupling_coloring[self._dag.qubits.index(qubit)] == 0: + sub_spacings = spacings + else: + sub_spacings = alt_spacings + else: + sub_spacings = spacings + + # (1) Compute DD intervals satisfying the constraint + taus = _constrained_length(slack * sub_spacings) + extra_slack = slack - np.sum(taus) + # (2) Distribute extra slack + if 
self._extra_slack_distribution == "middle": + mid_ind = int((len(taus) - 1) / 2) + to_middle = _constrained_length(extra_slack) + taus[mid_ind] += to_middle + if extra_slack - to_middle: + # If to_middle is not a multiple value of the pulse alignment, + # it is truncated to the nearest multiple value and + # the rest of slack is added to the end. + taus[-1] += extra_slack - to_middle + elif self._extra_slack_distribution == "edges": + to_begin_edge = _constrained_length(extra_slack / 2) + taus[0] += to_begin_edge + taus[-1] += extra_slack - to_begin_edge + else: + raise TranspilerError( + f"Option extra_slack_distribution = {self._extra_slack_distribution} is invalid." + ) + + # (3) Construct DD sequence with delays + idle_after = t_start + dd_ind = 0 + # Interleave delays with DD sequence operations + for tau_idx, tau in enumerate(taus): + if tau > 0: + self._apply_scheduled_op( + block_idx, idle_after, Delay(tau, self._dag.unit), qubit + ) + idle_after += tau + + # Detect if we are on a sequence boundary + # If so skip insert of sequence to allow delays to combine + # There are two cases. + # 1. The number of delays to be inserted is equal to the number of gates. + # 2. There is an extra delay inserted after the last operation. + # The condition below handles both. 
+ seq_length = int(len(taus) / num_sequences) + if len(dd_sequence) == len(taus) or tau_idx % seq_length != (seq_length - 1): + gate = dd_sequence[dd_ind] + gate_length = seq_lengths[dd_ind] + self._apply_scheduled_op(block_idx, idle_after, gate, qubit) + idle_after += gate_length + dd_ind += 1 + + self._block_dag.global_phase = self._block_dag.global_phase + sequence_gphase + return + + # DD could not be applied, delay instead + self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py new file mode 100644 index 000000000..fd61f8c49 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py @@ -0,0 +1,78 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Padding pass to insert Delay into empty timeslots for dynamic circuit backends.""" + +from qiskit.circuit import Qubit +from qiskit.circuit.delay import Delay +from qiskit.dagcircuit import DAGNode, DAGOutNode + +from .block_base_padder import BlockBasePadder + + +class PadDelay(BlockBasePadder): + """Padding idle time with Delay instructions. + + Consecutive delays will be merged in the output of this pass. + + .. code-block::python + + durations = InstructionDurations([("x", None, 160), ("cx", None, 800)]) + + qc = QuantumCircuit(2) + qc.delay(100, 0) + qc.x(1) + qc.cx(0, 1) + + The ASAP-scheduled circuit output may become + + .. 
parsed-literal:: + + ┌────────────────┐ + q_0: ┤ Delay(160[dt]) ├──■── + └─────┬───┬──────┘┌─┴─┐ + q_1: ──────┤ X ├───────┤ X ├ + └───┘ └───┘ + + Note that the additional idle time of 60dt on the ``q_0`` wire coming from the duration difference + between ``Delay`` of 100dt (``q_0``) and ``XGate`` of 160 dt (``q_1``) is absorbed in + the delay instruction on the ``q_0`` wire, i.e. in total 160 dt. + + See :class:`BlockBasePadder` pass for details. + """ + + def __init__(self, fill_very_end: bool = True, schedule_idle_qubits: bool = False): + """Create new padding delay pass. + + Args: + fill_very_end: Set ``True`` to fill the end of circuit with delay. + schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits. + This is useful for timeline visualizations, but may cause issues for execution + on large backends. + """ + super().__init__(schedule_idle_qubits=schedule_idle_qubits) + self.fill_very_end = fill_very_end + + def _pad( + self, + block_idx: int, + qubit: Qubit, + t_start: int, + t_end: int, + next_node: DAGNode, + prev_node: DAGNode, + ) -> None: + if not self.fill_very_end and isinstance(next_node, DAGOutNode): + return + + time_interval = t_end - t_start + self._apply_scheduled_op(block_idx, t_start, Delay(time_interval, "dt"), qubit) diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py new file mode 100644 index 000000000..b18ee32c6 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py @@ -0,0 +1,643 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Scheduler for dynamic circuit backends.""" + +from abc import abstractmethod +from typing import Dict, List, Optional, Union, Set, Tuple +import itertools + +import qiskit +from qiskit.circuit.parameterexpression import ParameterExpression +from qiskit.converters import circuit_to_dag +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.passes.scheduling.time_unit_conversion import TimeUnitConversion + +from qiskit.circuit import Barrier, Clbit, ControlFlowOp, Measure, Qubit, Reset +from qiskit.circuit.bit import Bit +from qiskit.dagcircuit import DAGCircuit, DAGNode +from qiskit.transpiler.exceptions import TranspilerError + +from .utils import block_order_op_nodes + + +class BaseDynamicCircuitAnalysis(TransformationPass): + """Base class for scheduling analysis + + This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits + backends due to the limitations imposed by hardware. This is expected to evolve over time as the + dynamic circuit backends also change. + + The primary differences are that: + + * Resets and control-flow currently trigger the end of a "quantum block". The period between the end + of the block and the next is *nondeterministic* + ie., we do not know when the next block will begin (as we could be evaluating a classical + function of nondeterministic length) and therefore the + next block starts at a *relative* t=0. + * During a measurement it is possible to apply gates in parallel on disjoint qubits. + * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. + """ + + def __init__( + self, durations: qiskit.transpiler.instruction_durations.InstructionDurations + ) -> None: + """Scheduler for dynamic circuit backends. 
+
+ Args:
+ durations: Durations of instructions to be used in scheduling.
+ """
+ self._durations = durations
+
+ self._dag: Optional[DAGCircuit] = None
+ self._block_dag: Optional[DAGCircuit] = None
+ self._wire_map: Optional[Dict[Bit, Bit]] = None
+ self._node_mapped_wires: Optional[Dict[DAGNode, List[Bit]]] = None
+ self._node_block_dags: Dict[DAGNode, List[DAGCircuit]] = {}
+ # Mapping of control-flow nodes to their containing blocks
+ self._block_idx_dag_map: Dict[int, DAGCircuit] = {}
+ # Mapping of block indices to the respective DAGCircuit
+
+ self._current_block_idx = 0
+ self._max_block_t1: Optional[Dict[int, int]] = None
+ # Track as we build to avoid extra pass
+ self._control_flow_block = False
+ self._node_start_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None
+ self._node_stop_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None
+ self._bit_stop_times: Optional[Dict[int, Dict[Union[Qubit, Clbit], int]]] = None
+ # Dictionary of blocks each containing a dictionary with the key for each bit
+ # in the block and its value being the final time of the bit within the block.
+ self._current_block_measures: Set[DAGNode] = set()
+ self._current_block_measures_has_reset: bool = False
+ self._node_tied_to: Optional[Dict[DAGNode, Set[DAGNode]]] = None
+ # Nodes that the scheduling of this node is tied to.
+ self._bit_indices: Optional[Dict[Qubit, int]] = None
+
+ self._time_unit_converter = TimeUnitConversion(durations)
+
+ super().__init__()
+
+ @property
+ def _current_block_bit_times(self) -> Dict[Union[Qubit, Clbit], int]:
+ return self._bit_stop_times[self._current_block_idx]
+
+ def _visit_block(self, block: DAGCircuit, wire_map: Dict[Qubit, Qubit]) -> None:
+ # Push the previous block dag onto the stack
+ prev_block_dag = self._block_dag
+ self._block_dag = block
+ prev_wire_map, self._wire_map = self._wire_map, wire_map
+
+ # We must run this on the individual block
+ # as the current implementation does not recurse
+ # into the circuit structure.
+ self._time_unit_converter.run(block) + self._begin_new_circuit_block() + + for node in block_order_op_nodes(block): + self._visit_node(node) + + # Final flush + self._flush_measures() + + # Pop the previous block dag off the stack restoring it + self._block_dag = prev_block_dag + self._wire_map = prev_wire_map + + def _visit_node(self, node: DAGNode) -> None: + if isinstance(node.op, ControlFlowOp): + self._visit_control_flow_op(node) + elif node.op.condition_bits: + raise TranspilerError( + "c_if control-flow is not supported by this pass. " + 'Please apply "ConvertConditionsToIfOps" to convert these ' + "conditional operations to new-style Qiskit control-flow." + ) + else: + if isinstance(node.op, Measure): + self._visit_measure(node) + elif isinstance(node.op, Reset): + self._visit_reset(node) + else: + self._visit_generic(node) + + def _visit_control_flow_op(self, node: DAGNode) -> None: + # TODO: This is a hack required to tie nodes of control-flow + # blocks across the scheduler and block_base_padder. This is + # because the current control flow nodes store the block as a + # circuit which is not hashable. For processing we are currently + # required to convert each circuit block to a dag which is inefficient + # and causes node relationships stored in analysis to be lost between + # passes as we are constantly recreating the block dags. + # We resolve this here by caching these dags in the property set. 
+ self._node_block_dags[node] = node_block_dags = [] + + t0 = max( # pylint: disable=invalid-name + self._current_block_bit_times[bit] for bit in self._map_wires(node) + ) + + # Duration is 0 as we do not schedule across terminator + t1 = t0 # pylint: disable=invalid-name + self._update_bit_times(node, t0, t1) + + for block in node.op.blocks: + self._control_flow_block = True + + new_dag = circuit_to_dag(block) + inner_wire_map = { + inner: outer + for outer, inner in zip(self._map_wires(node), new_dag.qubits + new_dag.clbits) + } + node_block_dags.append(new_dag) + self._visit_block(new_dag, inner_wire_map) + + # Begin new block for exit to "then" block. + self._begin_new_circuit_block() + + @abstractmethod + def _visit_measure(self, node: DAGNode) -> None: + raise NotImplementedError + + @abstractmethod + def _visit_reset(self, node: DAGNode) -> None: + raise NotImplementedError + + @abstractmethod + def _visit_generic(self, node: DAGNode) -> None: + raise NotImplementedError + + def _init_run(self, dag: DAGCircuit) -> None: + """Setup for initial run.""" + + self._dag = dag + self._block_dag = None + self._wire_map = {wire: wire for wire in dag.wires} + self._node_mapped_wires = {} + self._node_block_dags = {} + self._block_idx_dag_map = {} + + self._current_block_idx = 0 + self._max_block_t1 = {} + self._control_flow_block = False + + if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: + raise TranspilerError("ASAP schedule runs on physical circuits only") + + self._node_start_time = {} + self._node_stop_time = {} + self._bit_stop_times = {0: {q: 0 for q in dag.qubits + dag.clbits}} + self._current_block_measures = set() + self._current_block_measures_has_reset = False + self._node_tied_to = {} + self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} + + def _get_duration(self, node: DAGNode, dag: Optional[DAGCircuit] = None) -> int: + if node.op.condition_bits or isinstance(node.op, ControlFlowOp): + # As we cannot currently schedule 
through conditionals model + # as zero duration to avoid padding. + return 0 + + indices = [self._bit_indices[qarg] for qarg in self._map_qubits(node)] + + # Fall back to current block dag if not specified. + dag = dag or self._block_dag + + if dag.has_calibration_for(node): + # If node has calibration, this value should be the highest priority + cal_key = tuple(indices), tuple(float(p) for p in node.op.params) + duration = dag.calibrations[node.op.name][cal_key].duration + node.op.duration = duration + else: + duration = node.op.duration + + if isinstance(duration, ParameterExpression): + raise TranspilerError( + f"Parameterized duration ({duration}) " + f"of {node.op.name} on qubits {indices} is not bounded." + ) + if duration is None: + raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") + + return duration + + def _update_bit_times( # pylint: disable=invalid-name + self, node: DAGNode, t0: int, t1: int, update_cargs: bool = True + ) -> None: + self._max_block_t1[self._current_block_idx] = max( + self._max_block_t1.get(self._current_block_idx, 0), t1 + ) + + update_bits = self._map_wires(node) if update_cargs else self._map_qubits(node) + for bit in update_bits: + self._current_block_bit_times[bit] = t1 + + self._node_start_time[node] = (self._current_block_idx, t0) + self._node_stop_time[node] = (self._current_block_idx, t1) + + def _begin_new_circuit_block(self) -> None: + """Create a new timed circuit block completing the previous block.""" + self._current_block_idx += 1 + self._block_idx_dag_map[self._current_block_idx] = self._block_dag + self._control_flow_block = False + self._bit_stop_times[self._current_block_idx] = { + self._wire_map[wire]: 0 for wire in self._block_dag.wires + } + self._flush_measures() + + def _flush_measures(self) -> None: + """Flush currently accumulated measurements by resetting block measures.""" + for node in self._current_block_measures: + self._node_tied_to[node] = 
self._current_block_measures.copy() + + self._current_block_measures = set() + self._current_block_measures_has_reset = False + + def _current_block_measure_qargs(self) -> Set[Qubit]: + return set( + qarg for measure in self._current_block_measures for qarg in self._map_qubits(measure) + ) + + def _check_flush_measures(self, node: DAGNode) -> None: + if self._current_block_measure_qargs() & set(self._map_qubits(node)): + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. + self._begin_new_circuit_block() + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + + def _map_wires(self, node: DAGNode) -> List[Qubit]: + """Map the wires from the current node to the top-level block's wires. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + if node not in self._node_mapped_wires: + self._node_mapped_wires[node] = wire_map = [ + self._wire_map[q] for q in node.qargs + node.cargs + ] + return wire_map + + return self._node_mapped_wires[node] + + def _map_qubits(self, node: DAGNode) -> List[Qubit]: + """Map the qubits from the current node to the top-level block's qubits. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + return [wire for wire in self._map_wires(node) if isinstance(wire, Qubit)] + + +class ASAPScheduleAnalysis(BaseDynamicCircuitAnalysis): + """Dynamic circuits as-soon-as-possible (ASAP) scheduling analysis pass. + + This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits + backends due to the limitations imposed by hardware. This is expected to evolve over time as the + dynamic circuit backends also change. + + In its current form this is similar to Qiskit's ASAP scheduler in which instructions + start as early as possible. + + The primary differences are that: + + * Resets and control-flow currently trigger the end of a "quantum block". 
The period between the end
+ of the block and the next is *nondeterministic*
+ ie., we do not know when the next block will begin (as we could be evaluating a classical
+ function of nondeterministic length) and therefore the
+ next block starts at a *relative* t=0.
+ * During a measurement it is possible to apply gates in parallel on disjoint qubits.
+ * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block.
+ """
+
+ def run(self, dag: DAGCircuit) -> DAGCircuit:
+ """Run the ASAPScheduleAnalysis pass on `dag`.
+ Args:
+ dag (DAGCircuit): DAG to schedule.
+ Raises:
+ TranspilerError: if the circuit is not mapped on physical qubits.
+ TranspilerError: if conditional bit is added to non-supported instruction.
+ Returns:
+ The scheduled DAGCircuit.
+ """
+ self._init_run(dag)
+
+ # Trivial wire map at the top-level
+ wire_map = {wire: wire for wire in dag.wires}
+ # Top-level dag is the entry block
+ self._visit_block(dag, wire_map)
+
+ self.property_set["node_start_time"] = self._node_start_time
+ self.property_set["node_block_dags"] = self._node_block_dags
+ return dag
+
+ def _visit_measure(self, node: DAGNode) -> None:
+ """Visit a measurement node.
+
+ Measurement currently triggers the end of a deterministically scheduled block
+ of instructions in IBM dynamic circuits hardware.
+ This means that it is possible to schedule *up to* a measurement (and during its pulses)
+ but the measurement will be followed by a period of indeterminism.
+ All measurements on disjoint qubits that topologically follow another
+ measurement will be collected and performed in parallel. A measurement on a qubit
+ intersecting with the set of qubits to be measured in parallel will trigger the
+ end of a scheduling block with said measurement occurring in a following block
+ which begins another grouping sequence.
This behavior will change in future + backend software updates.""" + + current_block_measure_qargs = self._current_block_measure_qargs() + # We handle a set of qubits here as _visit_reset currently calls + # this method and a reset may have multiple qubits. + measure_qargs = set(self._map_qubits(node)) + + t0q = max( + self._current_block_bit_times[q] for q in measure_qargs + ) # pylint: disable=invalid-name + + # If the measurement qubits overlap, we need to flush measurements and start a + # new scheduling block. + if current_block_measure_qargs & measure_qargs: + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. + self._begin_new_circuit_block() + t0q = 0 + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + else: + # Otherwise we need to increment all measurements to start at the same time within the block. + t0q = max( # pylint: disable=invalid-name + itertools.chain( + [t0q], + (self._node_start_time[measure][1] for measure in self._current_block_measures), + ) + ) + + # Insert this measure into the block + self._current_block_measures.add(node) + + for measure in self._current_block_measures: + t0 = t0q # pylint: disable=invalid-name + bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} + measure_duration = self._durations.get( + Measure(), + [bit_indices[qarg] for qarg in self._map_qubits(measure)], + unit="dt", + ) + t1 = t0 + measure_duration # pylint: disable=invalid-name + self._update_bit_times(measure, t0, t1) + + def _visit_reset(self, node: DAGNode) -> None: + """Visit a reset node. + + Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware + as conditional reset is performed internally using a c_if. This means that it is + possible to schedule *up to* a reset (and during its measurement pulses) + but the reset will be followed by a period of conditional indeterminism. 
+ All resets on disjoint qubits will be collected on the same qubits to be run simultaneously.
+ """
+ # Process as measurement
+ self._current_block_measures_has_reset = True
+ self._visit_measure(node)
+ # Then set that we are now a conditional node.
+ self._control_flow_block = True
+
+ def _visit_generic(self, node: DAGNode) -> None:
+ """Visit a generic node such as a gate or barrier."""
+ op_duration = self._get_duration(node)
+
+ # If the measurement qubits overlap, we need to flush the measurement group
+ self._check_flush_measures(node)
+
+ t0 = max( # pylint: disable=invalid-name
+ self._current_block_bit_times[bit] for bit in self._map_wires(node)
+ )
+
+ t1 = t0 + op_duration # pylint: disable=invalid-name
+ self._update_bit_times(node, t0, t1)
+
+
+class ALAPScheduleAnalysis(BaseDynamicCircuitAnalysis):
+ """Dynamic circuits as-late-as-possible (ALAP) scheduling analysis pass.
+
+ This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits
+ backends due to the limitations imposed by hardware. This is expected to evolve over time as the
+ dynamic circuit backends also change.
+
+ In its current form this is similar to Qiskit's ALAP scheduler in which instructions
+ start as late as possible.
+
+ The primary differences are that:
+
+ * Resets and control-flow currently trigger the end of a "quantum block". The period between the end
+ of the block and the next is *nondeterministic*
+ ie., we do not know when the next block will begin (as we could be evaluating a classical
+ function of nondeterministic length) and therefore the
+ next block starts at a *relative* t=0.
+ * During a measurement it is possible to apply gates in parallel on disjoint qubits.
+ * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block.
+ """
+
+ def run(self, dag: DAGCircuit) -> DAGCircuit:
+ """Run the ALAPScheduleAnalysis pass on `dag`.
+ Args:
+ dag (DAGCircuit): DAG to schedule.
+ Raises: + TranspilerError: if the circuit is not mapped on physical qubits. + TranspilerError: if conditional bit is added to non-supported instruction. + Returns: + The scheduled DAGCircuit. + """ + self._init_run(dag) + + # Trivial wire map at the top-level + wire_map = {wire: wire for wire in dag.wires} + # Top-level dag is the entry block + self._visit_block(dag, wire_map) + self._push_block_durations() + self.property_set["node_start_time"] = self._node_start_time + self.property_set["node_block_dags"] = self._node_block_dags + return dag + + def _visit_measure(self, node: DAGNode) -> None: + """Visit a measurement node. + + Measurement currently triggers the end of a deterministically scheduled block + of instructions in IBM dynamic circuits hardware. + This means that it is possible to schedule *up to* a measurement (and during its pulses) + but the measurement will be followed by a period of indeterminism. + All measurements on disjoint qubits that topologically follow another + measurement will be collected and performed in parallel. A measurement on a qubit + intersecting with the set of qubits to be measured in parallel will trigger the + end of a scheduling block with said measurement occurring in a following block + which begins another grouping sequence. This behavior will change in future + backend software updates.""" + + current_block_measure_qargs = self._current_block_measure_qargs() + # We handle a set of qubits here as _visit_reset currently calls + # this method and a reset may have multiple qubits. + measure_qargs = set(self._map_qubits(node)) + + t0q = max( + self._current_block_bit_times[q] for q in measure_qargs + ) # pylint: disable=invalid-name + + # If the measurement qubits overlap, we need to flush measurements and start a + # new scheduling block. + if current_block_measure_qargs & measure_qargs: + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. 
+ self._begin_new_circuit_block() + t0q = 0 + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + else: + # Otherwise we need to increment all measurements to start at the same time within the block. + t0q = max( # pylint: disable=invalid-name + itertools.chain( + [t0q], + (self._node_start_time[measure][1] for measure in self._current_block_measures), + ) + ) + + # Insert this measure into the block + self._current_block_measures.add(node) + + for measure in self._current_block_measures: + t0 = t0q # pylint: disable=invalid-name + bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} + measure_duration = self._durations.get( + Measure(), + [bit_indices[qarg] for qarg in self._map_qubits(measure)], + unit="dt", + ) + t1 = t0 + measure_duration # pylint: disable=invalid-name + self._update_bit_times(measure, t0, t1) + + def _visit_reset(self, node: DAGNode) -> None: + """Visit a reset node. + + Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware + as conditional reset is performed internally using a c_if. This means that it is + possible to schedule *up to* a reset (and during its measurement pulses) + but the reset will be followed by a period of conditional indeterminism. + All resets on disjoint qubits will be collected on the same qubits to be run simultaneously. + """ + # Process as measurement + self._current_block_measures_has_reset = True + self._visit_measure(node) + # Then set that we are now a conditional node. + self._control_flow_block = True + + def _visit_generic(self, node: DAGNode) -> None: + """Visit a generic node such as a gate or barrier.""" + + # If True we are coming from a conditional block. + # start a new block for the unconditional operations. 
+ if self._control_flow_block: + self._begin_new_circuit_block() + + op_duration = self._get_duration(node) + + # If the measurement qubits overlap, we need to flush the measurement group + self._check_flush_measures(node) + + t0 = max( # pylint: disable=invalid-name + self._current_block_bit_times[bit] for bit in self._map_wires(node) + ) + + t1 = t0 + op_duration # pylint: disable=invalid-name + self._update_bit_times(node, t0, t1) + + def _push_block_durations(self) -> None: + """After scheduling of each block, pass over and push the times of all nodes.""" + + # Store the next available time to push to for the block by bit + block_bit_times = {} + # Iterated nodes starting at the first, from the node with the + # last time, preferring barriers over non-barriers + + def order_ops(item: Tuple[DAGNode, Tuple[int, int]]) -> Tuple[int, int, bool, int]: + """Iterated nodes ordering by channel, time and preferring that barriers are processed + first.""" + return ( + item[1][0], + -item[1][1], + not isinstance(item[0].op, Barrier), + self._get_duration(item[0], dag=self._block_idx_dag_map[item[1][0]]), + ) + + iterate_nodes = sorted(self._node_stop_time.items(), key=order_ops) + + new_node_start_time = {} + new_node_stop_time = {} + + def _calculate_new_times( + block: int, node: DAGNode, block_bit_times: Dict[int, Dict[Qubit, int]] + ) -> int: + max_block_time = min(block_bit_times[block][bit] for bit in self._map_qubits(node)) + + t0 = self._node_start_time[node][1] # pylint: disable=invalid-name + t1 = self._node_stop_time[node][1] # pylint: disable=invalid-name + # Determine how much to shift by + node_offset = max_block_time - t1 + new_t0 = t0 + node_offset + return new_t0 + + scheduled = set() + + def _update_time( + block: int, + node: DAGNode, + new_time: int, + block_bit_times: Dict[int, Dict[Qubit, int]], + ) -> None: + scheduled.add(node) + + new_node_start_time[node] = (block, new_time) + new_node_stop_time[node] = ( + block, + new_time + 
self._get_duration(node, dag=self._block_idx_dag_map[block]), + ) + + # Update available times by bit + for bit in self._map_qubits(node): + block_bit_times[block][bit] = new_time + + for node, ( + block, + _, + ) in iterate_nodes: # pylint: disable=invalid-name + # skip already scheduled + if node in scheduled: + continue + # Start with last time as the time to push to + if block not in block_bit_times: + block_bit_times[block] = {q: self._max_block_t1[block] for q in self._dag.wires} + + # Calculate the latest available time to push to collectively for tied nodes + tied_nodes = self._node_tied_to.get(node, None) + if tied_nodes is not None: + # Take the minimum time that will be schedulable + # self._node_tied_to includes the node itself. + new_times = [ + _calculate_new_times(block, tied_node, block_bit_times) + for tied_node in self._node_tied_to[node] + ] + new_time = min(new_times) + for tied_node in tied_nodes: + _update_time(block, tied_node, new_time, block_bit_times) + + else: + new_t0 = _calculate_new_times(block, node, block_bit_times) + _update_time(block, node, new_t0, block_bit_times) + + self._node_start_time = new_node_start_time + self._node_stop_time = new_node_stop_time diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py new file mode 100644 index 000000000..bf7665cd1 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py @@ -0,0 +1,287 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Utility functions for scheduling passes.""" + +import warnings +from typing import List, Generator, Optional, Tuple, Union + +from qiskit.circuit import ControlFlowOp, Measure, Reset, Parameter +from qiskit.dagcircuit import DAGCircuit, DAGOpNode +from qiskit.transpiler.instruction_durations import ( + InstructionDurations, + InstructionDurationsType, +) +from qiskit.transpiler.exceptions import TranspilerError + + +def block_order_op_nodes(dag: DAGCircuit) -> Generator[DAGOpNode, None, None]: + """Yield nodes such that they are sorted into groups of blocks that minimize synchronization. + + Measurements are also grouped. + """ + + def _is_grouped_measure(node: DAGOpNode) -> bool: + """Does this node need to be grouped?""" + return isinstance(node.op, (Reset, Measure)) + + def _is_block_trigger(node: DAGOpNode) -> bool: + """Does this node trigger the end of a block?""" + return isinstance(node.op, ControlFlowOp) + + def _emit( + node: DAGOpNode, + grouped_measure: List[DAGOpNode], + block_triggers: List[DAGOpNode], + ) -> bool: + """Should we emit this node?""" + for measure in grouped_measure: + if dag.is_predecessor(node, measure): + return True + for block_trigger in block_triggers: + if dag.is_predecessor(node, block_trigger): + return True + + return _is_grouped_measure(node) or _is_block_trigger(node) + + # Begin processing nodes in order + next_nodes = dag.topological_op_nodes() + while next_nodes: + curr_nodes = next_nodes # Setup the next iteration nodes + next_nodes_set = set() # Nodes that will make it into the next iteration + next_nodes = [] # Nodes to process in order in the next iteration + to_push = [] # Do we push this to the very last block? + yield_measures = [] # Measures/resets we will yield first + yield_block_triggers = [] # Followed by block triggers (conditionals) + block_break = False # Did we encounter a block trigger in this iteration? 
+ for node in curr_nodes: + # If we have added this node to the next set of nodes + # skip for now. + if node in next_nodes_set: + next_nodes.append(node) + continue + + # If this nodes is a measurement + # push on the measurements to process + if _is_grouped_measure(node): + block_break = True + node_descendants = dag.descendants(node) + next_nodes_set |= set(node_descendants) + yield_measures.append(node) + # If this node is a block push this onto + # the block trigger list. + elif _is_block_trigger(node): + block_break = True + node_descendants = dag.descendants(node) + next_nodes_set |= set(node_descendants) + yield_block_triggers.append(node) + # Otherwise we push onto the final list of blocks to emit + # as part of the final block. + else: + to_push.append(node) + + new_to_push = [] + for node in to_push: + node_descendants = dag.descendants(node) + if any( + _emit(descendant, yield_measures, yield_block_triggers) + for descendant in node_descendants + if isinstance(descendant, DAGOpNode) + ): + yield node + else: + new_to_push.append(node) + + to_push = new_to_push + + # First emit the measurements which will feed + for node in yield_measures: + yield node + # Into the block triggers we will emit. + for node in yield_block_triggers: + yield node + + # We're at the last block and emit the final nodes + if not block_break: + for node in to_push: + yield node + break + # Otherwise emit the final nodes + # Add to the front of the list to be processed next + to_push.extend(next_nodes) + next_nodes = to_push + + +InstrKey = Union[ + Tuple[str, None, None], + Tuple[str, Tuple[int], None], + Tuple[str, Tuple[int], Tuple[Parameter]], +] + + +class DynamicCircuitInstructionDurations(InstructionDurations): + """For dynamic circuits the IBM Qiskit backend currently + reports instruction durations that differ compared with those + required for the legacy Qobj-based path. For now we use this + class to report updated InstructionDurations. 
+ TODO: This would be mitigated by a specialized Backend/Target for + dynamic circuit backends. + """ + + MEASURE_PATCH_CYCLES = 160 + MEASURE_PATCH_ODD_OFFSET = 64 + + def __init__( + self, + instruction_durations: Optional[InstructionDurationsType] = None, + dt: float = None, + enable_patching: bool = True, + ): + """Dynamic circuit instruction durations.""" + self._enable_patching = enable_patching + super().__init__(instruction_durations=instruction_durations, dt=dt) + + def update( + self, inst_durations: Optional[InstructionDurationsType], dt: float = None + ) -> "DynamicCircuitInstructionDurations": + """Update self with inst_durations (inst_durations overwrite self). Overrides the default + durations for certain hardcoded instructions. + + Args: + inst_durations: Instruction durations to be merged into self (overwriting self). + dt: Sampling duration in seconds of the target backend. + + Returns: + InstructionDurations: The updated InstructionDurations. + + Raises: + TranspilerError: If the format of instruction_durations is invalid. + """ + + # First update as normal + super().update(inst_durations, dt=dt) + + if not self._enable_patching or inst_durations is None: + return self + + # Then update required instructions. This code is ugly + # because the InstructionDurations code is handling too many + # formats in update and this code must also. 
+ if isinstance(inst_durations, InstructionDurations): + for key in inst_durations.keys(): + self._patch_instruction(key) + else: + for name, qubits, _, parameters, _ in inst_durations: + if isinstance(qubits, int): + qubits = [qubits] + + if isinstance(parameters, (int, float)): + parameters = [parameters] + + if qubits is None: + key = (name, None, None) + elif parameters is None: + key = (name, tuple(qubits), None) + else: + key = (name, tuple(qubits), tuple(parameters)) + + self._patch_instruction(key) + + return self + + def _patch_instruction(self, key: InstrKey) -> None: + """Dispatcher logic for instruction patches""" + name = key[0] + if name == "measure": + self._patch_measurement(key) + elif name == "reset": + self._patch_reset(key) + + def _patch_measurement(self, key: InstrKey) -> None: + """Patch measurement duration by extending duration by 160dt as temporarily + required by the dynamic circuit backend. + """ + prev_duration, unit = self._get_duration_dt(key) + if unit != "dt": + raise TranspilerError('Can currently only patch durations of "dt".') + odd_cycle_correction = self._get_odd_cycle_correction() + self._patch_key(key, prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, unit) + # Enforce patching of reset on measurement update + self._patch_reset(("reset", key[1], key[2])) + + def _patch_reset(self, key: InstrKey) -> None: + """Patch reset duration by extending duration by measurement patch as temporarily + required by the dynamic circuit backend. 
+ """ + # We patch the reset to be the duration of the measurement if it + # is available as it currently + # triggers the end of scheduling after the measurement pulse + measure_key = ("measure", key[1], key[2]) + try: + measure_duration, unit = self._get_duration_dt(measure_key) + self._patch_key(key, measure_duration, unit) + except KeyError: + # Fall back to reset key if measure not available + prev_duration, unit = self._get_duration_dt(key) + if unit != "dt": + raise TranspilerError('Can currently only patch durations of "dt".') + odd_cycle_correction = self._get_odd_cycle_correction() + self._patch_key( + key, + prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, + unit, + ) + + def _get_duration_dt(self, key: InstrKey) -> Tuple[int, str]: + """Handling for the complicated structure of this class. + + TODO: This class implementation should be simplified in Qiskit. Too many edge cases. + """ + if key[1] is None and key[2] is None: + return self.duration_by_name[key[0]] + elif key[2] is None: + return self.duration_by_name_qubits[(key[0], key[1])] + + return self.duration_by_name_qubits_params[key] + + def _patch_key(self, key: InstrKey, duration: int, unit: str) -> None: + """Handling for the complicated structure of this class. + + TODO: This class implementation should be simplified in Qiskit. Too many edge cases. 
+ """ + if key[1] is None and key[2] is None: + self.duration_by_name[key[0]] = (duration, unit) + elif key[2] is None: + self.duration_by_name_qubits[(key[0], key[1])] = (duration, unit) + + self.duration_by_name_qubits_params[key] = (duration, unit) + + def _get_odd_cycle_correction(self) -> int: + """Determine the amount of the odd cycle correction to apply + For devices with short gates with odd lenghts we add an extra 16dt to the measurement + + TODO: Eliminate the need for this correction + """ + key_pulse = "sx" + key_qubit = 0 + try: + key_duration = self.get(key_pulse, key_qubit, "dt") + except TranspilerError: + warnings.warn( + f"No {key_pulse} gate found for {key_qubit} for detection of " + "short odd gate lengths, default measurement timing will be used." + ) + key_duration = 160 # keyPulse gate not found + + if key_duration < 160 and key_duration % 32: + return self.MEASURE_PATCH_ODD_OFFSET + return 0 diff --git a/qiskit_ibm_runtime/transpiler/plugin.py b/qiskit_ibm_runtime/transpiler/plugin.py new file mode 100644 index 000000000..75f70cfe4 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/plugin.py @@ -0,0 +1,98 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Plugin for IBM provider backend transpiler stages.""" + +from typing import Optional + +from qiskit.transpiler.passmanager import PassManager +from qiskit.transpiler.passmanager_config import PassManagerConfig +from qiskit.transpiler.preset_passmanagers.plugin import PassManagerStagePlugin +from qiskit.transpiler.preset_passmanagers import common +from qiskit.transpiler.passes import ConvertConditionsToIfOps + +from qiskit_ibm_provider.transpiler.passes.basis.convert_id_to_delay import ( + ConvertIdToDelay, +) + + +class IBMTranslationPlugin(PassManagerStagePlugin): + """A translation stage plugin for targeting Qiskit circuits + to IBM Quantum systems.""" + + def pass_manager( + self, + pass_manager_config: PassManagerConfig, + optimization_level: Optional[int] = None, + ) -> PassManager: + """Build IBMTranslationPlugin PassManager.""" + + translator_pm = common.generate_translation_passmanager( + target=pass_manager_config.target, + basis_gates=pass_manager_config.basis_gates, + approximation_degree=pass_manager_config.approximation_degree, + coupling_map=pass_manager_config.coupling_map, + backend_props=pass_manager_config.backend_properties, + unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, + unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, + hls_config=pass_manager_config.hls_config, + ) + + plugin_passes = [] + instruction_durations = pass_manager_config.instruction_durations + if instruction_durations: + plugin_passes.append(ConvertIdToDelay(instruction_durations)) + + return PassManager(plugin_passes) + translator_pm + + +class IBMDynamicTranslationPlugin(PassManagerStagePlugin): + """A translation stage plugin for targeting Qiskit circuits + to IBM Quantum systems.""" + + def pass_manager( + self, + pass_manager_config: PassManagerConfig, + optimization_level: Optional[int] = None, + ) -> PassManager: + """Build IBMTranslationPlugin PassManager.""" + + translator_pm = 
common.generate_translation_passmanager( + target=pass_manager_config.target, + basis_gates=pass_manager_config.basis_gates, + approximation_degree=pass_manager_config.approximation_degree, + coupling_map=pass_manager_config.coupling_map, + backend_props=pass_manager_config.backend_properties, + unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, + unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, + hls_config=pass_manager_config.hls_config, + ) + + instruction_durations = pass_manager_config.instruction_durations + plugin_passes = [] + if pass_manager_config.target is not None: + id_supported = "id" in pass_manager_config.target + else: + id_supported = "id" in pass_manager_config.basis_gates + + if instruction_durations and not id_supported: + plugin_passes.append(ConvertIdToDelay(instruction_durations)) + + # Only inject control-flow conversion pass at level 0 and level 1. As of + # qiskit 0.22.x transpile() with level 2 and 3 does not support + # control flow instructions (including if_else). This can be + # removed when higher optimization levels support control flow + # instructions. + if optimization_level in {0, 1}: + plugin_passes += [ConvertConditionsToIfOps()] + + return PassManager(plugin_passes) + translator_pm diff --git a/test/jobtestcase.py b/test/jobtestcase.py new file mode 100644 index 000000000..4c38170d3 --- /dev/null +++ b/test/jobtestcase.py @@ -0,0 +1,33 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Custom TestCase for Jobs.""" + +import time + +from qiskit.providers import JobStatus + +from .ibm_test_case import IBMTestCase + + +class JobTestCase(IBMTestCase): + """Include common functionality when testing jobs.""" + + def wait_for_initialization(self, job, timeout=1): + """Waits until job progresses from `INITIALIZING` to other status.""" + waited = 0 + wait = 0.1 + while job.status() is JobStatus.INITIALIZING: + time.sleep(wait) + waited += wait + if waited > timeout: + self.fail(msg="The JOB is still initializing after timeout ({}s)".format(timeout)) diff --git a/test/unit/test_backend.py b/test/unit/test_backend.py index 472ff2ded..44ad39a61 100644 --- a/test/unit/test_backend.py +++ b/test/unit/test_backend.py @@ -12,7 +12,6 @@ """Tests for the backend functions.""" import copy -from datetime import datetime from unittest import mock import warnings @@ -29,6 +28,7 @@ create_faulty_backend, ) + class TestBackend(IBMTestCase): """Tests for IBMBackend class.""" @@ -42,9 +42,7 @@ def test_raise_faulty_qubits(self): transpiled = transpile(circ, backend=fake_backend) faulty_qubit = 4 - ibm_backend = create_faulty_backend( - fake_backend, faulty_qubit=faulty_qubit - ) + ibm_backend = create_faulty_backend(fake_backend, faulty_qubit=faulty_qubit) with self.assertRaises(ValueError) as err: ibm_backend.run(transpiled) @@ -64,9 +62,7 @@ def test_raise_faulty_qubits_many(self): transpiled = transpile([circ1, circ2], backend=fake_backend) faulty_qubit = 4 - ibm_backend = create_faulty_backend( - fake_backend, faulty_qubit=faulty_qubit - ) + ibm_backend = create_faulty_backend(fake_backend, faulty_qubit=faulty_qubit) with self.assertRaises(ValueError) as err: ibm_backend.run(transpiled) @@ -83,9 +79,7 @@ def test_raise_faulty_edge(self): transpiled = transpile(circ, backend=fake_backend) edge_qubits = [0, 1] - ibm_backend = create_faulty_backend( - fake_backend, faulty_edge=("cx", edge_qubits) - ) + ibm_backend = create_faulty_backend(fake_backend, 
faulty_edge=("cx", edge_qubits)) with self.assertRaises(ValueError) as err: ibm_backend.run(transpiled) @@ -102,9 +96,7 @@ def test_faulty_qubit_not_used(self): transpiled = transpile(circ, backend=fake_backend, initial_layout=[0, 1]) faulty_qubit = 4 - ibm_backend = create_faulty_backend( - fake_backend, faulty_qubit=faulty_qubit - ) + ibm_backend = create_faulty_backend(fake_backend, faulty_qubit=faulty_qubit) print(ibm_backend) with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: @@ -121,13 +113,9 @@ def test_faulty_edge_not_used(self): circ = QuantumCircuit(2, 2) circ.cx(0, 1) - transpiled = transpile( - circ, backend=fake_backend, initial_layout=coupling_map[0] - ) + transpiled = transpile(circ, backend=fake_backend, initial_layout=coupling_map[0]) edge_qubits = coupling_map[-1] - ibm_backend = create_faulty_backend( - fake_backend, faulty_edge=("cx", edge_qubits) - ) + ibm_backend = create_faulty_backend(fake_backend, faulty_edge=("cx", edge_qubits)) with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: ibm_backend.run(circuits=transpiled) @@ -170,8 +158,7 @@ def test_dynamic_circuits_warning(self): with mock.patch.object(IBMBackend, "_runtime_run"): backend.run(circuits=circuit, dynamic=False) self.assertIn( - "Parameter 'dynamic' is False, but the circuit " - "contains dynamic constructs.", + "Parameter 'dynamic' is False, but the circuit " "contains dynamic constructs.", str(warn[0].message), ) self.assertIn( diff --git a/test/unit/test_ibm_job_states.py b/test/unit/test_ibm_job_states.py new file mode 100644 index 000000000..f54f6e572 --- /dev/null +++ b/test/unit/test_ibm_job_states.py @@ -0,0 +1,597 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +# pylint: disable=missing-docstring + +"""IBMJob states test-suite.""" + +import copy +import time +import json +from datetime import datetime +from concurrent import futures +from concurrent.futures import ThreadPoolExecutor +from contextlib import suppress +from unittest import mock +from unittest.mock import MagicMock +from typing import List, Any, Dict + +from qiskit import transpile +from qiskit.providers import JobTimeoutError +from qiskit.providers.jobstatus import JobStatus +from qiskit.providers.fake_provider.backends.bogota.fake_bogota import FakeBogota + +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_provider.apiconstants import API_JOB_FINAL_STATES, ApiJobStatus + +from qiskit_ibm_runtime.api.exceptions import ( + ApiError, + UserTimeoutExceededError, + ApiIBMProtocolError, +) +from qiskit_ibm_runtime import IBMBackend +from qiskit_ibm_runtime.exceptions import RuntimeInvalidStateError +from ..jobtestcase import JobTestCase + +MOCKED_ERROR_RESULT: Dict[str, Any] = { + "qObjectResult": { + "backend_name": "fake_backend", + "backend_version": "0.1.1", + "qobj_id": "123", + "job_id": "123", + "success": False, + "results": [ + {"status": "DONE", "success": True, "shots": 1, "data": {}}, + {"status": "Error 1", "success": False, "shots": 1, "data": {}}, + {"status": "Error 2", "success": False, "shots": 1, "data": {}}, + ], + } +} + +VALID_QOBJ_RESPONSE = { + "status": "COMPLETED", + "kind": "q-object", + "creationDate": "2019-01-01T12:57:15.052Z", + "id": "0123456789", + "qObjectResult": { + "backend_name": "ibmqx2", + "backend_version": "1.1.1", + "job_id": "XC1323XG2", + "qobj_id": "Experiment1", + "success": True, + "status": "COMPLETED", + "results": [ + { + "header": { + "name": "Bell state", + "creg_sizes": [["c", 2]], 
+ "clbit_labels": [["c", 0], ["c", 1]], + "qubit_labels": [["q", 0], ["q", 1]], + }, + "shots": 1024, + "status": "DONE", + "success": True, + "data": {"counts": {"0x0": 480, "0x3": 490, "0x1": 20, "0x2": 34}}, + }, + { + "header": { + "name": "Bell state XY", + "creg_sizes": [["c", 2]], + "clbit_labels": [["c", 0], ["c", 1]], + "qubit_labels": [["q", 0], ["q", 1]], + }, + "shots": 1024, + "status": "DONE", + "success": True, + "data": {"counts": {"0x0": 29, "0x3": 15, "0x1": 510, "0x2": 480}}, + }, + ], + }, +} + + +VALID_JOB_RESPONSE = { + "id": "TEST_ID", + "job_id": "TEST_ID", + "kind": "q-object", + "status": "CREATING", + "creation_date": "2019-01-01T13:15:58.425972", +} + + +class TestIBMJobStates(JobTestCase): + """Test the states of an IBMJob.""" + + def setUp(self): + """Initial test setup.""" + super().setUp() + self._current_api = None + self._current_qjob = None + + def test_done_status(self): + """Test job status progresses to done.""" + job = self.run_with_api(QueuedAPI()) + + self.assertFalse(job.done()) + self.wait_for_initialization(job) + + self._current_api.progress() + self.assertFalse(job.done()) + + self._current_api.progress() + self.assertTrue(job.done()) + + def test_running_status(self): + """Test job status progresses to running.""" + job = self.run_with_api(ValidatingAPI()) + + self.assertFalse(job.running()) + self.wait_for_initialization(job) + + self._current_api.progress() + self.assertTrue(job.running()) + + def test_cancelled_status(self): + """Test job status is cancelled.""" + job = self.run_with_api(CancellableAPI()) + + self.assertFalse(job.cancelled()) + self.wait_for_initialization(job) + + self._current_api.progress() + self.assertTrue(job.cancelled()) + + def test_validating_job(self): + """Test job status is validating.""" + job = self.run_with_api(ValidatingAPI()) + + self.wait_for_initialization(job) + self.assertEqual(job.status(), JobStatus.VALIDATING) + + def test_error_while_creating_job(self): + """Test job failing 
during creation.""" + job = self.run_with_api(ErrorWhileCreatingAPI()) + + self.wait_for_initialization(job) + self.assertEqual(job.status(), JobStatus.ERROR) + + def test_error_while_validating_job(self): + """Test job failing during validation.""" + job = self.run_with_api(ErrorWhileValidatingAPI()) + + self.wait_for_initialization(job) + self.assertEqual(job.status(), JobStatus.VALIDATING) + + self._current_api.progress() + self.assertEqual(job.status(), JobStatus.ERROR) + + def test_status_flow_for_non_queued_job(self): + """Test job status progressing to done without being queued.""" + job = self.run_with_api(NonQueuedAPI()) + + self.wait_for_initialization(job) + self.assertEqual(job.status(), JobStatus.RUNNING) + + self._current_api.progress() + self.assertEqual(job.status(), JobStatus.DONE) + + def test_status_flow_for_queued_job(self): + """Test job status progressing from queued to done.""" + job = self.run_with_api(QueuedAPI()) + + self.wait_for_initialization(job) + self.assertEqual(job.status(), JobStatus.QUEUED) + + self._current_api.progress() + self.assertEqual(job.status(), JobStatus.RUNNING) + + self._current_api.progress() + self.assertEqual(job.status(), JobStatus.DONE) + + def test_status_flow_for_cancellable_job(self): + """Test job status going from running to cancelled.""" + job = self.run_with_api(CancellableAPI()) + + self.wait_for_initialization(job) + self.assertEqual(job.status(), JobStatus.RUNNING) + + job.cancel() + + self._current_api.progress() + self.assertEqual(job.status(), JobStatus.CANCELLED) + + def test_status_flow_for_unable_to_run_valid_qobj(self): + """Test API error while running a job.""" + with self.assertRaises(ApiError): + self.run_with_api(UnavailableRunAPI()) + + # TODO fix test case + def test_error_while_running_job(self): + """Test job failed.""" + job = self.run_with_api(ErrorWhileRunningAPI()) + + self.wait_for_initialization(job) + self.assertEqual(job.status(), JobStatus.RUNNING) + + 
self._current_api.progress() + self.assertEqual(job.status(), JobStatus.ERROR) + # self.assertIn("Error 1", job.error_message()) + # self.assertIn("Error 2", job.error_message()) + + def test_cancelled_result(self): + """Test getting results for a cancelled job.""" + job = self.run_with_api(CancellableAPI()) + + self.wait_for_initialization(job) + job.cancel() + self._current_api.progress() + with self.assertRaises(RuntimeInvalidStateError): + _ = job.result() + self.assertEqual(job.status(), JobStatus.CANCELLED) + + def test_completed_result(self): + """Test getting results for a completed job.""" + job = self.run_with_api(NonQueuedAPI()) + + self.wait_for_initialization(job) + self._current_api.progress() + self.assertEqual(job.result().success, True) + self.assertEqual(job.status(), JobStatus.DONE) + + def test_block_on_result_waiting_until_completed(self): + """Test waiting for job results.""" + + job = self.run_with_api(NonQueuedAPI()) + with futures.ThreadPoolExecutor() as executor: + executor.submit(_auto_progress_api, self._current_api) + + result = job.result() + self.assertEqual(result.success, True) + self.assertEqual(job.status(), JobStatus.DONE) + + def test_block_on_result_waiting_until_cancelled(self): + """Test canceling job while waiting for results.""" + + job = self.run_with_api(CancellableAPI()) + with ThreadPoolExecutor() as executor: + executor.submit(_auto_progress_api, self._current_api) + + with self.assertRaises(RuntimeInvalidStateError): + job.result() + + self.assertEqual(job.status(), JobStatus.CANCELLED) + + def test_never_complete_result_with_timeout(self): + """Test timing out while waiting for job results.""" + job = self.run_with_api(NonQueuedAPI()) + + self.wait_for_initialization(job) + with self.assertRaises(JobTimeoutError): + job.result(timeout=0.2) + + def test_only_final_states_cause_detailed_request(self): + """Test job status call does not provide detailed information.""" + # The state ERROR_CREATING_JOB is only handled 
when running the job, + # and not while checking the status, so it is not tested. + all_state_apis = { + "COMPLETED": NonQueuedAPI, + "CANCELLED": CancellableAPI, + "ERROR_VALIDATING_JOB": ErrorWhileValidatingAPI, + "ERROR_RUNNING_JOB": ErrorWhileRunningAPI, + } + + for status, api in all_state_apis.items(): + with self.subTest(status=status): + job = self.run_with_api(api()) + self.wait_for_initialization(job) + + with suppress(BaseFakeAPI.NoMoreStatesError): + self._current_api.progress() + + with mock.patch.object( + self._current_api, "job_get", wraps=self._current_api.job_get + ): + job.status() + if ApiJobStatus(status) in API_JOB_FINAL_STATES: + self.assertTrue(self._current_api.job_get.called) + else: + self.assertFalse(self._current_api.job_get.called) + + def test_transpiling_status(self): + """Test transpiling job state.""" + job = self.run_with_api(TranspilingStatusAPI()) + time.sleep(0.2) + self.assertEqual(job.status(), JobStatus.INITIALIZING) + + def run_with_api(self, api): + """Creates a new ``IBMJob`` running with the provided API object.""" + backend = IBMBackend(FakeBogota().configuration(), MagicMock(), api_client=api) + backend._api_client = api + circuit = transpile(ReferenceCircuits.bell()) + self._current_api = api + self._current_qjob = backend.run(circuit) + self._current_qjob.refresh = MagicMock() + return self._current_qjob + + +def _auto_progress_api(api, interval=0.2): + """Progress a ``BaseFakeAPI`` instance every `interval` seconds until reaching + the final state. 
+ """ + with suppress(BaseFakeAPI.NoMoreStatesError): + while True: + time.sleep(interval) + api.progress() + + +class BaseFakeAPI: + """Base class for faking the IBM Quantum API.""" + + class NoMoreStatesError(Exception): + """Raised when it is not possible to progress more.""" + + _job_status: List[Any] = [] + + _can_cancel = False + + def __init__(self): + """BaseFakeAPI constructor.""" + self._params = MagicMock() + self._state = 0 + self.config = {"hub": None, "group": None, "project": None} + if self._can_cancel: + self.config.update( + {"hub": "test-hub", "group": "test-group", "project": "test-project"} + ) + + def job_get(self, job_id): + """Return information about a job.""" + if not job_id: + return {"status": "Error", "error": "Job ID not specified"} + + return { + "created": datetime.now().isoformat(), + "state": self._job_status[self._state], + "metadata": {}, + } + + def job_metadata(self, job_id: str) -> Dict: + """Return job metadata""" + return self.job_get(job_id)["metadata"] + + def job_status(self, job_id): + """Return the status of a job.""" + summary_fields = ["status", "error", "info_queue"] + complete_response = self.job_get(job_id)["state"] + try: + ApiJobStatus(complete_response["status"]) + except ValueError: + raise ApiIBMProtocolError("Api Error") + return {key: value for key, value in complete_response.items() if key in summary_fields} + + def program_run(self, *_args, **_kwargs): + """Submit the job.""" + time.sleep(0.2) + return VALID_JOB_RESPONSE + + def job_submit(self, *_args, **_kwargs): + """Submit the job.""" + time.sleep(0.2) + return VALID_JOB_RESPONSE + + def job_cancel(self, job_id, *_args, **_kwargs): + """Cancel the job.""" + if not job_id: + return {"status": "Error", "error": "Job ID not specified"} + return ( + {"cancelled": True} + if self._can_cancel + else {"error": "testing fake API can not cancel"} + ) + + def job_final_status(self, job_id, *_args, **_kwargs): + """Wait for job to enter a final state.""" + 
start_time = time.time() + status_response = self.job_status(job_id) + while ApiJobStatus(status_response["status"]) not in API_JOB_FINAL_STATES: + elapsed_time = time.time() - start_time + timeout = _kwargs.get("timeout", None) + if timeout is not None and elapsed_time >= timeout: + raise UserTimeoutExceededError("Timeout while waiting for job {}".format(job_id)) + time.sleep(5) + status_response = self.job_status(job_id) + return status_response + + def job_results(self, job_id: str) -> Any: + """Return job result""" + result = self.job_get(job_id) + return json.dumps(result["state"]["qObjectResult"]) + + def job_result(self, job_id, *_args, **_kwargs): + """Get job result.""" + return self.job_get(job_id)["qObjectResult"] + + def progress(self): + """Progress to the next job state.""" + if self._state == len(self._job_status) - 1: + raise self.NoMoreStatesError() + self._state += 1 + + def backend_status(self, backend_name: str) -> Dict[str, Any]: + """Return the status of the backend.""" + return { + "backend_name": backend_name, + "backend_version": "0.0.0", + "operational": True, + "pending_jobs": 0, + "status_msg": "active", + } + + def backend_properties(self, *args, **kwargs): # pylint: disable=unused-argument + return None + + def job_type(self, job_id: str) -> str: + if job_id[0] != "c" and len(job_id) == 24: + return "IQX" + return "RUNTIME" + + +class UnknownStatusAPI(BaseFakeAPI): + """Class for emulating an API with unknown status codes.""" + + _job_status = [{"status": "UNKNOWN"}] + + +class ValidatingAPI(BaseFakeAPI): + """Class for emulating an API with job validation.""" + + _job_status = [{"status": "VALIDATING"}, {"status": "RUNNING"}] + + +class ErrorWhileValidatingAPI(BaseFakeAPI): + """Class for emulating an API processing an invalid job.""" + + _job_status = [ + {"status": "VALIDATING"}, + {"status": "ERROR_VALIDATING_JOB", **MOCKED_ERROR_RESULT}, + ] + + +class NonQueuedAPI(BaseFakeAPI): + """Class for emulating a successfully-completed 
non-queued API.""" + + _job_status = [{"status": "RUNNING"}, VALID_QOBJ_RESPONSE] + + +class ErrorWhileCreatingAPI(BaseFakeAPI): + """Class emulating an API processing a job that errors while creating the job.""" + + _job_status = [{"status": "ERROR_CREATING_JOB", **MOCKED_ERROR_RESULT}] + + +class ErrorWhileRunningAPI(BaseFakeAPI): + """Class emulating an API processing a job that errors while running.""" + + _job_status = [ + {"status": "RUNNING"}, + {"status": "ERROR_RUNNING_JOB", **MOCKED_ERROR_RESULT}, + ] + + +class QueuedAPI(BaseFakeAPI): + """Class for emulating a successfully-completed queued API.""" + + _job_status = [{"status": "QUEUED"}, {"status": "RUNNING"}, {"status": "COMPLETED"}] + + +class RejectingJobAPI(BaseFakeAPI): + """Class for emulating an API unable of initializing.""" + + def job_submit(self, *_args, **_kwargs): + return {"error": "invalid qobj"} + + +class UnavailableRunAPI(BaseFakeAPI): + """Class for emulating an API throwing before even initializing.""" + + def program_run(self, *_args, **_kwargs): + time.sleep(0.2) + raise ApiError("Api Error") + + +class ThrowingAPI(BaseFakeAPI): + """Class for emulating an API throwing in the middle of execution.""" + + _job_status = [{"status": "RUNNING"}] + + def job_get(self, job_id): + raise ApiError("Api Error") + + +class ThrowingNonJobRelatedErrorAPI(BaseFakeAPI): + """Class for emulating an scenario where the job is done but the API + fails some times for non job-related errors. + """ + + _job_status = [{"status": "COMPLETED"}] + + def __init__(self, errors_before_success=2): + super().__init__() + self._number_of_exceptions_to_throw = errors_before_success + + def job_get(self, job_id): + if self._number_of_exceptions_to_throw != 0: + self._number_of_exceptions_to_throw -= 1 + raise ApiError("Api Error") + + return super().job_get(job_id) + + +class ThrowingGetJobAPI(BaseFakeAPI): + """Class for emulating an API throwing in the middle of execution. 
But not in + ``job_status()``, just in ``job_get()``. + """ + + _job_status = [{"status": "COMPLETED"}] + + def job_status(self, job_id): + return self._job_status[self._state] + + def job_get(self, job_id): + raise ApiError("Unexpected error") + + +class CancellableAPI(BaseFakeAPI): + """Class for emulating an API with cancellation.""" + + _job_status = [{"status": "RUNNING"}, {"status": "CANCELLED"}] + + _can_cancel = True + + +class NonCancellableAPI(BaseFakeAPI): + """Class for emulating an API without cancellation running a long job.""" + + _job_status = [{"status": "RUNNING"}, {"status": "RUNNING"}, {"status": "RUNNING"}] + + +class ErroredCancellationAPI(BaseFakeAPI): + """Class for emulating an API with cancellation but throwing while trying.""" + + _job_status = [{"status": "RUNNING"}, {"status": "RUNNING"}, {"status": "RUNNING"}] + + _can_cancel = True + + def job_cancel(self, job_id, *_args, **_kwargs): + return {"status": "Error", "error": "test-error-while-cancelling"} + + +class NoKindJobAPI(BaseFakeAPI): + """Class for emulating an API with QASM jobs.""" + + _job_status = [{"status": "COMPLETED"}] + + no_kind_response = copy.deepcopy(VALID_JOB_RESPONSE) + del no_kind_response["kind"] + + def job_submit(self, *_args, **_kwargs): + return self.no_kind_response + + def job_result(self, job_id, *_args, **_kwargs): + return self.no_kind_response + + +class TranspilingStatusAPI(BaseFakeAPI): + """Class for emulating an API with transpiling status codes.""" + + _job_status = [{"status": "TRANSPILING"}, {"status": "TRANSPILED"}] From 17c4ec2e6879c792afb3cc35bd47ba3596bd3c48 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 10 Oct 2023 15:01:51 +0000 Subject: [PATCH 06/47] lint --- qiskit_ibm_runtime/ibm_backend.py | 5 ++--- qiskit_ibm_runtime/runtime_job.py | 6 ++++-- test/unit/test_backend.py | 14 ++++++++------ test/unit/test_ibm_job_states.py | 32 ++++++++++++++++++++----------- 4 files changed, 35 insertions(+), 22 deletions(-) diff --git 
a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index d83c47344..f2f0bf469 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -24,9 +24,7 @@ from qiskit.qobj.utils import MeasLevel, MeasReturnType from qiskit.tools.events.pubsub import Publisher from qiskit.transpiler.passmanager import PassManager -from .transpiler.passes.basis.convert_id_to_delay import ( - ConvertIdToDelay, -) + from qiskit.providers.backend import BackendV2 as Backend from qiskit.providers.options import Options from qiskit.providers.models import ( @@ -71,6 +69,7 @@ from .utils.backend_converter import ( convert_to_target, ) +from .transpiler.passes.basis.convert_id_to_delay import ConvertIdToDelay logger = logging.getLogger(__name__) diff --git a/qiskit_ibm_runtime/runtime_job.py b/qiskit_ibm_runtime/runtime_job.py index 2e359d5e2..1ba965fca 100644 --- a/qiskit_ibm_runtime/runtime_job.py +++ b/qiskit_ibm_runtime/runtime_job.py @@ -157,7 +157,8 @@ def __init__( if user_callback is not None: self.stream_results(user_callback) - def _download_external_result(self, response: Any) -> Any: + @staticmethod + def _download_external_result(response: Any) -> Any: """Download result from external URL. Args: @@ -575,7 +576,8 @@ def _stream_results( traceback.format_exc(), ) - def _empty_result_queue(self, result_queue: queue.Queue) -> None: + @staticmethod + def _empty_result_queue(result_queue: queue.Queue) -> None: """Empty the result queue. 
Args: diff --git a/test/unit/test_backend.py b/test/unit/test_backend.py index 44ad39a61..d9f081823 100644 --- a/test/unit/test_backend.py +++ b/test/unit/test_backend.py @@ -17,7 +17,7 @@ from qiskit import transpile, qasm3, QuantumCircuit from qiskit.providers.fake_provider import FakeManila -from qiskit.providers.models import BackendStatus, BackendProperties +from qiskit.providers.models import BackendStatus from qiskit_ibm_provider.exceptions import IBMBackendValueError @@ -87,7 +87,8 @@ def test_raise_faulty_edge(self): self.assertIn("cx", str(err.exception)) self.assertIn(f"faulty edge {tuple(edge_qubits)}", str(err.exception)) - def test_faulty_qubit_not_used(self): + @staticmethod + def test_faulty_qubit_not_used(): """Test faulty qubit is not raised if not used.""" fake_backend = FakeManila() circ = QuantumCircuit(2, 2) @@ -97,14 +98,14 @@ def test_faulty_qubit_not_used(self): transpiled = transpile(circ, backend=fake_backend, initial_layout=[0, 1]) faulty_qubit = 4 ibm_backend = create_faulty_backend(fake_backend, faulty_qubit=faulty_qubit) - print(ibm_backend) with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: ibm_backend.run(circuits=transpiled) mock_run.assert_called_once() - def test_faulty_edge_not_used(self): + @staticmethod + def test_faulty_edge_not_used(): """Test faulty edge is not raised if not used.""" fake_backend = FakeManila() @@ -158,7 +159,7 @@ def test_dynamic_circuits_warning(self): with mock.patch.object(IBMBackend, "_runtime_run"): backend.run(circuits=circuit, dynamic=False) self.assertIn( - "Parameter 'dynamic' is False, but the circuit " "contains dynamic constructs.", + "Parameter 'dynamic' is False, but the circuit contains dynamic constructs.", str(warn[0].message), ) self.assertIn( @@ -166,7 +167,8 @@ def test_dynamic_circuits_warning(self): str(warn[1].message), ) - def _create_dc_test_backend(self): + @staticmethod + def _create_dc_test_backend(): """Create a test backend with an IfElseOp enabled."""
model_backend = FakeManila() properties = model_backend.properties() diff --git a/test/unit/test_ibm_job_states.py b/test/unit/test_ibm_job_states.py index f54f6e572..f17cd1475 100644 --- a/test/unit/test_ibm_job_states.py +++ b/test/unit/test_ibm_job_states.py @@ -379,12 +379,15 @@ def job_status(self, job_id): raise ApiIBMProtocolError("Api Error") return {key: value for key, value in complete_response.items() if key in summary_fields} - def program_run(self, *_args, **_kwargs): + @staticmethod + def program_run(*_args, **_kwargs): """Submit the job.""" time.sleep(0.2) return VALID_JOB_RESPONSE - def job_submit(self, *_args, **_kwargs): + + @classmethod + def job_submit(cls, *_args, **_kwargs): """Submit the job.""" time.sleep(0.2) return VALID_JOB_RESPONSE @@ -427,7 +430,8 @@ def progress(self): raise self.NoMoreStatesError() self._state += 1 - def backend_status(self, backend_name: str) -> Dict[str, Any]: + @staticmethod + def backend_status(backend_name: str) -> Dict[str, Any]: """Return the status of the backend.""" return { "backend_name": backend_name, @@ -437,10 +441,12 @@ def backend_status(self, backend_name: str) -> Dict[str, Any]: "status_msg": "active", } - def backend_properties(self, *args, **kwargs): # pylint: disable=unused-argument + @staticmethod + def backend_properties(*args, **kwargs): # pylint: disable=unused-argument return None - def job_type(self, job_id: str) -> str: + @staticmethod + def job_type(job_id: str) -> str: if job_id[0] != "c" and len(job_id) == 24: return "IQX" return "RUNTIME" @@ -497,14 +503,16 @@ class QueuedAPI(BaseFakeAPI): class RejectingJobAPI(BaseFakeAPI): """Class for emulating an API unable of initializing.""" - def job_submit(self, *_args, **_kwargs): + @classmethod + def job_submit(cls, *_args, **_kwargs): return {"error": "invalid qobj"} class UnavailableRunAPI(BaseFakeAPI): """Class for emulating an API throwing before even initializing.""" - def program_run(self, *_args, **_kwargs): + @staticmethod + def 
program_run(*_args, **_kwargs): time.sleep(0.2) raise ApiError("Api Error") @@ -584,11 +592,13 @@ class NoKindJobAPI(BaseFakeAPI): no_kind_response = copy.deepcopy(VALID_JOB_RESPONSE) del no_kind_response["kind"] - def job_submit(self, *_args, **_kwargs): - return self.no_kind_response + @classmethod + def job_submit(cls, *_args, **_kwargs): + return cls.no_kind_response - def job_result(self, job_id, *_args, **_kwargs): - return self.no_kind_response + @classmethod + def job_result(cls, job_id, *_args, **_kwargs): + return cls.no_kind_response class TranspilingStatusAPI(BaseFakeAPI): From 7b9f6cc6ba4d73ce3956096a3928c6a7158ba8a8 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 10 Oct 2023 16:42:48 +0000 Subject: [PATCH 07/47] black --- test/unit/test_ibm_job_states.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/unit/test_ibm_job_states.py b/test/unit/test_ibm_job_states.py index f17cd1475..f92e33d69 100644 --- a/test/unit/test_ibm_job_states.py +++ b/test/unit/test_ibm_job_states.py @@ -385,7 +385,6 @@ def program_run(*_args, **_kwargs): time.sleep(0.2) return VALID_JOB_RESPONSE - @classmethod def job_submit(cls, *_args, **_kwargs): """Submit the job.""" From aab2a877a92b602c1dbe1430b15f373cbeaad606 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 10 Oct 2023 16:56:07 +0000 Subject: [PATCH 08/47] lint --- qiskit_ibm_runtime/runtime_job.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/qiskit_ibm_runtime/runtime_job.py b/qiskit_ibm_runtime/runtime_job.py index 1ba965fca..aa2496583 100644 --- a/qiskit_ibm_runtime/runtime_job.py +++ b/qiskit_ibm_runtime/runtime_job.py @@ -157,8 +157,7 @@ def __init__( if user_callback is not None: self.stream_results(user_callback) - @staticmethod - def _download_external_result(response: Any) -> Any: + def _download_external_result(self, response: Any) -> Any: """Download result from external URL. 
Args: From 71cb86de0579e2a5b2479130d105589d1bb53791 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 12 Oct 2023 13:22:54 +0000 Subject: [PATCH 09/47] Added integration tests from the provider --- test/integration/test_backend.py | 116 ++++++++++++++++++++++++++++--- 1 file changed, 108 insertions(+), 8 deletions(-) diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index a1d1bb209..ee6cb682d 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -12,11 +12,20 @@ """Tests for backend functions using real runtime service.""" -from unittest import SkipTest +from unittest import SkipTest, mock +from unittest.mock import patch from datetime import datetime import copy from qiskit.transpiler.target import Target +from qiskit import QuantumCircuit +from qiskit.providers.models import QasmBackendConfiguration +from qiskit.providers.exceptions import QiskitBackendNotFoundError +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_provider.ibm_qubit_properties import IBMQubitProperties +from qiskit_ibm_provider.exceptions import IBMBackendValueError + from qiskit_ibm_runtime import QiskitRuntimeService from ..ibm_test_case import IBMIntegrationTestCase @@ -73,6 +82,7 @@ def setUpClass(cls): # pylint: disable=arguments-differ # pylint: disable=no-value-for-parameter super().setUpClass() + cls.service = cls.dependencies.service if cls.dependencies.channel == "ibm_cloud": # TODO use real device when cloud supports it cls.backend = cls.dependencies.service.least_busy(min_num_qubits=5) @@ -162,13 +172,6 @@ def test_backend_invalid_attribute(self): with self.assertRaises(AttributeError): backend.foobar # pylint: disable=pointless-statement - def test_backend_run(self): - """Check one cannot do backend.run""" - backend = self.backend - with self.subTest(backend=backend.name): - with self.assertRaises(RuntimeError): - backend.run() - def test_backend_deepcopy(self): """Test that 
deepcopy on IBMBackend works correctly""" backend = self.backend @@ -191,3 +194,100 @@ def test_backend_deepcopy(self): backend_copy._api_client._session.base_url, backend._api_client._session.base_url, ) + + def test_backend_pending_jobs(self): + """Test pending jobs are returned.""" + backends = self.service.backends() + self.assertTrue(any(backend.status().pending_jobs > 0 for backend in backends)) + + def test_backend_fetch_all_qubit_properties(self): + """Check retrieving properties of all qubits""" + num_qubits = self.backend.num_qubits + qubits = list(range(num_qubits)) + qubit_properties = self.backend.qubit_properties(qubits) + self.assertEqual(len(qubit_properties), num_qubits) + for i in qubits: + self.assertIsInstance(qubit_properties[i], IBMQubitProperties) + + def test_sim_backend_options(self): + """Test simulator backend options.""" + backend = self.service.backend("ibmq_qasm_simulator") + backend.options.shots = 2048 + backend.set_options(memory=True) + inputs = backend.run(ReferenceCircuits.bell(), shots=1, foo="foo").inputs + self.assertEqual(inputs["shots"], 1) + self.assertTrue(inputs["memory"]) + self.assertEqual(inputs["foo"], "foo") + + @production_only + def test_paused_backend_warning(self): + """Test that a warning is given when running jobs on a paused backend.""" + backend = self.service.backend("ibmq_qasm_simulator") + paused_status = backend.status() + paused_status.status_msg = "internal" + backend.status = mock.MagicMock(return_value=paused_status) + with self.assertWarns(Warning): + backend.run(ReferenceCircuits.bell()) + + def test_deprecate_id_instruction(self): + """Test replacement of 'id' Instructions with 'Delay' instructions.""" + circuit_with_id = QuantumCircuit(2) + circuit_with_id.id(0) + circuit_with_id.id(0) + circuit_with_id.id(1) + + config = QasmBackendConfiguration( + basis_gates=["id"], + supported_instructions=["delay"], + dt=0.25, + backend_name="test", + backend_version="0.0", + n_qubits=1, + gates=[], + 
local=False, + simulator=False, + conditional=False, + open_pulse=False, + memory=False, + max_shots=1, + coupling_map=[], + ) + + with patch.object(self.backend, "configuration", return_value=config): + with self.assertWarnsRegex(DeprecationWarning, r"'id' instruction"): + mutated_circuit = self.backend._deprecate_id_instruction([circuit_with_id]) + self.assertEqual(mutated_circuit[0].count_ops(), {"delay": 3}) + self.assertEqual(circuit_with_id.count_ops(), {"id": 3}) + + def test_backend_wrong_instance(self): + """Test that an error is raised when retrieving a backend not in the instance.""" + backends = self.service.backends() + hgps = self.service._hgps.values() + if len(hgps) >= 2: + for hgp in hgps: + backend_names = list(hgp._backends) + for backend in backends: + if backend.name not in backend_names: + with self.assertRaises(QiskitBackendNotFoundError): + self.service.backend( + backend.name, + instance=f"{hgp._hub}/{hgp._group}/{hgp._project}", + ) + return + + def test_retrieve_backend_not_exist(self): + """Test that an error is raised when retrieving a backend that does not exist.""" + with self.assertRaises(QiskitBackendNotFoundError): + self.service.backend("nonexistent_backend") + + def test_too_many_qubits_in_circuit(self): + """Check error message if circuit contains more qubits than supported on the backend.""" + num = len(self.backend.properties().qubits) + num_qubits = num + 1 + circuit = QuantumCircuit(num_qubits, num_qubits) + with self.assertRaises(IBMBackendValueError) as err: + _ = self.backend.run(circuit) + self.assertIn( + f"Circuit contains {num_qubits} qubits, but backend has only {num}.", + str(err.exception), + ) From d0a5a5bc466276dedb26742011713da9f56cd320 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 15 Oct 2023 16:24:49 +0000 Subject: [PATCH 10/47] added test_ibm_job and made necessary changes --- test/decorators.py | 49 +++ test/fake_account_client.py | 531 +++++++++++++++++++++++++++++++ 
test/integration/test_ibm_job.py | 441 +++++++++++++++++++++++++ test/utils.py | 41 +++ 4 files changed, 1062 insertions(+) create mode 100644 test/fake_account_client.py create mode 100644 test/integration/test_ibm_job.py diff --git a/test/decorators.py b/test/decorators.py index be43830ca..3471d78be 100644 --- a/test/decorators.py +++ b/test/decorators.py @@ -156,3 +156,52 @@ class IntegrationTestDependencies: token: str channel: str url: str + + +def integration_test_setup_with_backend( + backend_name: Optional[str] = None, + simulator: Optional[bool] = True, + min_num_qubits: Optional[int] = None, + staging: Optional[bool] = True, +) -> Callable: + """Returns a decorator that retrieves the appropriate backend to use for testing. + + Either retrieves the backend via its name (if specified), or selects the least busy backend that + matches all given filter criteria. + + Args: + backend_name: The name of the backend. + simulator: If set to True, the list of suitable backends is limited to simulators. + min_num_qubits: Minimum number of qubits the backend has to have. + + Returns: + Decorator that retrieves the appropriate backend to use for testing. 
+ """ + + def _decorator(func): + @wraps(func) + @integration_test_setup() + def _wrapper(self, *args, **kwargs): + dependencies: IntegrationTestDependencies = kwargs["dependencies"] + # provider: IBMProvider = dependencies.provider + service = dependencies.service + if not staging: + raise SkipTest("Tests not supported on staging.") + if backend_name: + _backend = service.backend(name=backend_name, instance=dependencies.instance) + else: + _backend = service.least_busy( + min_num_qubits=min_num_qubits, + simulator=simulator, + instance=dependencies.instance, + ) + if not _backend: + # pylint: disable=broad-exception-raised + raise Exception("Unable to find a suitable backend.") + + kwargs["backend"] = _backend + func(self, *args, **kwargs) + + return _wrapper + + return _decorator diff --git a/test/fake_account_client.py b/test/fake_account_client.py new file mode 100644 index 000000000..5a2ec9654 --- /dev/null +++ b/test/fake_account_client.py @@ -0,0 +1,531 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Fake AccountClient.""" + +import copy + +# TODO This can probably be merged with the one in test_ibm_job_states +import time +import uuid +import warnings +from concurrent.futures import ThreadPoolExecutor, wait +from datetime import timedelta, datetime +from random import randrange +from typing import Dict, Any + +from qiskit.providers.fake_provider.backends.poughkeepsie.fake_poughkeepsie import ( + FakePoughkeepsie, +) + +from qiskit_ibm_provider.api.exceptions import ( + RequestsApiError, + UserTimeoutExceededError, +) +from qiskit_ibm_provider.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES + +VALID_RESULT_RESPONSE = { + "backend_name": "ibmqx2", + "backend_version": "1.1.1", + "job_id": "XC1323XG2", + "qobj_id": "Experiment1", + "success": True, + "results": [], +} +"""A valid job result response.""" + +VALID_RESULT = { + "header": { + "name": "Bell state", + "creg_sizes": [["c", 2]], + "clbit_labels": [["c", 0], ["c", 1]], + "qubit_labels": [["q", 0], ["q", 1]], + }, + "shots": 1024, + "status": "DONE", + "success": True, + "data": {"counts": {"0x0": 484, "0x3": 540}}, +} + +API_STATUS_TO_INT = { + ApiJobStatus.CREATING: 0, + ApiJobStatus.VALIDATING: 1, + ApiJobStatus.QUEUED: 2, + ApiJobStatus.RUNNING: 3, + ApiJobStatus.COMPLETED: 4, + ApiJobStatus.ERROR_RUNNING_JOB: 4, + ApiJobStatus.ERROR_VALIDATING_JOB: 4, + ApiJobStatus.CANCELLED: 4, +} + + +class BaseFakeJob: + """Base class for faking a remote job.""" + + _job_progress = [ + ApiJobStatus.CREATING, + ApiJobStatus.VALIDATING, + ApiJobStatus.QUEUED, + ApiJobStatus.RUNNING, + ApiJobStatus.COMPLETED, + ] + + def __init__( + self, + executor, + job_id, + qobj, + backend_name, + job_tags=None, + job_name=None, + experiment_id=None, + run_mode=None, + progress_time=0.5, + **kwargs, + ): + """Initialize a fake job.""" + self._job_id = job_id + self._status = ApiJobStatus.CREATING + self.qobj = qobj + self._result = None + self._backend_name = backend_name + self._job_tags = job_tags + self._job_name 
= job_name + self._experiment_id = experiment_id + self._creation_date = datetime.now() + self._run_mode = run_mode + self._queue_pos = kwargs.pop("queue_pos", "auto") + self._comp_time = kwargs.pop("est_completion", "auto") + self._queue_info = None + self._progress_time = progress_time + self._future = executor.submit(self._auto_progress) + + def _auto_progress(self): + """Automatically update job status.""" + for status in self._job_progress: + time.sleep(self._progress_time) + self._status = status + + if self._status == ApiJobStatus.COMPLETED: + self._save_result() + elif self._status == ApiJobStatus.ERROR_RUNNING_JOB: + self._save_bad_result() + + def _save_result(self): + new_result = copy.deepcopy(VALID_RESULT_RESPONSE) + for _ in range(len(self.qobj["experiments"])): + valid_result = copy.deepcopy(VALID_RESULT) + counts = randrange(1024) + valid_result["data"]["counts"] = {"0x0": counts, "0x3": 1024 - counts} + new_result["results"].append(valid_result) + new_result["job_id"] = self._job_id + new_result["backend_name"] = self._backend_name + self._result = new_result + + def _save_bad_result(self): + new_result = copy.deepcopy(VALID_RESULT_RESPONSE) + new_result["job_id"] = self._job_id + new_result["backend_name"] = self._backend_name + new_result["success"] = False + new_result["error"] = {"message": "Kaboom", "code": 1234} + self._result = new_result + + def data(self): + """Return job data.""" + status = self._status + data = { + "job_id": self._job_id, + "kind": "q-object", + "status": status.value, + "creation_date": self._creation_date.isoformat(), + "_backend_info": {"name": self._backend_name}, + "client_info": {"qiskit": "0.23.5"}, + } + if self._job_tags: + data["tags"] = self._job_tags.copy() + if self._job_name: + data["name"] = self._job_name + if self._experiment_id: + data["experiment_id"] = self._experiment_id + if status == ApiJobStatus.ERROR_VALIDATING_JOB: + data["error"] = {"message": "Validation failed.", "code": 1234} + if status in 
[ApiJobStatus.RUNNING] + list(API_JOB_FINAL_STATES) and self._run_mode: + data["run_mode"] = self._run_mode + + time_per_step = {} + timestamp = self._creation_date + for api_stat in API_STATUS_TO_INT: # pylint: disable=consider-using-dict-items + if API_STATUS_TO_INT[status] > API_STATUS_TO_INT[api_stat]: + time_per_step[api_stat.value] = timestamp.isoformat() + timestamp += timedelta(seconds=30) + elif status == api_stat: + time_per_step[api_stat.value] = timestamp.isoformat() + timestamp += timedelta(seconds=30) + data["time_per_step"] = time_per_step + + return data + + def _get_info_queue(self): + self._queue_info = { + "status": "PENDING_IN_QUEUE", + "position": randrange(1, 10) if self._queue_pos == "auto" else self._queue_pos, + } + if self._queue_info["position"] is None: + return self._queue_info + + est_comp_ts = ( + self._creation_date + timedelta(minutes=10 * self._queue_info["position"]) + if self._comp_time == "auto" + else self._comp_time + ) + if est_comp_ts is None: + return self._queue_info + + self._queue_info["estimated_complete_time"] = est_comp_ts.isoformat() + self._queue_info["estimated_start_time"] = (est_comp_ts - timedelta(minutes=20)).isoformat() + + return self._queue_info + + def cancel(self): + """Cancel the job.""" + self._future.cancel() + wait([self._future]) + self._status = ApiJobStatus.CANCELLED + self._result = None + + def result(self): + """Return job result.""" + if not self._result: + raise RequestsApiError("Result is not available") + return self._result + + def status_data(self): + """Return job status data, including queue info.""" + status = self._status + data = {"status": status.value} + if status == ApiJobStatus.QUEUED: + data["info_queue"] = self._get_info_queue() + return data + + def status(self): + """Return job status.""" + return self._status + + def name(self): + """Return job name.""" + return self._job_name + + +class CancelableFakeJob(BaseFakeJob): + """Fake job that can be canceled.""" + + _job_progress = 
[ + ApiJobStatus.CREATING, + ApiJobStatus.VALIDATING, + ApiJobStatus.RUNNING, + ] + + +class NewFieldFakeJob(BaseFakeJob): + """Fake job that contains additional fields.""" + + def data(self): + """Return job data.""" + data = super().data() + data["new_field"] = "foo" + return data + + +class MissingFieldFakeJob(BaseFakeJob): + """Fake job that does not contain required fields.""" + + def data(self): + """Return job data.""" + data = super().data() + del data["job_id"] + return data + + +class FailedFakeJob(BaseFakeJob): + """Fake job that fails.""" + + _job_progress = [ApiJobStatus.CREATING, ApiJobStatus.VALIDATING] + + def __init__(self, *args, **kwargs): + # failure_type can be "validation", "result", or "partial" + self._failure_type = kwargs.pop("failure_type", "validation") + self._job_progress = FailedFakeJob._job_progress.copy() + if self._failure_type == "validation": + self._job_progress.append(ApiJobStatus.ERROR_VALIDATING_JOB) + else: + self._job_progress.extend([ApiJobStatus.RUNNING, ApiJobStatus.ERROR_RUNNING_JOB]) + super().__init__(*args, **kwargs) + + def _save_bad_result(self): + if self._failure_type != "partial": + super()._save_bad_result() + return + new_result = copy.deepcopy(VALID_RESULT_RESPONSE) + new_result["job_id"] = self._job_id + new_result["backend_name"] = self._backend_name + new_result["success"] = False + # Good first result. + valid_result = copy.deepcopy(VALID_RESULT) + counts = randrange(1024) + valid_result["data"]["counts"] = {"0x0": counts, "0x3": 1024 - counts} + new_result["results"].append(valid_result) + + for _ in range(1, len(self.qobj["experiments"])): + valid_result = copy.deepcopy(VALID_RESULT) + valid_result["success"] = False + valid_result["status"] = "This circuit failed." 
+ new_result["results"].append(valid_result) + self._result = new_result + + +class FixedStatusFakeJob(BaseFakeJob): + """Fake job that stays in a specific status.""" + + def __init__(self, *args, **kwargs): + self._fixed_status = kwargs.pop("fixed_status") + super().__init__(*args, **kwargs) + + def _auto_progress(self): + """Automatically update job status.""" + for status in self._job_progress: + time.sleep(0.5) + self._status = status + if status == self._fixed_status: + break + + if self._status == ApiJobStatus.COMPLETED: + self._save_result() + + +class BaseFakeAccountClient: + """Base class for faking the AccountClient.""" + + def __init__( + self, + job_limit=-1, + job_class=BaseFakeJob, + job_kwargs=None, + props_count=None, + queue_positions=None, + est_completion=None, + run_mode=None, + ): + """Initialize a fake account client.""" + self._jobs = {} + self._results_retrieved = set() + self._job_limit = job_limit + self._executor = ThreadPoolExecutor() + self._job_class = job_class + if isinstance(self._job_class, list): + self._job_class.reverse() + self._job_kwargs = job_kwargs or {} + self._props_count = props_count or 0 + self._props_date = datetime.now().isoformat() + self._queue_positions = queue_positions.copy() if queue_positions else [] + self._queue_positions.reverse() + self._est_completion = est_completion.copy() if est_completion else [] + self._est_completion.reverse() + self._run_mode = run_mode + self._default_job_class = BaseFakeJob + + def list_jobs(self, limit, skip, descending=True, extra_filter=None): + """Return a list of jobs.""" + # pylint: disable=unused-argument + extra_filter = extra_filter or {} + if all(fil in extra_filter for fil in ["creationDate", "id"]): + return {} + tag = extra_filter.get("tags", None) + all_job_data = [] + for job in list(self._jobs.values())[skip : skip + limit]: + job_data = job.data() + if tag is None or tag in job_data["tags"]: + all_job_data.append(job_data) + if not descending: + 
all_job_data.reverse() + return all_job_data + + def job_submit( + self, + backend_name, + qobj_dict, + job_name, + job_tags, + experiment_id, + *_args, + **_kwargs, + ): + """Submit a Qobj to a device.""" + if self._job_limit != -1 and self._unfinished_jobs() >= self._job_limit: + raise RequestsApiError( + "400 Client Error: Bad Request for url: . Reached " + "maximum number of concurrent jobs, Error code: 3458." + ) + + new_job_id = uuid.uuid4().hex + if isinstance(self._job_class, list): + job_class = self._job_class.pop() if self._job_class else self._default_job_class + else: + job_class = self._job_class + job_kwargs = copy.copy(self._job_kwargs) + if self._queue_positions: + job_kwargs["queue_pos"] = self._queue_positions.pop() + if self._est_completion: + job_kwargs["est_completion"] = self._est_completion.pop() + + run_mode = self._run_mode + if run_mode == "dedicated_once": + run_mode = "dedicated" + self._run_mode = "fairshare" + + new_job = job_class( + executor=self._executor, + job_id=new_job_id, + qobj=qobj_dict, + backend_name=backend_name, + job_tags=job_tags, + job_name=job_name, + experiment_id=experiment_id, + run_mode=run_mode, + **job_kwargs, + ) + self._jobs[new_job_id] = new_job + return new_job.data() + + def job_download_qobj(self, job_id, *_args, **_kwargs): + """Retrieve and return a Qobj.""" + return copy.deepcopy(self._get_job(job_id).qobj) + + def job_result(self, job_id, *_args, **_kwargs): + """Return a random job result.""" + if job_id in self._results_retrieved: + warnings.warn(f"Result already retrieved for job {job_id}") + self._results_retrieved.add(job_id) + return self._get_job(job_id).result() + + def job_get(self, job_id, *_args, **_kwargs): + """Return information about a job.""" + return self._get_job(job_id).data() + + def job_status(self, job_id, *_args, **_kwargs): + """Return the status of a job.""" + return self._get_job(job_id).status_data() + + def job_final_status(self, job_id, *_args, **_kwargs): + """Wait until 
the job progress to a final state.""" + job = self._get_job(job_id) + status = job.status() + while status not in API_JOB_FINAL_STATES: + time.sleep(0.5) + status_data = job.status_data() + status = ApiJobStatus(status_data["status"]) + if _kwargs.get("status_queue", None): + data = {"status": status.value} + if status is ApiJobStatus.QUEUED: + data["infoQueue"] = {"status": "PENDING_IN_QUEUE", "position": 1} + _kwargs["status_queue"].put(status_data) + return self.job_status(job_id) + + def job_properties(self, *_args, **_kwargs): + """Return the backend properties of a job.""" + props = FakePoughkeepsie().properties().to_dict() + if self._props_count > 0: + self._props_count -= 1 + new_dt = datetime.now() + timedelta(hours=randrange(300)) + self._props_date = new_dt.isoformat() + props["last_update_date"] = self._props_date + return props + + def job_cancel(self, job_id, *_args, **_kwargs): + """Submit a request for cancelling a job.""" + self._get_job(job_id).cancel() + return {"cancelled": True} + + def backend_job_limit(self, *_args, **_kwargs): + """Return the job limit for the backend.""" + return {"maximumJobs": self._job_limit, "runningJobs": self._unfinished_jobs()} + + def job_update_attribute(self, job_id, attr_name, attr_value, *_args, **_kwargs): + """Update the specified job attribute with the given value.""" + job = self._get_job(job_id) + if attr_name == "name": + job._job_name = attr_value + if attr_name == "tags": + job._job_tags = attr_value.copy() + return {attr_name: attr_value} + + def backend_status(self, backend_name: str) -> Dict[str, Any]: + """Return the status of the backend.""" + return { + "backend_name": backend_name, + "backend_version": "0.0.0", + "operational": True, + "pending_jobs": 0, + "status_msg": "active", + } + + def tear_down(self): + """Clean up job threads.""" + for job_id in list(self._jobs.keys()): + try: + self._jobs[job_id].cancel() + except KeyError: + pass + + def _unfinished_jobs(self): + """Return the number of 
unfinished jobs.""" + return sum(1 for job in self._jobs.values() if job.status() not in API_JOB_FINAL_STATES) + + def _get_job(self, job_id): + """Return job if found.""" + if job_id not in self._jobs: + raise RequestsApiError("Job not found. Error code: 3250.") + return self._jobs[job_id] + + +class JobSubmitFailClient(BaseFakeAccountClient): + """Fake AccountClient used to fail a job submit.""" + + def __init__(self, failed_indexes): + """JobSubmitFailClient constructor.""" + if not isinstance(failed_indexes, list): + failed_indexes = [failed_indexes] + self._failed_indexes = failed_indexes + self._job_count = -1 + super().__init__() + + def job_submit(self, *_args, **_kwargs): # pylint: disable=arguments-differ + """Failing job submit.""" + self._job_count += 1 + if self._job_count in self._failed_indexes: + raise RequestsApiError("Job submit failed!") + return super().job_submit(*_args, **_kwargs) + + +class JobTimeoutClient(BaseFakeAccountClient): + """Fake AccountClient used to time out while waiting for a job's final status.""" + + def __init__(self, *args, max_fail_count=-1, **kwargs): + """JobTimeoutClient constructor.""" + self._fail_count = max_fail_count + super().__init__(*args, **kwargs) + + def job_final_status(self, job_id, *_args, **_kwargs): + """Wait until the job progresses to a final state.""" + if self._fail_count != 0: + self._fail_count -= 1 + raise UserTimeoutExceededError("Job timed out!") + return super().job_final_status(job_id, *_args, **_kwargs) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py new file mode 100644 index 000000000..bbd3fb784 --- /dev/null +++ b/test/integration/test_ibm_job.py @@ -0,0 +1,441 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""IBMJob Test.""" +import copy +import time +from datetime import datetime, timedelta +from threading import Thread, Event +from unittest import SkipTest, mock +from unittest import skip + +from dateutil import tz +from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister +from qiskit.compiler import transpile +from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_provider.api.rest.job import Job as RestJob +from qiskit_ibm_provider.exceptions import IBMBackendApiError + +from qiskit_ibm_runtime import IBMBackend, RuntimeJob +from qiskit_ibm_runtime.api.exceptions import RequestsApiError +from qiskit_ibm_runtime.exceptions import RuntimeJobTimeoutError +from ..decorators import ( + IntegrationTestDependencies, + integration_test_setup_with_backend, + production_only, +) +from ..fake_account_client import BaseFakeAccountClient, CancelableFakeJob +from ..ibm_test_case import IBMTestCase +from ..utils import ( + most_busy_backend, + cancel_job_safe, + submit_and_cancel, +) + + +class TestIBMJob(IBMTestCase): + """Test ibm_job module.""" + + sim_backend: IBMBackend + real_device_backend: IBMBackend + bell = QuantumCircuit + sim_job: RuntimeJob + last_month: datetime + + @classmethod + @integration_test_setup_with_backend(simulator=False, min_num_qubits=2) + def setUpClass(cls, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: + """Initial class level setup.""" + # pylint: disable=arguments-differ + super().setUpClass() + cls.service = dependencies.service + cls.sim_backend = dependencies.service.backend( + "ibmq_qasm_simulator", instance=dependencies.instance + ) + cls.real_device_backend = backend + cls.dependencies = dependencies + cls.bell 
= transpile(ReferenceCircuits.bell(), cls.sim_backend) + cls.sim_job = cls.sim_backend.run(cls.bell) + cls.last_month = datetime.now() - timedelta(days=30) + + def test_run_multiple_simulator(self): + """Test running multiple jobs in a simulator.""" + num_qubits = 16 + quantum_register = QuantumRegister(num_qubits, "qr") + classical_register = ClassicalRegister(num_qubits, "cr") + quantum_circuit = QuantumCircuit(quantum_register, classical_register) + for i in range(num_qubits - 1): + quantum_circuit.cx(quantum_register[i], quantum_register[i + 1]) + quantum_circuit.measure(quantum_register, classical_register) + num_jobs = 4 + job_array = [ + self.sim_backend.run(transpile([quantum_circuit] * 20), shots=2048) + for _ in range(num_jobs) + ] + timeout = 30 + start_time = time.time() + while True: + check = sum(job.status() is JobStatus.RUNNING for job in job_array) + if check >= 2: + self.log.info("found %d simultaneous jobs", check) + break + if all((job.status() is JobStatus.DONE for job in job_array)): + # done too soon? don't generate error + self.log.warning("all jobs completed before simultaneous jobs could be detected") + break + for job in job_array: + self.log.info( + "%s %s %s %s", + job.status(), + job.status() is JobStatus.RUNNING, + check, + job.job_id(), + ) + self.log.info("- %s", str(time.time() - start_time)) + if time.time() - start_time > timeout and self.sim_backend.status().pending_jobs <= 4: + raise TimeoutError( + "Failed to see multiple running jobs after " "{0} seconds.".format(timeout) + ) + time.sleep(0.2) + + result_array = [job.result() for job in job_array] + self.log.info("got back all job results") + # Ensure all jobs have finished. + self.assertTrue(all((job.status() is JobStatus.DONE for job in job_array))) + self.assertTrue(all((result.success for result in result_array))) + + # Ensure job ids are unique. 
+ job_ids = [job.job_id() for job in job_array] + self.assertEqual(sorted(job_ids), sorted(list(set(job_ids)))) + + def test_cancel(self): + """Test job cancellation.""" + # Find the most busy backend + backend = most_busy_backend(self.service, instance=self.dependencies.instance) + submit_and_cancel(backend, self.log) + + def test_retrieve_jobs(self): + """Test retrieving jobs.""" + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=5, + skip=0, + created_after=self.last_month, + ) + self.assertLessEqual(len(job_list), 5) + for job in job_list: + self.assertTrue(isinstance(job.job_id(), str)) + + def test_retrieve_completed_jobs(self): + """Test retrieving jobs with the completed filter.""" + completed_job_list = self.service.jobs( + backend_name=self.sim_backend.name, limit=3, pending=False + ) + for job in completed_job_list: + self.assertTrue(job.status() in [JobStatus.DONE, JobStatus.CANCELLED, JobStatus.ERROR]) + + def test_retrieve_pending_jobs(self): + """Test retrieving jobs with the pending filter.""" + pending_job_list = self.service.jobs( + backend_name=self.sim_backend.name, limit=3, pending=True + ) + for job in pending_job_list: + self.assertTrue(job.status() in [JobStatus.QUEUED, JobStatus.RUNNING]) + + def test_retrieve_job(self): + """Test retrieving a single job.""" + retrieved_job = self.service.job(self.sim_job.job_id()) + print(retrieved_job.result()._metadata) + self.assertEqual(self.sim_job.job_id(), retrieved_job.job_id()) + self.assertEqual(self.sim_job.circuits(), retrieved_job.circuits()) + self.assertEqual(self.sim_job.result().get_counts(), retrieved_job.result().get_counts()) + + def test_retrieve_job_uses_appropriate_backend(self): + """Test that retrieved jobs come from their appropriate backend.""" + backend_1 = self.real_device_backend + # Get a second backend. 
+ backend_2 = None + service = self.real_device_backend.service + for my_backend in service.backends(): + if my_backend.status().operational and my_backend.name != backend_1.name: + backend_2 = my_backend + break + if not backend_2: + raise SkipTest("Skipping test that requires multiple backends") + + job_1 = backend_1.run(transpile(ReferenceCircuits.bell())) + job_2 = backend_2.run(transpile(ReferenceCircuits.bell())) + + # test a retrieved job's backend is the same as the queried backend + self.assertEqual(service.job(job_1.job_id()).backend().name, backend_1.name) + self.assertEqual(service.job(job_2.job_id()).backend().name, backend_2.name) + + # Cleanup + for job in [job_1, job_2]: + cancel_job_safe(job, self.log) + + def test_retrieve_job_error(self): + """Test retrieving an invalid job.""" + self.assertRaises(IBMBackendApiError, self.service.job, "BAD_JOB_ID") + + def test_retrieve_jobs_status(self): + """Test retrieving jobs filtered by status.""" + backend_jobs = self.service.jobs( + backend_name=self.sim_backend.name, + limit=5, + skip=5, + pending=False, + created_after=self.last_month, + ) + self.assertTrue(backend_jobs) + + for job in backend_jobs: + self.assertTrue( + job.status() in JOB_FINAL_STATES, + "Job {} has status {} when it should be DONE, CANCELLED, or ERROR".format( + job.job_id(), job.status() + ), + ) + + def test_retrieve_jobs_created_after(self): + """Test retrieving jobs created after a specified datetime.""" + past_month = datetime.now() - timedelta(days=30) + # Add local tz in order to compare to `creation_date` which is tz aware. 
+ past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal()) + + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=2, + created_after=past_month, + ) + self.assertTrue(job_list) + for job in job_list: + self.assertGreaterEqual( + job.creation_date, + past_month_tz_aware, + "job {} creation date {} not within range".format(job.job_id(), job.creation_date), + ) + + def test_retrieve_jobs_created_before(self): + """Test retrieving jobs created before a specified datetime.""" + past_month = datetime.now() - timedelta(days=30) + # Add local tz in order to compare to `creation_date` which is tz aware. + past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal()) + + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=2, + created_before=past_month, + ) + self.assertTrue(job_list) + for job in job_list: + self.assertLessEqual( + job.creation_date, + past_month_tz_aware, + "job {} creation date {} not within range".format(job.job_id(), job.creation_date), + ) + + def test_retrieve_jobs_between_datetimes(self): + """Test retrieving jobs created between two specified datetimes.""" + date_today = datetime.now() + past_month = date_today - timedelta(30) + past_two_month = date_today - timedelta(60) + + # Add local tz in order to compare to `creation_date` which is tz aware. 
+ past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal()) + past_two_month_tz_aware = past_two_month.replace(tzinfo=tz.tzlocal()) + + with self.subTest(): + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=2, + created_after=past_two_month, + created_before=past_month, + ) + self.assertTrue(job_list) + for job in job_list: + self.assertTrue( + (past_two_month_tz_aware <= job.creation_date <= past_month_tz_aware), + "job {} creation date {} not within range".format( + job.job_id(), job.creation_date + ), + ) + + def test_retrieve_jobs_order(self): + """Test retrieving jobs with different orders.""" + job = self.sim_backend.run(self.bell) + job.wait_for_final_state() + newest_jobs = self.service.jobs( + limit=10, + pending=False, + descending=True, + created_after=self.last_month, + ) + self.assertIn(job.job_id(), [rjob.job_id() for rjob in newest_jobs]) + + oldest_jobs = self.service.jobs( + limit=10, + pending=False, + descending=False, + created_after=self.last_month, + ) + self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs]) + + def test_refresh_job_result(self): + """Test re-retrieving job result via refresh.""" + result = self.sim_job.result() + + # Save original cached results. + cached_result = copy.deepcopy(result.to_dict()) + self.assertTrue(cached_result) + + # Modify cached results. + result.results[0].header.name = "modified_result" + self.assertNotEqual(cached_result, result.to_dict()) + self.assertEqual(result.results[0].header.name, "modified_result") + + # Re-retrieve result via refresh. 
+ result = self.sim_job.result(refresh=True) + self.assertDictEqual(cached_result, result.to_dict()) + self.assertNotEqual(result.results[0].header.name, "modified_result") + + @skip("TODO update test case") + def test_wait_for_final_state(self): + """Test waiting for job to reach final state.""" + + def final_state_callback(c_job_id, c_status, c_job, **kwargs): + """Job status query callback function.""" + self.assertEqual(c_job_id, job.job_id()) + self.assertNotIn(c_status, JOB_FINAL_STATES) + self.assertEqual(c_job.job_id(), job.job_id()) + self.assertIn("queue_info", kwargs) + + queue_info = kwargs.pop("queue_info", None) + callback_info["called"] = True + + if wait_time is None: + # Look for status change. + data = {"status": c_status, "queue_info": queue_info} + self.assertNotEqual(data, callback_info["last data"]) + callback_info["last data"] = data + else: + # Check called within wait time. + if callback_info["last call time"] and job._status not in JOB_FINAL_STATES: + self.assertAlmostEqual( + time.time() - callback_info["last call time"], + wait_time, + delta=0.2, + ) + callback_info["last call time"] = time.time() + + def job_canceller(job_, exit_event, wait): + exit_event.wait(wait) + cancel_job_safe(job_, self.log) + + wait_args = [2, None] + + saved_api = self.sim_backend._api_client + try: + self.sim_backend._api_client = BaseFakeAccountClient(job_class=CancelableFakeJob) + for wait_time in wait_args: + with self.subTest(wait_time=wait_time): + # Put callback data in a dictionary to make it mutable. + callback_info = { + "called": False, + "last call time": 0.0, + "last data": {}, + } + cancel_event = Event() + job = self.sim_backend.run(self.bell) + # Cancel the job after a while. 
+ Thread(target=job_canceller, args=(job, cancel_event, 7), daemon=True).start() + try: + job.wait_for_final_state( + timeout=10, wait=wait_time, callback=final_state_callback + ) + self.assertTrue(job.in_final_state()) + self.assertTrue(callback_info["called"]) + cancel_event.set() + finally: + # Ensure all threads ended. + for thread in job._executor._threads: + thread.join(0.1) + finally: + self.sim_backend._api_client = saved_api + + def test_wait_for_final_state_timeout(self): + """Test waiting for job to reach final state times out.""" + backend = most_busy_backend(TestIBMJob.service, instance=self.dependencies.instance) + job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend)) + try: + self.assertRaises(RuntimeJobTimeoutError, job.wait_for_final_state, timeout=0.1) + finally: + # Ensure all threads ended. + for thread in job._executor._threads: + thread.join(0.1) + cancel_job_safe(job, self.log) + + @skip("not supported by api") + def test_job_submit_partial_fail(self): + """Test job submit partial fail.""" + job_id = [] + + def _side_effect(self, *args, **kwargs): + # pylint: disable=unused-argument + job_id.append(self.job_id) + raise RequestsApiError("Kaboom") + + fail_points = ["put_object_storage", "callback_upload"] + + for fail_method in fail_points: + with self.subTest(fail_method=fail_method): + with mock.patch.object( + RestJob, fail_method, side_effect=_side_effect, autospec=True + ): + with self.assertRaises(IBMBackendApiError): + self.sim_backend.run(self.bell) + + self.assertTrue(job_id, "Job ID not saved.") + job = self.service.job(job_id[0]) + self.assertEqual( + job.status(), + JobStatus.CANCELLED, + f"Job {job.job_id()} status is {job.status()} and not cancelled!", + ) + + def test_job_circuits(self): + """Test job circuits.""" + self.assertEqual(str(self.bell), str(self.sim_job.circuits()[0])) + + def test_job_backend_options(self): + """Test job backend options.""" + run_config = {"shots": 2048, "memory": True} + job = 
self.sim_backend.run(self.bell, **run_config) + self.assertLessEqual(run_config.items(), job.backend_options().items()) + + def test_job_header(self): + """Test job header.""" + custom_header = {"test": "test_job_header"} + job = self.sim_backend.run(self.bell, header=custom_header) + self.assertEqual(custom_header["test"], job.header()["test"]) + self.assertLessEqual(custom_header.items(), job.header().items()) + + def test_lazy_loading_params(self): + """Test lazy loading job params.""" + job = self.sim_backend.run(self.bell) + job.wait_for_final_state() + + rjob = self.service.job(job.job_id()) + self.assertFalse(rjob._params) + self.assertTrue(rjob.circuits) diff --git a/test/utils.py b/test/utils.py index 284ec0220..10cb51f7c 100644 --- a/test/utils.py +++ b/test/utils.py @@ -21,6 +21,8 @@ from datetime import datetime from qiskit.circuit import QuantumCircuit +from qiskit.compiler import transpile +from qiskit.test.reference_circuits import ReferenceCircuits from qiskit.providers.jobstatus import JOB_FINAL_STATES, JobStatus from qiskit.providers.exceptions import QiskitBackendNotFoundError from qiskit.providers.models import BackendStatus, BackendProperties @@ -57,6 +59,30 @@ def setup_test_logging(logger: logging.Logger, filename: str) -> None: logger.setLevel(os.getenv("LOG_LEVEL", "DEBUG")) +def most_busy_backend( + service: QiskitRuntimeService, + instance: Optional[str] = None, +) -> IBMBackend: + """Return the most busy backend for the provider given. + + Return the most busy available backend for those that + have a `pending_jobs` in their `status`. Backends such as + local backends that do not have this are not considered. + + Args: + service: Qiskit Runtime Service. + instance: The instance in the hub/group/project format. + + Returns: + The most busy backend. 
+ """ + backends = service.backends(simulator=False, operational=True, instance=instance) + return max( + (b for b in backends if b.configuration().n_qubits >= 5), + key=lambda b: b.status().pending_jobs, + ) + + def get_large_circuit(backend: IBMBackend) -> QuantumCircuit: """Return a slightly larger circuit that would run a bit longer. @@ -254,3 +280,18 @@ def get_mocked_backend(name: str = "ibm_gotham") -> Any: mock_backend.name = name mock_backend._instance = None return mock_backend + + +def submit_and_cancel(backend: IBMBackend, logger: logging.Logger) -> RuntimeJob: + """Submit and cancel a job. + + Args: + backend: Backend to submit the job to. + + Returns: + Cancelled job. + """ + circuit = transpile(ReferenceCircuits.bell(), backend=backend) + job = backend.run(circuit) + cancel_job_safe(job, logger=logger) + return job From 8ca95c53048c3069ba4119ca39fdb45cada52792 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 16 Oct 2023 12:28:53 +0000 Subject: [PATCH 11/47] Fixed several tests --- test/integration/test_ibm_job.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index bbd3fb784..7598accda 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -33,7 +33,6 @@ from ..decorators import ( IntegrationTestDependencies, integration_test_setup_with_backend, - production_only, ) from ..fake_account_client import BaseFakeAccountClient, CancelableFakeJob from ..ibm_test_case import IBMTestCase @@ -156,9 +155,8 @@ def test_retrieve_pending_jobs(self): def test_retrieve_job(self): """Test retrieving a single job.""" retrieved_job = self.service.job(self.sim_job.job_id()) - print(retrieved_job.result()._metadata) self.assertEqual(self.sim_job.job_id(), retrieved_job.job_id()) - self.assertEqual(self.sim_job.circuits(), retrieved_job.circuits()) + self.assertEqual(self.sim_job.inputs["circuits"], retrieved_job.inputs["circuits"]) 
self.assertEqual(self.sim_job.result().get_counts(), retrieved_job.result().get_counts()) def test_retrieve_job_uses_appropriate_backend(self): @@ -292,6 +290,8 @@ def test_retrieve_jobs_order(self): ) self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs]) + + @skip("how do we support refresh") def test_refresh_job_result(self): """Test re-retrieving job result via refresh.""" result = self.sim_job.result() @@ -416,7 +416,7 @@ def _side_effect(self, *args, **kwargs): def test_job_circuits(self): """Test job circuits.""" - self.assertEqual(str(self.bell), str(self.sim_job.circuits()[0])) + self.assertEqual(str(self.bell), str(self.sim_job.inputs["circuits"][0])) def test_job_backend_options(self): """Test job backend options.""" @@ -428,8 +428,8 @@ def test_job_header(self): """Test job header.""" custom_header = {"test": "test_job_header"} job = self.sim_backend.run(self.bell, header=custom_header) - self.assertEqual(custom_header["test"], job.header()["test"]) - self.assertLessEqual(custom_header.items(), job.header().items()) + self.assertEqual(custom_header["test"], job.inputs["header"]["test"]) + self.assertLessEqual(custom_header.items(), job.inputs["header"].items()) def test_lazy_loading_params(self): """Test lazy loading job params.""" @@ -438,4 +438,4 @@ def test_lazy_loading_params(self): rjob = self.service.job(job.job_id()) self.assertFalse(rjob._params) - self.assertTrue(rjob.circuits) + self.assertTrue(rjob.inputs["circuits"]) From b17413b5bb5c9ce64828ed8bc8b005feee07c8b0 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 16 Oct 2023 12:38:01 +0000 Subject: [PATCH 12/47] Fixed missing job methods in test --- test/integration/test_ibm_job.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index 7598accda..984432f44 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -290,7 +290,6 @@ def 
test_retrieve_jobs_order(self): ) self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs]) - @skip("how do we support refresh") def test_refresh_job_result(self): """Test re-retrieving job result via refresh.""" @@ -418,11 +417,11 @@ def test_job_circuits(self): """Test job circuits.""" self.assertEqual(str(self.bell), str(self.sim_job.inputs["circuits"][0])) - def test_job_backend_options(self): - """Test job backend options.""" + def test_job_options(self): + """Test job options.""" run_config = {"shots": 2048, "memory": True} job = self.sim_backend.run(self.bell, **run_config) - self.assertLessEqual(run_config.items(), job.backend_options().items()) + self.assertLessEqual(run_config.items(), job.inputs.items()) def test_job_header(self): """Test job header.""" From 2a95e9bc7f97b3affd3b80d179025e9ea4a93ead Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 16 Oct 2023 12:54:47 +0000 Subject: [PATCH 13/47] Changed exception type --- test/integration/test_ibm_job.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index 984432f44..4a0f08c10 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -29,7 +29,7 @@ from qiskit_ibm_runtime import IBMBackend, RuntimeJob from qiskit_ibm_runtime.api.exceptions import RequestsApiError -from qiskit_ibm_runtime.exceptions import RuntimeJobTimeoutError +from qiskit_ibm_runtime.exceptions import RuntimeJobTimeoutError, RuntimeJobNotFound from ..decorators import ( IntegrationTestDependencies, integration_test_setup_with_backend, @@ -185,7 +185,7 @@ def test_retrieve_job_uses_appropriate_backend(self): def test_retrieve_job_error(self): """Test retrieving an invalid job.""" - self.assertRaises(IBMBackendApiError, self.service.job, "BAD_JOB_ID") + self.assertRaises(RuntimeJobNotFound, self.service.job, "BAD_JOB_ID") def test_retrieve_jobs_status(self): """Test retrieving jobs filtered by 
status.""" From 40917a43a449a7ac10affbd7389b9e3b90633892 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 16 Oct 2023 16:08:50 +0000 Subject: [PATCH 14/47] Added test_ibm_job_attributes.py --- qiskit_ibm_runtime/qiskit_runtime_service.py | 4 ++ test/utils.py | 56 +++++++++++++++++++- 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/qiskit_ibm_runtime/qiskit_runtime_service.py b/qiskit_ibm_runtime/qiskit_runtime_service.py index 2a9abd080..f8f3264f9 100644 --- a/qiskit_ibm_runtime/qiskit_runtime_service.py +++ b/qiskit_ibm_runtime/qiskit_runtime_service.py @@ -30,8 +30,10 @@ ) from qiskit_ibm_provider.proxies import ProxyConfiguration +from qiskit_ibm_provider.utils import validate_job_tags from qiskit_ibm_provider.utils.hgp import to_instance_format, from_instance_format from qiskit_ibm_provider.utils.backend_decoder import configuration_from_server_data +from qiskit_ibm_provider.exceptions import IBMBackendValueError from qiskit_ibm_runtime import ibm_backend from .accounts import AccountManager, Account, ChannelType @@ -1340,6 +1342,8 @@ def jobs( "The 'instance' keyword is only supported for ``ibm_quantum`` runtime." 
) hub, group, project = from_instance_format(instance) + if job_tags: + validate_job_tags(job_tags, IBMBackendValueError) job_responses = [] # type: List[Dict[str, Any]] current_page_limit = limit or 20 diff --git a/test/utils.py b/test/utils.py index 10cb51f7c..438197825 100644 --- a/test/utils.py +++ b/test/utils.py @@ -21,7 +21,8 @@ from datetime import datetime from qiskit.circuit import QuantumCircuit -from qiskit.compiler import transpile +from qiskit.compiler import transpile, assemble +from qiskit.qobj import QasmQobj from qiskit.test.reference_circuits import ReferenceCircuits from qiskit.providers.jobstatus import JOB_FINAL_STATES, JobStatus from qiskit.providers.exceptions import QiskitBackendNotFoundError @@ -295,3 +296,56 @@ def submit_and_cancel(backend: IBMBackend, logger: logging.Logger) -> RuntimeJob job = backend.run(circuit) cancel_job_safe(job, logger=logger) return job + + +def submit_job_bad_shots(backend: IBMBackend) -> RuntimeJob: + """Submit a job that will fail due to too many shots. + + Args: + backend: Backend to submit the job to. + + Returns: + Submitted job. + """ + qobj = bell_in_qobj(backend=backend) + # Modify the number of shots to be an invalid amount. + qobj.config.shots = backend.configuration().max_shots + 10000 + job_to_fail = backend._submit_job(qobj) + return job_to_fail + + +def submit_job_one_bad_instr(backend: IBMBackend) -> RuntimeJob: + """Submit a job that contains one good and one bad instruction. + + Args: + backend: Backend to submit the job to. + + Returns: + Submitted job. + """ + qc_new = transpile(ReferenceCircuits.bell(), backend) + if backend.configuration().simulator: + # Specify method so it doesn't fail at method selection. 
+ qobj = assemble([qc_new] * 2, backend=backend, method="statevector") + else: + qobj = assemble([qc_new] * 2, backend=backend) + qobj.experiments[1].instructions[1].name = "bad_instruction" + job = backend._submit_job(qobj) + return job + + +def bell_in_qobj(backend: IBMBackend, shots: int = 1024) -> QasmQobj: + """Return a bell circuit in Qobj format. + + Args: + backend: Backend to use for transpiling the circuit. + shots: Number of shots. + + Returns: + A bell circuit in Qobj format. + """ + return assemble( + transpile(ReferenceCircuits.bell(), backend=backend), + backend=backend, + shots=shots, + ) From d1171517805086ee0ed85e8a38de4ef3ca013f1f Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 17 Oct 2023 08:07:49 +0000 Subject: [PATCH 15/47] Added test_ibm_job_attributes.py that was missed in previous commit --- test/integration/test_ibm_job_attributes.py | 339 ++++++++++++++++++++ 1 file changed, 339 insertions(+) create mode 100644 test/integration/test_ibm_job_attributes.py diff --git a/test/integration/test_ibm_job_attributes.py b/test/integration/test_ibm_job_attributes.py new file mode 100644 index 000000000..864478f1a --- /dev/null +++ b/test/integration/test_ibm_job_attributes.py @@ -0,0 +1,339 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Test IBMJob attributes.""" + +import re +import time +import uuid +from datetime import datetime, timedelta +from unittest import mock, skip + +from dateutil import tz +from qiskit.compiler import transpile +from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES +from qiskit import QuantumCircuit +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_provider.api.clients.runtime import RuntimeClient +from qiskit_ibm_provider.exceptions import ( + IBMBackendValueError, +) +from qiskit_ibm_provider.job.exceptions import IBMJobFailureError + +from qiskit_ibm_runtime import IBMBackend, RuntimeJob +from ..decorators import ( + IntegrationTestDependencies, + integration_test_setup, +) +from ..fake_account_client import BaseFakeAccountClient +from ..ibm_test_case import IBMTestCase +from ..utils import ( + most_busy_backend, + cancel_job_safe, + submit_job_bad_shots, + submit_job_one_bad_instr, +) + + +class TestIBMJobAttributes(IBMTestCase): + """Test IBMJob instance attributes.""" + + sim_backend: IBMBackend + bell: QuantumCircuit + sim_job: RuntimeJob + last_week: datetime + + @classmethod + @integration_test_setup() + def setUpClass(cls, dependencies: IntegrationTestDependencies) -> None: + """Initial class level setup.""" + # pylint: disable=arguments-differ + super().setUpClass() + cls.dependencies = dependencies + cls.service = dependencies.service + cls.sim_backend = dependencies.service.backend( + "ibmq_qasm_simulator", instance=dependencies.instance + ) + cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend) + cls.sim_job = cls.sim_backend.run(cls.bell) + cls.last_week = datetime.now() - timedelta(days=7) + + def setUp(self): + """Initial test setup.""" + super().setUp() + self._qc = ReferenceCircuits.bell() + + def test_job_id(self): + """Test getting a job ID.""" + self.assertTrue(self.sim_job.job_id() is not None) + + def test_get_backend_name(self): + """Test getting a backend name.""" + 
self.assertTrue(self.sim_job.backend().name == self.sim_backend.name) + + @skip("Skip until aer issue 1214 is fixed") + def test_error_message_simulator(self): + """Test retrieving job error messages from a simulator backend.""" + job = submit_job_one_bad_instr(self.sim_backend) + with self.assertRaises(IBMJobFailureError) as err_cm: + job.result() + self.assertNotIn("bad_instruction", err_cm.exception.message) + + message = job.error_message() + self.assertIn("Experiment 1: ERROR", message) + + r_message = self.service.job(job.job_id()).error_message() + self.assertIn("Experiment 1: ERROR", r_message) + + @skip("not supported by api") + def test_error_message_validation(self): + """Test retrieving job error message for a validation error.""" + job = submit_job_bad_shots(self.sim_backend) + rjob = self.service.job(job.job_id()) + + for q_job, partial in [(job, False), (rjob, True)]: + with self.subTest(partial=partial): + with self.assertRaises(IBMJobFailureError) as err_cm: + q_job.result(partial=partial) + for msg in (err_cm.exception.message, q_job.error_message()): + self.assertNotIn("Unknown", msg) + self.assertIsNotNone(re.search(r"Error code: [0-9]{4}\.$", msg), msg) + + self.assertEqual(job.error_message(), rjob.error_message()) + + @skip("time_per_step not supported by the api") + def test_refresh(self): + """Test refreshing job data.""" + self.sim_job._wait_for_completion() + if "COMPLETED" not in self.sim_job.time_per_step(): + self.sim_job.refresh() + + rjob = self.service.job(self.sim_job.job_id()) + rjob.refresh() + self.assertEqual(rjob._time_per_step, self.sim_job._time_per_step) + + def test_job_creation_date(self): + """Test retrieving creation date, while ensuring it is in local time.""" + # datetime, before running the job, in local time. + start_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) - timedelta(seconds=1) + job = self.sim_backend.run(self.bell) + job.result() + # datetime, after the job is done running, in local time. 
+ end_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) + timedelta(seconds=1) + + self.assertTrue( + (start_datetime <= job.creation_date <= end_datetime), + "job creation date {} is not " + "between the start date time {} and end date time {}".format( + job.creation_date, start_datetime, end_datetime + ), + ) + + @skip("time_per_step supported in provider but not in runtime") + def test_time_per_step(self): + """Test retrieving time per step, while ensuring the date times are in local time.""" + # datetime, before running the job, in local time. + start_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) - timedelta(seconds=1) + job = self.sim_backend.run(self.bell) + job.result() + # datetime, after the job is done running, in local time. + end_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) + timedelta(seconds=1) + + self.assertTrue(job.time_per_step()) + for step, time_data in job.time_per_step().items(): + self.assertTrue( + (start_datetime <= time_data <= end_datetime), + 'job time step "{}={}" is not ' + "between the start date time {} and end date time {}".format( + step, time_data, start_datetime, end_datetime + ), + ) + + rjob = self.service.job(job.job_id()) + self.assertTrue(rjob.time_per_step()) + + @skip("need attributes not supported") + def test_new_job_attributes(self): + """Test job with new attributes.""" + + def _mocked__api_job_submit(*args, **kwargs): + submit_info = original_submit(*args, **kwargs) + submit_info.update({"batman": "bruce"}) + return submit_info + + original_submit = self.sim_backend._api_client.job_submit + with mock.patch.object(RuntimeClient, "job_submit", side_effect=_mocked__api_job_submit): + job = self.sim_backend.run(self.bell) + + self.assertEqual(job.batman_, "bruce") + + @skip("queue_info supported in provider but not here") + def test_queue_info(self): + """Test retrieving queue information.""" + # Find the most busy backend. 
+ backend = most_busy_backend(self.service) + leave_states = list(JOB_FINAL_STATES) + [JobStatus.RUNNING] + job = backend.run(self.bell) + queue_info = None + for _ in range(20): + queue_info = job.queue_info() + # Even if job status is queued, its queue info may not be immediately available. + if ( + job._status is JobStatus.QUEUED and job.queue_position() is not None + ) or job._status in leave_states: + break + time.sleep(1) + + if job._status is JobStatus.QUEUED and job.queue_position() is not None: + self.log.debug( + "Job id=%s, queue info=%s, queue position=%s", + job.job_id(), + queue_info, + job.queue_position(), + ) + msg = "Job {} is queued but has no ".format(job.job_id()) + self.assertIsNotNone(queue_info, msg + "queue info.") + for attr, value in queue_info.__dict__.items(): + self.assertIsNotNone(value, msg + attr) + self.assertTrue( + all( + 0 < priority <= 1.0 + for priority in [ + queue_info.hub_priority, + queue_info.group_priority, + queue_info.project_priority, + ] + ), + "Unexpected queue info {} for job {}".format(queue_info, job.job_id()), + ) + + self.assertTrue(queue_info.format()) + self.assertTrue(repr(queue_info)) + elif job._status is not None: + self.assertIsNone(job.queue_position()) + self.log.warning("Unable to retrieve queue information") + + # Cancel job so it doesn't consume more resources. + cancel_job_safe(job, self.log) + + def test_esp_readout_not_enabled(self): + """Test that an error is thrown is ESP readout is used and the backend does not support it.""" + saved_api = self.sim_backend._api_client + try: + self.sim_backend._api_client = BaseFakeAccountClient() + # sim backend does not have ``measure_esp_enabled`` flag: defaults to ``False`` + with self.assertRaises(IBMBackendValueError) as context_manager: + self.sim_backend.run(self.bell, use_measure_esp=True) + self.assertIn( + "ESP readout not supported on this device. 
Please make sure the flag " + "'use_measure_esp' is unset or set to 'False'.", + context_manager.exception.message, + ) + finally: + self.sim_backend._api_client = saved_api + + @skip("not supported by api") + def test_esp_readout_enabled(self): + """Test that ESP readout can be used when the backend supports it.""" + saved_api = self.sim_backend._api_client + try: + self.sim_backend._api_client = BaseFakeAccountClient() + setattr(self.sim_backend._configuration, "measure_esp_enabled", True) + job = self.sim_backend.run(self.bell, use_measure_esp=True) + self.assertEqual(job.inputs["use_measure_esp"], True) + finally: + delattr(self.sim_backend._configuration, "measure_esp_enabled") + self.sim_backend._api_client = saved_api + + @skip("not supported by api") + def test_esp_readout_default_value(self): + """Test that ESP readout is set to backend support value if not specified.""" + saved_api = self.sim_backend._api_client + try: + self.sim_backend._api_client = BaseFakeAccountClient() + # ESP readout not enabled on backend + setattr(self.sim_backend._configuration, "measure_esp_enabled", False) + job = self.sim_backend.run(self.bell) + self.assertEqual(job.inputs["use_measure_esp"], False) + # ESP readout enabled on backend + setattr(self.sim_backend._configuration, "measure_esp_enabled", True) + job = self.sim_backend.run(self.bell) + self.assertEqual(job.inputs["use_measure_esp"], True) + finally: + delattr(self.sim_backend._configuration, "measure_esp_enabled") + self.sim_backend._api_client = saved_api + + def test_job_tags(self): + """Test using job tags.""" + # Use a unique tag. 
+ job_tags = [ + uuid.uuid4().hex[0:16], + uuid.uuid4().hex[0:16], + uuid.uuid4().hex[0:16], + ] + job = self.sim_backend.run(self.bell, job_tags=job_tags) + + no_rjobs_tags = [job_tags[0:1] + ["phantom_tags"], ["phantom_tag"]] + for tags in no_rjobs_tags: + rjobs = self.service.jobs(job_tags=tags, created_after=self.last_week) + self.assertEqual(len(rjobs), 0, "Expected job {}, got {}".format(job.job_id(), rjobs)) + + has_rjobs_tags = [job_tags, job_tags[1:3]] + for tags in has_rjobs_tags: + with self.subTest(tags=tags): + rjobs = self.service.jobs( + job_tags=tags, + created_after=self.last_week, + ) + self.assertEqual( + len(rjobs), 1, "Expected job {}, got {}".format(job.job_id(), rjobs) + ) + self.assertEqual(rjobs[0].job_id(), job.job_id()) + # TODO check why this sometimes fails + # self.assertEqual(set(rjobs[0].tags()), set(job_tags)) + + @skip("refresh supported in provider but not in runtime") + def test_job_tags_replace(self): + """Test updating job tags by replacing a job's existing tags.""" + initial_job_tags = [uuid.uuid4().hex[:16]] + job = self.sim_backend.run(self.bell, job_tags=initial_job_tags) + + tags_to_replace_subtests = [ + [], # empty tags. + list("{}_new_tag_{}".format(uuid.uuid4().hex[:5], i) for i in range(2)), # unique tags. + initial_job_tags + ["foo"], + ] + for tags_to_replace in tags_to_replace_subtests: + with self.subTest(tags_to_replace=tags_to_replace): + # Update the job tags. + _ = job.update_tags(new_tags=tags_to_replace) + + # Wait a bit so we don't get cached results. 
+ time.sleep(2) + job.refresh() + + self.assertEqual(set(tags_to_replace), set(job.tags())) + + def test_invalid_job_tags(self): + """Test using job tags with an and operator.""" + self.assertRaises(IBMBackendValueError, self.sim_backend.run, self.bell, job_tags={"foo"}) + self.assertRaises( + IBMBackendValueError, + self.service.jobs, + job_tags=[1, 2, 3], + ) + + def test_cost_estimation(self): + """Test cost estimation is returned correctly.""" + self.assertTrue(self.sim_job.usage_estimation) + self.assertIn("quantum_seconds", self.sim_job.usage_estimation) From e72524de2c5ab3068ac9e39c9f06090b9f172382 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 17 Oct 2023 12:36:57 +0000 Subject: [PATCH 16/47] Added test class TestBackendRunInSession for backend.run with session --- qiskit_ibm_runtime/ibm_backend.py | 3 +- test/integration/test_session.py | 74 +++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 87cb024fd..49057daae 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -851,7 +851,8 @@ def cancel_session(self) -> None: if self._session: self._session.cancel() if self._session.session_id: - self.provider._runtime_client.close_session(self._session.session_id) + self._api_client.close_session(self._session.session_id) + self._session = None def _deprecate_id_instruction(self, circuits: List[QuantumCircuit]) -> List[QuantumCircuit]: diff --git a/test/integration/test_session.py b/test/integration/test_session.py index 5a808e989..6789327c5 100644 --- a/test/integration/test_session.py +++ b/test/integration/test_session.py @@ -16,6 +16,7 @@ from qiskit.quantum_info import SparsePauliOp from qiskit.test.reference_circuits import ReferenceCircuits from qiskit.primitives import EstimatorResult, SamplerResult +from qiskit.result import Result from qiskit_ibm_runtime import Estimator, Session, Sampler, 
Options @@ -98,3 +99,76 @@ def test_session_from_id(self, service): sampler = Sampler(session=new_session) job = sampler.run(ReferenceCircuits.bell(), shots=400) self.assertEqual(session_id, job.session_id) + + +class TestBackendRunInSession(IBMIntegrationTestCase): + """Integration tests for Backend.run in Session.""" + + def test_session_id(self): + """Test that session_id is updated correctly and maintained throughout the session""" + backend = self.service.get_backend("ibmq_qasm_simulator") + backend.open_session() + self.assertEqual(backend.session.session_id, None) + self.assertTrue(backend.session.active) + job1 = backend.run(ReferenceCircuits.bell()) + self.assertEqual(job1._session_id, job1.job_id()) + job2 = backend.run(ReferenceCircuits.bell()) + self.assertFalse(job2._session_id == job2.job_id()) + + def test_backend_run_with_session(self): + """Test that 'shots' parameter is transferred correctly""" + shots = 1000 + backend = self.service.backend("ibmq_qasm_simulator") + backend.open_session() + result = backend.run(circuits=ReferenceCircuits.bell(), shots=shots).result() + self.assertIsInstance(result, Result) + self.assertEqual(result.results[0].shots, shots) + self.assertAlmostEqual( + result.get_counts()["00"], result.get_counts()["11"], delta=shots / 10 + ) + + def test_session_cancel(self): + """Test closing a session""" + backend = self.service.backend("ibmq_qasm_simulator") + backend.open_session() + self.assertTrue(backend.session.active) + backend.cancel_session() + self.assertIsNone(backend.session) + + def test_run_after_cancel(self): + """Test running after session is cancelled.""" + backend = self.service.backend("ibmq_qasm_simulator") + job1 = backend.run(circuits=ReferenceCircuits.bell()) + self.assertIsNone(backend.session) + self.assertIsNone(job1._session_id) + + backend.open_session() + job2 = backend.run(ReferenceCircuits.bell()) + self.assertIsNotNone(job2._session_id) + backend.cancel_session() + + job3 = 
backend.run(circuits=ReferenceCircuits.bell()) + self.assertIsNone(backend.session) + self.assertIsNone(job3._session_id) + + def test_session_as_context_manager(self): + """Test session as a context manager""" + backend = self.service.backend("ibmq_qasm_simulator") + + with backend.open_session() as session: + job1 = backend.run(ReferenceCircuits.bell()) + session_id = session.session_id + self.assertEqual(session_id, job1.job_id()) + job2 = backend.run(ReferenceCircuits.bell()) + self.assertFalse(session_id == job2.job_id()) + + def test_run_after_cancel_as_context_manager(self): + """Test run after cancel in context manager""" + backend = self.service.backend("ibmq_qasm_simulator") + with backend.open_session() as session: + _ = backend.run(ReferenceCircuits.bell()) + self.assertEqual(backend.session, session) + backend.cancel_session() + job = backend.run(circuits=ReferenceCircuits.bell()) + self.assertIsNone(backend.session) + self.assertIsNone(job._session_id) From 03c8fb8550253fb7cfb915ea6ec71a0296b55188 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 17 Oct 2023 14:45:09 +0000 Subject: [PATCH 17/47] Cleaning up code --- qiskit_ibm_runtime/ibm_backend.py | 9 +++------ test/decorators.py | 1 - test/integration/test_backend.py | 1 - test/integration/test_ibm_job.py | 6 ++---- 4 files changed, 5 insertions(+), 12 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 49057daae..abdb8a4a4 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -698,13 +698,10 @@ def run( warnings.warn(f"The backend {self.name} is currently paused.") program_id = str(run_config.get("program_id", "")) - if not program_id: - if dynamic: - program_id = QASM3RUNNERPROGRAMID - else: - program_id = QOBJRUNNERPROGRAMID - else: + if program_id: run_config.pop("program_id", None) + else: + program_id = QASM3RUNNERPROGRAMID if dynamic else QOBJRUNNERPROGRAMID image: Optional[str] = 
run_config.get("image", None) # type: ignore if image is not None: diff --git a/test/decorators.py b/test/decorators.py index 3471d78be..b81c9b750 100644 --- a/test/decorators.py +++ b/test/decorators.py @@ -183,7 +183,6 @@ def _decorator(func): @integration_test_setup() def _wrapper(self, *args, **kwargs): dependencies: IntegrationTestDependencies = kwargs["dependencies"] - # provider: IBMProvider = dependencies.provider service = dependencies.service if not staging: raise SkipTest("Tests not supported on staging.") diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index 74767c9ae..2af2ad5d5 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -82,7 +82,6 @@ def setUpClass(cls): # pylint: disable=arguments-differ # pylint: disable=no-value-for-parameter super().setUpClass() - cls.service = cls.dependencies.service if cls.dependencies.channel == "ibm_cloud": # TODO use real device when cloud supports it cls.backend = cls.dependencies.service.least_busy(min_num_qubits=5) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index 4a0f08c10..bb2cc0c1e 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -35,7 +35,7 @@ integration_test_setup_with_backend, ) from ..fake_account_client import BaseFakeAccountClient, CancelableFakeJob -from ..ibm_test_case import IBMTestCase +from ..ibm_test_case import IBMIntegrationTestCase from ..utils import ( most_busy_backend, cancel_job_safe, @@ -43,7 +43,7 @@ ) -class TestIBMJob(IBMTestCase): +class TestIBMJob(IBMIntegrationTestCase): """Test ibm_job module.""" sim_backend: IBMBackend @@ -58,12 +58,10 @@ def setUpClass(cls, backend: IBMBackend, dependencies: IntegrationTestDependenci """Initial class level setup.""" # pylint: disable=arguments-differ super().setUpClass() - cls.service = dependencies.service cls.sim_backend = dependencies.service.backend( "ibmq_qasm_simulator", 
instance=dependencies.instance ) cls.real_device_backend = backend - cls.dependencies = dependencies cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend) cls.sim_job = cls.sim_backend.run(cls.bell) cls.last_month = datetime.now() - timedelta(days=30) From ac1e4643d6e63913fe868f8e55f4a02399959897 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 17 Oct 2023 14:59:19 +0000 Subject: [PATCH 18/47] lint, added missing parameter --- test/integration/test_ibm_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index bb2cc0c1e..f6fd9f170 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -57,7 +57,7 @@ class TestIBMJob(IBMIntegrationTestCase): def setUpClass(cls, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: """Initial class level setup.""" # pylint: disable=arguments-differ - super().setUpClass() + super().setUpClass(dependencies=dependencies) cls.sim_backend = dependencies.service.backend( "ibmq_qasm_simulator", instance=dependencies.instance ) From 535c875cc85a5f1138b34a080f681b7db656edbf Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 19 Oct 2023 13:39:20 +0000 Subject: [PATCH 19/47] Added more tests from qiskit-ibm-provider --- test/integration/test_ibm_qasm_simulator.py | 178 ++++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 test/integration/test_ibm_qasm_simulator.py diff --git a/test/integration/test_ibm_qasm_simulator.py b/test/integration/test_ibm_qasm_simulator.py new file mode 100644 index 000000000..957c759d9 --- /dev/null +++ b/test/integration/test_ibm_qasm_simulator.py @@ -0,0 +1,178 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. 
You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Test IBM Quantum online QASM simulator.""" + +from unittest import mock +from unittest import skip + +from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister +from qiskit.compiler import transpile +from qiskit.providers.aer.noise import ( # pylint: disable=import-error,no-name-in-module + NoiseModel, +) +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_runtime import IBMBackend +from ..decorators import ( + integration_test_setup_with_backend, + IntegrationTestDependencies, +) +from ..ibm_test_case import IBMIntegrationTestCase + + +class TestIBMQasmSimulator(IBMIntegrationTestCase): + """Test IBM Quantum QASM Simulator.""" + + @integration_test_setup_with_backend(simulator=False) + def setUp( + self, backend: IBMBackend, dependencies: IntegrationTestDependencies + ) -> None: + """Initial test setup.""" + # pylint: disable=arguments-differ + super().setUp() + self.sim_backend = self.service.backend( + "ibmq_qasm_simulator", instance=dependencies.instance + ) + self.real_device_backend = backend + + def test_execute_one_circuit_simulator_online(self): + """Test execute_one_circuit_simulator_online.""" + quantum_register = QuantumRegister(1) + classical_register = ClassicalRegister(1) + quantum_circuit = QuantumCircuit( + quantum_register, classical_register, name="qc" + ) + quantum_circuit.h(quantum_register[0]) + quantum_circuit.measure(quantum_register[0], classical_register[0]) + circs = transpile(quantum_circuit, backend=self.sim_backend) + shots = 1024 + job = self.sim_backend.run(circs, shots=shots) + result = job.result() + counts = 
result.get_counts(quantum_circuit) + target = {"0": shots / 2, "1": shots / 2} + threshold = 0.1 * shots + self.assertDictAlmostEqual(counts, target, threshold) + + def test_execute_several_circuits_simulator_online(self): + """Test execute_several_circuits_simulator_online.""" + quantum_register = QuantumRegister(2) + classical_register = ClassicalRegister(2) + qcr1 = QuantumCircuit(quantum_register, classical_register, name="qc1") + qcr2 = QuantumCircuit(quantum_register, classical_register, name="qc2") + qcr1.h(quantum_register) + qcr2.h(quantum_register[0]) + qcr2.cx(quantum_register[0], quantum_register[1]) + qcr1.measure(quantum_register[0], classical_register[0]) + qcr1.measure(quantum_register[1], classical_register[1]) + qcr2.measure(quantum_register[0], classical_register[0]) + qcr2.measure(quantum_register[1], classical_register[1]) + shots = 1024 + circs = transpile([qcr1, qcr2], backend=self.sim_backend) + job = self.sim_backend.run(circs, shots=shots) + result = job.result() + counts1 = result.get_counts(qcr1) + counts2 = result.get_counts(qcr2) + target1 = {"00": shots / 4, "01": shots / 4, "10": shots / 4, "11": shots / 4} + target2 = {"00": shots / 2, "11": shots / 2} + threshold = 0.1 * shots + self.assertDictAlmostEqual(counts1, target1, threshold) + self.assertDictAlmostEqual(counts2, target2, threshold) + + def test_online_qasm_simulator_two_registers(self): + """Test online_qasm_simulator_two_registers.""" + qr1 = QuantumRegister(2) + cr1 = ClassicalRegister(2) + qr2 = QuantumRegister(2) + cr2 = ClassicalRegister(2) + qcr1 = QuantumCircuit(qr1, qr2, cr1, cr2, name="circuit1") + qcr2 = QuantumCircuit(qr1, qr2, cr1, cr2, name="circuit2") + qcr1.x(qr1[0]) + qcr2.x(qr2[1]) + qcr1.measure(qr1[0], cr1[0]) + qcr1.measure(qr1[1], cr1[1]) + qcr1.measure(qr2[0], cr2[0]) + qcr1.measure(qr2[1], cr2[1]) + qcr2.measure(qr1[0], cr1[0]) + qcr2.measure(qr1[1], cr1[1]) + qcr2.measure(qr2[0], cr2[0]) + qcr2.measure(qr2[1], cr2[1]) + circs = transpile([qcr1, 
qcr2], self.sim_backend) + job = self.sim_backend.run(circs, shots=1024) + result = job.result() + result1 = result.get_counts(qcr1) + result2 = result.get_counts(qcr2) + self.assertEqual(result1, {"00 01": 1024}) + self.assertEqual(result2, {"10 00": 1024}) + + @skip("TODO refactor to use backend._runtime_run") + def test_new_sim_method(self): + """Test new simulator methods.""" + + def _new_submit(qobj, *args, **kwargs): + # pylint: disable=unused-argument + self.assertEqual( + qobj.config.method, "extended_stabilizer", f"qobj header={qobj.header}" + ) + return mock.MagicMock() + + backend = self.sim_backend + + sim_method = backend._configuration._data.get("simulation_method", None) + submit_fn = backend._submit_job + + try: + backend._configuration._data["simulation_method"] = "extended_stabilizer" + backend._submit_job = _new_submit + circ = transpile(ReferenceCircuits.bell(), backend=backend) + backend.run(circ, header={"test": "circuits"}) + finally: + backend._configuration._data["simulation_method"] = sim_method + backend._submit_job = submit_fn + + @skip("TODO refactor to use backend._runtime_run") + def test_new_sim_method_no_overwrite(self): + """Test custom method option is not overwritten.""" + + def _new_submit(qobj, *args, **kwargs): + # pylint: disable=unused-argument + self.assertEqual( + qobj.config.method, "my_method", f"qobj header={qobj.header}" + ) + return mock.MagicMock() + + backend = self.sim_backend + + sim_method = backend._configuration._data.get("simulation_method", None) + submit_fn = backend._submit_job + + try: + backend._configuration._data["simulation_method"] = "extended_stabilizer" + backend._submit_job = _new_submit + circ = transpile(ReferenceCircuits.bell(), backend=backend) + backend.run(circ, method="my_method", header={"test": "circuits"}) + finally: + backend._configuration._data["simulation_method"] = sim_method + backend._submit_job = submit_fn + + # @skip( + # "NoiseModel.from_backend does not currently support V2 
Backends. \ + # Skip test until it's fixed in aer." + # ) + def test_simulator_with_noise_model(self): + """Test using simulator with a noise model.""" + noise_model = NoiseModel.from_backend(self.real_device_backend) + result = self.sim_backend.run( + transpile(ReferenceCircuits.bell(), backend=self.sim_backend), + noise_model=noise_model, + ).result() + self.assertTrue(result) From f17312442fc2ec4508d930b3f63308462b1c2ac2 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 19 Oct 2023 13:50:13 +0000 Subject: [PATCH 20/47] Inherit from BaseQiskitTestCase --- test/ibm_test_case.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/ibm_test_case.py b/test/ibm_test_case.py index 2878597d4..929e7268e 100644 --- a/test/ibm_test_case.py +++ b/test/ibm_test_case.py @@ -16,12 +16,13 @@ import copy import logging import inspect -import unittest from contextlib import suppress from collections import defaultdict from typing import DefaultDict, Dict from qiskit.test.reference_circuits import ReferenceCircuits +from qiskit.test.base import BaseQiskitTestCase + from qiskit_ibm_runtime import QISKIT_IBM_RUNTIME_LOGGER_NAME from qiskit_ibm_runtime import QiskitRuntimeService, Sampler, Options @@ -30,7 +31,7 @@ from .templates import RUNTIME_PROGRAM, RUNTIME_PROGRAM_METADATA, PROGRAM_PREFIX -class IBMTestCase(unittest.TestCase): +class IBMTestCase(BaseQiskitTestCase): """Custom TestCase for use with qiskit-ibm-runtime.""" log: logging.Logger From 757af8e6499e7d534cb986a0977355bbe732c626 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 19 Oct 2023 16:59:03 +0000 Subject: [PATCH 21/47] Enabled several tests --- test/integration/test_ibm_job_attributes.py | 34 ++++++--------------- test/integration/test_ibm_qasm_simulator.py | 12 ++------ 2 files changed, 13 insertions(+), 33 deletions(-) diff --git a/test/integration/test_ibm_job_attributes.py b/test/integration/test_ibm_job_attributes.py index 864478f1a..167586f4f 100644 --- 
a/test/integration/test_ibm_job_attributes.py +++ b/test/integration/test_ibm_job_attributes.py @@ -35,7 +35,6 @@ IntegrationTestDependencies, integration_test_setup, ) -from ..fake_account_client import BaseFakeAccountClient from ..ibm_test_case import IBMTestCase from ..utils import ( most_busy_backend, @@ -228,50 +227,37 @@ def test_queue_info(self): def test_esp_readout_not_enabled(self): """Test that an error is thrown is ESP readout is used and the backend does not support it.""" - saved_api = self.sim_backend._api_client - try: - self.sim_backend._api_client = BaseFakeAccountClient() - # sim backend does not have ``measure_esp_enabled`` flag: defaults to ``False`` - with self.assertRaises(IBMBackendValueError) as context_manager: - self.sim_backend.run(self.bell, use_measure_esp=True) - self.assertIn( - "ESP readout not supported on this device. Please make sure the flag " - "'use_measure_esp' is unset or set to 'False'.", - context_manager.exception.message, - ) - finally: - self.sim_backend._api_client = saved_api + # sim backend does not have ``measure_esp_enabled`` flag: defaults to ``False`` + with self.assertRaises(IBMBackendValueError) as context_manager: + self.sim_backend.run(self.bell, use_measure_esp=True) + self.assertIn( + "ESP readout not supported on this device. 
Please make sure the flag " + "'use_measure_esp' is unset or set to 'False'.", + context_manager.exception.message, + ) - @skip("not supported by api") def test_esp_readout_enabled(self): """Test that ESP readout can be used when the backend supports it.""" - saved_api = self.sim_backend._api_client try: - self.sim_backend._api_client = BaseFakeAccountClient() setattr(self.sim_backend._configuration, "measure_esp_enabled", True) job = self.sim_backend.run(self.bell, use_measure_esp=True) self.assertEqual(job.inputs["use_measure_esp"], True) finally: delattr(self.sim_backend._configuration, "measure_esp_enabled") - self.sim_backend._api_client = saved_api - @skip("not supported by api") def test_esp_readout_default_value(self): """Test that ESP readout is set to backend support value if not specified.""" - saved_api = self.sim_backend._api_client try: - self.sim_backend._api_client = BaseFakeAccountClient() # ESP readout not enabled on backend setattr(self.sim_backend._configuration, "measure_esp_enabled", False) job = self.sim_backend.run(self.bell) - self.assertEqual(job.inputs["use_measure_esp"], False) + self.assertIsNone(getattr(job.inputs, "use_measure_esp", None)) # ESP readout enabled on backend setattr(self.sim_backend._configuration, "measure_esp_enabled", True) - job = self.sim_backend.run(self.bell) + job = self.sim_backend.run(self.bell, use_measure_esp=True) self.assertEqual(job.inputs["use_measure_esp"], True) finally: delattr(self.sim_backend._configuration, "measure_esp_enabled") - self.sim_backend._api_client = saved_api def test_job_tags(self): """Test using job tags.""" diff --git a/test/integration/test_ibm_qasm_simulator.py b/test/integration/test_ibm_qasm_simulator.py index 957c759d9..a03bb3697 100644 --- a/test/integration/test_ibm_qasm_simulator.py +++ b/test/integration/test_ibm_qasm_simulator.py @@ -34,9 +34,7 @@ class TestIBMQasmSimulator(IBMIntegrationTestCase): """Test IBM Quantum QASM Simulator.""" 
@integration_test_setup_with_backend(simulator=False) - def setUp( - self, backend: IBMBackend, dependencies: IntegrationTestDependencies - ) -> None: + def setUp(self, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: """Initial test setup.""" # pylint: disable=arguments-differ super().setUp() @@ -49,9 +47,7 @@ def test_execute_one_circuit_simulator_online(self): """Test execute_one_circuit_simulator_online.""" quantum_register = QuantumRegister(1) classical_register = ClassicalRegister(1) - quantum_circuit = QuantumCircuit( - quantum_register, classical_register, name="qc" - ) + quantum_circuit = QuantumCircuit(quantum_register, classical_register, name="qc") quantum_circuit.h(quantum_register[0]) quantum_circuit.measure(quantum_register[0], classical_register[0]) circs = transpile(quantum_circuit, backend=self.sim_backend) @@ -145,9 +141,7 @@ def test_new_sim_method_no_overwrite(self): def _new_submit(qobj, *args, **kwargs): # pylint: disable=unused-argument - self.assertEqual( - qobj.config.method, "my_method", f"qobj header={qobj.header}" - ) + self.assertEqual(qobj.config.method, "my_method", f"qobj header={qobj.header}") return mock.MagicMock() backend = self.sim_backend From 862723efba867119698fbbdf21547a74bf5a1c90 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 22 Oct 2023 08:59:31 +0000 Subject: [PATCH 22/47] removed method _deprecate_id_instruction --- qiskit_ibm_runtime/ibm_backend.py | 62 ------------------------------- test/integration/test_backend.py | 30 --------------- 2 files changed, 92 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index e4017401f..18b0fd265 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -715,8 +715,6 @@ def run( if isinstance(shots, float): shots = int(shots) - if not self.configuration().simulator: - circuits = self._deprecate_id_instruction(circuits) run_config_dict = self._get_run_config( 
program_id=program_id, @@ -852,66 +850,6 @@ def cancel_session(self) -> None: self._session = None - def _deprecate_id_instruction(self, circuits: List[QuantumCircuit]) -> List[QuantumCircuit]: - """Raise a DeprecationWarning if any circuit contains an 'id' instruction. - - Additionally, if 'delay' is a 'supported_instruction', replace each 'id' - instruction (in-place) with the equivalent ('sx'-length) 'delay' instruction. - - Args: - circuits: The individual or list of :class:`~qiskit.circuits.QuantumCircuit` - passed to :meth:`IBMBackend.run()`. Modified in-place. - - Returns: - A modified copy of the original circuit where 'id' instructions are replaced with - 'delay' instructions. A copy is used so the original circuit is not modified. - If there are no 'id' instructions or 'delay' is not supported, return the original circuit. - """ - - id_support = "id" in getattr(self.configuration(), "basis_gates", []) - delay_support = "delay" in getattr(self.configuration(), "supported_instructions", []) - - if not delay_support: - return circuits - - circuit_has_id = any( - instr.name == "id" - for circuit in circuits - if isinstance(circuit, QuantumCircuit) - for instr, qargs, cargs in circuit.data - ) - if not circuit_has_id: - return circuits - if not self.id_warning_issued: - if id_support and delay_support: - warnings.warn( - "Support for the 'id' instruction has been deprecated " - "from IBM hardware backends. Any 'id' instructions " - "will be replaced with their equivalent 'delay' instruction. " - "Please use the 'delay' instruction instead.", - DeprecationWarning, - stacklevel=4, - ) - else: - warnings.warn( - "Support for the 'id' instruction has been removed " - "from IBM hardware backends. Any 'id' instructions " - "will be replaced with their equivalent 'delay' instruction. 
" - "Please use the 'delay' instruction instead.", - DeprecationWarning, - stacklevel=4, - ) - - self.id_warning_issued = True - - # Make sure we don't mutate user's input circuits - circuits = copy.deepcopy(circuits) - # Convert id gates to delays. - pm = PassManager(ConvertIdToDelay(self.target.durations())) # pylint: disable=invalid-name - circuits = pm.run(circuits) - - return circuits - class IBMRetiredBackend(IBMBackend): """Backend class interfacing with an IBM Quantum device no longer available.""" diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index 2af2ad5d5..77cd51902 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -236,36 +236,6 @@ def test_paused_backend_warning(self): with self.assertWarns(Warning): backend.run(ReferenceCircuits.bell()) - def test_deprecate_id_instruction(self): - """Test replacement of 'id' Instructions with 'Delay' instructions.""" - circuit_with_id = QuantumCircuit(2) - circuit_with_id.id(0) - circuit_with_id.id(0) - circuit_with_id.id(1) - - config = QasmBackendConfiguration( - basis_gates=["id"], - supported_instructions=["delay"], - dt=0.25, - backend_name="test", - backend_version="0.0", - n_qubits=1, - gates=[], - local=False, - simulator=False, - conditional=False, - open_pulse=False, - memory=False, - max_shots=1, - coupling_map=[], - ) - - with patch.object(self.backend, "configuration", return_value=config): - with self.assertWarnsRegex(DeprecationWarning, r"'id' instruction"): - mutated_circuit = self.backend._deprecate_id_instruction([circuit_with_id]) - self.assertEqual(mutated_circuit[0].count_ops(), {"delay": 3}) - self.assertEqual(circuit_with_id.count_ops(), {"id": 3}) - def test_backend_wrong_instance(self): """Test that an error is raised when retrieving a backend not in the instance.""" backends = self.service.backends() From 3697ec362a0807b3e7c9cc9ea06d5d4fe2dda44c Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 22 Oct 2023 
09:21:51 +0000 Subject: [PATCH 23/47] lint, unused imports --- qiskit_ibm_runtime/ibm_backend.py | 3 - qiskit_ibm_runtime/transpiler/__init__.py | 31 - .../transpiler/passes/__init__.py | 36 - .../transpiler/passes/basis/__init__.py | 23 - .../passes/basis/convert_id_to_delay.py | 87 --- .../transpiler/passes/scheduling/__init__.py | 397 ----------- .../passes/scheduling/block_base_padder.py | 620 ----------------- .../passes/scheduling/dynamical_decoupling.py | 553 --------------- .../transpiler/passes/scheduling/pad_delay.py | 78 --- .../transpiler/passes/scheduling/scheduler.py | 643 ------------------ .../transpiler/passes/scheduling/utils.py | 287 -------- qiskit_ibm_runtime/transpiler/plugin.py | 98 --- test/integration/test_backend.py | 2 - 13 files changed, 2858 deletions(-) delete mode 100644 qiskit_ibm_runtime/transpiler/__init__.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/__init__.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/__init__.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py delete mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py delete mode 100644 qiskit_ibm_runtime/transpiler/plugin.py diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 18b0fd265..552a2b3fb 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -12,7 +12,6 @@ """Module for interfacing with an IBM Quantum Backend.""" -import copy import logging from typing import Iterable, Union, Optional, Any, List, Dict 
from datetime import datetime as python_datetime @@ -23,7 +22,6 @@ from qiskit import QuantumCircuit from qiskit.qobj.utils import MeasLevel, MeasReturnType from qiskit.tools.events.pubsub import Publisher -from qiskit.transpiler.passmanager import PassManager from qiskit.providers.backend import BackendV2 as Backend from qiskit.providers.options import Options @@ -69,7 +67,6 @@ from .utils.backend_converter import ( convert_to_target, ) -from .transpiler.passes.basis.convert_id_to_delay import ConvertIdToDelay logger = logging.getLogger(__name__) diff --git a/qiskit_ibm_runtime/transpiler/__init__.py b/qiskit_ibm_runtime/transpiler/__init__.py deleted file mode 100644 index d6e62daa4..000000000 --- a/qiskit_ibm_runtime/transpiler/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -""" -==================================================================== -IBM Backend Transpiler Tools (:mod:`qiskit_ibm_provider.transpiler`) -==================================================================== - -A collection of transpiler tools for working with IBM Quantum's -next-generation backends that support advanced "dynamic circuit" -capabilities. Ie., circuits with support for classical -compute and control-flow/feedback based off of measurement results. - -Transpiler Passes -================== - -.. 
autosummary:: - :toctree: ../stubs/ - - passes - -""" diff --git a/qiskit_ibm_runtime/transpiler/passes/__init__.py b/qiskit_ibm_runtime/transpiler/passes/__init__.py deleted file mode 100644 index 2fe16514c..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -""" -================================================================ -Transpiler Passes (:mod:`qiskit_ibm_provider.transpiler.passes`) -================================================================ - -.. currentmodule:: qiskit_ibm_provider.transpiler.passes - -A collection of transpiler passes for IBM backends. - -.. autosummary:: - :toctree: ../stubs/ - - basis - scheduling - - -""" - -from .basis import ConvertIdToDelay - -# circuit scheduling -from .scheduling import ASAPScheduleAnalysis -from .scheduling import PadDynamicalDecoupling -from .scheduling import PadDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py b/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py deleted file mode 100644 index 0a71af010..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
-# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -""" -========================================================== -Basis (:mod:`qiskit_ibm_provider.transpiler.passes.basis`) -========================================================== - -.. currentmodule:: qiskit_ibm_provider.transpiler.passes.basis - -Passes to layout circuits to IBM backend's instruction sets. -""" - -from .convert_id_to_delay import ConvertIdToDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py b/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py deleted file mode 100644 index 3906d9046..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py +++ /dev/null @@ -1,87 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Pass to convert Id gate operations to a delay instruction.""" - -from typing import Dict - -from qiskit.converters import dag_to_circuit, circuit_to_dag - -from qiskit.circuit import ControlFlowOp -from qiskit.circuit import Delay -from qiskit.circuit.library import IGate -from qiskit.dagcircuit import DAGCircuit -from qiskit.transpiler.basepasses import TransformationPass -from qiskit.transpiler.instruction_durations import InstructionDurations - - -class ConvertIdToDelay(TransformationPass): - """Convert :class:`qiskit.circuit.library.standard_gates.IGate` to - a delay of the corresponding length. 
- """ - - def __init__(self, durations: InstructionDurations, gate: str = "sx"): - """Convert :class:`qiskit.circuit.library.IGate` to a - Convert :class:`qiskit.circuit.Delay`. - - Args: - duration: Duration of the delay to replace the identity gate with. - gate: Single qubit gate to extract duration from. - """ - self.durations = durations - self.gate = gate - self._cached_durations: Dict[int, int] = {} - - super().__init__() - - def run(self, dag: DAGCircuit) -> DAGCircuit: - self._run_inner(dag) - return dag - - def _run_inner(self, dag: DAGCircuit) -> bool: - """Run the pass on one :class:`.DAGCircuit`, mutating it. Returns ``True`` if the circuit - was modified and ``False`` if not.""" - modified = False - qubit_index_map = {bit: index for index, bit in enumerate(dag.qubits)} - for node in dag.op_nodes(): - if isinstance(node.op, ControlFlowOp): - modified_blocks = False - new_dags = [] - for block in node.op.blocks: - new_dag = circuit_to_dag(block) - modified_blocks |= self._run_inner(new_dag) - new_dags.append(new_dag) - if not modified_blocks: - continue - dag.substitute_node( - node, - node.op.replace_blocks(dag_to_circuit(block) for block in new_dags), - inplace=True, - ) - elif isinstance(node.op, IGate): - delay_op = Delay(self._get_duration(qubit_index_map[node.qargs[0]])) - dag.substitute_node(node, delay_op, inplace=True) - - modified = True - - return modified - - def _get_duration(self, qubit: int) -> int: - """Get the duration of a gate in dt.""" - duration = self._cached_durations.get(qubit, None) - if duration: - return duration - - duration = self.durations.get(self.gate, qubit) - self._cached_durations[qubit] = duration - - return duration diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py deleted file mode 100644 index c3017e9bc..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py +++ /dev/null @@ -1,397 +0,0 @@ -# This code is part 
of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -""" -==================================================================== -Scheduling (:mod:`qiskit_ibm_provider.transpiler.passes.scheduling`) -==================================================================== - -.. currentmodule:: qiskit_ibm_provider.transpiler.passes.scheduling - -A collection of scheduling passes for working with IBM Quantum's next-generation -backends that support advanced "dynamic circuit" capabilities. Ie., -circuits with support for classical control-flow/feedback based off -of measurement results. - -.. warning:: - You should not mix these scheduling passes with Qiskit's builtin scheduling - passes as they will negatively interact with the scheduling routines for - dynamic circuits. This includes setting ``scheduling_method`` in - :func:`~qiskit.compiler.transpile` or - :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. - -Below we demonstrate how to schedule and pad a teleportation circuit with delays -for a dynamic circuit backend's execution model: - -.. 
jupyter-execute:: - - from qiskit.circuit import ClassicalRegister, QuantumCircuit, QuantumRegister - from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager - from qiskit.transpiler.passmanager import PassManager - - from qiskit_ibm_provider.transpiler.passes.scheduling import DynamicCircuitInstructionDurations - from qiskit_ibm_provider.transpiler.passes.scheduling import ALAPScheduleAnalysis - from qiskit_ibm_provider.transpiler.passes.scheduling import PadDelay - from qiskit.providers.fake_provider import FakeJakarta - - - backend = FakeJakarta() - - # Temporary workaround for mock backends. For real backends this is not required. - backend.configuration().basis_gates.append("if_else") - - - # Use this duration class to get appropriate durations for dynamic - # circuit backend scheduling - durations = DynamicCircuitInstructionDurations.from_backend(backend) - # Generate the main Qiskit transpile passes. - pm = generate_preset_pass_manager(optimization_level=1, backend=backend) - # Configure the as-late-as-possible scheduling pass - pm.scheduling = PassManager([ALAPScheduleAnalysis(durations), PadDelay()]) - - qr = QuantumRegister(3) - crz = ClassicalRegister(1, name="crz") - crx = ClassicalRegister(1, name="crx") - result = ClassicalRegister(1, name="result") - - teleport = QuantumCircuit(qr, crz, crx, result, name="Teleport") - - teleport.h(qr[1]) - teleport.cx(qr[1], qr[2]) - teleport.cx(qr[0], qr[1]) - teleport.h(qr[0]) - teleport.measure(qr[0], crz) - teleport.measure(qr[1], crx) - with teleport.if_test((crz, 1)): - teleport.z(qr[2]) - with teleport.if_test((crx, 1)): - teleport.x(qr[2]) - teleport.measure(qr[2], result) - - # Transpile. - scheduled_teleport = pm.run(teleport) - - scheduled_teleport.draw(output="mpl") - - -Instead of padding with delays we may also insert a dynamical decoupling sequence -using the :class:`PadDynamicalDecoupling` pass as shown below: - -.. 
jupyter-execute:: - - from qiskit.circuit.library import XGate - - from qiskit_ibm_provider.transpiler.passes.scheduling import PadDynamicalDecoupling - - - dd_sequence = [XGate(), XGate()] - - pm = generate_preset_pass_manager(optimization_level=1, backend=backend) - pm.scheduling = PassManager( - [ - ALAPScheduleAnalysis(durations), - PadDynamicalDecoupling(durations, dd_sequence), - ] - ) - - dd_teleport = pm.run(teleport) - - dd_teleport.draw(output="mpl") - -When compiling a circuit with Qiskit, it is more efficient and more robust to perform all the -transformations in a single transpilation. This has been done above by extending Qiskit's preset -pass managers. For example, Qiskit's :func:`~qiskit.compiler.transpile` function internally builds -its pass set by using :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. -This returns instances of :class:`~qiskit.transpiler.StagedPassManager`, which can be extended. - - -Scheduling old format ``c_if`` conditioned gates ------------------------------------------------- - -Scheduling with old format ``c_if`` conditioned gates is not supported. - -.. jupyter-execute:: - - qc_c_if = QuantumCircuit(1, 1) - qc_c_if.x(0).c_if(0, 1) - qc_c_if.draw(output="mpl") - -The :class:`.IBMBackend` configures a translation plugin -:class:`.IBMTranslationPlugin` to automatically -apply transformations and optimizations for IBM hardware backends when invoking -:func:`~qiskit.compiler.transpile`. This will automatically convert all old style ``c_if`` -conditioned gates to new-style control-flow. -We may then schedule the transpiled circuit without further modification. - -.. jupyter-execute:: - - # Temporary workaround for mock backends. For real backends this is not required. 
- backend.get_translation_stage_plugin = lambda: "ibm_dynamic_circuits" - - pm = generate_preset_pass_manager(optimization_level=1, backend=backend) - pm.scheduling = PassManager( - [ - ALAPScheduleAnalysis(durations), - PadDynamicalDecoupling(durations, dd_sequence), - ] - ) - - qc_if_dd = pm.run(qc_c_if, backend) - qc_if_dd.draw(output="mpl") - - -If you are not using the transpiler plugin stages to -work around this please manually run the pass -:class:`qiskit.transpiler.passes.ConvertConditionsToIfOps` -prior to your scheduling pass. - -.. jupyter-execute:: - - from qiskit.transpiler.passes import ConvertConditionsToIfOps - - pm = generate_preset_pass_manager(optimization_level=1, backend=backend) - pm.scheduling = PassManager( - [ - ConvertConditionsToIfOps(), - ALAPScheduleAnalysis(durations), - PadDelay(), - ] - ) - - qc_if_dd = pm.run(qc_c_if) - qc_if_dd.draw(output="mpl") - - -Exploiting IBM backend's local parallel "fast-path" ---------------------------------------------------- - -IBM quantum hardware supports a localized "fast-path" which enables a block of gates -applied to a *single qubit* that are conditional on an immediately predecessor measurement -*of the same qubit* to be completed with lower latency. The hardware is also -able to do this in *parallel* on disjoint qubits that satisfy this condition. - -For example, the conditional gates below are performed in parallel with lower latency -as the measurements flow directly into the conditional blocks which in turn only apply -gates to the same measurement qubit. - -.. jupyter-execute:: - - qc = QuantumCircuit(2, 2) - qc.measure(0, 0) - qc.measure(1, 1) - # Conditional blocks will be performed in parallel in the hardware - with qc.if_test((0, 1)): - qc.x(0) - with qc.if_test((1, 1)): - qc.x(1) - - qc.draw(output="mpl") - - -The circuit below will not use the fast-path as the conditional gate is -on a different qubit than the measurement qubit. - -.. 
jupyter-execute:: - - qc = QuantumCircuit(2, 2) - qc.measure(0, 0) - with qc.if_test((0, 1)): - qc.x(1) - - qc.draw(output="mpl") - -Similarly, the circuit below contains gates on multiple qubits -and will not be performed using the fast-path. - -.. jupyter-execute:: - - qc = QuantumCircuit(2, 2) - qc.measure(0, 0) - with qc.if_test((0, 1)): - qc.x(0) - qc.x(1) - - qc.draw(output="mpl") - -A fast-path block may contain multiple gates as long as they are on the fast-path qubit. -If there are multiple fast-path blocks being performed in parallel each block will be -padded out to the duration of the longest block. - -.. jupyter-execute:: - - qc = QuantumCircuit(2, 2) - qc.measure(0, 0) - qc.measure(1, 1) - # Conditional blocks will be performed in parallel in the hardware - with qc.if_test((0, 1)): - qc.x(0) - # Will be padded out to a duration of 1600 on the backend. - with qc.if_test((1, 1)): - qc.delay(1600, 1) - - qc.draw(output="mpl") - -This behavior is also applied to the else condition of a fast-path eligible branch. - -.. jupyter-execute:: - - qc = QuantumCircuit(1, 1) - qc.measure(0, 0) - # Conditional blocks will be performed in parallel in the hardware - with qc.if_test((0, 1)) as else_: - qc.x(0) - # Will be padded out to a duration of 1600 on the backend. - with else_: - qc.delay(1600, 0) - - qc.draw(output="mpl") - - -If a single measurement result is used with several conditional blocks, if there is a fast-path -eligible block it will be applied followed by the non-fast-path blocks which will execute with -the standard higher latency conditional branch. - -.. 
jupyter-execute:: - - qc = QuantumCircuit(2, 2) - qc.measure(0, 0) - # Conditional blocks will be performed in parallel in the hardware - with qc.if_test((0, 1)): - # Uses fast-path - qc.x(0) - with qc.if_test((0, 1)): - # Does not use fast-path - qc.x(1) - - qc.draw(output="mpl") - -If you wish to prevent the usage of the fast-path you may insert a barrier between the measurement and -the conditional branch. - -.. jupyter-execute:: - - qc = QuantumCircuit(1, 2) - qc.measure(0, 0) - # Barrier prevents the fast-path. - qc.barrier() - with qc.if_test((0, 1)): - qc.x(0) - - qc.draw(output="mpl") - -Conditional measurements are not eligible for the fast-path. - -.. jupyter-execute:: - - qc = QuantumCircuit(1, 2) - qc.measure(0, 0) - with qc.if_test((0, 1)): - # Does not use the fast-path - qc.measure(0, 1) - - qc.draw(output="mpl") - -Similarly nested control-flow is not eligible. - -.. jupyter-execute:: - - qc = QuantumCircuit(1, 1) - qc.measure(0, 0) - with qc.if_test((0, 1)): - # Does not use the fast-path - qc.x(0) - with qc.if_test((0, 1)): - qc.x(0) - - qc.draw(output="mpl") - - -The scheduler is aware of the fast-path behavior and will not insert delays on idle qubits -in blocks that satisfy the fast-path conditions so as to avoid preventing the backend -compiler from performing the necessary optimizations to utilize the fast-path. If -there are fast-path blocks that will be performed in parallel they currently *will not* -be padded out by the scheduler to ensure they are of the same duration in Qiskit - -.. jupyter-execute:: - - dd_sequence = [XGate(), XGate()] - - pm = PassManager( - [ - ALAPScheduleAnalysis(durations), - PadDynamicalDecoupling(durations, dd_sequence), - ] - ) - - qc = QuantumCircuit(2, 2) - qc.measure(0, 0) - qc.measure(1, 1) - with qc.if_test((0, 1)): - qc.x(0) - # Is currently not padded to ensure - # a duration of 1000. 
If you desire - # this you would need to manually add - # qc.delay(840, 0) - with qc.if_test((1, 1)): - qc.delay(1000, 0) - - - qc.draw(output="mpl") - - qc_dd = pm.run(qc) - - qc_dd.draw(output="mpl") - -.. note:: - If there are qubits that are *not* involved in a fast-path decision it is not - currently possible to use them in a fast-path branch in parallel with the fast-path - qubits resulting from a measurement. This will be revised in the future as we - further improve these capabilities. - - For example: - - .. jupyter-execute:: - - qc = QuantumCircuit(3, 2) - qc.x(1) - qc.measure(0, 0) - with qc.if_test((0, 1)): - qc.x(0) - # Qubit 1 sits idle throughout the fast-path decision - with qc.if_test((1, 0)): - # Qubit 2 is idle but there is no measurement - # to make it fast-path eligible. This will - # however avoid a communication event in the hardware - # since the condition is compile time evaluated. - qc.x(2) - - qc.draw(output="mpl") - - -Scheduling & Dynamical Decoupling -================================= -.. autosummary:: - :toctree: ../stubs/ - - BlockBasePadder - ALAPScheduleAnalysis - ASAPScheduleAnalysis - DynamicCircuitInstructionDurations - PadDelay - PadDynamicalDecoupling -""" - -from .block_base_padder import BlockBasePadder -from .dynamical_decoupling import PadDynamicalDecoupling -from .pad_delay import PadDelay -from .scheduler import ALAPScheduleAnalysis, ASAPScheduleAnalysis -from .utils import DynamicCircuitInstructionDurations diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py deleted file mode 100644 index 1232750a5..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py +++ /dev/null @@ -1,620 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. 
You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Padding pass to fill timeslots for IBM (dynamic circuit) backends.""" - -from typing import Dict, Iterable, List, Optional, Union, Set - -from qiskit.circuit import ( - Qubit, - Clbit, - ControlFlowOp, - Gate, - IfElseOp, - Instruction, - Measure, -) -from qiskit.circuit.bit import Bit -from qiskit.circuit.library import Barrier -from qiskit.circuit.delay import Delay -from qiskit.circuit.parameterexpression import ParameterExpression -from qiskit.converters import dag_to_circuit -from qiskit.dagcircuit import DAGCircuit, DAGNode -from qiskit.transpiler.basepasses import TransformationPass -from qiskit.transpiler.exceptions import TranspilerError - -from .utils import block_order_op_nodes - - -class BlockBasePadder(TransformationPass): - """The base class of padding pass. - - This pass requires one of scheduling passes to be executed before itself. - Since there are multiple scheduling strategies, the selection of scheduling - pass is left in the hands of the pass manager designer. - Once a scheduling analysis pass is run, ``node_start_time`` is generated - in the :attr:`property_set`. This information is represented by a python dictionary of - the expected instruction execution times keyed on the node instances. - The padding pass expects all ``DAGOpNode`` in the circuit to be scheduled. - - This base class doesn't define any sequence to interleave, but it manages - the location where the sequence is inserted, and provides a set of information necessary - to construct the proper sequence. 
Thus, a subclass of this pass just needs to implement - :meth:`_pad` method, in which the subclass constructs a circuit block to insert. - This mechanism removes lots of boilerplate logic to manage whole DAG circuits. - - Note that padding pass subclasses should define interleaving sequences satisfying: - - - Interleaved sequence does not change start time of other nodes - - Interleaved sequence should have total duration of the provided ``time_interval``. - - Any manipulation violating these constraints may prevent this base pass from correctly - tracking the start time of each instruction, - which may result in violation of hardware alignment constraints. - """ - - def __init__(self, schedule_idle_qubits: bool = False) -> None: - self._node_start_time = None - self._node_block_dags = None - self._idle_after: Optional[Dict[Qubit, int]] = None - self._root_dag = None - self._dag = None - self._block_dag = None - self._prev_node: Optional[DAGNode] = None - self._wire_map: Optional[Dict[Bit, Bit]] = None - self._block_duration = 0 - self._current_block_idx = 0 - self._conditional_block = False - self._bit_indices: Optional[Dict[Qubit, int]] = None - # Nodes that the scheduling of this node is tied to. - - self._last_node_to_touch: Optional[Dict[Qubit, DAGNode]] = None - # Last node to touch a bit - - self._fast_path_nodes: Set[DAGNode] = set() - - self._dirty_qubits: Set[Qubit] = set() - # Qubits that are dirty in the circuit. - self._schedule_idle_qubits = schedule_idle_qubits - self._idle_qubits: Set[Qubit] = set() - super().__init__() - - def run(self, dag: DAGCircuit) -> DAGCircuit: - """Run the padding pass on ``dag``. - - Args: - dag: DAG to be checked. - - Returns: - DAGCircuit: DAG with idle time filled with instructions. - - Raises: - TranspilerError: When a particular node is not scheduled, likely some transform pass - is inserted before this node is called. 
- """ - if not self._schedule_idle_qubits: - self._idle_qubits = set(wire for wire in dag.idle_wires() if isinstance(wire, Qubit)) - self._pre_runhook(dag) - - self._init_run(dag) - - # Trivial wire map at the top-level - wire_map = {wire: wire for wire in dag.wires} - # Top-level dag is the entry block - new_dag = self._visit_block(dag, wire_map) - - return new_dag - - def _init_run(self, dag: DAGCircuit) -> None: - """Setup for initial run.""" - self._node_start_time = self.property_set["node_start_time"].copy() - self._node_block_dags = self.property_set["node_block_dags"] - self._idle_after = {bit: 0 for bit in dag.qubits} - self._current_block_idx = 0 - self._conditional_block = False - self._block_duration = 0 - - # Prepare DAG to pad - self._root_dag = dag - self._dag = self._empty_dag_like(dag) - self._block_dag = self._dag - self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} - self._last_node_to_touch = {} - self._fast_path_nodes = set() - self._dirty_qubits = set() - - self.property_set["node_start_time"].clear() - self._prev_node = None - self._wire_map = {} - - def _empty_dag_like( - self, - dag: DAGCircuit, - pad_wires: bool = True, - wire_map: Optional[Dict[Qubit, Qubit]] = None, - ignore_idle: bool = False, - ) -> DAGCircuit: - """Create an empty dag like the input dag.""" - new_dag = DAGCircuit() - - # Ensure *all* registers are included from the input circuit - # so that they are scheduled in sub-blocks - - # The top-level QuantumCircuit has the full registers available - # Control flow blocks do not get the full register added to the - # block but just the bits. When testing for equivalency the register - # information is taken into account. To work around this we try to - # while enabling generic handling of QuantumCircuits we - # add the register if available and otherwise add the bits directly. 
- # We need this work around as otherwise the padded circuit will - # not be equivalent to one written manually as bits will not - # be defined on registers like in the test case. - - source_wire_dag = self._root_dag if pad_wires else dag - - # trivial wire map if not provided, or if the top-level dag is used - if not wire_map or pad_wires: - wire_map = {wire: wire for wire in source_wire_dag.wires} - if dag.qregs and self._schedule_idle_qubits or not ignore_idle: - for qreg in source_wire_dag.qregs.values(): - new_dag.add_qreg(qreg) - else: - new_dag.add_qubits( - [ - wire_map[qubit] - for qubit in source_wire_dag.qubits - if qubit not in self._idle_qubits or not ignore_idle - ] - ) - - # Don't add root cargs as these will not be padded. - # Just focus on current block dag. - if dag.cregs: - for creg in dag.cregs.values(): - new_dag.add_creg(creg) - else: - new_dag.add_clbits(dag.clbits) - - new_dag.name = dag.name - new_dag.metadata = dag.metadata - new_dag.unit = self.property_set["time_unit"] or "dt" - if new_dag.unit != "dt": - raise TranspilerError( - 'All blocks must have time units of "dt". ' - "Please run TimeUnitConversion pass prior to padding." - ) - - new_dag.calibrations = dag.calibrations - new_dag.global_phase = dag.global_phase - return new_dag - - def _pre_runhook(self, dag: DAGCircuit) -> None: - """Extra routine inserted before running the padding pass. - - Args: - dag: DAG circuit on which the sequence is applied. - - Raises: - TranspilerError: If the whole circuit or instruction is not scheduled. - """ - if "node_start_time" not in self.property_set: - raise TranspilerError( - f"The input circuit {dag.name} is not scheduled. Call one of scheduling passes " - f"before running the {self.__class__.__name__} pass." - ) - - def _pad( - self, - block_idx: int, - qubit: Qubit, - t_start: int, - t_end: int, - next_node: DAGNode, - prev_node: DAGNode, - ) -> None: - """Interleave instruction sequence in between two nodes. - - .. 
note:: - If a DAGOpNode is added here, it should update node_start_time property - in the property set so that the added node is also scheduled. - This is achieved by adding operation via :meth:`_apply_scheduled_op`. - - .. note:: - - This method doesn't check if the total duration of new DAGOpNode added here - is identical to the interval (``t_end - t_start``). - A developer of the pass must guarantee this is satisfied. - If the duration is greater than the interval, your circuit may be - compiled down to the target code with extra duration on the backend compiler, - which is then played normally without error. However, the outcome of your circuit - might be unexpected due to erroneous scheduling. - - Args: - block_idx: Execution block index for this node. - qubit: The wire that the sequence is applied on. - t_start: Absolute start time of this interval. - t_end: Absolute end time of this interval. - next_node: Node that follows the sequence. - prev_node: Node ahead of the sequence. - """ - raise NotImplementedError - - def _get_node_duration(self, node: DAGNode) -> int: - """Get the duration of a node.""" - if node.op.condition_bits or isinstance(node.op, ControlFlowOp): - # As we cannot currently schedule through conditionals model - # as zero duration to avoid padding. - return 0 - - indices = [self._bit_indices[qarg] for qarg in self._map_wires(node.qargs)] - - if self._block_dag.has_calibration_for(node): - # If node has calibration, this value should be the highest priority - cal_key = tuple(indices), tuple(float(p) for p in node.op.params) - duration = self._block_dag.calibrations[node.op.name][cal_key].duration - else: - duration = node.op.duration - - if isinstance(duration, ParameterExpression): - raise TranspilerError( - f"Parameterized duration ({duration}) " - f"of {node.op.name} on qubits {indices} is not bounded." 
- ) - if duration is None: - raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") - - return duration - - def _needs_block_terminating_barrier(self, prev_node: DAGNode, curr_node: DAGNode) -> bool: - # Only barrier if not in fast-path nodes - is_fast_path_node = curr_node in self._fast_path_nodes - - def _is_terminating_barrier(node: DAGNode) -> bool: - return ( - isinstance(node.op, (Barrier, ControlFlowOp)) - and len(node.qargs) == self._block_dag.num_qubits() - ) - - return not ( - prev_node is None - or (isinstance(prev_node.op, ControlFlowOp) and isinstance(curr_node.op, ControlFlowOp)) - or _is_terminating_barrier(prev_node) - or _is_terminating_barrier(curr_node) - or is_fast_path_node - ) - - def _add_block_terminating_barrier( - self, block_idx: int, time: int, current_node: DAGNode, force: bool = False - ) -> None: - """Add a block terminating barrier to prevent topological ordering slide by. - - TODO: Fix by ensuring control-flow is a block terminator in the core circuit IR. 
- """ - # Only add a barrier to the end if a viable barrier is not already present on all qubits - # Only barrier if not in fast-path nodes - needs_terminating_barrier = True - if not force: - needs_terminating_barrier = self._needs_block_terminating_barrier( - self._prev_node, current_node - ) - - if needs_terminating_barrier: - # Terminate with a barrier to ensure topological ordering does not slide past - if self._schedule_idle_qubits: - barrier = Barrier(self._block_dag.num_qubits()) - qubits = self._block_dag.qubits - else: - barrier = Barrier(self._block_dag.num_qubits() - len(self._idle_qubits)) - qubits = [x for x in self._block_dag.qubits if x not in self._idle_qubits] - - barrier_node = self._apply_scheduled_op( - block_idx, - time, - barrier, - qubits, - [], - ) - barrier_node.op.duration = 0 - - def _visit_block( - self, - block: DAGCircuit, - wire_map: Dict[Qubit, Qubit], - pad_wires: bool = True, - ignore_idle: bool = False, - ) -> DAGCircuit: - # Push the previous block dag onto the stack - prev_node = self._prev_node - self._prev_node = None - prev_wire_map, self._wire_map = self._wire_map, wire_map - - prev_block_dag = self._block_dag - self._block_dag = new_block_dag = self._empty_dag_like( - block, pad_wires, wire_map=wire_map, ignore_idle=ignore_idle - ) - - self._block_duration = 0 - self._conditional_block = False - - for node in block_order_op_nodes(block): - self._visit_node(node) - - # Terminate the block to pad it after scheduling. 
- prev_block_duration = self._block_duration - prev_block_idx = self._current_block_idx - self._terminate_block(self._block_duration, self._current_block_idx) - - # Edge-case: Add a barrier if the final node is a fast-path - if self._prev_node in self._fast_path_nodes: - self._add_block_terminating_barrier( - prev_block_duration, prev_block_idx, self._prev_node, force=True - ) - - # Pop the previous block dag off the stack restoring it - self._block_dag = prev_block_dag - self._prev_node = prev_node - self._wire_map = prev_wire_map - - return new_block_dag - - def _visit_node(self, node: DAGNode) -> None: - if isinstance(node.op, ControlFlowOp): - if isinstance(node.op, IfElseOp): - self._visit_if_else_op(node) - else: - self._visit_control_flow_op(node) - elif node in self._node_start_time: - if isinstance(node.op, Delay): - self._visit_delay(node) - else: - self._visit_generic(node) - else: - raise TranspilerError( - f"Operation {repr(node)} is likely added after the circuit is scheduled. " - "Schedule the circuit again if you transformed it." - ) - self._prev_node = node - - def _visit_if_else_op(self, node: DAGNode) -> None: - """check if is fast-path eligible otherwise fall back - to standard ControlFlowOp handling.""" - - if self._will_use_fast_path(node): - self._fast_path_nodes.add(node) - self._visit_control_flow_op(node) - - def _will_use_fast_path(self, node: DAGNode) -> bool: - """Check if this conditional operation will be scheduled on the fastpath. - This will happen if - 1. This operation is a direct descendent of a current measurement block to be flushed - 2. The operation only operates on the qubit that is measured. - """ - # Verify IfElseOp has a direct measurement predecessor - condition_bits = node.op.condition_bits - # Fast-path valid only with a single bit. 
- if not condition_bits or len(condition_bits) > 1: - return False - - bit = condition_bits[0] - last_node, last_node_dag = self._last_node_to_touch.get(bit, (None, None)) - - last_node_in_block = last_node_dag is self._block_dag - - if not ( - last_node_in_block - and isinstance(last_node.op, Measure) - and set(self._map_wires(node.qargs)) == set(self._map_wires(last_node.qargs)) - ): - return False - - # Fast path contents are limited to gates and delays - for block in node.op.blocks: - if not all(isinstance(inst.operation, (Gate, Delay)) for inst in block.data): - return False - return True - - def _visit_control_flow_op(self, node: DAGNode) -> None: - """Visit a control-flow node to pad.""" - - # Control-flow terminator ends scheduling of block currently - block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name - self._terminate_block(t0, block_idx) - self._add_block_terminating_barrier(block_idx, t0, node) - - # Only pad non-fast path nodes - fast_path_node = node in self._fast_path_nodes - - # TODO: This is a hack required to tie nodes of control-flow - # blocks across the scheduler and block_base_padder. This is - # because the current control flow nodes store the block as a - # circuit which is not hashable. For processing we are currently - # required to convert each circuit block to a dag which is inefficient - # and causes node relationships stored in analysis to be lost between - # passes as we are constantly recreating the block dags. - # We resolve this here by extracting the cached dag blocks that were - # stored by the scheduling pass. 
- new_node_block_dags = [] - for block_idx, _ in enumerate(node.op.blocks): - block_dag = self._node_block_dags[node][block_idx] - inner_wire_map = { - inner: outer - for outer, inner in zip( - self._map_wires(node.qargs + node.cargs), - block_dag.qubits + block_dag.clbits, - ) - } - new_node_block_dags.append( - self._visit_block( - block_dag, - pad_wires=not fast_path_node, - wire_map=inner_wire_map, - ignore_idle=True, - ) - ) - - # Build new control-flow operation containing scheduled blocks - # and apply to the DAG. - new_control_flow_op = node.op.replace_blocks( - dag_to_circuit(block) for block in new_node_block_dags - ) - # Enforce that this control-flow operation contains all wires since it has now been padded - # such that each qubit is scheduled within each block. Don't added all cargs as these will not - # be padded. - if fast_path_node: - padded_qubits = node.qargs - elif not self._schedule_idle_qubits: - padded_qubits = [q for q in self._block_dag.qubits if q not in self._idle_qubits] - else: - padded_qubits = self._block_dag.qubits - self._apply_scheduled_op( - block_idx, - t0, - new_control_flow_op, - padded_qubits, - self._map_wires(node.cargs), - ) - - def _visit_delay(self, node: DAGNode) -> None: - """The padding class considers a delay instruction as idle time - rather than instruction. Delay node is not added so that - we can extract non-delay predecessors. 
- """ - block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name - # Trigger the end of a block - if block_idx > self._current_block_idx: - self._terminate_block(self._block_duration, self._current_block_idx) - self._add_block_terminating_barrier(block_idx, t0, node) - - self._conditional_block = bool(node.op.condition_bits) - - self._current_block_idx = block_idx - - t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name - self._block_duration = max(self._block_duration, t1) - - def _visit_generic(self, node: DAGNode) -> None: - """Visit a generic node to pad.""" - # Note: t0 is the relative time with respect to the current block specified - # by block_idx. - block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name - - # Trigger the end of a block - if block_idx > self._current_block_idx: - self._terminate_block(self._block_duration, self._current_block_idx) - self._add_block_terminating_barrier(block_idx, t0, node) - - # This block will not be padded as it is conditional. - # See TODO below. - self._conditional_block = bool(node.op.condition_bits) - - # Now set the current block index. - self._current_block_idx = block_idx - - t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name - self._block_duration = max(self._block_duration, t1) - - for bit in self._map_wires(node.qargs): - if bit in self._idle_qubits: - continue - # Fill idle time with some sequence - if t0 - self._idle_after.get(bit, 0) > 0: - # Find previous node on the wire, i.e. 
always the latest node on the wire - prev_node = next(self._block_dag.predecessors(self._block_dag.output_map[bit])) - self._pad( - block_idx=block_idx, - qubit=bit, - t_start=self._idle_after[bit], - t_end=t0, - next_node=node, - prev_node=prev_node, - ) - - self._idle_after[bit] = t1 - - if not isinstance(node.op, (Barrier, Delay)): - self._dirty_qubits |= set(self._map_wires(node.qargs)) - - new_node = self._apply_scheduled_op( - block_idx, - t0, - node.op, - self._map_wires(node.qargs), - self._map_wires(node.cargs), - ) - self._last_node_to_touch.update( - {bit: (new_node, self._block_dag) for bit in new_node.qargs + new_node.cargs} - ) - - def _terminate_block(self, block_duration: int, block_idx: int) -> None: - """Terminate the end of a block scheduling region.""" - # Update all other qubits as not idle so that delays are *not* - # inserted. This is because we need the delays to be inserted in - # the conditional circuit block. - self._block_duration = 0 - self._pad_until_block_end(block_duration, block_idx) - self._idle_after = {bit: 0 for bit in self._block_dag.qubits} - - def _pad_until_block_end(self, block_duration: int, block_idx: int) -> None: - # Add delays until the end of circuit. - for bit in self._block_dag.qubits: - if bit in self._idle_qubits: - continue - idle_after = self._idle_after.get(bit, 0) - if block_duration - idle_after > 0: - node = self._block_dag.output_map[bit] - prev_node = next(self._block_dag.predecessors(node)) - self._pad( - block_idx=block_idx, - qubit=bit, - t_start=idle_after, - t_end=block_duration, - next_node=node, - prev_node=prev_node, - ) - - def _apply_scheduled_op( - self, - block_idx: int, - t_start: int, - oper: Instruction, - qubits: Union[Qubit, Iterable[Qubit]], - clbits: Union[Clbit, Iterable[Clbit]] = (), - ) -> DAGNode: - """Add new operation to DAG with scheduled information. - - This is identical to apply_operation_back + updating the node_start_time propety. 
- - Args: - block_idx: Execution block index for this node. - t_start: Start time of new node. - oper: New operation that is added to the DAG circuit. - qubits: The list of qubits that the operation acts on. - clbits: The list of clbits that the operation acts on. - - Returns: - The DAGNode applied to. - """ - if isinstance(qubits, Qubit): - qubits = [qubits] - if isinstance(clbits, Clbit): - clbits = [clbits] - - new_node = self._block_dag.apply_operation_back(oper, qubits, clbits) - self.property_set["node_start_time"][new_node] = (block_idx, t_start) - return new_node - - def _map_wires(self, wires: Iterable[Bit]) -> List[Bit]: - """Map the wires from the current block to the top-level block's wires. - - TODO: We should have an easier approach to wire mapping from the transpiler. - """ - return [self._wire_map[w] for w in wires] diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py deleted file mode 100644 index 006c53feb..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py +++ /dev/null @@ -1,553 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. 
- -"""Dynamical decoupling insertion pass for IBM (dynamic circuit) backends.""" - -import warnings -from typing import Dict, List, Optional, Union - -import numpy as np -import rustworkx as rx -from qiskit.circuit import Qubit, Gate -from qiskit.circuit.delay import Delay -from qiskit.circuit.library.standard_gates import IGate, UGate, U3Gate -from qiskit.circuit.reset import Reset -from qiskit.dagcircuit import DAGCircuit, DAGNode, DAGInNode, DAGOpNode -from qiskit.quantum_info.operators.predicates import matrix_equal -from qiskit.quantum_info.synthesis import OneQubitEulerDecomposer -from qiskit.transpiler.exceptions import TranspilerError -from qiskit.transpiler.instruction_durations import InstructionDurations -from qiskit.transpiler.passes.optimization import Optimize1qGates -from qiskit.transpiler import CouplingMap - -from .block_base_padder import BlockBasePadder - - -class PadDynamicalDecoupling(BlockBasePadder): - """Dynamical decoupling insertion pass for IBM dynamic circuit backends. - - This pass works on a scheduled, physical circuit. It scans the circuit for - idle periods of time (i.e. those containing delay instructions) and inserts - a DD sequence of gates in those spots. These gates amount to the identity, - so do not alter the logical action of the circuit, but have the effect of - mitigating decoherence in those idle periods. - As a special case, the pass allows a length-1 sequence (e.g. [XGate()]). - In this case the DD insertion happens only when the gate inverse can be - absorbed into a neighboring gate in the circuit (so we would still be - replacing Delay with something that is equivalent to the identity). - This can be used, for instance, as a Hahn echo. - This pass ensures that the inserted sequence preserves the circuit exactly - (including global phase). - - .. 
jupyter-execute:: - - import numpy as np - from qiskit.circuit import QuantumCircuit - from qiskit.circuit.library import XGate - from qiskit.transpiler import PassManager, InstructionDurations - from qiskit.visualization import timeline_drawer - - from qiskit_ibm_provider.transpiler.passes.scheduling import ALAPScheduleAnalysis - from qiskit_ibm_provider.transpiler.passes.scheduling import PadDynamicalDecoupling - - circ = QuantumCircuit(4) - circ.h(0) - circ.cx(0, 1) - circ.cx(1, 2) - circ.cx(2, 3) - circ.measure_all() - durations = InstructionDurations( - [("h", 0, 50), ("cx", [0, 1], 700), ("reset", None, 10), - ("cx", [1, 2], 200), ("cx", [2, 3], 300), - ("x", None, 50), ("measure", None, 1000)] - ) - - .. jupyter-execute:: - - # balanced X-X sequence on all qubits - dd_sequence = [XGate(), XGate()] - pm = PassManager([ALAPScheduleAnalysis(durations), - PadDynamicalDecoupling(durations, dd_sequence)]) - circ_dd = pm.run(circ) - circ_dd.draw() - - .. jupyter-execute:: - - # Uhrig sequence on qubit 0 - n = 8 - dd_sequence = [XGate()] * n - def uhrig_pulse_location(k): - return np.sin(np.pi * (k + 1) / (2 * n + 2)) ** 2 - spacings = [] - for k in range(n): - spacings.append(uhrig_pulse_location(k) - sum(spacings)) - spacings.append(1 - sum(spacings)) - pm = PassManager( - [ - ALAPScheduleAnalysis(durations), - PadDynamicalDecoupling(durations, dd_sequence, qubits=[0], spacings=spacings), - ] - ) - circ_dd = pm.run(circ) - circ_dd.draw() - - .. note:: - - You need to call - :class:`~qiskit_ibm_provider.transpiler.passes.scheduling.ALAPScheduleAnalysis` - before running dynamical decoupling to guarantee your circuit satisfies acquisition - alignment constraints for dynamic circuit backends. 
- """ - - def __init__( - self, - durations: InstructionDurations, - dd_sequences: Union[List[Gate], List[List[Gate]]], - qubits: Optional[List[int]] = None, - spacings: Optional[Union[List[List[float]], List[float]]] = None, - skip_reset_qubits: bool = True, - pulse_alignment: int = 16, - extra_slack_distribution: str = "middle", - sequence_min_length_ratios: Optional[Union[int, List[int]]] = None, - insert_multiple_cycles: bool = False, - coupling_map: CouplingMap = None, - alt_spacings: Optional[Union[List[List[float]], List[float]]] = None, - schedule_idle_qubits: bool = False, - ): - """Dynamical decoupling initializer. - - Args: - durations: Durations of instructions to be used in scheduling. - dd_sequences: Sequence of gates to apply in idle spots. - Alternatively a list of gate sequences may be supplied that - will preferentially be inserted if there is a delay of sufficient - duration. This may be tuned by the optionally supplied - ``sequence_min_length_ratios``. - qubits: Physical qubits on which to apply DD. - If None, all qubits will undergo DD (when possible). - spacings: A list of lists of spacings between the DD gates. - The available slack will be divided according to this. - The list length must be one more than the length of dd_sequence, - and the elements must sum to 1. If None, a balanced spacing - will be used [d/2, d, d, ..., d, d, d/2]. This spacing only - applies to the first subcircuit, if a ``coupling_map`` is - specified - skip_reset_qubits: If True, does not insert DD on idle periods that - immediately follow initialized/reset qubits - (as qubits in the ground state are less susceptible to decoherence). - pulse_alignment: The hardware constraints for gate timing allocation. - This is usually provided from ``backend.configuration().timing_constraints``. - If provided, the delay length, i.e. ``spacing``, is implicitly adjusted to - satisfy this constraint. 
- extra_slack_distribution: The option to control the behavior of DD sequence generation. - The duration of the DD sequence should be identical to an idle time in the - scheduled quantum circuit, however, the delay in between gates comprising the sequence - should be integer number in units of dt, and it might be further truncated - when ``pulse_alignment`` is specified. This sometimes results in the duration of - the created sequence being shorter than the idle time - that you want to fill with the sequence, i.e. `extra slack`. - This option takes following values. - - * "middle": Put the extra slack to the interval at the middle of the sequence. - * "edges": Divide the extra slack as evenly as possible into - intervals at beginning and end of the sequence. - sequence_min_length_ratios: List of minimum delay length to DD sequence ratio to satisfy - in order to insert the DD sequence. For example if the X-X dynamical decoupling sequence - is 320dt samples long and the available delay is 384dt it has a ratio of 384dt/320dt=1.2. - From the perspective of dynamical decoupling this is likely to add more control noise - than decoupling error rate reductions. The defaults value is 2.0. - insert_multiple_cycles: If the available duration exceeds - 2*sequence_min_length_ratio*duration(dd_sequence) enable the insertion of multiple - rounds of the dynamical decoupling sequence in that delay. - coupling_map: directed graph representing the coupling map for the device. Specifying a - coupling map partitions the device into subcircuits, in order to apply DD sequences - with different pulse spacings within each. Currently support 2 subcircuits. - alt_spacings: A list of lists of spacings between the DD gates, for the second subcircuit, - as determined by the coupling map. If None, a balanced spacing that is staggered with - respect to the first subcircuit will be used [d, d, d, ..., d, d, 0]. - schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits. 
- This is useful for timeline visualizations, but may cause issues - for execution on large backends. - Raises: - TranspilerError: When invalid DD sequence is specified. - TranspilerError: When pulse gate with the duration which is - non-multiple of the alignment constraint value is found. - TranspilerError: When the coupling map is not supported (i.e., if degree > 3) - """ - - super().__init__(schedule_idle_qubits=schedule_idle_qubits) - self._durations = durations - - # Enforce list of DD sequences - if dd_sequences: - try: - iter(dd_sequences[0]) - except TypeError: - dd_sequences = [dd_sequences] - self._dd_sequences = dd_sequences - self._qubits = qubits - self._skip_reset_qubits = skip_reset_qubits - self._alignment = pulse_alignment - self._coupling_map = coupling_map - self._coupling_coloring = None - - if spacings is not None: - try: - iter(spacings[0]) # type: ignore - except TypeError: - spacings = [spacings] # type: ignore - if alt_spacings is not None: - try: - iter(alt_spacings[0]) # type: ignore - except TypeError: - alt_spacings = [alt_spacings] # type: ignore - self._spacings = spacings - self._alt_spacings = alt_spacings - - if self._spacings and len(self._spacings) != len(self._dd_sequences): - raise TranspilerError("Number of sequence spacings must equal number of DD sequences.") - - if self._alt_spacings: - if not self._coupling_map: - warnings.warn( - "Alternate spacings are ignored because a coupling map was not provided" - ) - elif len(self._alt_spacings) != len(self._dd_sequences): - raise TranspilerError( - "Number of alternate sequence spacings must equal number of DD sequences." 
- ) - - self._extra_slack_distribution = extra_slack_distribution - - self._dd_sequence_lengths: Dict[Qubit, List[List[Gate]]] = {} - self._sequence_phase = 0 - - if sequence_min_length_ratios is None: - # Use 2.0 as a sane default - self._sequence_min_length_ratios = [2.0 for _ in self._dd_sequences] - else: - try: - iter(sequence_min_length_ratios) # type: ignore - except TypeError: - sequence_min_length_ratios = [sequence_min_length_ratios] # type: ignore - self._sequence_min_length_ratios = sequence_min_length_ratios # type: ignore - - if len(self._sequence_min_length_ratios) != len(self._dd_sequences): - raise TranspilerError("Number of sequence lengths must equal number of DD sequences.") - - self._insert_multiple_cycles = insert_multiple_cycles - - def _pre_runhook(self, dag: DAGCircuit) -> None: - super()._pre_runhook(dag) - - if self._coupling_map: - physical_qubits = [dag.qubits.index(q) for q in dag.qubits] - subgraph = self._coupling_map.graph.subgraph(physical_qubits) - self._coupling_coloring = rx.graph_greedy_color(subgraph.to_undirected()) - if any(c > 1 for c in self._coupling_coloring.values()): - raise TranspilerError( - "This circuit topology is not supported for staggered dynamical decoupling." - "The maximum connectivity is 3 nearest neighbors per qubit." 
- ) - - spacings_required = self._spacings is None - if spacings_required: - self._spacings = [] # type: ignore - alt_spacings_required = self._alt_spacings is None and self._coupling_map is not None - if alt_spacings_required: - self._alt_spacings = [] # type: ignore - - for seq_idx, seq in enumerate(self._dd_sequences): - num_pulses = len(self._dd_sequences[seq_idx]) - - # Check if physical circuit is given - if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: - raise TranspilerError("DD runs on physical circuits only.") - - # Set default spacing otherwise validate user input - if spacings_required: - mid = 1 / num_pulses - end = mid / 2 - self._spacings.append([end] + [mid] * (num_pulses - 1) + [end]) # type: ignore - else: - if sum(self._spacings[seq_idx]) != 1 or any( # type: ignore - a < 0 for a in self._spacings[seq_idx] # type: ignore - ): - raise TranspilerError( - "The spacings must be given in terms of fractions " - "of the slack period and sum to 1." - ) - - if self._coupling_map: - if alt_spacings_required: - mid = 1 / num_pulses - self._alt_spacings.append([mid] * num_pulses + [0]) # type: ignore - else: - if sum(self._alt_spacings[seq_idx]) != 1 or any( # type: ignore - a < 0 for a in self._alt_spacings[seq_idx] # type: ignore - ): - raise TranspilerError( - "The spacings must be given in terms of fractions " - "of the slack period and sum to 1." - ) - - # Check if DD sequence is identity - if num_pulses != 1: - if num_pulses % 2 != 0: - raise TranspilerError( - "DD sequence must contain an even number of gates (or 1)." - ) - # TODO: this check should use the quantum info package in Qiskit. 
- noop = np.eye(2) - for gate in self._dd_sequences[seq_idx]: - noop = noop.dot(gate.to_matrix()) - if not matrix_equal(noop, IGate().to_matrix(), ignore_phase=True): - raise TranspilerError("The DD sequence does not make an identity operation.") - self._sequence_phase = np.angle(noop[0][0]) - - # Precompute qubit-wise DD sequence length for performance - for qubit in dag.qubits: - seq_length_ = [] - if qubit not in self._dd_sequence_lengths: - self._dd_sequence_lengths[qubit] = [] - - physical_index = dag.qubits.index(qubit) - if self._qubits and physical_index not in self._qubits: - continue - - for index, gate in enumerate(seq): - try: - # Check calibration. - gate_length = dag.calibrations[gate.name][(physical_index, gate.params)] - if gate_length % self._alignment != 0: - # This is necessary to implement lightweight scheduling logic for this pass. - # Usually the pulse alignment constraint and pulse data chunk size take - # the same value, however, we can intentionally violate this pattern - # at the gate level. For example, we can create a schedule consisting of - # a pi-pulse of 32 dt followed by a post buffer, i.e. delay, of 4 dt - # on the device with 16 dt constraint. Note that the pi-pulse length - # is multiple of 16 dt but the gate length of 36 is not multiple of it. - # Such pulse gate should be excluded. - raise TranspilerError( - f"Pulse gate {gate.name} with length non-multiple of {self._alignment} " - f"is not acceptable in {self.__class__.__name__} pass." - ) - except KeyError: - gate_length = self._durations.get(gate, physical_index) - seq_length_.append(gate_length) - # Update gate duration. - # This is necessary for current timeline drawer, i.e. scheduled. 
- - if hasattr( - gate, "to_mutable" - ): # TODO this check can be removed after Qiskit 1.0, as it is always True - gate = gate.to_mutable() - seq[index] = gate - gate.duration = gate_length - self._dd_sequence_lengths[qubit].append(seq_length_) - - def _pad( - self, - block_idx: int, - qubit: Qubit, - t_start: int, - t_end: int, - next_node: DAGNode, - prev_node: DAGNode, - ) -> None: - # This routine takes care of the pulse alignment constraint for the DD sequence. - # Note that the alignment constraint acts on the t0 of the DAGOpNode. - # Now this constrained scheduling problem is simplified to the problem of - # finding a delay amount which is a multiple of the constraint value by assuming - # that the duration of every DAGOpNode is also a multiple of the constraint value. - # - # For example, given the constraint value of 16 and XY4 with 160 dt gates. - # Here we assume current interval is 992 dt. - # - # relative spacing := [0.125, 0.25, 0.25, 0.25, 0.125] - # slack = 992 dt - 4 x 160 dt = 352 dt - # - # unconstrained sequence: 44dt-X1-88dt-Y2-88dt-X3-88dt-Y4-44dt - # constrained sequence : 32dt-X1-80dt-Y2-80dt-X3-80dt-Y4-32dt + extra slack 48 dt - # - # Now we evenly split extra slack into start and end of the sequence. - # The distributed slack should be multiple of 16. - # Start = +16, End += 32 - # - # final sequence : 48dt-X1-80dt-Y2-80dt-X3-80dt-Y4-64dt / in total 992 dt - # - # Now we verify t0 of every node starts from multiple of 16 dt. - # - # X1: 48 dt (3 x 16 dt) - # Y2: 48 dt + 160 dt + 80 dt = 288 dt (18 x 16 dt) - # Y3: 288 dt + 160 dt + 80 dt = 528 dt (33 x 16 dt) - # Y4: 368 dt + 160 dt + 80 dt = 768 dt (48 x 16 dt) - # - # As you can see, constraints on t0 are all satified without explicit scheduling. - time_interval = t_end - t_start - - if self._qubits and self._block_dag.qubits.index(qubit) not in self._qubits: - # Target physical qubit is not the target of this DD sequence. 
- self._apply_scheduled_op( - block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit - ) - return - - if ( - not isinstance(prev_node, DAGInNode) - and self._skip_reset_qubits - and isinstance(prev_node.op, Reset) - and qubit in prev_node.qargs - ): - self._dirty_qubits.remove(qubit) - - if qubit not in self._dirty_qubits: - # Previous node is the start edge or reset, i.e. qubit is ground state. - self._apply_scheduled_op( - block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit - ) - return - - for sequence_idx, _ in enumerate(self._dd_sequences): - dd_sequence = self._dd_sequences[sequence_idx] - seq_lengths = self._dd_sequence_lengths[qubit][sequence_idx] - seq_length = np.sum(seq_lengths) - seq_ratio = self._sequence_min_length_ratios[sequence_idx] - spacings = self._spacings[sequence_idx] - alt_spacings = ( - np.asarray(self._alt_spacings[sequence_idx]) if self._coupling_map else None - ) - - # Verify the delay duration exceeds the minimum time to insert - if time_interval / seq_length <= seq_ratio: - continue - - if self._insert_multiple_cycles: - num_sequences = max(int(time_interval // (seq_length * seq_ratio)), 1) - if (num_sequences % 2 == 1) and len(dd_sequence) == 1: - warnings.warn( - "Sequence would result in an odd number of DD cycles with original DD " - "sequence of length 1. This may result in non-identity sequence insertion " - "and so are defaulting to 1 cycle insertion." 
- ) - num_sequences = 1 - else: - num_sequences = 1 - - # multiple dd sequences may be inserted - if num_sequences > 1: - dd_sequence = list(dd_sequence) * num_sequences - seq_lengths = seq_lengths * num_sequences - seq_length = np.sum(seq_lengths) - spacings = spacings * num_sequences - - spacings = np.asarray(spacings) / num_sequences - slack = time_interval - seq_length - sequence_gphase = self._sequence_phase - - if slack <= 0: - continue - - if len(dd_sequence) == 1: - # Special case of using a single gate for DD - u_inv = dd_sequence[0].inverse().to_matrix() - theta, phi, lam, phase = OneQubitEulerDecomposer().angles_and_phase(u_inv) - if isinstance(next_node, DAGOpNode) and isinstance(next_node.op, (UGate, U3Gate)): - # Absorb the inverse into the successor (from left in circuit) - theta_r, phi_r, lam_r = next_node.op.params - next_node.op.params = Optimize1qGates.compose_u3( - theta_r, phi_r, lam_r, theta, phi, lam - ) - sequence_gphase += phase - elif isinstance(prev_node, DAGOpNode) and isinstance(prev_node.op, (UGate, U3Gate)): - # Absorb the inverse into the predecessor (from right in circuit) - theta_l, phi_l, lam_l = prev_node.op.params - prev_node.op.params = Optimize1qGates.compose_u3( - theta, phi, lam, theta_l, phi_l, lam_l - ) - sequence_gphase += phase - else: - # Don't do anything if there's no single-qubit gate to absorb the inverse - self._apply_scheduled_op( - block_idx, - t_start, - Delay(time_interval, self._block_dag.unit), - qubit, - ) - return - - def _constrained_length(values: np.array) -> np.array: - return self._alignment * np.floor(values / self._alignment) - - if self._coupling_map: - if self._coupling_coloring[self._dag.qubits.index(qubit)] == 0: - sub_spacings = spacings - else: - sub_spacings = alt_spacings - else: - sub_spacings = spacings - - # (1) Compute DD intervals satisfying the constraint - taus = _constrained_length(slack * sub_spacings) - extra_slack = slack - np.sum(taus) - # (2) Distribute extra slack - if 
self._extra_slack_distribution == "middle": - mid_ind = int((len(taus) - 1) / 2) - to_middle = _constrained_length(extra_slack) - taus[mid_ind] += to_middle - if extra_slack - to_middle: - # If to_middle is not a multiple value of the pulse alignment, - # it is truncated to the nearest multiple value and - # the rest of slack is added to the end. - taus[-1] += extra_slack - to_middle - elif self._extra_slack_distribution == "edges": - to_begin_edge = _constrained_length(extra_slack / 2) - taus[0] += to_begin_edge - taus[-1] += extra_slack - to_begin_edge - else: - raise TranspilerError( - f"Option extra_slack_distribution = {self._extra_slack_distribution} is invalid." - ) - - # (3) Construct DD sequence with delays - idle_after = t_start - dd_ind = 0 - # Interleave delays with DD sequence operations - for tau_idx, tau in enumerate(taus): - if tau > 0: - self._apply_scheduled_op( - block_idx, idle_after, Delay(tau, self._dag.unit), qubit - ) - idle_after += tau - - # Detect if we are on a sequence boundary - # If so skip insert of sequence to allow delays to combine - # There are two cases. - # 1. The number of delays to be inserted is equal to the number of gates. - # 2. There is an extra delay inserted after the last operation. - # The condition below handles both. 
- seq_length = int(len(taus) / num_sequences) - if len(dd_sequence) == len(taus) or tau_idx % seq_length != (seq_length - 1): - gate = dd_sequence[dd_ind] - gate_length = seq_lengths[dd_ind] - self._apply_scheduled_op(block_idx, idle_after, gate, qubit) - idle_after += gate_length - dd_ind += 1 - - self._block_dag.global_phase = self._block_dag.global_phase + sequence_gphase - return - - # DD could not be applied, delay instead - self._apply_scheduled_op( - block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit - ) - return diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py deleted file mode 100644 index fd61f8c49..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py +++ /dev/null @@ -1,78 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Padding pass to insert Delay into empty timeslots for dynamic circuit backends.""" - -from qiskit.circuit import Qubit -from qiskit.circuit.delay import Delay -from qiskit.dagcircuit import DAGNode, DAGOutNode - -from .block_base_padder import BlockBasePadder - - -class PadDelay(BlockBasePadder): - """Padding idle time with Delay instructions. - - Consecutive delays will be merged in the output of this pass. - - .. code-block::python - - durations = InstructionDurations([("x", None, 160), ("cx", None, 800)]) - - qc = QuantumCircuit(2) - qc.delay(100, 0) - qc.x(1) - qc.cx(0, 1) - - The ASAP-scheduled circuit output may become - - .. 
parsed-literal:: - - ┌────────────────┐ - q_0: ┤ Delay(160[dt]) ├──■── - └─────┬───┬──────┘┌─┴─┐ - q_1: ──────┤ X ├───────┤ X ├ - └───┘ └───┘ - - Note that the additional idle time of 60dt on the ``q_0`` wire coming from the duration difference - between ``Delay`` of 100dt (``q_0``) and ``XGate`` of 160 dt (``q_1``) is absorbed in - the delay instruction on the ``q_0`` wire, i.e. in total 160 dt. - - See :class:`BlockBasePadder` pass for details. - """ - - def __init__(self, fill_very_end: bool = True, schedule_idle_qubits: bool = False): - """Create new padding delay pass. - - Args: - fill_very_end: Set ``True`` to fill the end of circuit with delay. - schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits. - This is useful for timeline visualizations, but may cause issues for execution - on large backends. - """ - super().__init__(schedule_idle_qubits=schedule_idle_qubits) - self.fill_very_end = fill_very_end - - def _pad( - self, - block_idx: int, - qubit: Qubit, - t_start: int, - t_end: int, - next_node: DAGNode, - prev_node: DAGNode, - ) -> None: - if not self.fill_very_end and isinstance(next_node, DAGOutNode): - return - - time_interval = t_end - t_start - self._apply_scheduled_op(block_idx, t_start, Delay(time_interval, "dt"), qubit) diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py deleted file mode 100644 index b18ee32c6..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py +++ /dev/null @@ -1,643 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
-# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Scheduler for dynamic circuit backends.""" - -from abc import abstractmethod -from typing import Dict, List, Optional, Union, Set, Tuple -import itertools - -import qiskit -from qiskit.circuit.parameterexpression import ParameterExpression -from qiskit.converters import circuit_to_dag -from qiskit.transpiler.basepasses import TransformationPass -from qiskit.transpiler.passes.scheduling.time_unit_conversion import TimeUnitConversion - -from qiskit.circuit import Barrier, Clbit, ControlFlowOp, Measure, Qubit, Reset -from qiskit.circuit.bit import Bit -from qiskit.dagcircuit import DAGCircuit, DAGNode -from qiskit.transpiler.exceptions import TranspilerError - -from .utils import block_order_op_nodes - - -class BaseDynamicCircuitAnalysis(TransformationPass): - """Base class for scheduling analysis - - This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits - backends due to the limitations imposed by hardware. This is expected to evolve over time as the - dynamic circuit backends also change. - - The primary differences are that: - - * Resets and control-flow currently trigger the end of a "quantum block". The period between the end - of the block and the next is *nondeterministic* - ie., we do not know when the next block will begin (as we could be evaluating a classical - function of nondeterministic length) and therefore the - next block starts at a *relative* t=0. - * During a measurement it is possible to apply gates in parallel on disjoint qubits. - * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. - """ - - def __init__( - self, durations: qiskit.transpiler.instruction_durations.InstructionDurations - ) -> None: - """Scheduler for dynamic circuit backends. 
- - Args: - durations: Durations of instructions to be used in scheduling. - """ - self._durations = durations - - self._dag: Optional[DAGCircuit] = None - self._block_dag: Optional[DAGCircuit] = None - self._wire_map: Optional[Dict[Bit, Bit]] = None - self._node_mapped_wires: Optional[Dict[DAGNode, List[Bit]]] = None - self._node_block_dags: Dict[DAGNode, DAGCircuit] = {} - # Mapping of control-flow nodes to their containing blocks - self._block_idx_dag_map: Dict[int, DAGCircuit] = {} - # Mapping of block indices to the respective DAGCircuit - - self._current_block_idx = 0 - self._max_block_t1: Optional[Dict[int, int]] = None - # Track as we build to avoid extra pass - self._control_flow_block = False - self._node_start_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None - self._node_stop_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None - self._bit_stop_times: Optional[Dict[int, Dict[Union[Qubit, Clbit], int]]] = None - # Dictionary of blocks each containing a dictionary with the key for each bit - # in the block and its value being the final time of the bit within the block. - self._current_block_measures: Set[DAGNode] = set() - self._current_block_measures_has_reset: bool = False - self._node_tied_to: Optional[Dict[DAGNode, Set[DAGNode]]] = None - # Nodes that the scheduling of this node is tied to. - self._bit_indices: Optional[Dict[Qubit, int]] = None - - self._time_unit_converter = TimeUnitConversion(durations) - - super().__init__() - - @property - def _current_block_bit_times(self) -> Dict[Union[Qubit, Clbit], int]: - return self._bit_stop_times[self._current_block_idx] - - def _visit_block(self, block: DAGCircuit, wire_map: Dict[Qubit, Qubit]) -> None: - # Push the previous block dag onto the stack - prev_block_dag = self._block_dag - self._block_dag = block - prev_wire_map, self._wire_map = self._wire_map, wire_map - - # We must run this on the individual block - # as the current implementation does not recurse - # into the circuit structure. 
- self._time_unit_converter.run(block) - self._begin_new_circuit_block() - - for node in block_order_op_nodes(block): - self._visit_node(node) - - # Final flush - self._flush_measures() - - # Pop the previous block dag off the stack restoring it - self._block_dag = prev_block_dag - self._wire_map = prev_wire_map - - def _visit_node(self, node: DAGNode) -> None: - if isinstance(node.op, ControlFlowOp): - self._visit_control_flow_op(node) - elif node.op.condition_bits: - raise TranspilerError( - "c_if control-flow is not supported by this pass. " - 'Please apply "ConvertConditionsToIfOps" to convert these ' - "conditional operations to new-style Qiskit control-flow." - ) - else: - if isinstance(node.op, Measure): - self._visit_measure(node) - elif isinstance(node.op, Reset): - self._visit_reset(node) - else: - self._visit_generic(node) - - def _visit_control_flow_op(self, node: DAGNode) -> None: - # TODO: This is a hack required to tie nodes of control-flow - # blocks across the scheduler and block_base_padder. This is - # because the current control flow nodes store the block as a - # circuit which is not hashable. For processing we are currently - # required to convert each circuit block to a dag which is inefficient - # and causes node relationships stored in analysis to be lost between - # passes as we are constantly recreating the block dags. - # We resolve this here by caching these dags in the property set. 
- self._node_block_dags[node] = node_block_dags = [] - - t0 = max( # pylint: disable=invalid-name - self._current_block_bit_times[bit] for bit in self._map_wires(node) - ) - - # Duration is 0 as we do not schedule across terminator - t1 = t0 # pylint: disable=invalid-name - self._update_bit_times(node, t0, t1) - - for block in node.op.blocks: - self._control_flow_block = True - - new_dag = circuit_to_dag(block) - inner_wire_map = { - inner: outer - for outer, inner in zip(self._map_wires(node), new_dag.qubits + new_dag.clbits) - } - node_block_dags.append(new_dag) - self._visit_block(new_dag, inner_wire_map) - - # Begin new block for exit to "then" block. - self._begin_new_circuit_block() - - @abstractmethod - def _visit_measure(self, node: DAGNode) -> None: - raise NotImplementedError - - @abstractmethod - def _visit_reset(self, node: DAGNode) -> None: - raise NotImplementedError - - @abstractmethod - def _visit_generic(self, node: DAGNode) -> None: - raise NotImplementedError - - def _init_run(self, dag: DAGCircuit) -> None: - """Setup for initial run.""" - - self._dag = dag - self._block_dag = None - self._wire_map = {wire: wire for wire in dag.wires} - self._node_mapped_wires = {} - self._node_block_dags = {} - self._block_idx_dag_map = {} - - self._current_block_idx = 0 - self._max_block_t1 = {} - self._control_flow_block = False - - if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: - raise TranspilerError("ASAP schedule runs on physical circuits only") - - self._node_start_time = {} - self._node_stop_time = {} - self._bit_stop_times = {0: {q: 0 for q in dag.qubits + dag.clbits}} - self._current_block_measures = set() - self._current_block_measures_has_reset = False - self._node_tied_to = {} - self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} - - def _get_duration(self, node: DAGNode, dag: Optional[DAGCircuit] = None) -> int: - if node.op.condition_bits or isinstance(node.op, ControlFlowOp): - # As we cannot currently schedule 
through conditionals model - # as zero duration to avoid padding. - return 0 - - indices = [self._bit_indices[qarg] for qarg in self._map_qubits(node)] - - # Fall back to current block dag if not specified. - dag = dag or self._block_dag - - if dag.has_calibration_for(node): - # If node has calibration, this value should be the highest priority - cal_key = tuple(indices), tuple(float(p) for p in node.op.params) - duration = dag.calibrations[node.op.name][cal_key].duration - node.op.duration = duration - else: - duration = node.op.duration - - if isinstance(duration, ParameterExpression): - raise TranspilerError( - f"Parameterized duration ({duration}) " - f"of {node.op.name} on qubits {indices} is not bounded." - ) - if duration is None: - raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") - - return duration - - def _update_bit_times( # pylint: disable=invalid-name - self, node: DAGNode, t0: int, t1: int, update_cargs: bool = True - ) -> None: - self._max_block_t1[self._current_block_idx] = max( - self._max_block_t1.get(self._current_block_idx, 0), t1 - ) - - update_bits = self._map_wires(node) if update_cargs else self._map_qubits(node) - for bit in update_bits: - self._current_block_bit_times[bit] = t1 - - self._node_start_time[node] = (self._current_block_idx, t0) - self._node_stop_time[node] = (self._current_block_idx, t1) - - def _begin_new_circuit_block(self) -> None: - """Create a new timed circuit block completing the previous block.""" - self._current_block_idx += 1 - self._block_idx_dag_map[self._current_block_idx] = self._block_dag - self._control_flow_block = False - self._bit_stop_times[self._current_block_idx] = { - self._wire_map[wire]: 0 for wire in self._block_dag.wires - } - self._flush_measures() - - def _flush_measures(self) -> None: - """Flush currently accumulated measurements by resetting block measures.""" - for node in self._current_block_measures: - self._node_tied_to[node] = 
self._current_block_measures.copy() - - self._current_block_measures = set() - self._current_block_measures_has_reset = False - - def _current_block_measure_qargs(self) -> Set[Qubit]: - return set( - qarg for measure in self._current_block_measures for qarg in self._map_qubits(measure) - ) - - def _check_flush_measures(self, node: DAGNode) -> None: - if self._current_block_measure_qargs() & set(self._map_qubits(node)): - if self._current_block_measures_has_reset: - # If a reset is included we must trigger the end of a block. - self._begin_new_circuit_block() - else: - # Otherwise just trigger a measurement flush - self._flush_measures() - - def _map_wires(self, node: DAGNode) -> List[Qubit]: - """Map the wires from the current node to the top-level block's wires. - - TODO: We should have an easier approach to wire mapping from the transpiler. - """ - if node not in self._node_mapped_wires: - self._node_mapped_wires[node] = wire_map = [ - self._wire_map[q] for q in node.qargs + node.cargs - ] - return wire_map - - return self._node_mapped_wires[node] - - def _map_qubits(self, node: DAGNode) -> List[Qubit]: - """Map the qubits from the current node to the top-level block's qubits. - - TODO: We should have an easier approach to wire mapping from the transpiler. - """ - return [wire for wire in self._map_wires(node) if isinstance(wire, Qubit)] - - -class ASAPScheduleAnalysis(BaseDynamicCircuitAnalysis): - """Dynamic circuits as-soon-as-possible (ASAP) scheduling analysis pass. - - This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits - backends due to the limitations imposed by hardware. This is expected to evolve over time as the - dynamic circuit backends also change. - - In its current form this is similar to Qiskit's ASAP scheduler in which instructions - start as early as possible. - - The primary differences are that: - - * Resets and control-flow currently trigger the end of a "quantum block". 
The period between the end - of the block and the next is *nondeterministic* - ie., we do not know when the next block will begin (as we could be evaluating a classical - function of nondeterministic length) and therefore the - next block starts at a *relative* t=0. - * During a measurement it is possible to apply gates in parallel on disjoint qubits. - * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. - """ - - def run(self, dag: DAGCircuit) -> DAGCircuit: - """Run the ALAPSchedule pass on `dag`. - Args: - dag (DAGCircuit): DAG to schedule. - Raises: - TranspilerError: if the circuit is not mapped on physical qubits. - TranspilerError: if conditional bit is added to non-supported instruction. - Returns: - The scheduled DAGCircuit. - """ - self._init_run(dag) - - # Trivial wire map at the top-level - wire_map = {wire: wire for wire in dag.wires} - # Top-level dag is the entry block - self._visit_block(dag, wire_map) - - self.property_set["node_start_time"] = self._node_start_time - self.property_set["node_block_dags"] = self._node_block_dags - return dag - - def _visit_measure(self, node: DAGNode) -> None: - """Visit a measurement node. - - Measurement currently triggers the end of a deterministically scheduled block - of instructions in IBM dynamic circuits hardware. - This means that it is possible to schedule *up to* a measurement (and during its pulses) - but the measurement will be followed by a period of indeterminism. - All measurements on disjoint qubits that topologically follow another - measurement will be collected and performed in parallel. A measurement on a qubit - intersecting with the set of qubits to be measured in parallel will trigger the - end of a scheduling block with said measurement occurring in a following block - which begins another grouping sequence. 
This behavior will change in future - backend software updates.""" - - current_block_measure_qargs = self._current_block_measure_qargs() - # We handle a set of qubits here as _visit_reset currently calls - # this method and a reset may have multiple qubits. - measure_qargs = set(self._map_qubits(node)) - - t0q = max( - self._current_block_bit_times[q] for q in measure_qargs - ) # pylint: disable=invalid-name - - # If the measurement qubits overlap, we need to flush measurements and start a - # new scheduling block. - if current_block_measure_qargs & measure_qargs: - if self._current_block_measures_has_reset: - # If a reset is included we must trigger the end of a block. - self._begin_new_circuit_block() - t0q = 0 - else: - # Otherwise just trigger a measurement flush - self._flush_measures() - else: - # Otherwise we need to increment all measurements to start at the same time within the block. - t0q = max( # pylint: disable=invalid-name - itertools.chain( - [t0q], - (self._node_start_time[measure][1] for measure in self._current_block_measures), - ) - ) - - # Insert this measure into the block - self._current_block_measures.add(node) - - for measure in self._current_block_measures: - t0 = t0q # pylint: disable=invalid-name - bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} - measure_duration = self._durations.get( - Measure(), - [bit_indices[qarg] for qarg in self._map_qubits(measure)], - unit="dt", - ) - t1 = t0 + measure_duration # pylint: disable=invalid-name - self._update_bit_times(measure, t0, t1) - - def _visit_reset(self, node: DAGNode) -> None: - """Visit a reset node. - - Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware - as conditional reset is performed internally using a c_if. This means that it is - possible to schedule *up to* a reset (and during its measurement pulses) - but the reset will be followed by a period of conditional indeterminism. 
- All resets on disjoint qubits will be collected on the same qubits to be run simultaneously. - """ - # Process as measurement - self._current_block_measures_has_reset = True - self._visit_measure(node) - # Then set that we are now a conditional node. - self._control_flow_block = True - - def _visit_generic(self, node: DAGNode) -> None: - """Visit a generic node such as a gate or barrier.""" - op_duration = self._get_duration(node) - - # If the measurement qubits overlap, we need to flush the measurement group - self._check_flush_measures(node) - - t0 = max( # pylint: disable=invalid-name - self._current_block_bit_times[bit] for bit in self._map_wires(node) - ) - - t1 = t0 + op_duration # pylint: disable=invalid-name - self._update_bit_times(node, t0, t1) - - -class ALAPScheduleAnalysis(BaseDynamicCircuitAnalysis): - """Dynamic circuits as-late-as-possible (ALAP) scheduling analysis pass. - - This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits - backends due to the limitations imposed by hardware. This is expected to evolve over time as the - dynamic circuit backends also change. - - In its current form this is similar to Qiskit's ALAP scheduler in which instructions - start as late as possible. - - The primary differences are that: - - * Resets and control-flow currently trigger the end of a "quantum block". The period between the end - of the block and the next is *nondeterministic* - ie., we do not know when the next block will begin (as we could be evaluating a classical - function of nondeterministic length) and therefore the - next block starts at a *relative* t=0. - * During a measurement it is possible to apply gates in parallel on disjoint qubits. - * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. - """ - - def run(self, dag: DAGCircuit) -> None: - """Run the ASAPSchedule pass on `dag`. - Args: - dag (DAGCircuit): DAG to schedule. 
- Raises: - TranspilerError: if the circuit is not mapped on physical qubits. - TranspilerError: if conditional bit is added to non-supported instruction. - Returns: - The scheduled DAGCircuit. - """ - self._init_run(dag) - - # Trivial wire map at the top-level - wire_map = {wire: wire for wire in dag.wires} - # Top-level dag is the entry block - self._visit_block(dag, wire_map) - self._push_block_durations() - self.property_set["node_start_time"] = self._node_start_time - self.property_set["node_block_dags"] = self._node_block_dags - return dag - - def _visit_measure(self, node: DAGNode) -> None: - """Visit a measurement node. - - Measurement currently triggers the end of a deterministically scheduled block - of instructions in IBM dynamic circuits hardware. - This means that it is possible to schedule *up to* a measurement (and during its pulses) - but the measurement will be followed by a period of indeterminism. - All measurements on disjoint qubits that topologically follow another - measurement will be collected and performed in parallel. A measurement on a qubit - intersecting with the set of qubits to be measured in parallel will trigger the - end of a scheduling block with said measurement occurring in a following block - which begins another grouping sequence. This behavior will change in future - backend software updates.""" - - current_block_measure_qargs = self._current_block_measure_qargs() - # We handle a set of qubits here as _visit_reset currently calls - # this method and a reset may have multiple qubits. - measure_qargs = set(self._map_qubits(node)) - - t0q = max( - self._current_block_bit_times[q] for q in measure_qargs - ) # pylint: disable=invalid-name - - # If the measurement qubits overlap, we need to flush measurements and start a - # new scheduling block. - if current_block_measure_qargs & measure_qargs: - if self._current_block_measures_has_reset: - # If a reset is included we must trigger the end of a block. 
- self._begin_new_circuit_block() - t0q = 0 - else: - # Otherwise just trigger a measurement flush - self._flush_measures() - else: - # Otherwise we need to increment all measurements to start at the same time within the block. - t0q = max( # pylint: disable=invalid-name - itertools.chain( - [t0q], - (self._node_start_time[measure][1] for measure in self._current_block_measures), - ) - ) - - # Insert this measure into the block - self._current_block_measures.add(node) - - for measure in self._current_block_measures: - t0 = t0q # pylint: disable=invalid-name - bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} - measure_duration = self._durations.get( - Measure(), - [bit_indices[qarg] for qarg in self._map_qubits(measure)], - unit="dt", - ) - t1 = t0 + measure_duration # pylint: disable=invalid-name - self._update_bit_times(measure, t0, t1) - - def _visit_reset(self, node: DAGNode) -> None: - """Visit a reset node. - - Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware - as conditional reset is performed internally using a c_if. This means that it is - possible to schedule *up to* a reset (and during its measurement pulses) - but the reset will be followed by a period of conditional indeterminism. - All resets on disjoint qubits will be collected on the same qubits to be run simultaneously. - """ - # Process as measurement - self._current_block_measures_has_reset = True - self._visit_measure(node) - # Then set that we are now a conditional node. - self._control_flow_block = True - - def _visit_generic(self, node: DAGNode) -> None: - """Visit a generic node such as a gate or barrier.""" - - # If True we are coming from a conditional block. - # start a new block for the unconditional operations. 
- if self._control_flow_block: - self._begin_new_circuit_block() - - op_duration = self._get_duration(node) - - # If the measurement qubits overlap, we need to flush the measurement group - self._check_flush_measures(node) - - t0 = max( # pylint: disable=invalid-name - self._current_block_bit_times[bit] for bit in self._map_wires(node) - ) - - t1 = t0 + op_duration # pylint: disable=invalid-name - self._update_bit_times(node, t0, t1) - - def _push_block_durations(self) -> None: - """After scheduling of each block, pass over and push the times of all nodes.""" - - # Store the next available time to push to for the block by bit - block_bit_times = {} - # Iterated nodes starting at the first, from the node with the - # last time, preferring barriers over non-barriers - - def order_ops(item: Tuple[DAGNode, Tuple[int, int]]) -> Tuple[int, int, bool, int]: - """Iterated nodes ordering by channel, time and preferring that barriers are processed - first.""" - return ( - item[1][0], - -item[1][1], - not isinstance(item[0].op, Barrier), - self._get_duration(item[0], dag=self._block_idx_dag_map[item[1][0]]), - ) - - iterate_nodes = sorted(self._node_stop_time.items(), key=order_ops) - - new_node_start_time = {} - new_node_stop_time = {} - - def _calculate_new_times( - block: int, node: DAGNode, block_bit_times: Dict[int, Dict[Qubit, int]] - ) -> int: - max_block_time = min(block_bit_times[block][bit] for bit in self._map_qubits(node)) - - t0 = self._node_start_time[node][1] # pylint: disable=invalid-name - t1 = self._node_stop_time[node][1] # pylint: disable=invalid-name - # Determine how much to shift by - node_offset = max_block_time - t1 - new_t0 = t0 + node_offset - return new_t0 - - scheduled = set() - - def _update_time( - block: int, - node: DAGNode, - new_time: int, - block_bit_times: Dict[int, Dict[Qubit, int]], - ) -> None: - scheduled.add(node) - - new_node_start_time[node] = (block, new_time) - new_node_stop_time[node] = ( - block, - new_time + 
self._get_duration(node, dag=self._block_idx_dag_map[block]), - ) - - # Update available times by bit - for bit in self._map_qubits(node): - block_bit_times[block][bit] = new_time - - for node, ( - block, - _, - ) in iterate_nodes: # pylint: disable=invalid-name - # skip already scheduled - if node in scheduled: - continue - # Start with last time as the time to push to - if block not in block_bit_times: - block_bit_times[block] = {q: self._max_block_t1[block] for q in self._dag.wires} - - # Calculate the latest available time to push to collectively for tied nodes - tied_nodes = self._node_tied_to.get(node, None) - if tied_nodes is not None: - # Take the minimum time that will be schedulable - # self._node_tied_to includes the node itself. - new_times = [ - _calculate_new_times(block, tied_node, block_bit_times) - for tied_node in self._node_tied_to[node] - ] - new_time = min(new_times) - for tied_node in tied_nodes: - _update_time(block, tied_node, new_time, block_bit_times) - - else: - new_t0 = _calculate_new_times(block, node, block_bit_times) - _update_time(block, node, new_t0, block_bit_times) - - self._node_start_time = new_node_start_time - self._node_stop_time = new_node_stop_time diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py deleted file mode 100644 index bf7665cd1..000000000 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py +++ /dev/null @@ -1,287 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. 
- -"""Utility functions for scheduling passes.""" - -import warnings -from typing import List, Generator, Optional, Tuple, Union - -from qiskit.circuit import ControlFlowOp, Measure, Reset, Parameter -from qiskit.dagcircuit import DAGCircuit, DAGOpNode -from qiskit.transpiler.instruction_durations import ( - InstructionDurations, - InstructionDurationsType, -) -from qiskit.transpiler.exceptions import TranspilerError - - -def block_order_op_nodes(dag: DAGCircuit) -> Generator[DAGOpNode, None, None]: - """Yield nodes such that they are sorted into groups of blocks that minimize synchronization. - - Measurements are also grouped. - """ - - def _is_grouped_measure(node: DAGOpNode) -> bool: - """Does this node need to be grouped?""" - return isinstance(node.op, (Reset, Measure)) - - def _is_block_trigger(node: DAGOpNode) -> bool: - """Does this node trigger the end of a block?""" - return isinstance(node.op, ControlFlowOp) - - def _emit( - node: DAGOpNode, - grouped_measure: List[DAGOpNode], - block_triggers: List[DAGOpNode], - ) -> bool: - """Should we emit this node?""" - for measure in grouped_measure: - if dag.is_predecessor(node, measure): - return True - for block_trigger in block_triggers: - if dag.is_predecessor(node, block_trigger): - return True - - return _is_grouped_measure(node) or _is_block_trigger(node) - - # Begin processing nodes in order - next_nodes = dag.topological_op_nodes() - while next_nodes: - curr_nodes = next_nodes # Setup the next iteration nodes - next_nodes_set = set() # Nodes that will make it into the next iteration - next_nodes = [] # Nodes to process in order in the next iteration - to_push = [] # Do we push this to the very last block? - yield_measures = [] # Measures/resets we will yield first - yield_block_triggers = [] # Followed by block triggers (conditionals) - block_break = False # Did we encounter a block trigger in this iteration? 
- for node in curr_nodes: - # If we have added this node to the next set of nodes - # skip for now. - if node in next_nodes_set: - next_nodes.append(node) - continue - - # If this nodes is a measurement - # push on the measurements to process - if _is_grouped_measure(node): - block_break = True - node_descendants = dag.descendants(node) - next_nodes_set |= set(node_descendants) - yield_measures.append(node) - # If this node is a block push this onto - # the block trigger list. - elif _is_block_trigger(node): - block_break = True - node_descendants = dag.descendants(node) - next_nodes_set |= set(node_descendants) - yield_block_triggers.append(node) - # Otherwise we push onto the final list of blocks to emit - # as part of the final block. - else: - to_push.append(node) - - new_to_push = [] - for node in to_push: - node_descendants = dag.descendants(node) - if any( - _emit(descendant, yield_measures, yield_block_triggers) - for descendant in node_descendants - if isinstance(descendant, DAGOpNode) - ): - yield node - else: - new_to_push.append(node) - - to_push = new_to_push - - # First emit the measurements which will feed - for node in yield_measures: - yield node - # Into the block triggers we will emit. - for node in yield_block_triggers: - yield node - - # We're at the last block and emit the final nodes - if not block_break: - for node in to_push: - yield node - break - # Otherwise emit the final nodes - # Add to the front of the list to be processed next - to_push.extend(next_nodes) - next_nodes = to_push - - -InstrKey = Union[ - Tuple[str, None, None], - Tuple[str, Tuple[int], None], - Tuple[str, Tuple[int], Tuple[Parameter]], -] - - -class DynamicCircuitInstructionDurations(InstructionDurations): - """For dynamic circuits the IBM Qiskit backend currently - reports instruction durations that differ compared with those - required for the legacy Qobj-based path. For now we use this - class to report updated InstructionDurations. 
- TODO: This would be mitigated by a specialized Backend/Target for - dynamic circuit backends. - """ - - MEASURE_PATCH_CYCLES = 160 - MEASURE_PATCH_ODD_OFFSET = 64 - - def __init__( - self, - instruction_durations: Optional[InstructionDurationsType] = None, - dt: float = None, - enable_patching: bool = True, - ): - """Dynamic circuit instruction durations.""" - self._enable_patching = enable_patching - super().__init__(instruction_durations=instruction_durations, dt=dt) - - def update( - self, inst_durations: Optional[InstructionDurationsType], dt: float = None - ) -> "DynamicCircuitInstructionDurations": - """Update self with inst_durations (inst_durations overwrite self). Overrides the default - durations for certain hardcoded instructions. - - Args: - inst_durations: Instruction durations to be merged into self (overwriting self). - dt: Sampling duration in seconds of the target backend. - - Returns: - InstructionDurations: The updated InstructionDurations. - - Raises: - TranspilerError: If the format of instruction_durations is invalid. - """ - - # First update as normal - super().update(inst_durations, dt=dt) - - if not self._enable_patching or inst_durations is None: - return self - - # Then update required instructions. This code is ugly - # because the InstructionDurations code is handling too many - # formats in update and this code must also. 
- if isinstance(inst_durations, InstructionDurations): - for key in inst_durations.keys(): - self._patch_instruction(key) - else: - for name, qubits, _, parameters, _ in inst_durations: - if isinstance(qubits, int): - qubits = [qubits] - - if isinstance(parameters, (int, float)): - parameters = [parameters] - - if qubits is None: - key = (name, None, None) - elif parameters is None: - key = (name, tuple(qubits), None) - else: - key = (name, tuple(qubits), tuple(parameters)) - - self._patch_instruction(key) - - return self - - def _patch_instruction(self, key: InstrKey) -> None: - """Dispatcher logic for instruction patches""" - name = key[0] - if name == "measure": - self._patch_measurement(key) - elif name == "reset": - self._patch_reset(key) - - def _patch_measurement(self, key: InstrKey) -> None: - """Patch measurement duration by extending duration by 160dt as temporarily - required by the dynamic circuit backend. - """ - prev_duration, unit = self._get_duration_dt(key) - if unit != "dt": - raise TranspilerError('Can currently only patch durations of "dt".') - odd_cycle_correction = self._get_odd_cycle_correction() - self._patch_key(key, prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, unit) - # Enforce patching of reset on measurement update - self._patch_reset(("reset", key[1], key[2])) - - def _patch_reset(self, key: InstrKey) -> None: - """Patch reset duration by extending duration by measurement patch as temporarily - required by the dynamic circuit backend. 
- """ - # We patch the reset to be the duration of the measurement if it - # is available as it currently - # triggers the end of scheduling after the measurement pulse - measure_key = ("measure", key[1], key[2]) - try: - measure_duration, unit = self._get_duration_dt(measure_key) - self._patch_key(key, measure_duration, unit) - except KeyError: - # Fall back to reset key if measure not available - prev_duration, unit = self._get_duration_dt(key) - if unit != "dt": - raise TranspilerError('Can currently only patch durations of "dt".') - odd_cycle_correction = self._get_odd_cycle_correction() - self._patch_key( - key, - prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, - unit, - ) - - def _get_duration_dt(self, key: InstrKey) -> Tuple[int, str]: - """Handling for the complicated structure of this class. - - TODO: This class implementation should be simplified in Qiskit. Too many edge cases. - """ - if key[1] is None and key[2] is None: - return self.duration_by_name[key[0]] - elif key[2] is None: - return self.duration_by_name_qubits[(key[0], key[1])] - - return self.duration_by_name_qubits_params[key] - - def _patch_key(self, key: InstrKey, duration: int, unit: str) -> None: - """Handling for the complicated structure of this class. - - TODO: This class implementation should be simplified in Qiskit. Too many edge cases. 
- """ - if key[1] is None and key[2] is None: - self.duration_by_name[key[0]] = (duration, unit) - elif key[2] is None: - self.duration_by_name_qubits[(key[0], key[1])] = (duration, unit) - - self.duration_by_name_qubits_params[key] = (duration, unit) - - def _get_odd_cycle_correction(self) -> int: - """Determine the amount of the odd cycle correction to apply - For devices with short gates with odd lenghts we add an extra 16dt to the measurement - - TODO: Eliminate the need for this correction - """ - key_pulse = "sx" - key_qubit = 0 - try: - key_duration = self.get(key_pulse, key_qubit, "dt") - except TranspilerError: - warnings.warn( - f"No {key_pulse} gate found for {key_qubit} for detection of " - "short odd gate lengths, default measurement timing will be used." - ) - key_duration = 160 # keyPulse gate not found - - if key_duration < 160 and key_duration % 32: - return self.MEASURE_PATCH_ODD_OFFSET - return 0 diff --git a/qiskit_ibm_runtime/transpiler/plugin.py b/qiskit_ibm_runtime/transpiler/plugin.py deleted file mode 100644 index 75f70cfe4..000000000 --- a/qiskit_ibm_runtime/transpiler/plugin.py +++ /dev/null @@ -1,98 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2022. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. 
- -"""Plugin for IBM provider backend transpiler stages.""" - -from typing import Optional - -from qiskit.transpiler.passmanager import PassManager -from qiskit.transpiler.passmanager_config import PassManagerConfig -from qiskit.transpiler.preset_passmanagers.plugin import PassManagerStagePlugin -from qiskit.transpiler.preset_passmanagers import common -from qiskit.transpiler.passes import ConvertConditionsToIfOps - -from qiskit_ibm_provider.transpiler.passes.basis.convert_id_to_delay import ( - ConvertIdToDelay, -) - - -class IBMTranslationPlugin(PassManagerStagePlugin): - """A translation stage plugin for targeting Qiskit circuits - to IBM Quantum systems.""" - - def pass_manager( - self, - pass_manager_config: PassManagerConfig, - optimization_level: Optional[int] = None, - ) -> PassManager: - """Build IBMTranslationPlugin PassManager.""" - - translator_pm = common.generate_translation_passmanager( - target=pass_manager_config.target, - basis_gates=pass_manager_config.basis_gates, - approximation_degree=pass_manager_config.approximation_degree, - coupling_map=pass_manager_config.coupling_map, - backend_props=pass_manager_config.backend_properties, - unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, - unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, - hls_config=pass_manager_config.hls_config, - ) - - plugin_passes = [] - instruction_durations = pass_manager_config.instruction_durations - if instruction_durations: - plugin_passes.append(ConvertIdToDelay(instruction_durations)) - - return PassManager(plugin_passes) + translator_pm - - -class IBMDynamicTranslationPlugin(PassManagerStagePlugin): - """A translation stage plugin for targeting Qiskit circuits - to IBM Quantum systems.""" - - def pass_manager( - self, - pass_manager_config: PassManagerConfig, - optimization_level: Optional[int] = None, - ) -> PassManager: - """Build IBMTranslationPlugin PassManager.""" - - translator_pm = 
common.generate_translation_passmanager( - target=pass_manager_config.target, - basis_gates=pass_manager_config.basis_gates, - approximation_degree=pass_manager_config.approximation_degree, - coupling_map=pass_manager_config.coupling_map, - backend_props=pass_manager_config.backend_properties, - unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, - unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, - hls_config=pass_manager_config.hls_config, - ) - - instruction_durations = pass_manager_config.instruction_durations - plugin_passes = [] - if pass_manager_config.target is not None: - id_supported = "id" in pass_manager_config.target - else: - id_supported = "id" in pass_manager_config.basis_gates - - if instruction_durations and not id_supported: - plugin_passes.append(ConvertIdToDelay(instruction_durations)) - - # Only inject control-flow conversion pass at level 0 and level 1. As of - # qiskit 0.22.x transpile() with level 2 and 3 does not support - # control flow instructions (including if_else). This can be - # removed when higher optimization levels support control flow - # instructions. 
- if optimization_level in {0, 1}: - plugin_passes += [ConvertConditionsToIfOps()] - - return PassManager(plugin_passes) + translator_pm diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index 77cd51902..bec75ee44 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -13,13 +13,11 @@ """Tests for backend functions using real runtime service.""" from unittest import SkipTest, mock -from unittest.mock import patch from datetime import datetime, timedelta import copy from qiskit.transpiler.target import Target from qiskit import QuantumCircuit -from qiskit.providers.models import QasmBackendConfiguration from qiskit.providers.exceptions import QiskitBackendNotFoundError from qiskit.test.reference_circuits import ReferenceCircuits From e98456b45dffb09319c36338cf04dcef0147b226 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 09:38:59 +0000 Subject: [PATCH 24/47] Removed instance parameter from tests with backend.run() --- test/integration/test_ibm_job.py | 4 +--- test/integration/test_ibm_job_attributes.py | 4 +--- test/integration/test_ibm_qasm_simulator.py | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index f6fd9f170..1777d0f47 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -58,9 +58,7 @@ def setUpClass(cls, backend: IBMBackend, dependencies: IntegrationTestDependenci """Initial class level setup.""" # pylint: disable=arguments-differ super().setUpClass(dependencies=dependencies) - cls.sim_backend = dependencies.service.backend( - "ibmq_qasm_simulator", instance=dependencies.instance - ) + cls.sim_backend = dependencies.service.backend("ibmq_qasm_simulator") cls.real_device_backend = backend cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend) cls.sim_job = cls.sim_backend.run(cls.bell) diff --git a/test/integration/test_ibm_job_attributes.py 
b/test/integration/test_ibm_job_attributes.py index 167586f4f..a94b72c50 100644 --- a/test/integration/test_ibm_job_attributes.py +++ b/test/integration/test_ibm_job_attributes.py @@ -60,9 +60,7 @@ def setUpClass(cls, dependencies: IntegrationTestDependencies) -> None: super().setUpClass() cls.dependencies = dependencies cls.service = dependencies.service - cls.sim_backend = dependencies.service.backend( - "ibmq_qasm_simulator", instance=dependencies.instance - ) + cls.sim_backend = dependencies.service.backend("ibmq_qasm_simulator") cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend) cls.sim_job = cls.sim_backend.run(cls.bell) cls.last_week = datetime.now() - timedelta(days=7) diff --git a/test/integration/test_ibm_qasm_simulator.py b/test/integration/test_ibm_qasm_simulator.py index a03bb3697..c18964827 100644 --- a/test/integration/test_ibm_qasm_simulator.py +++ b/test/integration/test_ibm_qasm_simulator.py @@ -38,9 +38,7 @@ def setUp(self, backend: IBMBackend, dependencies: IntegrationTestDependencies) """Initial test setup.""" # pylint: disable=arguments-differ super().setUp() - self.sim_backend = self.service.backend( - "ibmq_qasm_simulator", instance=dependencies.instance - ) + self.sim_backend = self.service.backend("ibmq_qasm_simulator") self.real_device_backend = backend def test_execute_one_circuit_simulator_online(self): From 3806d0ad1fee076fa5a3d53b2d9df7dc805c350e Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 10:14:28 +0000 Subject: [PATCH 25/47] Removed instance from decorator --- test/decorators.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/decorators.py b/test/decorators.py index b81c9b750..007b7944e 100644 --- a/test/decorators.py +++ b/test/decorators.py @@ -187,12 +187,11 @@ def _wrapper(self, *args, **kwargs): if not staging: raise SkipTest("Tests not supported on staging.") if backend_name: - _backend = service.backend(name=backend_name, instance=dependencies.instance) + _backend 
= service.backend(name=backend_name) else: _backend = service.least_busy( min_num_qubits=min_num_qubits, simulator=simulator, - instance=dependencies.instance, ) if not _backend: # pylint: disable=broad-exception-raised From 191837ed236534d88d2211bb22133cd9eb6cc8c0 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 10:19:41 +0000 Subject: [PATCH 26/47] Changed test to run on quantum channel only --- test/integration/test_backend.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index bec75ee44..b53d59e06 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -234,6 +234,7 @@ def test_paused_backend_warning(self): with self.assertWarns(Warning): backend.run(ReferenceCircuits.bell()) + @quantum_only def test_backend_wrong_instance(self): """Test that an error is raised when retrieving a backend not in the instance.""" backends = self.service.backends() From ddcc32664805f6bd42b1b4057d930413d62cc896 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 10:20:31 +0000 Subject: [PATCH 27/47] Removed instance parameter when getting backend --- test/integration/test_ibm_job.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index 1777d0f47..0ab89b85b 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -117,7 +117,7 @@ def test_run_multiple_simulator(self): def test_cancel(self): """Test job cancellation.""" # Find the most busy backend - backend = most_busy_backend(self.service, instance=self.dependencies.instance) + backend = most_busy_backend(self.service) submit_and_cancel(backend, self.log) def test_retrieve_jobs(self): @@ -371,7 +371,7 @@ def job_canceller(job_, exit_event, wait): def test_wait_for_final_state_timeout(self): """Test waiting for job to reach final state times out.""" - backend = 
most_busy_backend(TestIBMJob.service, instance=self.dependencies.instance) + backend = most_busy_backend(TestIBMJob.service) job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend)) try: self.assertRaises(RuntimeJobTimeoutError, job.wait_for_final_state, timeout=0.1) From 2f24d01aa5cfc0c20cd0a78db94038262b62d59c Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 11:29:47 +0000 Subject: [PATCH 28/47] lint --- test/integration/test_ibm_qasm_simulator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/test_ibm_qasm_simulator.py b/test/integration/test_ibm_qasm_simulator.py index c18964827..1cb074c84 100644 --- a/test/integration/test_ibm_qasm_simulator.py +++ b/test/integration/test_ibm_qasm_simulator.py @@ -36,6 +36,7 @@ class TestIBMQasmSimulator(IBMIntegrationTestCase): @integration_test_setup_with_backend(simulator=False) def setUp(self, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: """Initial test setup.""" + # pylint: disable=unused-argument # pylint: disable=arguments-differ super().setUp() self.sim_backend = self.service.backend("ibmq_qasm_simulator") From f900340260a26da19385ae25904647f3b842894a Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 12:04:26 +0000 Subject: [PATCH 29/47] Copied transpiler directory from the provider --- qiskit_ibm_runtime/transpiler/__init__.py | 31 + .../transpiler/passes/__init__.py | 36 + .../transpiler/passes/basis/__init__.py | 23 + .../passes/basis/convert_id_to_delay.py | 87 +++ .../transpiler/passes/scheduling/__init__.py | 397 +++++++++++ .../passes/scheduling/block_base_padder.py | 641 +++++++++++++++++ .../passes/scheduling/dynamical_decoupling.py | 575 +++++++++++++++ .../transpiler/passes/scheduling/pad_delay.py | 78 ++ .../transpiler/passes/scheduling/scheduler.py | 665 ++++++++++++++++++ .../transpiler/passes/scheduling/utils.py | 289 ++++++++ qiskit_ibm_runtime/transpiler/plugin.py | 98 +++ 11 files changed, 2920 
insertions(+) create mode 100644 qiskit_ibm_runtime/transpiler/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py create mode 100644 qiskit_ibm_runtime/transpiler/plugin.py diff --git a/qiskit_ibm_runtime/transpiler/__init__.py b/qiskit_ibm_runtime/transpiler/__init__.py new file mode 100644 index 000000000..d6e62daa4 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/__init__.py @@ -0,0 +1,31 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +==================================================================== +IBM Backend Transpiler Tools (:mod:`qiskit_ibm_provider.transpiler`) +==================================================================== + +A collection of transpiler tools for working with IBM Quantum's +next-generation backends that support advanced "dynamic circuit" +capabilities. 
Ie., circuits with support for classical +compute and control-flow/feedback based off of measurement results. + +Transpiler Passes +================== + +.. autosummary:: + :toctree: ../stubs/ + + passes + +""" diff --git a/qiskit_ibm_runtime/transpiler/passes/__init__.py b/qiskit_ibm_runtime/transpiler/passes/__init__.py new file mode 100644 index 000000000..2fe16514c --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/__init__.py @@ -0,0 +1,36 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +================================================================ +Transpiler Passes (:mod:`qiskit_ibm_provider.transpiler.passes`) +================================================================ + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes + +A collection of transpiler passes for IBM backends. + +.. autosummary:: + :toctree: ../stubs/ + + basis + scheduling + + +""" + +from .basis import ConvertIdToDelay + +# circuit scheduling +from .scheduling import ASAPScheduleAnalysis +from .scheduling import PadDynamicalDecoupling +from .scheduling import PadDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py b/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py new file mode 100644 index 000000000..0a71af010 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py @@ -0,0 +1,23 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. 
You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +========================================================== +Basis (:mod:`qiskit_ibm_provider.transpiler.passes.basis`) +========================================================== + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes.basis + +Passes to layout circuits to IBM backend's instruction sets. +""" + +from .convert_id_to_delay import ConvertIdToDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py b/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py new file mode 100644 index 000000000..3906d9046 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py @@ -0,0 +1,87 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Pass to convert Id gate operations to a delay instruction.""" + +from typing import Dict + +from qiskit.converters import dag_to_circuit, circuit_to_dag + +from qiskit.circuit import ControlFlowOp +from qiskit.circuit import Delay +from qiskit.circuit.library import IGate +from qiskit.dagcircuit import DAGCircuit +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.instruction_durations import InstructionDurations + + +class ConvertIdToDelay(TransformationPass): + """Convert :class:`qiskit.circuit.library.standard_gates.IGate` to + a delay of the corresponding length. + """ + + def __init__(self, durations: InstructionDurations, gate: str = "sx"): + """Convert :class:`qiskit.circuit.library.IGate` to a + Convert :class:`qiskit.circuit.Delay`. + + Args: + duration: Duration of the delay to replace the identity gate with. + gate: Single qubit gate to extract duration from. + """ + self.durations = durations + self.gate = gate + self._cached_durations: Dict[int, int] = {} + + super().__init__() + + def run(self, dag: DAGCircuit) -> DAGCircuit: + self._run_inner(dag) + return dag + + def _run_inner(self, dag: DAGCircuit) -> bool: + """Run the pass on one :class:`.DAGCircuit`, mutating it. 
Returns ``True`` if the circuit + was modified and ``False`` if not.""" + modified = False + qubit_index_map = {bit: index for index, bit in enumerate(dag.qubits)} + for node in dag.op_nodes(): + if isinstance(node.op, ControlFlowOp): + modified_blocks = False + new_dags = [] + for block in node.op.blocks: + new_dag = circuit_to_dag(block) + modified_blocks |= self._run_inner(new_dag) + new_dags.append(new_dag) + if not modified_blocks: + continue + dag.substitute_node( + node, + node.op.replace_blocks(dag_to_circuit(block) for block in new_dags), + inplace=True, + ) + elif isinstance(node.op, IGate): + delay_op = Delay(self._get_duration(qubit_index_map[node.qargs[0]])) + dag.substitute_node(node, delay_op, inplace=True) + + modified = True + + return modified + + def _get_duration(self, qubit: int) -> int: + """Get the duration of a gate in dt.""" + duration = self._cached_durations.get(qubit, None) + if duration: + return duration + + duration = self.durations.get(self.gate, qubit) + self._cached_durations[qubit] = duration + + return duration diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py new file mode 100644 index 000000000..c3017e9bc --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py @@ -0,0 +1,397 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +""" +==================================================================== +Scheduling (:mod:`qiskit_ibm_provider.transpiler.passes.scheduling`) +==================================================================== + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes.scheduling + +A collection of scheduling passes for working with IBM Quantum's next-generation +backends that support advanced "dynamic circuit" capabilities. Ie., +circuits with support for classical control-flow/feedback based off +of measurement results. + +.. warning:: + You should not mix these scheduling passes with Qiskit's builtin scheduling + passes as they will negatively interact with the scheduling routines for + dynamic circuits. This includes setting ``scheduling_method`` in + :func:`~qiskit.compiler.transpile` or + :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. + +Below we demonstrate how to schedule and pad a teleportation circuit with delays +for a dynamic circuit backend's execution model: + +.. jupyter-execute:: + + from qiskit.circuit import ClassicalRegister, QuantumCircuit, QuantumRegister + from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager + from qiskit.transpiler.passmanager import PassManager + + from qiskit_ibm_provider.transpiler.passes.scheduling import DynamicCircuitInstructionDurations + from qiskit_ibm_provider.transpiler.passes.scheduling import ALAPScheduleAnalysis + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDelay + from qiskit.providers.fake_provider import FakeJakarta + + + backend = FakeJakarta() + + # Temporary workaround for mock backends. For real backends this is not required. + backend.configuration().basis_gates.append("if_else") + + + # Use this duration class to get appropriate durations for dynamic + # circuit backend scheduling + durations = DynamicCircuitInstructionDurations.from_backend(backend) + # Generate the main Qiskit transpile passes. 
+ pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + # Configure the as-late-as-possible scheduling pass + pm.scheduling = PassManager([ALAPScheduleAnalysis(durations), PadDelay()]) + + qr = QuantumRegister(3) + crz = ClassicalRegister(1, name="crz") + crx = ClassicalRegister(1, name="crx") + result = ClassicalRegister(1, name="result") + + teleport = QuantumCircuit(qr, crz, crx, result, name="Teleport") + + teleport.h(qr[1]) + teleport.cx(qr[1], qr[2]) + teleport.cx(qr[0], qr[1]) + teleport.h(qr[0]) + teleport.measure(qr[0], crz) + teleport.measure(qr[1], crx) + with teleport.if_test((crz, 1)): + teleport.z(qr[2]) + with teleport.if_test((crx, 1)): + teleport.x(qr[2]) + teleport.measure(qr[2], result) + + # Transpile. + scheduled_teleport = pm.run(teleport) + + scheduled_teleport.draw(output="mpl") + + +Instead of padding with delays we may also insert a dynamical decoupling sequence +using the :class:`PadDynamicalDecoupling` pass as shown below: + +.. jupyter-execute:: + + from qiskit.circuit.library import XGate + + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDynamicalDecoupling + + + dd_sequence = [XGate(), XGate()] + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + dd_teleport = pm.run(teleport) + + dd_teleport.draw(output="mpl") + +When compiling a circuit with Qiskit, it is more efficient and more robust to perform all the +transformations in a single transpilation. This has been done above by extending Qiskit's preset +pass managers. For example, Qiskit's :func:`~qiskit.compiler.transpile` function internally builds +its pass set by using :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. +This returns instances of :class:`~qiskit.transpiler.StagedPassManager`, which can be extended. 
+ + +Scheduling old format ``c_if`` conditioned gates +------------------------------------------------ + +Scheduling with old format ``c_if`` conditioned gates is not supported. + +.. jupyter-execute:: + + qc_c_if = QuantumCircuit(1, 1) + qc_c_if.x(0).c_if(0, 1) + qc_c_if.draw(output="mpl") + +The :class:`.IBMBackend` configures a translation plugin +:class:`.IBMTranslationPlugin` to automatically +apply transformations and optimizations for IBM hardware backends when invoking +:func:`~qiskit.compiler.transpile`. This will automatically convert all old style ``c_if`` +conditioned gates to new-style control-flow. +We may then schedule the transpiled circuit without further modification. + +.. jupyter-execute:: + + # Temporary workaround for mock backends. For real backends this is not required. + backend.get_translation_stage_plugin = lambda: "ibm_dynamic_circuits" + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + qc_if_dd = pm.run(qc_c_if, backend) + qc_if_dd.draw(output="mpl") + + +If you are not using the transpiler plugin stages to +work around this please manually run the pass +:class:`qiskit.transpiler.passes.ConvertConditionsToIfOps` +prior to your scheduling pass. + +.. 
jupyter-execute:: + + from qiskit.transpiler.passes import ConvertConditionsToIfOps + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ConvertConditionsToIfOps(), + ALAPScheduleAnalysis(durations), + PadDelay(), + ] + ) + + qc_if_dd = pm.run(qc_c_if) + qc_if_dd.draw(output="mpl") + + +Exploiting IBM backend's local parallel "fast-path" +--------------------------------------------------- + +IBM quantum hardware supports a localized "fast-path" which enables a block of gates +applied to a *single qubit* that are conditional on an immediately predecessor measurement +*of the same qubit* to be completed with lower latency. The hardware is also +able to do this in *parallel* on disjoint qubits that satisfy this condition. + +For example, the conditional gates below are performed in parallel with lower latency +as the measurements flow directly into the conditional blocks which in turn only apply +gates to the same measurement qubit. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + qc.x(0) + with qc.if_test((1, 1)): + qc.x(1) + + qc.draw(output="mpl") + + +The circuit below will not use the fast-path as the conditional gate is +on a different qubit than the measurement qubit. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(1) + + qc.draw(output="mpl") + +Similarly, the circuit below contains gates on multiple qubits +and will not be performed using the fast-path. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(0) + qc.x(1) + + qc.draw(output="mpl") + +A fast-path block may contain multiple gates as long as they are on the fast-path qubit. 
+If there are multiple fast-path blocks being performed in parallel each block will be +padded out to the duration of the longest block. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + qc.x(0) + # Will be padded out to a duration of 1600 on the backend. + with qc.if_test((1, 1)): + qc.delay(1600, 1) + + qc.draw(output="mpl") + +This behavior is also applied to the else condition of a fast-path eligible branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 1) + qc.measure(0, 0) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)) as else_: + qc.x(0) + # Will be padded out to a duration of 1600 on the backend. + with else_: + qc.delay(1600, 0) + + qc.draw(output="mpl") + + +If a single measurement result is used with several conditional blocks, if there is a fast-path +eligible block it will be applied followed by the non-fast-path blocks which will execute with +the standard higher latency conditional branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + # Uses fast-path + qc.x(0) + with qc.if_test((0, 1)): + # Does not use fast-path + qc.x(1) + + qc.draw(output="mpl") + +If you wish to prevent the usage of the fast-path you may insert a barrier between the measurement and +the conditional branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 2) + qc.measure(0, 0) + # Barrier prevents the fast-path. + qc.barrier() + with qc.if_test((0, 1)): + qc.x(0) + + qc.draw(output="mpl") + +Conditional measurements are not eligible for the fast-path. + +.. 
jupyter-execute:: + + qc = QuantumCircuit(1, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + # Does not use the fast-path + qc.measure(0, 1) + + qc.draw(output="mpl") + +Similarly nested control-flow is not eligible. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 1) + qc.measure(0, 0) + with qc.if_test((0, 1)): + # Does not use the fast-path + qc.x(0) + with qc.if_test((0, 1)): + qc.x(0) + + qc.draw(output="mpl") + + +The scheduler is aware of the fast-path behavior and will not insert delays on idle qubits +in blocks that satisfy the fast-path conditions so as to avoid preventing the backend +compiler from performing the necessary optimizations to utilize the fast-path. If +there are fast-path blocks that will be performed in parallel they currently *will not* +be padded out by the scheduler to ensure they are of the same duration in Qiskit + +.. jupyter-execute:: + + dd_sequence = [XGate(), XGate()] + + pm = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + with qc.if_test((0, 1)): + qc.x(0) + # Is currently not padded to ensure + # a duration of 1000. If you desire + # this you would need to manually add + # qc.delay(840, 0) + with qc.if_test((1, 1)): + qc.delay(1000, 0) + + + qc.draw(output="mpl") + + qc_dd = pm.run(qc) + + qc_dd.draw(output="mpl") + +.. note:: + If there are qubits that are *not* involved in a fast-path decision it is not + currently possible to use them in a fast-path branch in parallel with the fast-path + qubits resulting from a measurement. This will be revised in the future as we + further improve these capabilities. + + For example: + + .. 
jupyter-execute:: + + qc = QuantumCircuit(3, 2) + qc.x(1) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(0) + # Qubit 1 sits idle throughout the fast-path decision + with qc.if_test((1, 0)): + # Qubit 2 is idle but there is no measurement + # to make it fast-path eligible. This will + # however avoid a communication event in the hardware + # since the condition is compile time evaluated. + qc.x(2) + + qc.draw(output="mpl") + + +Scheduling & Dynamical Decoupling +================================= +.. autosummary:: + :toctree: ../stubs/ + + BlockBasePadder + ALAPScheduleAnalysis + ASAPScheduleAnalysis + DynamicCircuitInstructionDurations + PadDelay + PadDynamicalDecoupling +""" + +from .block_base_padder import BlockBasePadder +from .dynamical_decoupling import PadDynamicalDecoupling +from .pad_delay import PadDelay +from .scheduler import ALAPScheduleAnalysis, ASAPScheduleAnalysis +from .utils import DynamicCircuitInstructionDurations diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py new file mode 100644 index 000000000..833bb5253 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py @@ -0,0 +1,641 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Padding pass to fill timeslots for IBM (dynamic circuit) backends.""" + +from typing import Dict, Iterable, List, Optional, Union, Set + +from qiskit.circuit import ( + Qubit, + Clbit, + ControlFlowOp, + Gate, + IfElseOp, + Instruction, + Measure, +) +from qiskit.circuit.bit import Bit +from qiskit.circuit.library import Barrier +from qiskit.circuit.delay import Delay +from qiskit.circuit.parameterexpression import ParameterExpression +from qiskit.converters import dag_to_circuit +from qiskit.dagcircuit import DAGCircuit, DAGNode +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.exceptions import TranspilerError + +from .utils import block_order_op_nodes + + +class BlockBasePadder(TransformationPass): + """The base class of padding pass. + + This pass requires one of scheduling passes to be executed before itself. + Since there are multiple scheduling strategies, the selection of scheduling + pass is left in the hands of the pass manager designer. + Once a scheduling analysis pass is run, ``node_start_time`` is generated + in the :attr:`property_set`. This information is represented by a python dictionary of + the expected instruction execution times keyed on the node instances. + The padding pass expects all ``DAGOpNode`` in the circuit to be scheduled. + + This base class doesn't define any sequence to interleave, but it manages + the location where the sequence is inserted, and provides a set of information necessary + to construct the proper sequence. Thus, a subclass of this pass just needs to implement + :meth:`_pad` method, in which the subclass constructs a circuit block to insert. + This mechanism removes lots of boilerplate logic to manage whole DAG circuits. + + Note that padding pass subclasses should define interleaving sequences satisfying: + + - Interleaved sequence does not change start time of other nodes + - Interleaved sequence should have total duration of the provided ``time_interval``. 
+ + Any manipulation violating these constraints may prevent this base pass from correctly + tracking the start time of each instruction, + which may result in violation of hardware alignment constraints. + """ + + def __init__(self, schedule_idle_qubits: bool = False) -> None: + self._node_start_time = None + self._node_block_dags = None + self._idle_after: Optional[Dict[Qubit, int]] = None + self._root_dag = None + self._dag = None + self._block_dag = None + self._prev_node: Optional[DAGNode] = None + self._wire_map: Optional[Dict[Bit, Bit]] = None + self._block_duration = 0 + self._current_block_idx = 0 + self._conditional_block = False + self._bit_indices: Optional[Dict[Qubit, int]] = None + # Nodes that the scheduling of this node is tied to. + + self._last_node_to_touch: Optional[Dict[Qubit, DAGNode]] = None + # Last node to touch a bit + + self._fast_path_nodes: Set[DAGNode] = set() + + self._dirty_qubits: Set[Qubit] = set() + # Qubits that are dirty in the circuit. + self._schedule_idle_qubits = schedule_idle_qubits + self._idle_qubits: Set[Qubit] = set() + super().__init__() + + def run(self, dag: DAGCircuit) -> DAGCircuit: + """Run the padding pass on ``dag``. + + Args: + dag: DAG to be checked. + + Returns: + DAGCircuit: DAG with idle time filled with instructions. + + Raises: + TranspilerError: When a particular node is not scheduled, likely some transform pass + is inserted before this node is called. 
+ """ + if not self._schedule_idle_qubits: + self._idle_qubits = set( + wire for wire in dag.idle_wires() if isinstance(wire, Qubit) + ) + self._pre_runhook(dag) + + self._init_run(dag) + + # Trivial wire map at the top-level + wire_map = {wire: wire for wire in dag.wires} + # Top-level dag is the entry block + new_dag = self._visit_block(dag, wire_map) + + return new_dag + + def _init_run(self, dag: DAGCircuit) -> None: + """Setup for initial run.""" + self._node_start_time = self.property_set["node_start_time"].copy() + self._node_block_dags = self.property_set["node_block_dags"] + self._idle_after = {bit: 0 for bit in dag.qubits} + self._current_block_idx = 0 + self._conditional_block = False + self._block_duration = 0 + + # Prepare DAG to pad + self._root_dag = dag + self._dag = self._empty_dag_like(dag) + self._block_dag = self._dag + self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} + self._last_node_to_touch = {} + self._fast_path_nodes = set() + self._dirty_qubits = set() + + self.property_set["node_start_time"].clear() + self._prev_node = None + self._wire_map = {} + + def _empty_dag_like( + self, + dag: DAGCircuit, + pad_wires: bool = True, + wire_map: Optional[Dict[Qubit, Qubit]] = None, + ignore_idle: bool = False, + ) -> DAGCircuit: + """Create an empty dag like the input dag.""" + new_dag = DAGCircuit() + + # Ensure *all* registers are included from the input circuit + # so that they are scheduled in sub-blocks + + # The top-level QuantumCircuit has the full registers available + # Control flow blocks do not get the full register added to the + # block but just the bits. When testing for equivalency the register + # information is taken into account. To work around this we try to + # while enabling generic handling of QuantumCircuits we + # add the register if available and otherwise add the bits directly. 
+ # We need this work around as otherwise the padded circuit will + # not be equivalent to one written manually as bits will not + # be defined on registers like in the test case. + + source_wire_dag = self._root_dag if pad_wires else dag + + # trivial wire map if not provided, or if the top-level dag is used + if not wire_map or pad_wires: + wire_map = {wire: wire for wire in source_wire_dag.wires} + if dag.qregs and self._schedule_idle_qubits or not ignore_idle: + for qreg in source_wire_dag.qregs.values(): + new_dag.add_qreg(qreg) + else: + new_dag.add_qubits( + [ + wire_map[qubit] + for qubit in source_wire_dag.qubits + if qubit not in self._idle_qubits or not ignore_idle + ] + ) + + # Don't add root cargs as these will not be padded. + # Just focus on current block dag. + if dag.cregs: + for creg in dag.cregs.values(): + new_dag.add_creg(creg) + else: + new_dag.add_clbits(dag.clbits) + + new_dag.name = dag.name + new_dag.metadata = dag.metadata + new_dag.unit = self.property_set["time_unit"] or "dt" + if new_dag.unit != "dt": + raise TranspilerError( + 'All blocks must have time units of "dt". ' + "Please run TimeUnitConversion pass prior to padding." + ) + + new_dag.calibrations = dag.calibrations + new_dag.global_phase = dag.global_phase + return new_dag + + def _pre_runhook(self, dag: DAGCircuit) -> None: + """Extra routine inserted before running the padding pass. + + Args: + dag: DAG circuit on which the sequence is applied. + + Raises: + TranspilerError: If the whole circuit or instruction is not scheduled. + """ + if "node_start_time" not in self.property_set: + raise TranspilerError( + f"The input circuit {dag.name} is not scheduled. Call one of scheduling passes " + f"before running the {self.__class__.__name__} pass." + ) + + def _pad( + self, + block_idx: int, + qubit: Qubit, + t_start: int, + t_end: int, + next_node: DAGNode, + prev_node: DAGNode, + ) -> None: + """Interleave instruction sequence in between two nodes. + + .. 
note:: + If a DAGOpNode is added here, it should update node_start_time property + in the property set so that the added node is also scheduled. + This is achieved by adding operation via :meth:`_apply_scheduled_op`. + + .. note:: + + This method doesn't check if the total duration of new DAGOpNode added here + is identical to the interval (``t_end - t_start``). + A developer of the pass must guarantee this is satisfied. + If the duration is greater than the interval, your circuit may be + compiled down to the target code with extra duration on the backend compiler, + which is then played normally without error. However, the outcome of your circuit + might be unexpected due to erroneous scheduling. + + Args: + block_idx: Execution block index for this node. + qubit: The wire that the sequence is applied on. + t_start: Absolute start time of this interval. + t_end: Absolute end time of this interval. + next_node: Node that follows the sequence. + prev_node: Node ahead of the sequence. + """ + raise NotImplementedError + + def _get_node_duration(self, node: DAGNode) -> int: + """Get the duration of a node.""" + if node.op.condition_bits or isinstance(node.op, ControlFlowOp): + # As we cannot currently schedule through conditionals model + # as zero duration to avoid padding. + return 0 + + indices = [self._bit_indices[qarg] for qarg in self._map_wires(node.qargs)] + + if self._block_dag.has_calibration_for(node): + # If node has calibration, this value should be the highest priority + cal_key = tuple(indices), tuple(float(p) for p in node.op.params) + duration = self._block_dag.calibrations[node.op.name][cal_key].duration + else: + duration = node.op.duration + + if isinstance(duration, ParameterExpression): + raise TranspilerError( + f"Parameterized duration ({duration}) " + f"of {node.op.name} on qubits {indices} is not bounded." + ) + if duration is None: + raise TranspilerError( + f"Duration of {node.op.name} on qubits {indices} is not found." 
+ ) + + return duration + + def _needs_block_terminating_barrier( + self, prev_node: DAGNode, curr_node: DAGNode + ) -> bool: + # Only barrier if not in fast-path nodes + is_fast_path_node = curr_node in self._fast_path_nodes + + def _is_terminating_barrier(node: DAGNode) -> bool: + return ( + isinstance(node.op, (Barrier, ControlFlowOp)) + and len(node.qargs) == self._block_dag.num_qubits() + ) + + return not ( + prev_node is None + or ( + isinstance(prev_node.op, ControlFlowOp) + and isinstance(curr_node.op, ControlFlowOp) + ) + or _is_terminating_barrier(prev_node) + or _is_terminating_barrier(curr_node) + or is_fast_path_node + ) + + def _add_block_terminating_barrier( + self, block_idx: int, time: int, current_node: DAGNode, force: bool = False + ) -> None: + """Add a block terminating barrier to prevent topological ordering slide by. + + TODO: Fix by ensuring control-flow is a block terminator in the core circuit IR. + """ + # Only add a barrier to the end if a viable barrier is not already present on all qubits + # Only barrier if not in fast-path nodes + needs_terminating_barrier = True + if not force: + needs_terminating_barrier = self._needs_block_terminating_barrier( + self._prev_node, current_node + ) + + if needs_terminating_barrier: + # Terminate with a barrier to ensure topological ordering does not slide past + if self._schedule_idle_qubits: + barrier = Barrier(self._block_dag.num_qubits()) + qubits = self._block_dag.qubits + else: + barrier = Barrier(self._block_dag.num_qubits() - len(self._idle_qubits)) + qubits = [ + x for x in self._block_dag.qubits if x not in self._idle_qubits + ] + + barrier_node = self._apply_scheduled_op( + block_idx, + time, + barrier, + qubits, + [], + ) + barrier_node.op.duration = 0 + + def _visit_block( + self, + block: DAGCircuit, + wire_map: Dict[Qubit, Qubit], + pad_wires: bool = True, + ignore_idle: bool = False, + ) -> DAGCircuit: + # Push the previous block dag onto the stack + prev_node = self._prev_node + 
self._prev_node = None + prev_wire_map, self._wire_map = self._wire_map, wire_map + + prev_block_dag = self._block_dag + self._block_dag = new_block_dag = self._empty_dag_like( + block, pad_wires, wire_map=wire_map, ignore_idle=ignore_idle + ) + + self._block_duration = 0 + self._conditional_block = False + + for node in block_order_op_nodes(block): + self._visit_node(node) + + # Terminate the block to pad it after scheduling. + prev_block_duration = self._block_duration + prev_block_idx = self._current_block_idx + self._terminate_block(self._block_duration, self._current_block_idx) + + # Edge-case: Add a barrier if the final node is a fast-path + if self._prev_node in self._fast_path_nodes: + self._add_block_terminating_barrier( + prev_block_duration, prev_block_idx, self._prev_node, force=True + ) + + # Pop the previous block dag off the stack restoring it + self._block_dag = prev_block_dag + self._prev_node = prev_node + self._wire_map = prev_wire_map + + return new_block_dag + + def _visit_node(self, node: DAGNode) -> None: + if isinstance(node.op, ControlFlowOp): + if isinstance(node.op, IfElseOp): + self._visit_if_else_op(node) + else: + self._visit_control_flow_op(node) + elif node in self._node_start_time: + if isinstance(node.op, Delay): + self._visit_delay(node) + else: + self._visit_generic(node) + else: + raise TranspilerError( + f"Operation {repr(node)} is likely added after the circuit is scheduled. " + "Schedule the circuit again if you transformed it." + ) + self._prev_node = node + + def _visit_if_else_op(self, node: DAGNode) -> None: + """check if is fast-path eligible otherwise fall back + to standard ControlFlowOp handling.""" + + if self._will_use_fast_path(node): + self._fast_path_nodes.add(node) + self._visit_control_flow_op(node) + + def _will_use_fast_path(self, node: DAGNode) -> bool: + """Check if this conditional operation will be scheduled on the fastpath. + This will happen if + 1. 
This operation is a direct descendent of a current measurement block to be flushed + 2. The operation only operates on the qubit that is measured. + """ + # Verify IfElseOp has a direct measurement predecessor + condition_bits = node.op.condition_bits + # Fast-path valid only with a single bit. + if not condition_bits or len(condition_bits) > 1: + return False + + bit = condition_bits[0] + last_node, last_node_dag = self._last_node_to_touch.get(bit, (None, None)) + + last_node_in_block = last_node_dag is self._block_dag + + if not ( + last_node_in_block + and isinstance(last_node.op, Measure) + and set(self._map_wires(node.qargs)) + == set(self._map_wires(last_node.qargs)) + ): + return False + + # Fast path contents are limited to gates and delays + for block in node.op.blocks: + if not all( + isinstance(inst.operation, (Gate, Delay)) for inst in block.data + ): + return False + return True + + def _visit_control_flow_op(self, node: DAGNode) -> None: + """Visit a control-flow node to pad.""" + + # Control-flow terminator ends scheduling of block currently + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + self._terminate_block(t0, block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + # Only pad non-fast path nodes + fast_path_node = node in self._fast_path_nodes + + # TODO: This is a hack required to tie nodes of control-flow + # blocks across the scheduler and block_base_padder. This is + # because the current control flow nodes store the block as a + # circuit which is not hashable. For processing we are currently + # required to convert each circuit block to a dag which is inefficient + # and causes node relationships stored in analysis to be lost between + # passes as we are constantly recreating the block dags. + # We resolve this here by extracting the cached dag blocks that were + # stored by the scheduling pass. 
+            new_node_block_dags = []
+            for block_idx, _ in enumerate(node.op.blocks):
+                block_dag = self._node_block_dags[node][block_idx]
+                inner_wire_map = {
+                    inner: outer
+                    for outer, inner in zip(
+                        self._map_wires(node.qargs + node.cargs),
+                        block_dag.qubits + block_dag.clbits,
+                    )
+                }
+                new_node_block_dags.append(
+                    self._visit_block(
+                        block_dag,
+                        pad_wires=not fast_path_node,
+                        wire_map=inner_wire_map,
+                        ignore_idle=True,
+                    )
+                )
+
+            # Build new control-flow operation containing scheduled blocks
+            # and apply to the DAG.
+            new_control_flow_op = node.op.replace_blocks(
+                dag_to_circuit(block) for block in new_node_block_dags
+            )
+            # Enforce that this control-flow operation contains all wires since it has now been padded
+            # such that each qubit is scheduled within each block. Don't add all cargs as these will not
+            # be padded.
+            if fast_path_node:
+                padded_qubits = node.qargs
+            elif not self._schedule_idle_qubits:
+                padded_qubits = [
+                    q for q in self._block_dag.qubits if q not in self._idle_qubits
+                ]
+            else:
+                padded_qubits = self._block_dag.qubits
+            self._apply_scheduled_op(
+                block_idx,
+                t0,
+                new_control_flow_op,
+                padded_qubits,
+                self._map_wires(node.cargs),
+            )
+
+    def _visit_delay(self, node: DAGNode) -> None:
+        """The padding class considers a delay instruction as idle time
+        rather than an instruction. Delay node is not added so that
+        we can extract non-delay predecessors.
+ """ + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + # Trigger the end of a block + if block_idx > self._current_block_idx: + self._terminate_block(self._block_duration, self._current_block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + self._conditional_block = bool(node.op.condition_bits) + + self._current_block_idx = block_idx + + t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name + self._block_duration = max(self._block_duration, t1) + + def _visit_generic(self, node: DAGNode) -> None: + """Visit a generic node to pad.""" + # Note: t0 is the relative time with respect to the current block specified + # by block_idx. + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + + # Trigger the end of a block + if block_idx > self._current_block_idx: + self._terminate_block(self._block_duration, self._current_block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + # This block will not be padded as it is conditional. + # See TODO below. + self._conditional_block = bool(node.op.condition_bits) + + # Now set the current block index. + self._current_block_idx = block_idx + + t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name + self._block_duration = max(self._block_duration, t1) + + for bit in self._map_wires(node.qargs): + if bit in self._idle_qubits: + continue + # Fill idle time with some sequence + if t0 - self._idle_after.get(bit, 0) > 0: + # Find previous node on the wire, i.e. 
always the latest node on the wire
+                prev_node = next(
+                    self._block_dag.predecessors(self._block_dag.output_map[bit])
+                )
+                self._pad(
+                    block_idx=block_idx,
+                    qubit=bit,
+                    t_start=self._idle_after[bit],
+                    t_end=t0,
+                    next_node=node,
+                    prev_node=prev_node,
+                )
+
+            self._idle_after[bit] = t1
+
+        if not isinstance(node.op, (Barrier, Delay)):
+            self._dirty_qubits |= set(self._map_wires(node.qargs))
+
+        new_node = self._apply_scheduled_op(
+            block_idx,
+            t0,
+            node.op,
+            self._map_wires(node.qargs),
+            self._map_wires(node.cargs),
+        )
+        self._last_node_to_touch.update(
+            {
+                bit: (new_node, self._block_dag)
+                for bit in new_node.qargs + new_node.cargs
+            }
+        )
+
+    def _terminate_block(self, block_duration: int, block_idx: int) -> None:
+        """Terminate the end of a block scheduling region."""
+        # Update all other qubits as not idle so that delays are *not*
+        # inserted. This is because we need the delays to be inserted in
+        # the conditional circuit block.
+        self._block_duration = 0
+        self._pad_until_block_end(block_duration, block_idx)
+        self._idle_after = {bit: 0 for bit in self._block_dag.qubits}
+
+    def _pad_until_block_end(self, block_duration: int, block_idx: int) -> None:
+        # Add delays until the end of the circuit.
+        for bit in self._block_dag.qubits:
+            if bit in self._idle_qubits:
+                continue
+            idle_after = self._idle_after.get(bit, 0)
+            if block_duration - idle_after > 0:
+                node = self._block_dag.output_map[bit]
+                prev_node = next(self._block_dag.predecessors(node))
+                self._pad(
+                    block_idx=block_idx,
+                    qubit=bit,
+                    t_start=idle_after,
+                    t_end=block_duration,
+                    next_node=node,
+                    prev_node=prev_node,
+                )
+
+    def _apply_scheduled_op(
+        self,
+        block_idx: int,
+        t_start: int,
+        oper: Instruction,
+        qubits: Union[Qubit, Iterable[Qubit]],
+        clbits: Union[Clbit, Iterable[Clbit]] = (),
+    ) -> DAGNode:
+        """Add new operation to DAG with scheduled information.
+
+        This is identical to apply_operation_back + updating the node_start_time property.
+ + Args: + block_idx: Execution block index for this node. + t_start: Start time of new node. + oper: New operation that is added to the DAG circuit. + qubits: The list of qubits that the operation acts on. + clbits: The list of clbits that the operation acts on. + + Returns: + The DAGNode applied to. + """ + if isinstance(qubits, Qubit): + qubits = [qubits] + if isinstance(clbits, Clbit): + clbits = [clbits] + + new_node = self._block_dag.apply_operation_back(oper, qubits, clbits) + self.property_set["node_start_time"][new_node] = (block_idx, t_start) + return new_node + + def _map_wires(self, wires: Iterable[Bit]) -> List[Bit]: + """Map the wires from the current block to the top-level block's wires. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + return [self._wire_map[w] for w in wires] diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py new file mode 100644 index 000000000..acae82ce4 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py @@ -0,0 +1,575 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Dynamical decoupling insertion pass for IBM (dynamic circuit) backends.""" + +import warnings +from typing import Dict, List, Optional, Union + +import numpy as np +import rustworkx as rx +from qiskit.circuit import Qubit, Gate +from qiskit.circuit.delay import Delay +from qiskit.circuit.library.standard_gates import IGate, UGate, U3Gate +from qiskit.circuit.reset import Reset +from qiskit.dagcircuit import DAGCircuit, DAGNode, DAGInNode, DAGOpNode +from qiskit.quantum_info.operators.predicates import matrix_equal +from qiskit.quantum_info.synthesis import OneQubitEulerDecomposer +from qiskit.transpiler.exceptions import TranspilerError +from qiskit.transpiler.instruction_durations import InstructionDurations +from qiskit.transpiler.passes.optimization import Optimize1qGates +from qiskit.transpiler import CouplingMap + +from .block_base_padder import BlockBasePadder + + +class PadDynamicalDecoupling(BlockBasePadder): + """Dynamical decoupling insertion pass for IBM dynamic circuit backends. + + This pass works on a scheduled, physical circuit. It scans the circuit for + idle periods of time (i.e. those containing delay instructions) and inserts + a DD sequence of gates in those spots. These gates amount to the identity, + so do not alter the logical action of the circuit, but have the effect of + mitigating decoherence in those idle periods. + As a special case, the pass allows a length-1 sequence (e.g. [XGate()]). + In this case the DD insertion happens only when the gate inverse can be + absorbed into a neighboring gate in the circuit (so we would still be + replacing Delay with something that is equivalent to the identity). + This can be used, for instance, as a Hahn echo. + This pass ensures that the inserted sequence preserves the circuit exactly + (including global phase). + + .. 
jupyter-execute:: + + import numpy as np + from qiskit.circuit import QuantumCircuit + from qiskit.circuit.library import XGate + from qiskit.transpiler import PassManager, InstructionDurations + from qiskit.visualization import timeline_drawer + + from qiskit_ibm_provider.transpiler.passes.scheduling import ALAPScheduleAnalysis + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDynamicalDecoupling + + circ = QuantumCircuit(4) + circ.h(0) + circ.cx(0, 1) + circ.cx(1, 2) + circ.cx(2, 3) + circ.measure_all() + durations = InstructionDurations( + [("h", 0, 50), ("cx", [0, 1], 700), ("reset", None, 10), + ("cx", [1, 2], 200), ("cx", [2, 3], 300), + ("x", None, 50), ("measure", None, 1000)] + ) + + .. jupyter-execute:: + + # balanced X-X sequence on all qubits + dd_sequence = [XGate(), XGate()] + pm = PassManager([ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence)]) + circ_dd = pm.run(circ) + circ_dd.draw() + + .. jupyter-execute:: + + # Uhrig sequence on qubit 0 + n = 8 + dd_sequence = [XGate()] * n + def uhrig_pulse_location(k): + return np.sin(np.pi * (k + 1) / (2 * n + 2)) ** 2 + spacings = [] + for k in range(n): + spacings.append(uhrig_pulse_location(k) - sum(spacings)) + spacings.append(1 - sum(spacings)) + pm = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence, qubits=[0], spacings=spacings), + ] + ) + circ_dd = pm.run(circ) + circ_dd.draw() + + .. note:: + + You need to call + :class:`~qiskit_ibm_provider.transpiler.passes.scheduling.ALAPScheduleAnalysis` + before running dynamical decoupling to guarantee your circuit satisfies acquisition + alignment constraints for dynamic circuit backends. 
+ """ + + def __init__( + self, + durations: InstructionDurations, + dd_sequences: Union[List[Gate], List[List[Gate]]], + qubits: Optional[List[int]] = None, + spacings: Optional[Union[List[List[float]], List[float]]] = None, + skip_reset_qubits: bool = True, + pulse_alignment: int = 16, + extra_slack_distribution: str = "middle", + sequence_min_length_ratios: Optional[Union[int, List[int]]] = None, + insert_multiple_cycles: bool = False, + coupling_map: CouplingMap = None, + alt_spacings: Optional[Union[List[List[float]], List[float]]] = None, + schedule_idle_qubits: bool = False, + ): + """Dynamical decoupling initializer. + + Args: + durations: Durations of instructions to be used in scheduling. + dd_sequences: Sequence of gates to apply in idle spots. + Alternatively a list of gate sequences may be supplied that + will preferentially be inserted if there is a delay of sufficient + duration. This may be tuned by the optionally supplied + ``sequence_min_length_ratios``. + qubits: Physical qubits on which to apply DD. + If None, all qubits will undergo DD (when possible). + spacings: A list of lists of spacings between the DD gates. + The available slack will be divided according to this. + The list length must be one more than the length of dd_sequence, + and the elements must sum to 1. If None, a balanced spacing + will be used [d/2, d, d, ..., d, d, d/2]. This spacing only + applies to the first subcircuit, if a ``coupling_map`` is + specified + skip_reset_qubits: If True, does not insert DD on idle periods that + immediately follow initialized/reset qubits + (as qubits in the ground state are less susceptible to decoherence). + pulse_alignment: The hardware constraints for gate timing allocation. + This is usually provided from ``backend.configuration().timing_constraints``. + If provided, the delay length, i.e. ``spacing``, is implicitly adjusted to + satisfy this constraint. 
+ extra_slack_distribution: The option to control the behavior of DD sequence generation. + The duration of the DD sequence should be identical to an idle time in the + scheduled quantum circuit, however, the delay in between gates comprising the sequence + should be integer number in units of dt, and it might be further truncated + when ``pulse_alignment`` is specified. This sometimes results in the duration of + the created sequence being shorter than the idle time + that you want to fill with the sequence, i.e. `extra slack`. + This option takes following values. + + * "middle": Put the extra slack to the interval at the middle of the sequence. + * "edges": Divide the extra slack as evenly as possible into + intervals at beginning and end of the sequence. + sequence_min_length_ratios: List of minimum delay length to DD sequence ratio to satisfy + in order to insert the DD sequence. For example if the X-X dynamical decoupling sequence + is 320dt samples long and the available delay is 384dt it has a ratio of 384dt/320dt=1.2. + From the perspective of dynamical decoupling this is likely to add more control noise + than decoupling error rate reductions. The defaults value is 2.0. + insert_multiple_cycles: If the available duration exceeds + 2*sequence_min_length_ratio*duration(dd_sequence) enable the insertion of multiple + rounds of the dynamical decoupling sequence in that delay. + coupling_map: directed graph representing the coupling map for the device. Specifying a + coupling map partitions the device into subcircuits, in order to apply DD sequences + with different pulse spacings within each. Currently support 2 subcircuits. + alt_spacings: A list of lists of spacings between the DD gates, for the second subcircuit, + as determined by the coupling map. If None, a balanced spacing that is staggered with + respect to the first subcircuit will be used [d, d, d, ..., d, d, 0]. + schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits. 
+ This is useful for timeline visualizations, but may cause issues + for execution on large backends. + Raises: + TranspilerError: When invalid DD sequence is specified. + TranspilerError: When pulse gate with the duration which is + non-multiple of the alignment constraint value is found. + TranspilerError: When the coupling map is not supported (i.e., if degree > 3) + """ + + super().__init__(schedule_idle_qubits=schedule_idle_qubits) + self._durations = durations + + # Enforce list of DD sequences + if dd_sequences: + try: + iter(dd_sequences[0]) + except TypeError: + dd_sequences = [dd_sequences] + self._dd_sequences = dd_sequences + self._qubits = qubits + self._skip_reset_qubits = skip_reset_qubits + self._alignment = pulse_alignment + self._coupling_map = coupling_map + self._coupling_coloring = None + + if spacings is not None: + try: + iter(spacings[0]) # type: ignore + except TypeError: + spacings = [spacings] # type: ignore + if alt_spacings is not None: + try: + iter(alt_spacings[0]) # type: ignore + except TypeError: + alt_spacings = [alt_spacings] # type: ignore + self._spacings = spacings + self._alt_spacings = alt_spacings + + if self._spacings and len(self._spacings) != len(self._dd_sequences): + raise TranspilerError( + "Number of sequence spacings must equal number of DD sequences." + ) + + if self._alt_spacings: + if not self._coupling_map: + warnings.warn( + "Alternate spacings are ignored because a coupling map was not provided" + ) + elif len(self._alt_spacings) != len(self._dd_sequences): + raise TranspilerError( + "Number of alternate sequence spacings must equal number of DD sequences." 
+ ) + + self._extra_slack_distribution = extra_slack_distribution + + self._dd_sequence_lengths: Dict[Qubit, List[List[Gate]]] = {} + self._sequence_phase = 0 + + if sequence_min_length_ratios is None: + # Use 2.0 as a sane default + self._sequence_min_length_ratios = [2.0 for _ in self._dd_sequences] + else: + try: + iter(sequence_min_length_ratios) # type: ignore + except TypeError: + sequence_min_length_ratios = [sequence_min_length_ratios] # type: ignore + self._sequence_min_length_ratios = sequence_min_length_ratios # type: ignore + + if len(self._sequence_min_length_ratios) != len(self._dd_sequences): + raise TranspilerError( + "Number of sequence lengths must equal number of DD sequences." + ) + + self._insert_multiple_cycles = insert_multiple_cycles + + def _pre_runhook(self, dag: DAGCircuit) -> None: + super()._pre_runhook(dag) + + if self._coupling_map: + physical_qubits = [dag.qubits.index(q) for q in dag.qubits] + subgraph = self._coupling_map.graph.subgraph(physical_qubits) + self._coupling_coloring = rx.graph_greedy_color(subgraph.to_undirected()) + if any(c > 1 for c in self._coupling_coloring.values()): + raise TranspilerError( + "This circuit topology is not supported for staggered dynamical decoupling." + "The maximum connectivity is 3 nearest neighbors per qubit." 
+ ) + + spacings_required = self._spacings is None + if spacings_required: + self._spacings = [] # type: ignore + alt_spacings_required = ( + self._alt_spacings is None and self._coupling_map is not None + ) + if alt_spacings_required: + self._alt_spacings = [] # type: ignore + + for seq_idx, seq in enumerate(self._dd_sequences): + num_pulses = len(self._dd_sequences[seq_idx]) + + # Check if physical circuit is given + if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: + raise TranspilerError("DD runs on physical circuits only.") + + # Set default spacing otherwise validate user input + if spacings_required: + mid = 1 / num_pulses + end = mid / 2 + self._spacings.append([end] + [mid] * (num_pulses - 1) + [end]) # type: ignore + else: + if sum(self._spacings[seq_idx]) != 1 or any( # type: ignore + a < 0 for a in self._spacings[seq_idx] # type: ignore + ): + raise TranspilerError( + "The spacings must be given in terms of fractions " + "of the slack period and sum to 1." + ) + + if self._coupling_map: + if alt_spacings_required: + mid = 1 / num_pulses + self._alt_spacings.append([mid] * num_pulses + [0]) # type: ignore + else: + if sum(self._alt_spacings[seq_idx]) != 1 or any( # type: ignore + a < 0 for a in self._alt_spacings[seq_idx] # type: ignore + ): + raise TranspilerError( + "The spacings must be given in terms of fractions " + "of the slack period and sum to 1." + ) + + # Check if DD sequence is identity + if num_pulses != 1: + if num_pulses % 2 != 0: + raise TranspilerError( + "DD sequence must contain an even number of gates (or 1)." + ) + # TODO: this check should use the quantum info package in Qiskit. + noop = np.eye(2) + for gate in self._dd_sequences[seq_idx]: + noop = noop.dot(gate.to_matrix()) + if not matrix_equal(noop, IGate().to_matrix(), ignore_phase=True): + raise TranspilerError( + "The DD sequence does not make an identity operation." 
+ ) + self._sequence_phase = np.angle(noop[0][0]) + + # Precompute qubit-wise DD sequence length for performance + for qubit in dag.qubits: + seq_length_ = [] + if qubit not in self._dd_sequence_lengths: + self._dd_sequence_lengths[qubit] = [] + + physical_index = dag.qubits.index(qubit) + if self._qubits and physical_index not in self._qubits: + continue + + for index, gate in enumerate(seq): + try: + # Check calibration. + gate_length = dag.calibrations[gate.name][ + (physical_index, gate.params) + ] + if gate_length % self._alignment != 0: + # This is necessary to implement lightweight scheduling logic for this pass. + # Usually the pulse alignment constraint and pulse data chunk size take + # the same value, however, we can intentionally violate this pattern + # at the gate level. For example, we can create a schedule consisting of + # a pi-pulse of 32 dt followed by a post buffer, i.e. delay, of 4 dt + # on the device with 16 dt constraint. Note that the pi-pulse length + # is multiple of 16 dt but the gate length of 36 is not multiple of it. + # Such pulse gate should be excluded. + raise TranspilerError( + f"Pulse gate {gate.name} with length non-multiple of {self._alignment} " + f"is not acceptable in {self.__class__.__name__} pass." + ) + except KeyError: + gate_length = self._durations.get(gate, physical_index) + seq_length_.append(gate_length) + # Update gate duration. + # This is necessary for current timeline drawer, i.e. scheduled. + + if hasattr( + gate, "to_mutable" + ): # TODO this check can be removed after Qiskit 1.0, as it is always True + gate = gate.to_mutable() + seq[index] = gate + gate.duration = gate_length + self._dd_sequence_lengths[qubit].append(seq_length_) + + def _pad( + self, + block_idx: int, + qubit: Qubit, + t_start: int, + t_end: int, + next_node: DAGNode, + prev_node: DAGNode, + ) -> None: + # This routine takes care of the pulse alignment constraint for the DD sequence. 
+        # Note that the alignment constraint acts on the t0 of the DAGOpNode.
+        # Now this constrained scheduling problem is simplified to the problem of
+        # finding a delay amount which is a multiple of the constraint value by assuming
+        # that the duration of every DAGOpNode is also a multiple of the constraint value.
+        #
+        # For example, given the constraint value of 16 and XY4 with 160 dt gates.
+        # Here we assume current interval is 992 dt.
+        #
+        # relative spacing := [0.125, 0.25, 0.25, 0.25, 0.125]
+        # slack = 992 dt - 4 x 160 dt = 352 dt
+        #
+        # unconstrained sequence: 44dt-X1-88dt-Y2-88dt-X3-88dt-Y4-44dt
+        # constrained sequence  : 32dt-X1-80dt-Y2-80dt-X3-80dt-Y4-32dt + extra slack 48 dt
+        #
+        # Now we evenly split extra slack into start and end of the sequence.
+        # The distributed slack should be multiple of 16.
+        # Start = +16, End += 32
+        #
+        # final sequence       : 48dt-X1-80dt-Y2-80dt-X3-80dt-Y4-64dt / in total 992 dt
+        #
+        # Now we verify t0 of every node starts from multiple of 16 dt.
+        #
+        # X1: 48 dt (3 x 16 dt)
+        # Y2: 48 dt + 160 dt + 80 dt = 288 dt (18 x 16 dt)
+        # X3: 288 dt + 160 dt + 80 dt = 528 dt (33 x 16 dt)
+        # Y4: 528 dt + 160 dt + 80 dt = 768 dt (48 x 16 dt)
+        #
+        # As you can see, constraints on t0 are all satisfied without explicit scheduling.
+        time_interval = t_end - t_start
+
+        if self._qubits and self._block_dag.qubits.index(qubit) not in self._qubits:
+            # Target physical qubit is not the target of this DD sequence.
+            self._apply_scheduled_op(
+                block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit
+            )
+            return
+
+        if (
+            not isinstance(prev_node, DAGInNode)
+            and self._skip_reset_qubits
+            and isinstance(prev_node.op, Reset)
+            and qubit in prev_node.qargs
+        ):
+            self._dirty_qubits.remove(qubit)
+
+        if qubit not in self._dirty_qubits:
+            # Previous node is the start edge or reset, i.e. qubit is ground state.
+ self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return + + for sequence_idx, _ in enumerate(self._dd_sequences): + dd_sequence = self._dd_sequences[sequence_idx] + seq_lengths = self._dd_sequence_lengths[qubit][sequence_idx] + seq_length = np.sum(seq_lengths) + seq_ratio = self._sequence_min_length_ratios[sequence_idx] + spacings = self._spacings[sequence_idx] + alt_spacings = ( + np.asarray(self._alt_spacings[sequence_idx]) + if self._coupling_map + else None + ) + + # Verify the delay duration exceeds the minimum time to insert + if time_interval / seq_length <= seq_ratio: + continue + + if self._insert_multiple_cycles: + num_sequences = max(int(time_interval // (seq_length * seq_ratio)), 1) + if (num_sequences % 2 == 1) and len(dd_sequence) == 1: + warnings.warn( + "Sequence would result in an odd number of DD cycles with original DD " + "sequence of length 1. This may result in non-identity sequence insertion " + "and so are defaulting to 1 cycle insertion." 
+ ) + num_sequences = 1 + else: + num_sequences = 1 + + # multiple dd sequences may be inserted + if num_sequences > 1: + dd_sequence = list(dd_sequence) * num_sequences + seq_lengths = seq_lengths * num_sequences + seq_length = np.sum(seq_lengths) + spacings = spacings * num_sequences + + spacings = np.asarray(spacings) / num_sequences + slack = time_interval - seq_length + sequence_gphase = self._sequence_phase + + if slack <= 0: + continue + + if len(dd_sequence) == 1: + # Special case of using a single gate for DD + u_inv = dd_sequence[0].inverse().to_matrix() + theta, phi, lam, phase = OneQubitEulerDecomposer().angles_and_phase( + u_inv + ) + if isinstance(next_node, DAGOpNode) and isinstance( + next_node.op, (UGate, U3Gate) + ): + # Absorb the inverse into the successor (from left in circuit) + theta_r, phi_r, lam_r = next_node.op.params + next_node.op.params = Optimize1qGates.compose_u3( + theta_r, phi_r, lam_r, theta, phi, lam + ) + sequence_gphase += phase + elif isinstance(prev_node, DAGOpNode) and isinstance( + prev_node.op, (UGate, U3Gate) + ): + # Absorb the inverse into the predecessor (from right in circuit) + theta_l, phi_l, lam_l = prev_node.op.params + prev_node.op.params = Optimize1qGates.compose_u3( + theta, phi, lam, theta_l, phi_l, lam_l + ) + sequence_gphase += phase + else: + # Don't do anything if there's no single-qubit gate to absorb the inverse + self._apply_scheduled_op( + block_idx, + t_start, + Delay(time_interval, self._block_dag.unit), + qubit, + ) + return + + def _constrained_length(values: np.array) -> np.array: + return self._alignment * np.floor(values / self._alignment) + + if self._coupling_map: + if self._coupling_coloring[self._dag.qubits.index(qubit)] == 0: + sub_spacings = spacings + else: + sub_spacings = alt_spacings + else: + sub_spacings = spacings + + # (1) Compute DD intervals satisfying the constraint + taus = _constrained_length(slack * sub_spacings) + extra_slack = slack - np.sum(taus) + # (2) Distribute extra 
slack + if self._extra_slack_distribution == "middle": + mid_ind = int((len(taus) - 1) / 2) + to_middle = _constrained_length(extra_slack) + taus[mid_ind] += to_middle + if extra_slack - to_middle: + # If to_middle is not a multiple value of the pulse alignment, + # it is truncated to the nearest multiple value and + # the rest of slack is added to the end. + taus[-1] += extra_slack - to_middle + elif self._extra_slack_distribution == "edges": + to_begin_edge = _constrained_length(extra_slack / 2) + taus[0] += to_begin_edge + taus[-1] += extra_slack - to_begin_edge + else: + raise TranspilerError( + f"Option extra_slack_distribution = {self._extra_slack_distribution} is invalid." + ) + + # (3) Construct DD sequence with delays + idle_after = t_start + dd_ind = 0 + # Interleave delays with DD sequence operations + for tau_idx, tau in enumerate(taus): + if tau > 0: + self._apply_scheduled_op( + block_idx, idle_after, Delay(tau, self._dag.unit), qubit + ) + idle_after += tau + + # Detect if we are on a sequence boundary + # If so skip insert of sequence to allow delays to combine + # There are two cases. + # 1. The number of delays to be inserted is equal to the number of gates. + # 2. There is an extra delay inserted after the last operation. + # The condition below handles both. 
+ seq_length = int(len(taus) / num_sequences) + if len(dd_sequence) == len(taus) or tau_idx % seq_length != ( + seq_length - 1 + ): + gate = dd_sequence[dd_ind] + gate_length = seq_lengths[dd_ind] + self._apply_scheduled_op(block_idx, idle_after, gate, qubit) + idle_after += gate_length + dd_ind += 1 + + self._block_dag.global_phase = ( + self._block_dag.global_phase + sequence_gphase + ) + return + + # DD could not be applied, delay instead + self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py new file mode 100644 index 000000000..fd61f8c49 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py @@ -0,0 +1,78 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Padding pass to insert Delay into empty timeslots for dynamic circuit backends.""" + +from qiskit.circuit import Qubit +from qiskit.circuit.delay import Delay +from qiskit.dagcircuit import DAGNode, DAGOutNode + +from .block_base_padder import BlockBasePadder + + +class PadDelay(BlockBasePadder): + """Padding idle time with Delay instructions. + + Consecutive delays will be merged in the output of this pass. + + .. code-block::python + + durations = InstructionDurations([("x", None, 160), ("cx", None, 800)]) + + qc = QuantumCircuit(2) + qc.delay(100, 0) + qc.x(1) + qc.cx(0, 1) + + The ASAP-scheduled circuit output may become + + .. 
parsed-literal:: + + ┌────────────────┐ + q_0: ┤ Delay(160[dt]) ├──■── + └─────┬───┬──────┘┌─┴─┐ + q_1: ──────┤ X ├───────┤ X ├ + └───┘ └───┘ + + Note that the additional idle time of 60dt on the ``q_0`` wire coming from the duration difference + between ``Delay`` of 100dt (``q_0``) and ``XGate`` of 160 dt (``q_1``) is absorbed in + the delay instruction on the ``q_0`` wire, i.e. in total 160 dt. + + See :class:`BlockBasePadder` pass for details. + """ + + def __init__(self, fill_very_end: bool = True, schedule_idle_qubits: bool = False): + """Create new padding delay pass. + + Args: + fill_very_end: Set ``True`` to fill the end of circuit with delay. + schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits. + This is useful for timeline visualizations, but may cause issues for execution + on large backends. + """ + super().__init__(schedule_idle_qubits=schedule_idle_qubits) + self.fill_very_end = fill_very_end + + def _pad( + self, + block_idx: int, + qubit: Qubit, + t_start: int, + t_end: int, + next_node: DAGNode, + prev_node: DAGNode, + ) -> None: + if not self.fill_very_end and isinstance(next_node, DAGOutNode): + return + + time_interval = t_end - t_start + self._apply_scheduled_op(block_idx, t_start, Delay(time_interval, "dt"), qubit) diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py new file mode 100644 index 000000000..2a2f6c57e --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py @@ -0,0 +1,665 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Scheduler for dynamic circuit backends.""" + +from abc import abstractmethod +from typing import Dict, List, Optional, Union, Set, Tuple +import itertools + +import qiskit +from qiskit.circuit.parameterexpression import ParameterExpression +from qiskit.converters import circuit_to_dag +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.passes.scheduling.time_unit_conversion import TimeUnitConversion + +from qiskit.circuit import Barrier, Clbit, ControlFlowOp, Measure, Qubit, Reset +from qiskit.circuit.bit import Bit +from qiskit.dagcircuit import DAGCircuit, DAGNode +from qiskit.transpiler.exceptions import TranspilerError + +from .utils import block_order_op_nodes + + +class BaseDynamicCircuitAnalysis(TransformationPass): + """Base class for scheduling analysis + + This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits + backends due to the limitations imposed by hardware. This is expected to evolve over time as the + dynamic circuit backends also change. + + The primary differences are that: + + * Resets and control-flow currently trigger the end of a "quantum block". The period between the end + of the block and the next is *nondeterministic* + ie., we do not know when the next block will begin (as we could be evaluating a classical + function of nondeterministic length) and therefore the + next block starts at a *relative* t=0. + * During a measurement it is possible to apply gates in parallel on disjoint qubits. + * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. + """ + + def __init__( + self, durations: qiskit.transpiler.instruction_durations.InstructionDurations + ) -> None: + """Scheduler for dynamic circuit backends. 
+ + Args: + durations: Durations of instructions to be used in scheduling. + """ + self._durations = durations + + self._dag: Optional[DAGCircuit] = None + self._block_dag: Optional[DAGCircuit] = None + self._wire_map: Optional[Dict[Bit, Bit]] = None + self._node_mapped_wires: Optional[Dict[DAGNode, List[Bit]]] = None + self._node_block_dags: Dict[DAGNode, DAGCircuit] = {} + # Mapping of control-flow nodes to their containing blocks + self._block_idx_dag_map: Dict[int, DAGCircuit] = {} + # Mapping of block indices to the respective DAGCircuit + + self._current_block_idx = 0 + self._max_block_t1: Optional[Dict[int, int]] = None + # Track as we build to avoid extra pass + self._control_flow_block = False + self._node_start_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None + self._node_stop_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None + self._bit_stop_times: Optional[Dict[int, Dict[Union[Qubit, Clbit], int]]] = None + # Dictionary of blocks each containing a dictionary with the key for each bit + # in the block and its value being the final time of the bit within the block. + self._current_block_measures: Set[DAGNode] = set() + self._current_block_measures_has_reset: bool = False + self._node_tied_to: Optional[Dict[DAGNode, Set[DAGNode]]] = None + # Nodes that the scheduling of this node is tied to. + self._bit_indices: Optional[Dict[Qubit, int]] = None + + self._time_unit_converter = TimeUnitConversion(durations) + + super().__init__() + + @property + def _current_block_bit_times(self) -> Dict[Union[Qubit, Clbit], int]: + return self._bit_stop_times[self._current_block_idx] + + def _visit_block(self, block: DAGCircuit, wire_map: Dict[Qubit, Qubit]) -> None: + # Push the previous block dag onto the stack + prev_block_dag = self._block_dag + self._block_dag = block + prev_wire_map, self._wire_map = self._wire_map, wire_map + + # We must run this on the individual block + # as the current implementation does not recurse + # into the circuit structure. 
+ self._time_unit_converter.run(block) + self._begin_new_circuit_block() + + for node in block_order_op_nodes(block): + self._visit_node(node) + + # Final flush + self._flush_measures() + + # Pop the previous block dag off the stack restoring it + self._block_dag = prev_block_dag + self._wire_map = prev_wire_map + + def _visit_node(self, node: DAGNode) -> None: + if isinstance(node.op, ControlFlowOp): + self._visit_control_flow_op(node) + elif node.op.condition_bits: + raise TranspilerError( + "c_if control-flow is not supported by this pass. " + 'Please apply "ConvertConditionsToIfOps" to convert these ' + "conditional operations to new-style Qiskit control-flow." + ) + else: + if isinstance(node.op, Measure): + self._visit_measure(node) + elif isinstance(node.op, Reset): + self._visit_reset(node) + else: + self._visit_generic(node) + + def _visit_control_flow_op(self, node: DAGNode) -> None: + # TODO: This is a hack required to tie nodes of control-flow + # blocks across the scheduler and block_base_padder. This is + # because the current control flow nodes store the block as a + # circuit which is not hashable. For processing we are currently + # required to convert each circuit block to a dag which is inefficient + # and causes node relationships stored in analysis to be lost between + # passes as we are constantly recreating the block dags. + # We resolve this here by caching these dags in the property set. 
+ self._node_block_dags[node] = node_block_dags = [] + + t0 = max( # pylint: disable=invalid-name + self._current_block_bit_times[bit] for bit in self._map_wires(node) + ) + + # Duration is 0 as we do not schedule across terminator + t1 = t0 # pylint: disable=invalid-name + self._update_bit_times(node, t0, t1) + + for block in node.op.blocks: + self._control_flow_block = True + + new_dag = circuit_to_dag(block) + inner_wire_map = { + inner: outer + for outer, inner in zip( + self._map_wires(node), new_dag.qubits + new_dag.clbits + ) + } + node_block_dags.append(new_dag) + self._visit_block(new_dag, inner_wire_map) + + # Begin new block for exit to "then" block. + self._begin_new_circuit_block() + + @abstractmethod + def _visit_measure(self, node: DAGNode) -> None: + raise NotImplementedError + + @abstractmethod + def _visit_reset(self, node: DAGNode) -> None: + raise NotImplementedError + + @abstractmethod + def _visit_generic(self, node: DAGNode) -> None: + raise NotImplementedError + + def _init_run(self, dag: DAGCircuit) -> None: + """Setup for initial run.""" + + self._dag = dag + self._block_dag = None + self._wire_map = {wire: wire for wire in dag.wires} + self._node_mapped_wires = {} + self._node_block_dags = {} + self._block_idx_dag_map = {} + + self._current_block_idx = 0 + self._max_block_t1 = {} + self._control_flow_block = False + + if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: + raise TranspilerError("ASAP schedule runs on physical circuits only") + + self._node_start_time = {} + self._node_stop_time = {} + self._bit_stop_times = {0: {q: 0 for q in dag.qubits + dag.clbits}} + self._current_block_measures = set() + self._current_block_measures_has_reset = False + self._node_tied_to = {} + self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} + + def _get_duration(self, node: DAGNode, dag: Optional[DAGCircuit] = None) -> int: + if node.op.condition_bits or isinstance(node.op, ControlFlowOp): + # As we cannot currently 
schedule through conditionals model + # as zero duration to avoid padding. + return 0 + + indices = [self._bit_indices[qarg] for qarg in self._map_qubits(node)] + + # Fall back to current block dag if not specified. + dag = dag or self._block_dag + + if dag.has_calibration_for(node): + # If node has calibration, this value should be the highest priority + cal_key = tuple(indices), tuple(float(p) for p in node.op.params) + duration = dag.calibrations[node.op.name][cal_key].duration + node.op.duration = duration + else: + duration = node.op.duration + + if isinstance(duration, ParameterExpression): + raise TranspilerError( + f"Parameterized duration ({duration}) " + f"of {node.op.name} on qubits {indices} is not bounded." + ) + if duration is None: + raise TranspilerError( + f"Duration of {node.op.name} on qubits {indices} is not found." + ) + + return duration + + def _update_bit_times( # pylint: disable=invalid-name + self, node: DAGNode, t0: int, t1: int, update_cargs: bool = True + ) -> None: + self._max_block_t1[self._current_block_idx] = max( + self._max_block_t1.get(self._current_block_idx, 0), t1 + ) + + update_bits = self._map_wires(node) if update_cargs else self._map_qubits(node) + for bit in update_bits: + self._current_block_bit_times[bit] = t1 + + self._node_start_time[node] = (self._current_block_idx, t0) + self._node_stop_time[node] = (self._current_block_idx, t1) + + def _begin_new_circuit_block(self) -> None: + """Create a new timed circuit block completing the previous block.""" + self._current_block_idx += 1 + self._block_idx_dag_map[self._current_block_idx] = self._block_dag + self._control_flow_block = False + self._bit_stop_times[self._current_block_idx] = { + self._wire_map[wire]: 0 for wire in self._block_dag.wires + } + self._flush_measures() + + def _flush_measures(self) -> None: + """Flush currently accumulated measurements by resetting block measures.""" + for node in self._current_block_measures: + self._node_tied_to[node] = 
self._current_block_measures.copy() + + self._current_block_measures = set() + self._current_block_measures_has_reset = False + + def _current_block_measure_qargs(self) -> Set[Qubit]: + return set( + qarg + for measure in self._current_block_measures + for qarg in self._map_qubits(measure) + ) + + def _check_flush_measures(self, node: DAGNode) -> None: + if self._current_block_measure_qargs() & set(self._map_qubits(node)): + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. + self._begin_new_circuit_block() + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + + def _map_wires(self, node: DAGNode) -> List[Qubit]: + """Map the wires from the current node to the top-level block's wires. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + if node not in self._node_mapped_wires: + self._node_mapped_wires[node] = wire_map = [ + self._wire_map[q] for q in node.qargs + node.cargs + ] + return wire_map + + return self._node_mapped_wires[node] + + def _map_qubits(self, node: DAGNode) -> List[Qubit]: + """Map the qubits from the current node to the top-level block's qubits. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + return [wire for wire in self._map_wires(node) if isinstance(wire, Qubit)] + + +class ASAPScheduleAnalysis(BaseDynamicCircuitAnalysis): + """Dynamic circuits as-soon-as-possible (ASAP) scheduling analysis pass. + + This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits + backends due to the limitations imposed by hardware. This is expected to evolve over time as the + dynamic circuit backends also change. + + In its current form this is similar to Qiskit's ASAP scheduler in which instructions + start as early as possible. + + The primary differences are that: + + * Resets and control-flow currently trigger the end of a "quantum block". 
The period between the end
+      of the block and the next is *nondeterministic*
+      ie., we do not know when the next block will begin (as we could be evaluating a classical
+      function of nondeterministic length) and therefore the
+      next block starts at a *relative* t=0.
+    * During a measurement it is possible to apply gates in parallel on disjoint qubits.
+    * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block.
+    """
+
+    def run(self, dag: DAGCircuit) -> DAGCircuit:
+        """Run the ASAPSchedule pass on `dag`.
+        Args:
+            dag (DAGCircuit): DAG to schedule.
+        Raises:
+            TranspilerError: if the circuit is not mapped on physical qubits.
+            TranspilerError: if conditional bit is added to non-supported instruction.
+        Returns:
+            The scheduled DAGCircuit.
+        """
+        self._init_run(dag)
+
+        # Trivial wire map at the top-level
+        wire_map = {wire: wire for wire in dag.wires}
+        # Top-level dag is the entry block
+        self._visit_block(dag, wire_map)
+
+        self.property_set["node_start_time"] = self._node_start_time
+        self.property_set["node_block_dags"] = self._node_block_dags
+        return dag
+
+    def _visit_measure(self, node: DAGNode) -> None:
+        """Visit a measurement node.
+
+        Measurement currently triggers the end of a deterministically scheduled block
+        of instructions in IBM dynamic circuits hardware.
+        This means that it is possible to schedule *up to* a measurement (and during its pulses)
+        but the measurement will be followed by a period of indeterminism.
+        All measurements on disjoint qubits that topologically follow another
+        measurement will be collected and performed in parallel. A measurement on a qubit
+        intersecting with the set of qubits to be measured in parallel will trigger the
+        end of a scheduling block with said measurement occurring in a following block
+        which begins another grouping sequence.
This behavior will change in future + backend software updates.""" + + current_block_measure_qargs = self._current_block_measure_qargs() + # We handle a set of qubits here as _visit_reset currently calls + # this method and a reset may have multiple qubits. + measure_qargs = set(self._map_qubits(node)) + + t0q = max( + self._current_block_bit_times[q] for q in measure_qargs + ) # pylint: disable=invalid-name + + # If the measurement qubits overlap, we need to flush measurements and start a + # new scheduling block. + if current_block_measure_qargs & measure_qargs: + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. + self._begin_new_circuit_block() + t0q = 0 + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + else: + # Otherwise we need to increment all measurements to start at the same time within the block. + t0q = max( # pylint: disable=invalid-name + itertools.chain( + [t0q], + ( + self._node_start_time[measure][1] + for measure in self._current_block_measures + ), + ) + ) + + # Insert this measure into the block + self._current_block_measures.add(node) + + for measure in self._current_block_measures: + t0 = t0q # pylint: disable=invalid-name + bit_indices = { + bit: index for index, bit in enumerate(self._block_dag.qubits) + } + measure_duration = self._durations.get( + Measure(), + [bit_indices[qarg] for qarg in self._map_qubits(measure)], + unit="dt", + ) + t1 = t0 + measure_duration # pylint: disable=invalid-name + self._update_bit_times(measure, t0, t1) + + def _visit_reset(self, node: DAGNode) -> None: + """Visit a reset node. + + Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware + as conditional reset is performed internally using a c_if. This means that it is + possible to schedule *up to* a reset (and during its measurement pulses) + but the reset will be followed by a period of conditional indeterminism. 
+        All resets on disjoint qubits will be collected on the same qubits to be run simultaneously.
+        """
+        # Process as measurement
+        self._current_block_measures_has_reset = True
+        self._visit_measure(node)
+        # Then set that we are now a conditional node.
+        self._control_flow_block = True
+
+    def _visit_generic(self, node: DAGNode) -> None:
+        """Visit a generic node such as a gate or barrier."""
+        op_duration = self._get_duration(node)
+
+        # If the measurement qubits overlap, we need to flush the measurement group
+        self._check_flush_measures(node)
+
+        t0 = max(  # pylint: disable=invalid-name
+            self._current_block_bit_times[bit] for bit in self._map_wires(node)
+        )
+
+        t1 = t0 + op_duration  # pylint: disable=invalid-name
+        self._update_bit_times(node, t0, t1)
+
+
+class ALAPScheduleAnalysis(BaseDynamicCircuitAnalysis):
+    """Dynamic circuits as-late-as-possible (ALAP) scheduling analysis pass.
+
+    This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits
+    backends due to the limitations imposed by hardware. This is expected to evolve over time as the
+    dynamic circuit backends also change.
+
+    In its current form this is similar to Qiskit's ALAP scheduler in which instructions
+    start as late as possible.
+
+    The primary differences are that:
+
+    * Resets and control-flow currently trigger the end of a "quantum block". The period between the end
+      of the block and the next is *nondeterministic*
+      ie., we do not know when the next block will begin (as we could be evaluating a classical
+      function of nondeterministic length) and therefore the
+      next block starts at a *relative* t=0.
+    * During a measurement it is possible to apply gates in parallel on disjoint qubits.
+    * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block.
+    """
+
+    def run(self, dag: DAGCircuit) -> DAGCircuit:
+        """Run the ALAPSchedule pass on `dag`.
+        Args:
+            dag (DAGCircuit): DAG to schedule.
+ Raises: + TranspilerError: if the circuit is not mapped on physical qubits. + TranspilerError: if conditional bit is added to non-supported instruction. + Returns: + The scheduled DAGCircuit. + """ + self._init_run(dag) + + # Trivial wire map at the top-level + wire_map = {wire: wire for wire in dag.wires} + # Top-level dag is the entry block + self._visit_block(dag, wire_map) + self._push_block_durations() + self.property_set["node_start_time"] = self._node_start_time + self.property_set["node_block_dags"] = self._node_block_dags + return dag + + def _visit_measure(self, node: DAGNode) -> None: + """Visit a measurement node. + + Measurement currently triggers the end of a deterministically scheduled block + of instructions in IBM dynamic circuits hardware. + This means that it is possible to schedule *up to* a measurement (and during its pulses) + but the measurement will be followed by a period of indeterminism. + All measurements on disjoint qubits that topologically follow another + measurement will be collected and performed in parallel. A measurement on a qubit + intersecting with the set of qubits to be measured in parallel will trigger the + end of a scheduling block with said measurement occurring in a following block + which begins another grouping sequence. This behavior will change in future + backend software updates.""" + + current_block_measure_qargs = self._current_block_measure_qargs() + # We handle a set of qubits here as _visit_reset currently calls + # this method and a reset may have multiple qubits. + measure_qargs = set(self._map_qubits(node)) + + t0q = max( + self._current_block_bit_times[q] for q in measure_qargs + ) # pylint: disable=invalid-name + + # If the measurement qubits overlap, we need to flush measurements and start a + # new scheduling block. + if current_block_measure_qargs & measure_qargs: + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. 
+ self._begin_new_circuit_block() + t0q = 0 + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + else: + # Otherwise we need to increment all measurements to start at the same time within the block. + t0q = max( # pylint: disable=invalid-name + itertools.chain( + [t0q], + ( + self._node_start_time[measure][1] + for measure in self._current_block_measures + ), + ) + ) + + # Insert this measure into the block + self._current_block_measures.add(node) + + for measure in self._current_block_measures: + t0 = t0q # pylint: disable=invalid-name + bit_indices = { + bit: index for index, bit in enumerate(self._block_dag.qubits) + } + measure_duration = self._durations.get( + Measure(), + [bit_indices[qarg] for qarg in self._map_qubits(measure)], + unit="dt", + ) + t1 = t0 + measure_duration # pylint: disable=invalid-name + self._update_bit_times(measure, t0, t1) + + def _visit_reset(self, node: DAGNode) -> None: + """Visit a reset node. + + Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware + as conditional reset is performed internally using a c_if. This means that it is + possible to schedule *up to* a reset (and during its measurement pulses) + but the reset will be followed by a period of conditional indeterminism. + All resets on disjoint qubits will be collected on the same qubits to be run simultaneously. + """ + # Process as measurement + self._current_block_measures_has_reset = True + self._visit_measure(node) + # Then set that we are now a conditional node. + self._control_flow_block = True + + def _visit_generic(self, node: DAGNode) -> None: + """Visit a generic node such as a gate or barrier.""" + + # If True we are coming from a conditional block. + # start a new block for the unconditional operations. 
+ if self._control_flow_block: + self._begin_new_circuit_block() + + op_duration = self._get_duration(node) + + # If the measurement qubits overlap, we need to flush the measurement group + self._check_flush_measures(node) + + t0 = max( # pylint: disable=invalid-name + self._current_block_bit_times[bit] for bit in self._map_wires(node) + ) + + t1 = t0 + op_duration # pylint: disable=invalid-name + self._update_bit_times(node, t0, t1) + + def _push_block_durations(self) -> None: + """After scheduling of each block, pass over and push the times of all nodes.""" + + # Store the next available time to push to for the block by bit + block_bit_times = {} + # Iterated nodes starting at the first, from the node with the + # last time, preferring barriers over non-barriers + + def order_ops( + item: Tuple[DAGNode, Tuple[int, int]] + ) -> Tuple[int, int, bool, int]: + """Iterated nodes ordering by channel, time and preferring that barriers are processed + first.""" + return ( + item[1][0], + -item[1][1], + not isinstance(item[0].op, Barrier), + self._get_duration(item[0], dag=self._block_idx_dag_map[item[1][0]]), + ) + + iterate_nodes = sorted(self._node_stop_time.items(), key=order_ops) + + new_node_start_time = {} + new_node_stop_time = {} + + def _calculate_new_times( + block: int, node: DAGNode, block_bit_times: Dict[int, Dict[Qubit, int]] + ) -> int: + max_block_time = min( + block_bit_times[block][bit] for bit in self._map_qubits(node) + ) + + t0 = self._node_start_time[node][1] # pylint: disable=invalid-name + t1 = self._node_stop_time[node][1] # pylint: disable=invalid-name + # Determine how much to shift by + node_offset = max_block_time - t1 + new_t0 = t0 + node_offset + return new_t0 + + scheduled = set() + + def _update_time( + block: int, + node: DAGNode, + new_time: int, + block_bit_times: Dict[int, Dict[Qubit, int]], + ) -> None: + scheduled.add(node) + + new_node_start_time[node] = (block, new_time) + new_node_stop_time[node] = ( + block, + new_time + 
self._get_duration(node, dag=self._block_idx_dag_map[block]), + ) + + # Update available times by bit + for bit in self._map_qubits(node): + block_bit_times[block][bit] = new_time + + for node, ( + block, + _, + ) in iterate_nodes: # pylint: disable=invalid-name + # skip already scheduled + if node in scheduled: + continue + # Start with last time as the time to push to + if block not in block_bit_times: + block_bit_times[block] = { + q: self._max_block_t1[block] for q in self._dag.wires + } + + # Calculate the latest available time to push to collectively for tied nodes + tied_nodes = self._node_tied_to.get(node, None) + if tied_nodes is not None: + # Take the minimum time that will be schedulable + # self._node_tied_to includes the node itself. + new_times = [ + _calculate_new_times(block, tied_node, block_bit_times) + for tied_node in self._node_tied_to[node] + ] + new_time = min(new_times) + for tied_node in tied_nodes: + _update_time(block, tied_node, new_time, block_bit_times) + + else: + new_t0 = _calculate_new_times(block, node, block_bit_times) + _update_time(block, node, new_t0, block_bit_times) + + self._node_start_time = new_node_start_time + self._node_stop_time = new_node_stop_time diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py new file mode 100644 index 000000000..b2bc81737 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py @@ -0,0 +1,289 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Utility functions for scheduling passes.""" + +import warnings +from typing import List, Generator, Optional, Tuple, Union + +from qiskit.circuit import ControlFlowOp, Measure, Reset, Parameter +from qiskit.dagcircuit import DAGCircuit, DAGOpNode +from qiskit.transpiler.instruction_durations import ( + InstructionDurations, + InstructionDurationsType, +) +from qiskit.transpiler.exceptions import TranspilerError + + +def block_order_op_nodes(dag: DAGCircuit) -> Generator[DAGOpNode, None, None]: + """Yield nodes such that they are sorted into groups of blocks that minimize synchronization. + + Measurements are also grouped. + """ + + def _is_grouped_measure(node: DAGOpNode) -> bool: + """Does this node need to be grouped?""" + return isinstance(node.op, (Reset, Measure)) + + def _is_block_trigger(node: DAGOpNode) -> bool: + """Does this node trigger the end of a block?""" + return isinstance(node.op, ControlFlowOp) + + def _emit( + node: DAGOpNode, + grouped_measure: List[DAGOpNode], + block_triggers: List[DAGOpNode], + ) -> bool: + """Should we emit this node?""" + for measure in grouped_measure: + if dag.is_predecessor(node, measure): + return True + for block_trigger in block_triggers: + if dag.is_predecessor(node, block_trigger): + return True + + return _is_grouped_measure(node) or _is_block_trigger(node) + + # Begin processing nodes in order + next_nodes = dag.topological_op_nodes() + while next_nodes: + curr_nodes = next_nodes # Setup the next iteration nodes + next_nodes_set = set() # Nodes that will make it into the next iteration + next_nodes = [] # Nodes to process in order in the next iteration + to_push = [] # Do we push this to the very last block? + yield_measures = [] # Measures/resets we will yield first + yield_block_triggers = [] # Followed by block triggers (conditionals) + block_break = False # Did we encounter a block trigger in this iteration? 
+ for node in curr_nodes: + # If we have added this node to the next set of nodes + # skip for now. + if node in next_nodes_set: + next_nodes.append(node) + continue + + # If this nodes is a measurement + # push on the measurements to process + if _is_grouped_measure(node): + block_break = True + node_descendants = dag.descendants(node) + next_nodes_set |= set(node_descendants) + yield_measures.append(node) + # If this node is a block push this onto + # the block trigger list. + elif _is_block_trigger(node): + block_break = True + node_descendants = dag.descendants(node) + next_nodes_set |= set(node_descendants) + yield_block_triggers.append(node) + # Otherwise we push onto the final list of blocks to emit + # as part of the final block. + else: + to_push.append(node) + + new_to_push = [] + for node in to_push: + node_descendants = dag.descendants(node) + if any( + _emit(descendant, yield_measures, yield_block_triggers) + for descendant in node_descendants + if isinstance(descendant, DAGOpNode) + ): + yield node + else: + new_to_push.append(node) + + to_push = new_to_push + + # First emit the measurements which will feed + for node in yield_measures: + yield node + # Into the block triggers we will emit. + for node in yield_block_triggers: + yield node + + # We're at the last block and emit the final nodes + if not block_break: + for node in to_push: + yield node + break + # Otherwise emit the final nodes + # Add to the front of the list to be processed next + to_push.extend(next_nodes) + next_nodes = to_push + + +InstrKey = Union[ + Tuple[str, None, None], + Tuple[str, Tuple[int], None], + Tuple[str, Tuple[int], Tuple[Parameter]], +] + + +class DynamicCircuitInstructionDurations(InstructionDurations): + """For dynamic circuits the IBM Qiskit backend currently + reports instruction durations that differ compared with those + required for the legacy Qobj-based path. For now we use this + class to report updated InstructionDurations. 
+ TODO: This would be mitigated by a specialized Backend/Target for + dynamic circuit backends. + """ + + MEASURE_PATCH_CYCLES = 160 + MEASURE_PATCH_ODD_OFFSET = 64 + + def __init__( + self, + instruction_durations: Optional[InstructionDurationsType] = None, + dt: float = None, + enable_patching: bool = True, + ): + """Dynamic circuit instruction durations.""" + self._enable_patching = enable_patching + super().__init__(instruction_durations=instruction_durations, dt=dt) + + def update( + self, inst_durations: Optional[InstructionDurationsType], dt: float = None + ) -> "DynamicCircuitInstructionDurations": + """Update self with inst_durations (inst_durations overwrite self). Overrides the default + durations for certain hardcoded instructions. + + Args: + inst_durations: Instruction durations to be merged into self (overwriting self). + dt: Sampling duration in seconds of the target backend. + + Returns: + InstructionDurations: The updated InstructionDurations. + + Raises: + TranspilerError: If the format of instruction_durations is invalid. + """ + + # First update as normal + super().update(inst_durations, dt=dt) + + if not self._enable_patching or inst_durations is None: + return self + + # Then update required instructions. This code is ugly + # because the InstructionDurations code is handling too many + # formats in update and this code must also. 
+ if isinstance(inst_durations, InstructionDurations): + for key in inst_durations.keys(): + self._patch_instruction(key) + else: + for name, qubits, _, parameters, _ in inst_durations: + if isinstance(qubits, int): + qubits = [qubits] + + if isinstance(parameters, (int, float)): + parameters = [parameters] + + if qubits is None: + key = (name, None, None) + elif parameters is None: + key = (name, tuple(qubits), None) + else: + key = (name, tuple(qubits), tuple(parameters)) + + self._patch_instruction(key) + + return self + + def _patch_instruction(self, key: InstrKey) -> None: + """Dispatcher logic for instruction patches""" + name = key[0] + if name == "measure": + self._patch_measurement(key) + elif name == "reset": + self._patch_reset(key) + + def _patch_measurement(self, key: InstrKey) -> None: + """Patch measurement duration by extending duration by 160dt as temporarily + required by the dynamic circuit backend. + """ + prev_duration, unit = self._get_duration_dt(key) + if unit != "dt": + raise TranspilerError('Can currently only patch durations of "dt".') + odd_cycle_correction = self._get_odd_cycle_correction() + self._patch_key( + key, prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, unit + ) + # Enforce patching of reset on measurement update + self._patch_reset(("reset", key[1], key[2])) + + def _patch_reset(self, key: InstrKey) -> None: + """Patch reset duration by extending duration by measurement patch as temporarily + required by the dynamic circuit backend. 
+ """ + # We patch the reset to be the duration of the measurement if it + # is available as it currently + # triggers the end of scheduling after the measurement pulse + measure_key = ("measure", key[1], key[2]) + try: + measure_duration, unit = self._get_duration_dt(measure_key) + self._patch_key(key, measure_duration, unit) + except KeyError: + # Fall back to reset key if measure not available + prev_duration, unit = self._get_duration_dt(key) + if unit != "dt": + raise TranspilerError('Can currently only patch durations of "dt".') + odd_cycle_correction = self._get_odd_cycle_correction() + self._patch_key( + key, + prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, + unit, + ) + + def _get_duration_dt(self, key: InstrKey) -> Tuple[int, str]: + """Handling for the complicated structure of this class. + + TODO: This class implementation should be simplified in Qiskit. Too many edge cases. + """ + if key[1] is None and key[2] is None: + return self.duration_by_name[key[0]] + elif key[2] is None: + return self.duration_by_name_qubits[(key[0], key[1])] + + return self.duration_by_name_qubits_params[key] + + def _patch_key(self, key: InstrKey, duration: int, unit: str) -> None: + """Handling for the complicated structure of this class. + + TODO: This class implementation should be simplified in Qiskit. Too many edge cases. 
+ """ + if key[1] is None and key[2] is None: + self.duration_by_name[key[0]] = (duration, unit) + elif key[2] is None: + self.duration_by_name_qubits[(key[0], key[1])] = (duration, unit) + + self.duration_by_name_qubits_params[key] = (duration, unit) + + def _get_odd_cycle_correction(self) -> int: + """Determine the amount of the odd cycle correction to apply + For devices with short gates with odd lenghts we add an extra 16dt to the measurement + + TODO: Eliminate the need for this correction + """ + key_pulse = "sx" + key_qubit = 0 + try: + key_duration = self.get(key_pulse, key_qubit, "dt") + except TranspilerError: + warnings.warn( + f"No {key_pulse} gate found for {key_qubit} for detection of " + "short odd gate lengths, default measurement timing will be used." + ) + key_duration = 160 # keyPulse gate not found + + if key_duration < 160 and key_duration % 32: + return self.MEASURE_PATCH_ODD_OFFSET + return 0 diff --git a/qiskit_ibm_runtime/transpiler/plugin.py b/qiskit_ibm_runtime/transpiler/plugin.py new file mode 100644 index 000000000..75f70cfe4 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/plugin.py @@ -0,0 +1,98 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Plugin for IBM provider backend transpiler stages.""" + +from typing import Optional + +from qiskit.transpiler.passmanager import PassManager +from qiskit.transpiler.passmanager_config import PassManagerConfig +from qiskit.transpiler.preset_passmanagers.plugin import PassManagerStagePlugin +from qiskit.transpiler.preset_passmanagers import common +from qiskit.transpiler.passes import ConvertConditionsToIfOps + +from qiskit_ibm_provider.transpiler.passes.basis.convert_id_to_delay import ( + ConvertIdToDelay, +) + + +class IBMTranslationPlugin(PassManagerStagePlugin): + """A translation stage plugin for targeting Qiskit circuits + to IBM Quantum systems.""" + + def pass_manager( + self, + pass_manager_config: PassManagerConfig, + optimization_level: Optional[int] = None, + ) -> PassManager: + """Build IBMTranslationPlugin PassManager.""" + + translator_pm = common.generate_translation_passmanager( + target=pass_manager_config.target, + basis_gates=pass_manager_config.basis_gates, + approximation_degree=pass_manager_config.approximation_degree, + coupling_map=pass_manager_config.coupling_map, + backend_props=pass_manager_config.backend_properties, + unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, + unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, + hls_config=pass_manager_config.hls_config, + ) + + plugin_passes = [] + instruction_durations = pass_manager_config.instruction_durations + if instruction_durations: + plugin_passes.append(ConvertIdToDelay(instruction_durations)) + + return PassManager(plugin_passes) + translator_pm + + +class IBMDynamicTranslationPlugin(PassManagerStagePlugin): + """A translation stage plugin for targeting Qiskit circuits + to IBM Quantum systems.""" + + def pass_manager( + self, + pass_manager_config: PassManagerConfig, + optimization_level: Optional[int] = None, + ) -> PassManager: + """Build IBMTranslationPlugin PassManager.""" + + translator_pm = 
common.generate_translation_passmanager( + target=pass_manager_config.target, + basis_gates=pass_manager_config.basis_gates, + approximation_degree=pass_manager_config.approximation_degree, + coupling_map=pass_manager_config.coupling_map, + backend_props=pass_manager_config.backend_properties, + unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, + unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, + hls_config=pass_manager_config.hls_config, + ) + + instruction_durations = pass_manager_config.instruction_durations + plugin_passes = [] + if pass_manager_config.target is not None: + id_supported = "id" in pass_manager_config.target + else: + id_supported = "id" in pass_manager_config.basis_gates + + if instruction_durations and not id_supported: + plugin_passes.append(ConvertIdToDelay(instruction_durations)) + + # Only inject control-flow conversion pass at level 0 and level 1. As of + # qiskit 0.22.x transpile() with level 2 and 3 does not support + # control flow instructions (including if_else). This can be + # removed when higher optimization levels support control flow + # instructions. 
+ if optimization_level in {0, 1}: + plugin_passes += [ConvertConditionsToIfOps()] + + return PassManager(plugin_passes) + translator_pm From 60c5ecd54ef405ecd7041ede5bcdd5c4caad51ce Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 12:17:25 +0000 Subject: [PATCH 30/47] black --- .../passes/scheduling/block_base_padder.py | 41 +++++------------ .../passes/scheduling/dynamical_decoupling.py | 44 +++++-------------- .../transpiler/passes/scheduling/scheduler.py | 42 +++++------------- .../transpiler/passes/scheduling/utils.py | 4 +- 4 files changed, 32 insertions(+), 99 deletions(-) diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py index 833bb5253..1232750a5 100644 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py @@ -102,9 +102,7 @@ def run(self, dag: DAGCircuit) -> DAGCircuit: is inserted before this node is called. """ if not self._schedule_idle_qubits: - self._idle_qubits = set( - wire for wire in dag.idle_wires() if isinstance(wire, Qubit) - ) + self._idle_qubits = set(wire for wire in dag.idle_wires() if isinstance(wire, Qubit)) self._pre_runhook(dag) self._init_run(dag) @@ -272,15 +270,11 @@ def _get_node_duration(self, node: DAGNode) -> int: f"of {node.op.name} on qubits {indices} is not bounded." ) if duration is None: - raise TranspilerError( - f"Duration of {node.op.name} on qubits {indices} is not found." 
- ) + raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") return duration - def _needs_block_terminating_barrier( - self, prev_node: DAGNode, curr_node: DAGNode - ) -> bool: + def _needs_block_terminating_barrier(self, prev_node: DAGNode, curr_node: DAGNode) -> bool: # Only barrier if not in fast-path nodes is_fast_path_node = curr_node in self._fast_path_nodes @@ -292,10 +286,7 @@ def _is_terminating_barrier(node: DAGNode) -> bool: return not ( prev_node is None - or ( - isinstance(prev_node.op, ControlFlowOp) - and isinstance(curr_node.op, ControlFlowOp) - ) + or (isinstance(prev_node.op, ControlFlowOp) and isinstance(curr_node.op, ControlFlowOp)) or _is_terminating_barrier(prev_node) or _is_terminating_barrier(curr_node) or is_fast_path_node @@ -323,9 +314,7 @@ def _add_block_terminating_barrier( qubits = self._block_dag.qubits else: barrier = Barrier(self._block_dag.num_qubits() - len(self._idle_qubits)) - qubits = [ - x for x in self._block_dag.qubits if x not in self._idle_qubits - ] + qubits = [x for x in self._block_dag.qubits if x not in self._idle_qubits] barrier_node = self._apply_scheduled_op( block_idx, @@ -423,16 +412,13 @@ def _will_use_fast_path(self, node: DAGNode) -> bool: if not ( last_node_in_block and isinstance(last_node.op, Measure) - and set(self._map_wires(node.qargs)) - == set(self._map_wires(last_node.qargs)) + and set(self._map_wires(node.qargs)) == set(self._map_wires(last_node.qargs)) ): return False # Fast path contents are limited to gates and delays for block in node.op.blocks: - if not all( - isinstance(inst.operation, (Gate, Delay)) for inst in block.data - ): + if not all(isinstance(inst.operation, (Gate, Delay)) for inst in block.data): return False return True @@ -486,9 +472,7 @@ def _visit_control_flow_op(self, node: DAGNode) -> None: if fast_path_node: padded_qubits = node.qargs elif not self._schedule_idle_qubits: - padded_qubits = [ - q for q in self._block_dag.qubits if q not in 
self._idle_qubits - ] + padded_qubits = [q for q in self._block_dag.qubits if q not in self._idle_qubits] else: padded_qubits = self._block_dag.qubits self._apply_scheduled_op( @@ -544,9 +528,7 @@ def _visit_generic(self, node: DAGNode) -> None: # Fill idle time with some sequence if t0 - self._idle_after.get(bit, 0) > 0: # Find previous node on the wire, i.e. always the latest node on the wire - prev_node = next( - self._block_dag.predecessors(self._block_dag.output_map[bit]) - ) + prev_node = next(self._block_dag.predecessors(self._block_dag.output_map[bit])) self._pad( block_idx=block_idx, qubit=bit, @@ -569,10 +551,7 @@ def _visit_generic(self, node: DAGNode) -> None: self._map_wires(node.cargs), ) self._last_node_to_touch.update( - { - bit: (new_node, self._block_dag) - for bit in new_node.qargs + new_node.cargs - } + {bit: (new_node, self._block_dag) for bit in new_node.qargs + new_node.cargs} ) def _terminate_block(self, block_duration: int, block_idx: int) -> None: diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py index acae82ce4..006c53feb 100644 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py @@ -214,9 +214,7 @@ def __init__( self._alt_spacings = alt_spacings if self._spacings and len(self._spacings) != len(self._dd_sequences): - raise TranspilerError( - "Number of sequence spacings must equal number of DD sequences." - ) + raise TranspilerError("Number of sequence spacings must equal number of DD sequences.") if self._alt_spacings: if not self._coupling_map: @@ -244,9 +242,7 @@ def __init__( self._sequence_min_length_ratios = sequence_min_length_ratios # type: ignore if len(self._sequence_min_length_ratios) != len(self._dd_sequences): - raise TranspilerError( - "Number of sequence lengths must equal number of DD sequences." 
- ) + raise TranspilerError("Number of sequence lengths must equal number of DD sequences.") self._insert_multiple_cycles = insert_multiple_cycles @@ -266,9 +262,7 @@ def _pre_runhook(self, dag: DAGCircuit) -> None: spacings_required = self._spacings is None if spacings_required: self._spacings = [] # type: ignore - alt_spacings_required = ( - self._alt_spacings is None and self._coupling_map is not None - ) + alt_spacings_required = self._alt_spacings is None and self._coupling_map is not None if alt_spacings_required: self._alt_spacings = [] # type: ignore @@ -317,9 +311,7 @@ def _pre_runhook(self, dag: DAGCircuit) -> None: for gate in self._dd_sequences[seq_idx]: noop = noop.dot(gate.to_matrix()) if not matrix_equal(noop, IGate().to_matrix(), ignore_phase=True): - raise TranspilerError( - "The DD sequence does not make an identity operation." - ) + raise TranspilerError("The DD sequence does not make an identity operation.") self._sequence_phase = np.angle(noop[0][0]) # Precompute qubit-wise DD sequence length for performance @@ -335,9 +327,7 @@ def _pre_runhook(self, dag: DAGCircuit) -> None: for index, gate in enumerate(seq): try: # Check calibration. - gate_length = dag.calibrations[gate.name][ - (physical_index, gate.params) - ] + gate_length = dag.calibrations[gate.name][(physical_index, gate.params)] if gate_length % self._alignment != 0: # This is necessary to implement lightweight scheduling logic for this pass. 
# Usually the pulse alignment constraint and pulse data chunk size take @@ -434,9 +424,7 @@ def _pad( seq_ratio = self._sequence_min_length_ratios[sequence_idx] spacings = self._spacings[sequence_idx] alt_spacings = ( - np.asarray(self._alt_spacings[sequence_idx]) - if self._coupling_map - else None + np.asarray(self._alt_spacings[sequence_idx]) if self._coupling_map else None ) # Verify the delay duration exceeds the minimum time to insert @@ -472,21 +460,15 @@ def _pad( if len(dd_sequence) == 1: # Special case of using a single gate for DD u_inv = dd_sequence[0].inverse().to_matrix() - theta, phi, lam, phase = OneQubitEulerDecomposer().angles_and_phase( - u_inv - ) - if isinstance(next_node, DAGOpNode) and isinstance( - next_node.op, (UGate, U3Gate) - ): + theta, phi, lam, phase = OneQubitEulerDecomposer().angles_and_phase(u_inv) + if isinstance(next_node, DAGOpNode) and isinstance(next_node.op, (UGate, U3Gate)): # Absorb the inverse into the successor (from left in circuit) theta_r, phi_r, lam_r = next_node.op.params next_node.op.params = Optimize1qGates.compose_u3( theta_r, phi_r, lam_r, theta, phi, lam ) sequence_gphase += phase - elif isinstance(prev_node, DAGOpNode) and isinstance( - prev_node.op, (UGate, U3Gate) - ): + elif isinstance(prev_node, DAGOpNode) and isinstance(prev_node.op, (UGate, U3Gate)): # Absorb the inverse into the predecessor (from right in circuit) theta_l, phi_l, lam_l = prev_node.op.params prev_node.op.params = Optimize1qGates.compose_u3( @@ -554,18 +536,14 @@ def _constrained_length(values: np.array) -> np.array: # 2. There is an extra delay inserted after the last operation. # The condition below handles both. 
seq_length = int(len(taus) / num_sequences) - if len(dd_sequence) == len(taus) or tau_idx % seq_length != ( - seq_length - 1 - ): + if len(dd_sequence) == len(taus) or tau_idx % seq_length != (seq_length - 1): gate = dd_sequence[dd_ind] gate_length = seq_lengths[dd_ind] self._apply_scheduled_op(block_idx, idle_after, gate, qubit) idle_after += gate_length dd_ind += 1 - self._block_dag.global_phase = ( - self._block_dag.global_phase + sequence_gphase - ) + self._block_dag.global_phase = self._block_dag.global_phase + sequence_gphase return # DD could not be applied, delay instead diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py index 2a2f6c57e..b18ee32c6 100644 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py @@ -154,9 +154,7 @@ def _visit_control_flow_op(self, node: DAGNode) -> None: new_dag = circuit_to_dag(block) inner_wire_map = { inner: outer - for outer, inner in zip( - self._map_wires(node), new_dag.qubits + new_dag.clbits - ) + for outer, inner in zip(self._map_wires(node), new_dag.qubits + new_dag.clbits) } node_block_dags.append(new_dag) self._visit_block(new_dag, inner_wire_map) @@ -226,9 +224,7 @@ def _get_duration(self, node: DAGNode, dag: Optional[DAGCircuit] = None) -> int: f"of {node.op.name} on qubits {indices} is not bounded." ) if duration is None: - raise TranspilerError( - f"Duration of {node.op.name} on qubits {indices} is not found." 
- ) + raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") return duration @@ -266,9 +262,7 @@ def _flush_measures(self) -> None: def _current_block_measure_qargs(self) -> Set[Qubit]: return set( - qarg - for measure in self._current_block_measures - for qarg in self._map_qubits(measure) + qarg for measure in self._current_block_measures for qarg in self._map_qubits(measure) ) def _check_flush_measures(self, node: DAGNode) -> None: @@ -381,10 +375,7 @@ def _visit_measure(self, node: DAGNode) -> None: t0q = max( # pylint: disable=invalid-name itertools.chain( [t0q], - ( - self._node_start_time[measure][1] - for measure in self._current_block_measures - ), + (self._node_start_time[measure][1] for measure in self._current_block_measures), ) ) @@ -393,9 +384,7 @@ def _visit_measure(self, node: DAGNode) -> None: for measure in self._current_block_measures: t0 = t0q # pylint: disable=invalid-name - bit_indices = { - bit: index for index, bit in enumerate(self._block_dag.qubits) - } + bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} measure_duration = self._durations.get( Measure(), [bit_indices[qarg] for qarg in self._map_qubits(measure)], @@ -514,10 +503,7 @@ def _visit_measure(self, node: DAGNode) -> None: t0q = max( # pylint: disable=invalid-name itertools.chain( [t0q], - ( - self._node_start_time[measure][1] - for measure in self._current_block_measures - ), + (self._node_start_time[measure][1] for measure in self._current_block_measures), ) ) @@ -526,9 +512,7 @@ def _visit_measure(self, node: DAGNode) -> None: for measure in self._current_block_measures: t0 = t0q # pylint: disable=invalid-name - bit_indices = { - bit: index for index, bit in enumerate(self._block_dag.qubits) - } + bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} measure_duration = self._durations.get( Measure(), [bit_indices[qarg] for qarg in self._map_qubits(measure)], @@ -580,9 +564,7 @@ def 
_push_block_durations(self) -> None: # Iterated nodes starting at the first, from the node with the # last time, preferring barriers over non-barriers - def order_ops( - item: Tuple[DAGNode, Tuple[int, int]] - ) -> Tuple[int, int, bool, int]: + def order_ops(item: Tuple[DAGNode, Tuple[int, int]]) -> Tuple[int, int, bool, int]: """Iterated nodes ordering by channel, time and preferring that barriers are processed first.""" return ( @@ -600,9 +582,7 @@ def order_ops( def _calculate_new_times( block: int, node: DAGNode, block_bit_times: Dict[int, Dict[Qubit, int]] ) -> int: - max_block_time = min( - block_bit_times[block][bit] for bit in self._map_qubits(node) - ) + max_block_time = min(block_bit_times[block][bit] for bit in self._map_qubits(node)) t0 = self._node_start_time[node][1] # pylint: disable=invalid-name t1 = self._node_stop_time[node][1] # pylint: disable=invalid-name @@ -640,9 +620,7 @@ def _update_time( continue # Start with last time as the time to push to if block not in block_bit_times: - block_bit_times[block] = { - q: self._max_block_t1[block] for q in self._dag.wires - } + block_bit_times[block] = {q: self._max_block_t1[block] for q in self._dag.wires} # Calculate the latest available time to push to collectively for tied nodes tied_nodes = self._node_tied_to.get(node, None) diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py index b2bc81737..bf7665cd1 100644 --- a/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py @@ -214,9 +214,7 @@ def _patch_measurement(self, key: InstrKey) -> None: if unit != "dt": raise TranspilerError('Can currently only patch durations of "dt".') odd_cycle_correction = self._get_odd_cycle_correction() - self._patch_key( - key, prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, unit - ) + self._patch_key(key, prev_duration + self.MEASURE_PATCH_CYCLES + 
odd_cycle_correction, unit) # Enforce patching of reset on measurement update self._patch_reset(("reset", key[1], key[2])) From 5906284cc8603926cb90656680b63e4ace7005dd Mon Sep 17 00:00:00 2001 From: kevin-tian Date: Fri, 27 Oct 2023 11:18:45 -0400 Subject: [PATCH 31/47] fix more tests --- qiskit_ibm_runtime/ibm_backend.py | 4 +++- test/integration/test_backend.py | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 552a2b3fb..c2031b4fa 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -752,7 +752,9 @@ def _runtime_run( image: Optional[str] = None, ) -> RuntimeJob: """Runs the runtime program and returns the corresponding job object""" - hgp_name = self._instance or self._service._get_hgp().name + hgp_name = None + if self._service._channel == "ibm_quantum": + hgp_name = self._instance or self._service._get_hgp().name session = self._session diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index b53d59e06..605ab8bc2 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -82,7 +82,7 @@ def setUpClass(cls): super().setUpClass() if cls.dependencies.channel == "ibm_cloud": # TODO use real device when cloud supports it - cls.backend = cls.dependencies.service.least_busy(min_num_qubits=5) + cls.backend = cls.dependencies.service.least_busy(simulator=False, min_num_qubits=5) if cls.dependencies.channel == "ibm_quantum": cls.backend = cls.dependencies.service.least_busy( simulator=False, min_num_qubits=5, instance=cls.dependencies.instance @@ -234,9 +234,11 @@ def test_paused_backend_warning(self): with self.assertWarns(Warning): backend.run(ReferenceCircuits.bell()) - @quantum_only def test_backend_wrong_instance(self): """Test that an error is raised when retrieving a backend not in the instance.""" + if self.dependencies.channel == "ibm_cloud": + raise SkipTest("Cloud 
channel does not have instance.") + backends = self.service.backends() hgps = self.service._hgps.values() if len(hgps) >= 2: From 9b8d4aab60ac59cf8ddb77c02d032b0b86773865 Mon Sep 17 00:00:00 2001 From: kevin-tian Date: Fri, 27 Oct 2023 12:55:21 -0400 Subject: [PATCH 32/47] update test_session --- test/integration/test_session.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/test_session.py b/test/integration/test_session.py index abccdd796..6b7d77c5d 100644 --- a/test/integration/test_session.py +++ b/test/integration/test_session.py @@ -120,6 +120,7 @@ def test_backend_run_with_session(self): backend = self.service.backend("ibmq_qasm_simulator") backend.open_session() result = backend.run(circuits=ReferenceCircuits.bell(), shots=shots).result() + backend.cancel_session() self.assertIsInstance(result, Result) self.assertEqual(result.results[0].shots, shots) self.assertAlmostEqual( From 064a8c0cda98fc9fef6849cc91a67757b6bb1a98 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 29 Oct 2023 11:26:03 +0000 Subject: [PATCH 33/47] added tranpiler passes entry point --- setup.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/setup.py b/setup.py index e89854899..60f7240bf 100644 --- a/setup.py +++ b/setup.py @@ -77,4 +77,10 @@ "Documentation": "https://qiskit.org/documentation/", "Source Code": "https://github.com/Qiskit/qiskit-ibm-runtime", }, + entry_points={ + "qiskit.transpiler.translation": [ + "ibm_backend = qiskit_ibm_provider.transpiler.plugin:IBMTranslationPlugin", + "ibm_dynamic_circuits = qiskit_ibm_provider.transpiler.plugin:IBMDynamicTranslationPlugin", + ] + }, ) From 7d1c88580a2e2d24dc1e3091fbf3c119f62165e8 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 29 Oct 2023 12:55:36 +0000 Subject: [PATCH 34/47] Removed obsolete JobStatus types, and removed the tests that were checking them --- qiskit_ibm_runtime/constants.py | 11 - test/jobtestcase.py | 33 -- test/unit/test_ibm_job_states.py | 606 
------------------------------- 3 files changed, 650 deletions(-) delete mode 100644 test/jobtestcase.py delete mode 100644 test/unit/test_ibm_job_states.py diff --git a/qiskit_ibm_runtime/constants.py b/qiskit_ibm_runtime/constants.py index ed69c468c..3a5568cab 100644 --- a/qiskit_ibm_runtime/constants.py +++ b/qiskit_ibm_runtime/constants.py @@ -23,21 +23,10 @@ QISKIT_IBM_RUNTIME_API_URL = "https://auth.quantum-computing.ibm.com/api" API_TO_JOB_STATUS = { - "CREATING": JobStatus.INITIALIZING, - "CREATED": JobStatus.INITIALIZING, - "TRANSPILING": JobStatus.INITIALIZING, - "TRANSPILED": JobStatus.INITIALIZING, - "VALIDATING": JobStatus.VALIDATING, - "VALIDATED": JobStatus.VALIDATING, "QUEUED": JobStatus.QUEUED, - "PENDING_IN_QUEUE": JobStatus.QUEUED, "RUNNING": JobStatus.RUNNING, "COMPLETED": JobStatus.DONE, "FAILED": JobStatus.ERROR, - "ERROR_CREATING_JOB": JobStatus.ERROR, - "ERROR_VALIDATING_JOB": JobStatus.ERROR, - "ERROR_RUNNING_JOB": JobStatus.ERROR, - "ERROR_TRANSPILING_JOB": JobStatus.ERROR, "CANCELLED": JobStatus.CANCELLED, } diff --git a/test/jobtestcase.py b/test/jobtestcase.py deleted file mode 100644 index 4c38170d3..000000000 --- a/test/jobtestcase.py +++ /dev/null @@ -1,33 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2021. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. 
- -"""Custom TestCase for Jobs.""" - -import time - -from qiskit.providers import JobStatus - -from .ibm_test_case import IBMTestCase - - -class JobTestCase(IBMTestCase): - """Include common functionality when testing jobs.""" - - def wait_for_initialization(self, job, timeout=1): - """Waits until job progresses from `INITIALIZING` to other status.""" - waited = 0 - wait = 0.1 - while job.status() is JobStatus.INITIALIZING: - time.sleep(wait) - waited += wait - if waited > timeout: - self.fail(msg="The JOB is still initializing after timeout ({}s)".format(timeout)) diff --git a/test/unit/test_ibm_job_states.py b/test/unit/test_ibm_job_states.py deleted file mode 100644 index f92e33d69..000000000 --- a/test/unit/test_ibm_job_states.py +++ /dev/null @@ -1,606 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2021. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. 
- -# pylint: disable=missing-docstring - -"""IBMJob states test-suite.""" - -import copy -import time -import json -from datetime import datetime -from concurrent import futures -from concurrent.futures import ThreadPoolExecutor -from contextlib import suppress -from unittest import mock -from unittest.mock import MagicMock -from typing import List, Any, Dict - -from qiskit import transpile -from qiskit.providers import JobTimeoutError -from qiskit.providers.jobstatus import JobStatus -from qiskit.providers.fake_provider.backends.bogota.fake_bogota import FakeBogota - -from qiskit.test.reference_circuits import ReferenceCircuits - -from qiskit_ibm_provider.apiconstants import API_JOB_FINAL_STATES, ApiJobStatus - -from qiskit_ibm_runtime.api.exceptions import ( - ApiError, - UserTimeoutExceededError, - ApiIBMProtocolError, -) -from qiskit_ibm_runtime import IBMBackend -from qiskit_ibm_runtime.exceptions import RuntimeInvalidStateError -from ..jobtestcase import JobTestCase - -MOCKED_ERROR_RESULT: Dict[str, Any] = { - "qObjectResult": { - "backend_name": "fake_backend", - "backend_version": "0.1.1", - "qobj_id": "123", - "job_id": "123", - "success": False, - "results": [ - {"status": "DONE", "success": True, "shots": 1, "data": {}}, - {"status": "Error 1", "success": False, "shots": 1, "data": {}}, - {"status": "Error 2", "success": False, "shots": 1, "data": {}}, - ], - } -} - -VALID_QOBJ_RESPONSE = { - "status": "COMPLETED", - "kind": "q-object", - "creationDate": "2019-01-01T12:57:15.052Z", - "id": "0123456789", - "qObjectResult": { - "backend_name": "ibmqx2", - "backend_version": "1.1.1", - "job_id": "XC1323XG2", - "qobj_id": "Experiment1", - "success": True, - "status": "COMPLETED", - "results": [ - { - "header": { - "name": "Bell state", - "creg_sizes": [["c", 2]], - "clbit_labels": [["c", 0], ["c", 1]], - "qubit_labels": [["q", 0], ["q", 1]], - }, - "shots": 1024, - "status": "DONE", - "success": True, - "data": {"counts": {"0x0": 480, "0x3": 490, "0x1": 20, 
"0x2": 34}}, - }, - { - "header": { - "name": "Bell state XY", - "creg_sizes": [["c", 2]], - "clbit_labels": [["c", 0], ["c", 1]], - "qubit_labels": [["q", 0], ["q", 1]], - }, - "shots": 1024, - "status": "DONE", - "success": True, - "data": {"counts": {"0x0": 29, "0x3": 15, "0x1": 510, "0x2": 480}}, - }, - ], - }, -} - - -VALID_JOB_RESPONSE = { - "id": "TEST_ID", - "job_id": "TEST_ID", - "kind": "q-object", - "status": "CREATING", - "creation_date": "2019-01-01T13:15:58.425972", -} - - -class TestIBMJobStates(JobTestCase): - """Test the states of an IBMJob.""" - - def setUp(self): - """Initial test setup.""" - super().setUp() - self._current_api = None - self._current_qjob = None - - def test_done_status(self): - """Test job status progresses to done.""" - job = self.run_with_api(QueuedAPI()) - - self.assertFalse(job.done()) - self.wait_for_initialization(job) - - self._current_api.progress() - self.assertFalse(job.done()) - - self._current_api.progress() - self.assertTrue(job.done()) - - def test_running_status(self): - """Test job status progresses to running.""" - job = self.run_with_api(ValidatingAPI()) - - self.assertFalse(job.running()) - self.wait_for_initialization(job) - - self._current_api.progress() - self.assertTrue(job.running()) - - def test_cancelled_status(self): - """Test job status is cancelled.""" - job = self.run_with_api(CancellableAPI()) - - self.assertFalse(job.cancelled()) - self.wait_for_initialization(job) - - self._current_api.progress() - self.assertTrue(job.cancelled()) - - def test_validating_job(self): - """Test job status is validating.""" - job = self.run_with_api(ValidatingAPI()) - - self.wait_for_initialization(job) - self.assertEqual(job.status(), JobStatus.VALIDATING) - - def test_error_while_creating_job(self): - """Test job failing during creation.""" - job = self.run_with_api(ErrorWhileCreatingAPI()) - - self.wait_for_initialization(job) - self.assertEqual(job.status(), JobStatus.ERROR) - - def 
test_error_while_validating_job(self): - """Test job failing during validation.""" - job = self.run_with_api(ErrorWhileValidatingAPI()) - - self.wait_for_initialization(job) - self.assertEqual(job.status(), JobStatus.VALIDATING) - - self._current_api.progress() - self.assertEqual(job.status(), JobStatus.ERROR) - - def test_status_flow_for_non_queued_job(self): - """Test job status progressing to done without being queued.""" - job = self.run_with_api(NonQueuedAPI()) - - self.wait_for_initialization(job) - self.assertEqual(job.status(), JobStatus.RUNNING) - - self._current_api.progress() - self.assertEqual(job.status(), JobStatus.DONE) - - def test_status_flow_for_queued_job(self): - """Test job status progressing from queued to done.""" - job = self.run_with_api(QueuedAPI()) - - self.wait_for_initialization(job) - self.assertEqual(job.status(), JobStatus.QUEUED) - - self._current_api.progress() - self.assertEqual(job.status(), JobStatus.RUNNING) - - self._current_api.progress() - self.assertEqual(job.status(), JobStatus.DONE) - - def test_status_flow_for_cancellable_job(self): - """Test job status going from running to cancelled.""" - job = self.run_with_api(CancellableAPI()) - - self.wait_for_initialization(job) - self.assertEqual(job.status(), JobStatus.RUNNING) - - job.cancel() - - self._current_api.progress() - self.assertEqual(job.status(), JobStatus.CANCELLED) - - def test_status_flow_for_unable_to_run_valid_qobj(self): - """Test API error while running a job.""" - with self.assertRaises(ApiError): - self.run_with_api(UnavailableRunAPI()) - - # TODO fix test case - def test_error_while_running_job(self): - """Test job failed.""" - job = self.run_with_api(ErrorWhileRunningAPI()) - - self.wait_for_initialization(job) - self.assertEqual(job.status(), JobStatus.RUNNING) - - self._current_api.progress() - self.assertEqual(job.status(), JobStatus.ERROR) - # self.assertIn("Error 1", job.error_message()) - # self.assertIn("Error 2", job.error_message()) - - def 
test_cancelled_result(self): - """Test getting results for a cancelled job.""" - job = self.run_with_api(CancellableAPI()) - - self.wait_for_initialization(job) - job.cancel() - self._current_api.progress() - with self.assertRaises(RuntimeInvalidStateError): - _ = job.result() - self.assertEqual(job.status(), JobStatus.CANCELLED) - - def test_completed_result(self): - """Test getting results for a completed job.""" - job = self.run_with_api(NonQueuedAPI()) - - self.wait_for_initialization(job) - self._current_api.progress() - self.assertEqual(job.result().success, True) - self.assertEqual(job.status(), JobStatus.DONE) - - def test_block_on_result_waiting_until_completed(self): - """Test waiting for job results.""" - - job = self.run_with_api(NonQueuedAPI()) - with futures.ThreadPoolExecutor() as executor: - executor.submit(_auto_progress_api, self._current_api) - - result = job.result() - self.assertEqual(result.success, True) - self.assertEqual(job.status(), JobStatus.DONE) - - def test_block_on_result_waiting_until_cancelled(self): - """Test canceling job while waiting for results.""" - - job = self.run_with_api(CancellableAPI()) - with ThreadPoolExecutor() as executor: - executor.submit(_auto_progress_api, self._current_api) - - with self.assertRaises(RuntimeInvalidStateError): - job.result() - - self.assertEqual(job.status(), JobStatus.CANCELLED) - - def test_never_complete_result_with_timeout(self): - """Test timing out while waiting for job results.""" - job = self.run_with_api(NonQueuedAPI()) - - self.wait_for_initialization(job) - with self.assertRaises(JobTimeoutError): - job.result(timeout=0.2) - - def test_only_final_states_cause_detailed_request(self): - """Test job status call does not provide detailed information.""" - # The state ERROR_CREATING_JOB is only handled when running the job, - # and not while checking the status, so it is not tested. 
- all_state_apis = { - "COMPLETED": NonQueuedAPI, - "CANCELLED": CancellableAPI, - "ERROR_VALIDATING_JOB": ErrorWhileValidatingAPI, - "ERROR_RUNNING_JOB": ErrorWhileRunningAPI, - } - - for status, api in all_state_apis.items(): - with self.subTest(status=status): - job = self.run_with_api(api()) - self.wait_for_initialization(job) - - with suppress(BaseFakeAPI.NoMoreStatesError): - self._current_api.progress() - - with mock.patch.object( - self._current_api, "job_get", wraps=self._current_api.job_get - ): - job.status() - if ApiJobStatus(status) in API_JOB_FINAL_STATES: - self.assertTrue(self._current_api.job_get.called) - else: - self.assertFalse(self._current_api.job_get.called) - - def test_transpiling_status(self): - """Test transpiling job state.""" - job = self.run_with_api(TranspilingStatusAPI()) - time.sleep(0.2) - self.assertEqual(job.status(), JobStatus.INITIALIZING) - - def run_with_api(self, api): - """Creates a new ``IBMJob`` running with the provided API object.""" - backend = IBMBackend(FakeBogota().configuration(), MagicMock(), api_client=api) - backend._api_client = api - circuit = transpile(ReferenceCircuits.bell()) - self._current_api = api - self._current_qjob = backend.run(circuit) - self._current_qjob.refresh = MagicMock() - return self._current_qjob - - -def _auto_progress_api(api, interval=0.2): - """Progress a ``BaseFakeAPI`` instance every `interval` seconds until reaching - the final state. 
- """ - with suppress(BaseFakeAPI.NoMoreStatesError): - while True: - time.sleep(interval) - api.progress() - - -class BaseFakeAPI: - """Base class for faking the IBM Quantum API.""" - - class NoMoreStatesError(Exception): - """Raised when it is not possible to progress more.""" - - _job_status: List[Any] = [] - - _can_cancel = False - - def __init__(self): - """BaseFakeAPI constructor.""" - self._params = MagicMock() - self._state = 0 - self.config = {"hub": None, "group": None, "project": None} - if self._can_cancel: - self.config.update( - {"hub": "test-hub", "group": "test-group", "project": "test-project"} - ) - - def job_get(self, job_id): - """Return information about a job.""" - if not job_id: - return {"status": "Error", "error": "Job ID not specified"} - - return { - "created": datetime.now().isoformat(), - "state": self._job_status[self._state], - "metadata": {}, - } - - def job_metadata(self, job_id: str) -> Dict: - """Return job metadata""" - return self.job_get(job_id)["metadata"] - - def job_status(self, job_id): - """Return the status of a job.""" - summary_fields = ["status", "error", "info_queue"] - complete_response = self.job_get(job_id)["state"] - try: - ApiJobStatus(complete_response["status"]) - except ValueError: - raise ApiIBMProtocolError("Api Error") - return {key: value for key, value in complete_response.items() if key in summary_fields} - - @staticmethod - def program_run(*_args, **_kwargs): - """Submit the job.""" - time.sleep(0.2) - return VALID_JOB_RESPONSE - - @classmethod - def job_submit(cls, *_args, **_kwargs): - """Submit the job.""" - time.sleep(0.2) - return VALID_JOB_RESPONSE - - def job_cancel(self, job_id, *_args, **_kwargs): - """Cancel the job.""" - if not job_id: - return {"status": "Error", "error": "Job ID not specified"} - return ( - {"cancelled": True} - if self._can_cancel - else {"error": "testing fake API can not cancel"} - ) - - def job_final_status(self, job_id, *_args, **_kwargs): - """Wait for job to enter a 
final state.""" - start_time = time.time() - status_response = self.job_status(job_id) - while ApiJobStatus(status_response["status"]) not in API_JOB_FINAL_STATES: - elapsed_time = time.time() - start_time - timeout = _kwargs.get("timeout", None) - if timeout is not None and elapsed_time >= timeout: - raise UserTimeoutExceededError("Timeout while waiting for job {}".format(job_id)) - time.sleep(5) - status_response = self.job_status(job_id) - return status_response - - def job_results(self, job_id: str) -> Any: - """Return job result""" - result = self.job_get(job_id) - return json.dumps(result["state"]["qObjectResult"]) - - def job_result(self, job_id, *_args, **_kwargs): - """Get job result.""" - return self.job_get(job_id)["qObjectResult"] - - def progress(self): - """Progress to the next job state.""" - if self._state == len(self._job_status) - 1: - raise self.NoMoreStatesError() - self._state += 1 - - @staticmethod - def backend_status(backend_name: str) -> Dict[str, Any]: - """Return the status of the backend.""" - return { - "backend_name": backend_name, - "backend_version": "0.0.0", - "operational": True, - "pending_jobs": 0, - "status_msg": "active", - } - - @staticmethod - def backend_properties(*args, **kwargs): # pylint: disable=unused-argument - return None - - @staticmethod - def job_type(job_id: str) -> str: - if job_id[0] != "c" and len(job_id) == 24: - return "IQX" - return "RUNTIME" - - -class UnknownStatusAPI(BaseFakeAPI): - """Class for emulating an API with unknown status codes.""" - - _job_status = [{"status": "UNKNOWN"}] - - -class ValidatingAPI(BaseFakeAPI): - """Class for emulating an API with job validation.""" - - _job_status = [{"status": "VALIDATING"}, {"status": "RUNNING"}] - - -class ErrorWhileValidatingAPI(BaseFakeAPI): - """Class for emulating an API processing an invalid job.""" - - _job_status = [ - {"status": "VALIDATING"}, - {"status": "ERROR_VALIDATING_JOB", **MOCKED_ERROR_RESULT}, - ] - - -class NonQueuedAPI(BaseFakeAPI): - 
"""Class for emulating a successfully-completed non-queued API.""" - - _job_status = [{"status": "RUNNING"}, VALID_QOBJ_RESPONSE] - - -class ErrorWhileCreatingAPI(BaseFakeAPI): - """Class emulating an API processing a job that errors while creating the job.""" - - _job_status = [{"status": "ERROR_CREATING_JOB", **MOCKED_ERROR_RESULT}] - - -class ErrorWhileRunningAPI(BaseFakeAPI): - """Class emulating an API processing a job that errors while running.""" - - _job_status = [ - {"status": "RUNNING"}, - {"status": "ERROR_RUNNING_JOB", **MOCKED_ERROR_RESULT}, - ] - - -class QueuedAPI(BaseFakeAPI): - """Class for emulating a successfully-completed queued API.""" - - _job_status = [{"status": "QUEUED"}, {"status": "RUNNING"}, {"status": "COMPLETED"}] - - -class RejectingJobAPI(BaseFakeAPI): - """Class for emulating an API unable of initializing.""" - - @classmethod - def job_submit(cls, *_args, **_kwargs): - return {"error": "invalid qobj"} - - -class UnavailableRunAPI(BaseFakeAPI): - """Class for emulating an API throwing before even initializing.""" - - @staticmethod - def program_run(*_args, **_kwargs): - time.sleep(0.2) - raise ApiError("Api Error") - - -class ThrowingAPI(BaseFakeAPI): - """Class for emulating an API throwing in the middle of execution.""" - - _job_status = [{"status": "RUNNING"}] - - def job_get(self, job_id): - raise ApiError("Api Error") - - -class ThrowingNonJobRelatedErrorAPI(BaseFakeAPI): - """Class for emulating an scenario where the job is done but the API - fails some times for non job-related errors. 
- """ - - _job_status = [{"status": "COMPLETED"}] - - def __init__(self, errors_before_success=2): - super().__init__() - self._number_of_exceptions_to_throw = errors_before_success - - def job_get(self, job_id): - if self._number_of_exceptions_to_throw != 0: - self._number_of_exceptions_to_throw -= 1 - raise ApiError("Api Error") - - return super().job_get(job_id) - - -class ThrowingGetJobAPI(BaseFakeAPI): - """Class for emulating an API throwing in the middle of execution. But not in - ``job_status()``, just in ``job_get()``. - """ - - _job_status = [{"status": "COMPLETED"}] - - def job_status(self, job_id): - return self._job_status[self._state] - - def job_get(self, job_id): - raise ApiError("Unexpected error") - - -class CancellableAPI(BaseFakeAPI): - """Class for emulating an API with cancellation.""" - - _job_status = [{"status": "RUNNING"}, {"status": "CANCELLED"}] - - _can_cancel = True - - -class NonCancellableAPI(BaseFakeAPI): - """Class for emulating an API without cancellation running a long job.""" - - _job_status = [{"status": "RUNNING"}, {"status": "RUNNING"}, {"status": "RUNNING"}] - - -class ErroredCancellationAPI(BaseFakeAPI): - """Class for emulating an API with cancellation but throwing while trying.""" - - _job_status = [{"status": "RUNNING"}, {"status": "RUNNING"}, {"status": "RUNNING"}] - - _can_cancel = True - - def job_cancel(self, job_id, *_args, **_kwargs): - return {"status": "Error", "error": "test-error-while-cancelling"} - - -class NoKindJobAPI(BaseFakeAPI): - """Class for emulating an API with QASM jobs.""" - - _job_status = [{"status": "COMPLETED"}] - - no_kind_response = copy.deepcopy(VALID_JOB_RESPONSE) - del no_kind_response["kind"] - - @classmethod - def job_submit(cls, *_args, **_kwargs): - return cls.no_kind_response - - @classmethod - def job_result(cls, job_id, *_args, **_kwargs): - return cls.no_kind_response - - -class TranspilingStatusAPI(BaseFakeAPI): - """Class for emulating an API with transpiling status codes.""" - - 
_job_status = [{"status": "TRANSPILING"}, {"status": "TRANSPILED"}] From 99c27f0b2c9e255b7560e563e95259eda44dcea2 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 29 Oct 2023 12:56:30 +0000 Subject: [PATCH 35/47] Removed unnecessary check --- qiskit_ibm_runtime/ibm_backend.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index c2031b4fa..4edf6b1b6 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -521,13 +521,12 @@ def _check_circuits_attributes(self, circuits: List[QuantumCircuit]) -> None: f"maximum for this backend, {self._max_circuits})" ) for circ in circuits: - if isinstance(circ, QuantumCircuit): - if circ.num_qubits > self._configuration.num_qubits: - raise IBMBackendValueError( - f"Circuit contains {circ.num_qubits} qubits, " - f"but backend has only {self.num_qubits}." - ) - self.check_faulty(circ) + if circ.num_qubits > self._configuration.num_qubits: + raise IBMBackendValueError( + f"Circuit contains {circ.num_qubits} qubits, " + f"but backend has only {self.num_qubits}." + ) + self.check_faulty(circ) def check_faulty(self, circuit: QuantumCircuit) -> None: """Check if the input circuit uses faulty qubits or edges. From ebf564719e98bf812ee4545bd9be955ce28eed91 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 29 Oct 2023 13:36:31 +0000 Subject: [PATCH 36/47] Removed exception parameter from validate_job_tags. 
Use 'import_job_tags' from runtime instead of from provider --- qiskit_ibm_runtime/ibm_backend.py | 10 ++++------ qiskit_ibm_runtime/qiskit_runtime_service.py | 5 ++--- qiskit_ibm_runtime/runtime_job.py | 7 +++---- qiskit_ibm_runtime/runtime_options.py | 2 +- qiskit_ibm_runtime/utils/utils.py | 9 ++++----- 5 files changed, 14 insertions(+), 19 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 4edf6b1b6..301706b60 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -45,8 +45,7 @@ defaults_from_server_data, properties_from_server_data, ) -from qiskit_ibm_provider.utils import local_to_utc -from qiskit_ibm_provider.utils import validate_job_tags, are_circuits_dynamic +from qiskit_ibm_provider.utils import local_to_utc, are_circuits_dynamic from qiskit_ibm_provider.utils.options import QASM2Options, QASM3Options from qiskit_ibm_provider.exceptions import IBMBackendValueError, IBMBackendApiError from qiskit_ibm_provider.api.exceptions import RequestsApiError @@ -56,9 +55,8 @@ Session as ProviderSession, ) # temporary until we unite the 2 Session classes -from qiskit_ibm_runtime import ( # pylint: disable=unused-import,cyclic-import - qiskit_runtime_service, -) +from .utils.utils import validate_job_tags +from . import qiskit_runtime_service # pylint: disable=unused-import,cyclic-import from .runtime_job import RuntimeJob from .api.clients import RuntimeClient @@ -669,7 +667,7 @@ def run( - If ESP readout is used and the backend does not support this. 
""" # pylint: disable=arguments-differ - validate_job_tags(job_tags, IBMBackendValueError) + validate_job_tags(job_tags) if not isinstance(circuits, List): circuits = [circuits] self._check_circuits_attributes(circuits) diff --git a/qiskit_ibm_runtime/qiskit_runtime_service.py b/qiskit_ibm_runtime/qiskit_runtime_service.py index 4818b6ba4..b7a48c98b 100644 --- a/qiskit_ibm_runtime/qiskit_runtime_service.py +++ b/qiskit_ibm_runtime/qiskit_runtime_service.py @@ -30,12 +30,11 @@ ) from qiskit_ibm_provider.proxies import ProxyConfiguration -from qiskit_ibm_provider.utils import validate_job_tags from qiskit_ibm_provider.utils.hgp import to_instance_format, from_instance_format from qiskit_ibm_provider.utils.backend_decoder import configuration_from_server_data -from qiskit_ibm_provider.exceptions import IBMBackendValueError from qiskit_ibm_runtime import ibm_backend +from .utils.utils import validate_job_tags from .accounts import AccountManager, Account, ChannelType from .api.clients import AuthClient, VersionClient from .api.clients.runtime import RuntimeClient @@ -1347,7 +1346,7 @@ def jobs( ) hub, group, project = from_instance_format(instance) if job_tags: - validate_job_tags(job_tags, IBMBackendValueError) + validate_job_tags(job_tags) job_responses = [] # type: List[Dict[str, Any]] current_page_limit = limit or 20 diff --git a/qiskit_ibm_runtime/runtime_job.py b/qiskit_ibm_runtime/runtime_job.py index aa2496583..9e9a80356 100644 --- a/qiskit_ibm_runtime/runtime_job.py +++ b/qiskit_ibm_runtime/runtime_job.py @@ -27,9 +27,10 @@ from qiskit.providers.job import JobV1 as Job # pylint: disable=unused-import,cyclic-import -from qiskit_ibm_provider.utils import validate_job_tags, utc_to_local +from qiskit_ibm_provider.utils import utc_to_local from qiskit_ibm_runtime import qiskit_runtime_service +from .utils.utils import validate_job_tags from .constants import API_TO_JOB_ERROR_MESSAGE, API_TO_JOB_STATUS, DEFAULT_DECODERS from .exceptions import ( IBMApiError, @@ 
-40,12 +41,10 @@ RuntimeJobMaxTimeoutError, ) from .program.result_decoder import ResultDecoder -from .utils import RuntimeDecoder from .api.clients import RuntimeClient, RuntimeWebsocketClient, WebsocketClientCloseCode from .exceptions import IBMError from .api.exceptions import RequestsApiError from .api.client_parameters import ClientParameters -from .utils.utils import CallableStr logger = logging.getLogger(__name__) @@ -419,7 +418,7 @@ def update_tags(self, new_tags: List[str]) -> List[str]: with the server or updating the job tags. """ tags_to_update = set(new_tags) - validate_job_tags(new_tags, RuntimeInvalidStateError) + validate_job_tags(new_tags) response = self._api_client.update_tags(job_id=self.job_id(), tags=list(tags_to_update)) diff --git a/qiskit_ibm_runtime/runtime_options.py b/qiskit_ibm_runtime/runtime_options.py index eea530628..354ae1a28 100644 --- a/qiskit_ibm_runtime/runtime_options.py +++ b/qiskit_ibm_runtime/runtime_options.py @@ -103,4 +103,4 @@ def validate(self, channel: str) -> None: ) if self.job_tags: - validate_job_tags(self.job_tags, IBMInputValueError) + validate_job_tags(self.job_tags) diff --git a/qiskit_ibm_runtime/utils/utils.py b/qiskit_ibm_runtime/utils/utils.py index 271ee515b..83f2aeb42 100644 --- a/qiskit_ibm_runtime/utils/utils.py +++ b/qiskit_ibm_runtime/utils/utils.py @@ -19,7 +19,7 @@ import hashlib from queue import Queue from threading import Condition -from typing import List, Optional, Any, Dict, Union, Tuple, Type +from typing import List, Optional, Any, Dict, Union, Tuple from urllib.parse import urlparse import requests @@ -29,20 +29,19 @@ from ibm_platform_services import ResourceControllerV2 # pylint: disable=import-error -def validate_job_tags(job_tags: Optional[List[str]], exception: Type[Exception]) -> None: +def validate_job_tags(job_tags: Optional[List[str]]) -> None: """Validates input job tags. Args: job_tags: Job tags to be validated. - exception: Exception to raise if the tags are invalid. 
Raises: - Exception: If the job tags are invalid. + ValueError: If the job tags are invalid. """ if job_tags and ( not isinstance(job_tags, list) or not all(isinstance(tag, str) for tag in job_tags) ): - raise exception("job_tags needs to be a list of strings.") + raise ValueError("job_tags needs to be a list of strings.") def get_iam_api_url(cloud_url: str) -> str: From 349e12d5c4489a34f483c4b5cd82b549426b6dd5 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Sun, 29 Oct 2023 14:01:21 +0000 Subject: [PATCH 37/47] Put back the check if circuit is indeed of type 'QuantumCircuit'. Updated the hint accordingly --- qiskit_ibm_runtime/ibm_backend.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 301706b60..1df89dbb4 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -507,7 +507,7 @@ def __call__(self) -> "IBMBackend": # For backward compatibility only, can be removed later. return self - def _check_circuits_attributes(self, circuits: List[QuantumCircuit]) -> None: + def _check_circuits_attributes(self, circuits: Union[List[QuantumCircuit], str]) -> None: """Check that circuits can be executed on backend. Raises: IBMBackendValueError: @@ -519,12 +519,13 @@ def _check_circuits_attributes(self, circuits: List[QuantumCircuit]) -> None: f"maximum for this backend, {self._max_circuits})" ) for circ in circuits: - if circ.num_qubits > self._configuration.num_qubits: - raise IBMBackendValueError( - f"Circuit contains {circ.num_qubits} qubits, " - f"but backend has only {self.num_qubits}." - ) - self.check_faulty(circ) + if isinstance(circ, QuantumCircuit): + if circ.num_qubits > self._configuration.num_qubits: + raise IBMBackendValueError( + f"Circuit contains {circ.num_qubits} qubits, " + f"but backend has only {self.num_qubits}." 
+ ) + self.check_faulty(circ) def check_faulty(self, circuit: QuantumCircuit) -> None: """Check if the input circuit uses faulty qubits or edges. From db9afb4763726c52919d92dc611a6dfa6c2d34ae Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 31 Oct 2023 10:55:33 +0200 Subject: [PATCH 38/47] Update qiskit_ibm_runtime/ibm_backend.py Co-authored-by: Jessie Yu --- qiskit_ibm_runtime/ibm_backend.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 1df89dbb4..4e850f988 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -507,7 +507,7 @@ def __call__(self) -> "IBMBackend": # For backward compatibility only, can be removed later. return self - def _check_circuits_attributes(self, circuits: Union[List[QuantumCircuit], str]) -> None: + def _check_circuits_attributes(self, circuits: List[Union[QuantumCircuit, str]]) -> None: """Check that circuits can be executed on backend. 
Raises: IBMBackendValueError: From 7bb34478d107483ad8befd9d230c8c1351847d90 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 31 Oct 2023 09:09:04 +0000 Subject: [PATCH 39/47] Cleaned up code involving session setup --- qiskit_ibm_runtime/ibm_backend.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 4e850f988..78de36585 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -755,17 +755,10 @@ def _runtime_run( hgp_name = self._instance or self._service._get_hgp().name session = self._session - - if session: - if not session.active: - raise RuntimeError(f"The session {session.session_id} is closed.") - session_id = session.session_id or None - max_execution_time = session._max_time - start_session = session_id is None - else: - session_id = None - max_execution_time = None - start_session = False + if session and not session.active: + raise RuntimeError(f"The session {session.session_id} is closed.") + session_id = session.session_id if session else None + start_session = session_id is None log_level = getattr(self.options, "log_level", None) # temporary try: @@ -778,7 +771,6 @@ def _runtime_run( job_tags=job_tags, session_id=session_id, start_session=start_session, - max_execution_time=max_execution_time, image=image, ) except RequestsApiError as ex: From bdbcdaee426895f40dd7300f6d3434ce4b583c8f Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 31 Oct 2023 10:57:51 +0000 Subject: [PATCH 40/47] Removed setting of 'skip_transpilation' because set by default by Qasm3 --- qiskit_ibm_runtime/ibm_backend.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 78de36585..ae37b53dc 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -729,9 +729,6 @@ def run( ) run_config_dict["circuits"] = circuits - if 
not program_id.startswith(QASM3RUNNERPROGRAMID): - # Transpiling in circuit-runner is deprecated. - run_config_dict["skip_transpilation"] = True return self._runtime_run( program_id=program_id, From c13d8794d4f7d0da1387ccf9d000397d326bfd43 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 31 Oct 2023 11:00:08 +0000 Subject: [PATCH 41/47] Replaced in path 'qiskit-ibm-provider' with 'qiskit-ibm-runtime'. --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 60f7240bf..84b49d071 100644 --- a/setup.py +++ b/setup.py @@ -79,8 +79,8 @@ }, entry_points={ "qiskit.transpiler.translation": [ - "ibm_backend = qiskit_ibm_provider.transpiler.plugin:IBMTranslationPlugin", - "ibm_dynamic_circuits = qiskit_ibm_provider.transpiler.plugin:IBMDynamicTranslationPlugin", + "ibm_backend = qiskit_ibm_runtime.transpiler.plugin:IBMTranslationPlugin", + "ibm_dynamic_circuits = qiskit_ibm_runtime.transpiler.plugin:IBMDynamicTranslationPlugin", ] }, ) From 9f9c09936622813bdbb43f22482feade185b4c0e Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 31 Oct 2023 11:18:21 +0000 Subject: [PATCH 42/47] Added None to get() statement --- qiskit_ibm_runtime/ibm_backend.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index ae37b53dc..d41af3564 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -772,7 +772,7 @@ def _runtime_run( ) except RequestsApiError as ex: raise IBMBackendApiError("Error submitting job: {}".format(str(ex))) from ex - session_id = response.get("session_id") + session_id = response.get("session_id", None) if self._session: self._session._session_id = session_id try: From b78acfc2638ff650438e402e9e69c32496c7deef Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 6 Nov 2023 08:37:24 +0000 Subject: [PATCH 43/47] Changed warning to error when init_circuit is boolean --- 
qiskit_ibm_runtime/ibm_backend.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index d41af3564..c880d4254 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -703,7 +703,7 @@ def run( image = str(image) if isinstance(init_circuit, bool): - warnings.warn( + raise IBMBackendApiError( "init_circuit does not accept boolean values. " "A quantum circuit should be passed in instead." ) From c004bbb3c64d6e9835bdd93b21de22bb7b08a959 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 6 Nov 2023 10:00:42 +0000 Subject: [PATCH 44/47] Fixed setting of start_session --- qiskit_ibm_runtime/ibm_backend.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index c880d4254..50202bae8 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -755,7 +755,7 @@ def _runtime_run( if session and not session.active: raise RuntimeError(f"The session {session.session_id} is closed.") session_id = session.session_id if session else None - start_session = session_id is None + start_session = session is not None and session_id is None log_level = getattr(self.options, "log_level", None) # temporary try: From e6022c2f90c8bbab845a9f89a632cdbff1952dc6 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 6 Nov 2023 11:27:20 +0000 Subject: [PATCH 45/47] Removed max_time parameter, because wasn't reaching the server. 
--- qiskit_ibm_runtime/ibm_backend.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 50202bae8..972515739 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -817,9 +817,9 @@ def _get_run_config(self, program_id: str, **kwargs: Any) -> Dict: run_config_dict[key] = backend_options[key] return run_config_dict - def open_session(self, max_time: Optional[Union[int, str]] = None) -> ProviderSession: + def open_session(self) -> ProviderSession: """Open session""" - self._session = ProviderSession(max_time) + self._session = ProviderSession() return self._session @property From 7dbe1d47c481ec471575a91b896fc4bd516c6039 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Tue, 7 Nov 2023 11:19:49 +0000 Subject: [PATCH 46/47] Release note --- releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml diff --git a/releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml b/releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml new file mode 100644 index 000000000..5322a9b7e --- /dev/null +++ b/releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml @@ -0,0 +1,6 @@ +--- + +features: + - | + Added support for ``backend.run()``. The functionality is similar to that in ``qiskit-ibm-provider``. 
+ From 12fa82f33a565ac06bc41921e90ea00d21964dd7 Mon Sep 17 00:00:00 2001 From: kevin-tian Date: Tue, 7 Nov 2023 16:59:22 -0500 Subject: [PATCH 47/47] address comment --- qiskit_ibm_runtime/ibm_backend.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 972515739..d70c81683 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -751,11 +751,14 @@ def _runtime_run( if self._service._channel == "ibm_quantum": hgp_name = self._instance or self._service._get_hgp().name - session = self._session - if session and not session.active: - raise RuntimeError(f"The session {session.session_id} is closed.") - session_id = session.session_id if session else None - start_session = session is not None and session_id is None + if self._session: + if not self._session.active: + raise RuntimeError(f"The session {self._session.session_id} is closed.") + session_id = self._session.session_id + start_session = session_id is None + else: + session_id = None + start_session = False log_level = getattr(self.options, "log_level", None) # temporary try: