diff --git a/.github/workflows/check-format.yml b/.github/workflows/check-format.yml index a56c0273f..ed12dd335 100644 --- a/.github/workflows/check-format.yml +++ b/.github/workflows/check-format.yml @@ -16,7 +16,7 @@ jobs: check-code-format: runs-on: ubuntu-latest steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Set up Python uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: diff --git a/.github/workflows/dependent-tests.yml b/.github/workflows/dependent-tests.yml index c4e4a0052..732600ba2 100644 --- a/.github/workflows/dependent-tests.yml +++ b/.github/workflows/dependent-tests.yml @@ -21,7 +21,7 @@ jobs: - amazon-braket-pennylane-plugin-python steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 0fd1b03a2..a26168c11 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -12,7 +12,7 @@ jobs: name: Build and publish distribution to PyPi runs-on: ubuntu-latest steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Set up Python uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 8f148cdfd..63f84bd27 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -24,7 +24,7 @@ jobs: python-version: ["3.8", "3.9", "3.10", "3.11"] steps: - - uses: 
actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: diff --git a/.github/workflows/twine-check.yml b/.github/workflows/twine-check.yml index 46be37a24..5a5966763 100644 --- a/.github/workflows/twine-check.yml +++ b/.github/workflows/twine-check.yml @@ -14,7 +14,7 @@ jobs: name: Check long description runs-on: ubuntu-latest steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Set up Python uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b08e4120..e83803bab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## v1.55.0 (2023-09-09) + +### Features + + * add Aria2 enum + +## v1.54.3.post0 (2023-09-04) + +### Documentation Changes + + * standardize task and job naming to quantum task and hybrid job + +## v1.54.3 (2023-08-30) + +### Bug Fixes and Other Changes + + * Move inline `_flatten` to top of `qubit_set.py` + * build(deps): bump actions/setup-python from 4.6.1 to 4.7.0 + ## v1.54.2 (2023-08-28) ### Bug Fixes and Other Changes diff --git a/README.md b/README.md index eea4e659b..1f7fcb460 100644 --- a/README.md +++ b/README.md @@ -84,16 +84,16 @@ task = device.run(bell, shots=100) print(task.result().measurement_counts) ``` -The code sample imports the Amazon Braket framework, then defines the device to use (the SV1 AWS simulator). It then creates a Bell Pair circuit, executes the circuit on the simulator and prints the results of the job. This example can be found in `../examples/bell.py`. +The code sample imports the Amazon Braket framework, then defines the device to use (the SV1 AWS simulator). 
It then creates a Bell Pair circuit, executes the circuit on the simulator and prints the results of the hybrid job. This example can be found in `../examples/bell.py`. -### Running multiple tasks at once +### Running multiple quantum tasks at once -Many quantum algorithms need to run multiple independent circuits, and submitting the circuits in parallel can be faster than submitting them one at a time. In particular, parallel task processing provides a significant speed up when using simulator devices. The following example shows how to run a batch of tasks on SV1: +Many quantum algorithms need to run multiple independent circuits, and submitting the circuits in parallel can be faster than submitting them one at a time. In particular, parallel quantum task processing provides a significant speed up when using simulator devices. The following example shows how to run a batch of quantum tasks on SV1: ```python circuits = [bell for _ in range(5)] batch = device.run_batch(circuits, shots=100) -print(batch.results()[0].measurement_counts) # The result of the first task in the batch +print(batch.results()[0].measurement_counts) # The result of the first quantum task in the batch ``` ### Running a hybrid job @@ -112,19 +112,19 @@ print(job.result()) where `run_job` is a function in the file `job.py`. -The code sample imports the Amazon Braket framework, then creates a hybrid job with the entry point being the `run_job` function. The hybrid job creates quantum tasks against the SV1 AWS Simulator. The job runs synchronously, and prints logs until it completes. The complete example can be found in `../examples/job.py`. +The code sample imports the Amazon Braket framework, then creates a hybrid job with the entry point being the `run_job` function. The hybrid job creates quantum tasks against the SV1 AWS Simulator. The hybrid job runs synchronously, and prints logs until it completes. The complete example can be found in `../examples/job.py`. 
### Available Simulators Amazon Braket provides access to two types of simulators: fully managed simulators, available through the Amazon Braket service, and the local simulators that are part of the Amazon Braket SDK. - Fully managed simulators offer high-performance circuit simulations. These simulators can handle circuits larger than circuits that run on quantum hardware. For example, the SV1 state vector simulator shown in the previous examples requires approximately 1 or 2 hours to complete a 34-qubit, dense, and square circuit (circuit depth = 34), depending on the type of gates used and other factors. -- The Amazon Braket Python SDK includes an implementation of quantum simulators that can run circuits on your local, classic hardware. For example the braket_sv local simulator is well suited for rapid prototyping on small circuits up to 25 qubits, depending on the hardware specifications of your Braket notebook instance or your local environment. An example of how to execute the task locally is included in the repository `../examples/local_bell.py`. +- The Amazon Braket Python SDK includes an implementation of quantum simulators that can run circuits on your local, classic hardware. For example the braket_sv local simulator is well suited for rapid prototyping on small circuits up to 25 qubits, depending on the hardware specifications of your Braket notebook instance or your local environment. An example of how to execute the quantum task locally is included in the repository `../examples/local_bell.py`. For a list of available simulators and their features, consult the [Amazon Braket Developer Guide](https://docs.aws.amazon.com/braket/latest/developerguide/braket-devices.html). ### Debugging logs -Tasks sent to QPUs don't always run right away. To view task status, you can enable debugging logs. An example of how to enable these logs is included in repo: `../examples/debug_bell.py`. 
This example enables task logging so that status updates are continuously printed to the terminal after a quantum task is executed. The logs can also be configured to save to a file or output to another stream. You can use the debugging example to get information on the tasks you submit, such as the current status, so that you know when your task completes. +Quantum tasks sent to QPUs don't always run right away. To view quantum task status, you can enable debugging logs. An example of how to enable these logs is included in repo: `../examples/debug_bell.py`. This example enables quantum task logging so that status updates are continuously printed to the terminal after a quantum task is executed. The logs can also be configured to save to a file or output to another stream. You can use the debugging example to get information on the quantum tasks you submit, such as the current status, so that you know when your quantum task completes. ### Running a Quantum Algorithm on a Quantum Computer With Amazon Braket, you can run your quantum circuit on a physical quantum computer. @@ -152,7 +152,7 @@ print(task.result().measurement_counts) To select a quantum hardware device, specify its ARN as the value of the `device_arn` argument. A list of available quantum devices and their features can be found in the [Amazon Braket Developer Guide](https://docs.aws.amazon.com/braket/latest/developerguide/braket-devices.html). -**Important** Tasks may not run immediately on the QPU. The QPUs only execute tasks during execution windows. To find their execution windows, please refer to the [AWS console](https://console.aws.amazon.com/braket/home) in the "Devices" tab. +**Important** Quantum tasks may not run immediately on the QPU. The QPUs only execute quantum tasks during execution windows. To find their execution windows, please refer to the [AWS console](https://console.aws.amazon.com/braket/home) in the "Devices" tab. 
## Sample Notebooks Sample Jupyter notebooks can be found in the [amazon-braket-examples](https://github.com/aws/amazon-braket-examples/) repo. @@ -214,7 +214,7 @@ After you create a profile, use the following command to set the `AWS_PROFILE` s ```bash export AWS_PROFILE=YOUR_PROFILE_NAME ``` -To run the integration tests for local jobs, you need to have Docker installed and running. To install Docker follow these instructions: [Install Docker](https://docs.docker.com/get-docker/) +To run the integration tests for local hybrid jobs, you need to have Docker installed and running. To install Docker follow these instructions: [Install Docker](https://docs.docker.com/get-docker/) Run the tests: diff --git a/doc/examples-braket-features.rst b/doc/examples-braket-features.rst index cbb81fb60..1bfc9c0d7 100644 --- a/doc/examples-braket-features.rst +++ b/doc/examples-braket-features.rst @@ -8,12 +8,12 @@ Learn more about the indivudal features of Amazon Braket. :maxdepth: 2 ******************************************************************************************************************************************************************************************************************************* -`Getting notifications when a task completes `_ +`Getting notifications when a quantum task completes `_ ******************************************************************************************************************************************************************************************************************************* This tutorial illustrates how Amazon Braket integrates with Amazon EventBridge for event-based processing. In the tutorial, you will learn how to configure Amazon Braket -and Amazon Eventbridge to receive text notification about task completions on your phone. +and Amazon Eventbridge to receive text notification about quantum task completions on your phone. 
************************************************************************************************************************************************************* `Allocating Qubits on QPU Devices `_ diff --git a/doc/examples-hybrid-jobs.rst b/doc/examples-hybrid-jobs.rst index 88d4e8f6a..76b2026eb 100644 --- a/doc/examples-hybrid-jobs.rst +++ b/doc/examples-hybrid-jobs.rst @@ -20,7 +20,7 @@ This tutorial shows how to run your first Amazon Braket Hybrid Job. This notebook demonstrates a typical quantum machine learning workflow, including uploading data, monitoring training, and tuning hyperparameters. ******************************************************************************************************************************************************************************************** -`Using Pennylane with Braket Jobs `_ +`Using Pennylane with Braket Hybrid Jobs `_ ******************************************************************************************************************************************************************************************** In this tutorial, we use PennyLane within Amazon Braket Hybrid Jobs to run the Quantum Approximate Optimization Algorithm (QAOA) on a Max-Cut problem. 
diff --git a/examples/autoqasm/ionq_gates.py b/examples/autoqasm/ionq_gates.py index 8a2f12ef1..a8e69c6b1 100644 --- a/examples/autoqasm/ionq_gates.py +++ b/examples/autoqasm/ionq_gates.py @@ -1,4 +1,5 @@ import numpy as np + import braket.experimental.autoqasm as aq from braket.experimental.autoqasm.instructions import gpi, gpi2, ms diff --git a/src/braket/_sdk/_version.py b/src/braket/_sdk/_version.py index b9c61a5e4..46367d959 100644 --- a/src/braket/_sdk/_version.py +++ b/src/braket/_sdk/_version.py @@ -15,4 +15,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "1.54.3.dev0" +__version__ = "1.55.1.dev0" diff --git a/src/braket/aws/aws_device.py b/src/braket/aws/aws_device.py index a34facaca..fbd493071 100644 --- a/src/braket/aws/aws_device.py +++ b/src/braket/aws/aws_device.py @@ -123,15 +123,16 @@ def run( **aws_quantum_task_kwargs, ) -> AwsQuantumTask: """ - Run a quantum task specification on this device. A task can be a circuit or an + Run a quantum task specification on this device. A quantum task can be a circuit or an annealing problem. Args: task_specification (Union[Circuit, Problem, OpenQasmProgram, BlackbirdProgram, PulseSequence, AnalogHamiltonianSimulation]): # noqa - Specification of task (circuit or annealing problem or program) to run on device. + Specification of quantum task (circuit, OpenQASM program or AHS program) + to run on device. s3_destination_folder (Optional[S3DestinationFolder]): The S3 location to - save the task's results to. Default is `/tasks` if evoked outside a - Braket Job, `/jobs//tasks` if evoked inside a Braket Job. + save the quantum task's results to. Default is `/tasks` if evoked outside a + Braket Hybrid Job, `/jobs//tasks` if evoked inside a Braket Hybrid Job. shots (Optional[int]): The number of times to run the circuit or annealing problem. Default is 1000 for QPUs and 0 for simulators. 
poll_timeout_seconds (float): The polling timeout for `AwsQuantumTask.result()`, @@ -233,20 +234,20 @@ def run_batch( *aws_quantum_task_args, **aws_quantum_task_kwargs, ) -> AwsQuantumTaskBatch: - """Executes a batch of tasks in parallel + """Executes a batch of quantum tasks in parallel Args: task_specifications (Union[Union[Circuit, Problem, OpenQasmProgram, BlackbirdProgram, PulseSequence, AnalogHamiltonianSimulation], List[Union[ Circuit, Problem, OpenQasmProgram, BlackbirdProgram, PulseSequence, AnalogHamiltonianSimulation]]]): # noqa Single instance or list of circuits, annealing problems, pulse sequences, or photonics program to run on device. s3_destination_folder (Optional[S3DestinationFolder]): The S3 location to - save the tasks' results to. Default is `/tasks` if evoked outside a + save the quantum tasks' results to. Default is `/tasks` if evoked outside a Braket Job, `/jobs//tasks` if evoked inside a Braket Job. shots (Optional[int]): The number of times to run the circuit or annealing problem. Default is 1000 for QPUs and 0 for simulators. - max_parallel (Optional[int]): The maximum number of tasks to run on AWS in parallel. + max_parallel (Optional[int]): The maximum number of quantum tasks to run on AWS in parallel. Batch creation will fail if this value is greater than the maximum allowed - concurrent tasks on the device. Default: 10 + concurrent quantum tasks on the device. Default: 10 max_connections (int): The maximum number of connections in the Boto3 connection pool. Also the maximum number of thread pool workers for the batch. Default: 100 poll_timeout_seconds (float): The polling timeout for `AwsQuantumTask.result()`, @@ -264,7 +265,7 @@ def run_batch( Default: None. 
Returns: - AwsQuantumTaskBatch: A batch containing all of the tasks run + AwsQuantumTaskBatch: A batch containing all of the quantum tasks run See Also: `braket.aws.aws_quantum_task_batch.AwsQuantumTaskBatch` diff --git a/src/braket/aws/aws_quantum_job.py b/src/braket/aws/aws_quantum_job.py index a74307e6d..288f17257 100644 --- a/src/braket/aws/aws_quantum_job.py +++ b/src/braket/aws/aws_quantum_job.py @@ -82,32 +82,32 @@ def create( tags: Dict[str, str] = None, logger: Logger = getLogger(__name__), ) -> AwsQuantumJob: - """Creates a job by invoking the Braket CreateJob API. + """Creates a hybrid job by invoking the Braket CreateJob API. Args: device (str): ARN for the AWS device which is primarily accessed for the execution - of this job. Alternatively, a string of the format "local:/" - for using a local simulator for the job. This string will be available as the - environment variable `AMZN_BRAKET_DEVICE_ARN` inside the job container when - using a Braket container. + of this hybrid job. Alternatively, a string of the format + "local:/" for using a local simulator for the hybrid job. + This string will be available as the environment variable `AMZN_BRAKET_DEVICE_ARN` + inside the hybrid job container when using a Braket container. source_module (str): Path (absolute, relative or an S3 URI) to a python module to be tarred and uploaded. If `source_module` is an S3 URI, it must point to a tar.gz file. Otherwise, source_module may be a file or directory. - entry_point (str): A str that specifies the entry point of the job, relative to + entry_point (str): A str that specifies the entry point of the hybrid job, relative to the source module. The entry point must be in the format `importable.module` or `importable.module:callable`. For example, `source_module.submodule:start_here` indicates the `start_here` function contained in `source_module.submodule`. If source_module is an S3 URI, entry point must be given. 
Default: source_module's name - image_uri (str): A str that specifies the ECR image to use for executing the job. + image_uri (str): A str that specifies the ECR image to use for executing the hybrid job. `image_uris.retrieve_image()` function may be used for retrieving the ECR image URIs for the containers supported by Braket. Default = ``. - job_name (str): A str that specifies the name with which the job is created. - Allowed pattern for job name: `^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,50}$` + job_name (str): A str that specifies the name with which the hybrid job is created. + Allowed pattern for hybrid job name: `^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,50}$` Default: f'{image_uri_type}-{timestamp}'. code_location (str): The S3 prefix URI where custom code will be uploaded. @@ -116,11 +116,12 @@ def create( role_arn (str): A str providing the IAM role ARN used to execute the script. Default: IAM role returned by AwsSession's `get_default_jobs_role()`. - wait_until_complete (bool): `True` if we should wait until the job completes. - This would tail the job logs as it waits. Otherwise `False`. Default: `False`. + wait_until_complete (bool): `True` if we should wait until the hybrid job completes. + This would tail the hybrid job logs as it waits. Otherwise `False`. + Default: `False`. - hyperparameters (Dict[str, Any]): Hyperparameters accessible to the job. - The hyperparameters are made accessible as a Dict[str, str] to the job. + hyperparameters (Dict[str, Any]): Hyperparameters accessible to the hybrid job. + The hyperparameters are made accessible as a Dict[str, str] to the hybrid job. For convenience, this accepts other types for keys and values, but `str()` is called to convert them before being passed on. Default: None. @@ -133,26 +134,28 @@ def create( Default: {}. instance_config (InstanceConfig): Configuration of the instances to be used - to execute the job. Default: InstanceConfig(instanceType='ml.m5.large', + to execute the hybrid job. 
Default: InstanceConfig(instanceType='ml.m5.large', instanceCount=1, volumeSizeInGB=30). - distribution (str): A str that specifies how the job should be distributed. If set to - "data_parallel", the hyperparameters for the job will be set to use data parallelism - features for PyTorch or TensorFlow. Default: None. + distribution (str): A str that specifies how the hybrid job should be distributed. + If set to "data_parallel", the hyperparameters for the hybrid job will be set + to use data parallelism features for PyTorch or TensorFlow. Default: None. stopping_condition (StoppingCondition): The maximum length of time, in seconds, - and the maximum number of tasks that a job can run before being forcefully stopped. + and the maximum number of quantum tasks that a hybrid job can run before being + forcefully stopped. Default: StoppingCondition(maxRuntimeInSeconds=5 * 24 * 60 * 60). - output_data_config (OutputDataConfig): Specifies the location for the output of the job. + output_data_config (OutputDataConfig): Specifies the location for the output of the + hybrid job. Default: OutputDataConfig(s3Path=f's3://{default_bucket_name}/jobs/{job_name}/data', kmsKeyId=None). - copy_checkpoints_from_job (str): A str that specifies the job ARN whose checkpoint you - want to use in the current job. Specifying this value will copy over the checkpoint - data from `use_checkpoints_from_job`'s checkpoint_config s3Uri to the current job's - checkpoint_config s3Uri, making it available at checkpoint_config.localPath during - the job execution. Default: None + copy_checkpoints_from_job (str): A str that specifies the hybrid job ARN whose + checkpoint you want to use in the current hybrid job. Specifying this value will + copy over the checkpoint data from `use_checkpoints_from_job`'s checkpoint_config + s3Uri to the current hybrid job's checkpoint_config s3Uri, making it available at + checkpoint_config.localPath during the hybrid job execution. 
Default: None checkpoint_config (CheckpointConfig): Configuration that specifies the location where checkpoint data is stored. @@ -162,14 +165,15 @@ def create( aws_session (AwsSession): AwsSession for connecting to AWS Services. Default: AwsSession() - tags (Dict[str, str]): Dict specifying the key-value pairs for tagging this job. + tags (Dict[str, str]): Dict specifying the key-value pairs for tagging this hybrid job. Default: {}. - logger (Logger): Logger object with which to write logs, such as task statuses - while waiting for task to be in a terminal state. Default is `getLogger(__name__)` + logger (Logger): Logger object with which to write logs, such as quantum task statuses + while waiting for quantum task to be in a terminal state. Default is + `getLogger(__name__)` Returns: - AwsQuantumJob: Job tracking the execution on Amazon Braket. + AwsQuantumJob: Hybrid job tracking the execution on Amazon Braket. Raises: ValueError: Raises ValueError if the parameters are not valid. @@ -208,10 +212,10 @@ def create( def __init__(self, arn: str, aws_session: AwsSession = None): """ Args: - arn (str): The ARN of the job. + arn (str): The ARN of the hybrid job. aws_session (AwsSession): The `AwsSession` for connecting to AWS services. Default is `None`, in which case an `AwsSession` object will be created with the - region of the job. + region of the hybrid job. """ self._arn: str = arn if aws_session: @@ -234,13 +238,14 @@ def _is_valid_aws_session_region_for_job_arn(aws_session: AwsSession, job_arn: s @staticmethod def _default_session_for_job_arn(job_arn: str) -> AwsSession: - """Get an AwsSession for the Job ARN. The AWS session should be in the region of the job. + """Get an AwsSession for the Hybrid Job ARN. The AWS session should be in the region of the + hybrid job. Args: - job_arn (str): The ARN for the quantum job. + job_arn (str): The ARN for the quantum hybrid job. Returns: - AwsSession: `AwsSession` object with default `boto_session` in job's region. 
+ AwsSession: `AwsSession` object with default `boto_session` in hybrid job's region. """ job_region = job_arn.split(":")[3] boto_session = boto3.Session(region_name=job_region) @@ -248,7 +253,7 @@ def _default_session_for_job_arn(job_arn: str) -> AwsSession: @property def arn(self) -> str: - """str: The ARN (Amazon Resource Name) of the quantum job.""" + """str: The ARN (Amazon Resource Name) of the quantum hybrid job.""" return self._arn @property @@ -257,7 +262,7 @@ def name(self) -> str: return self._arn.partition("job/")[-1] def state(self, use_cached_value: bool = False) -> str: - """The state of the quantum job. + """The state of the quantum hybrid job. Args: use_cached_value (bool): If `True`, uses the value most recently retrieved @@ -274,28 +279,29 @@ def state(self, use_cached_value: bool = False) -> str: return self.metadata(use_cached_value).get("status") def logs(self, wait: bool = False, poll_interval_seconds: int = 5) -> None: - """Display logs for a given job, optionally tailing them until job is complete. + """Display logs for a given hybrid job, optionally tailing them until hybrid job is + complete. If the output is a tty or a Jupyter cell, it will be color-coded based on which instance the log entry is from. Args: - wait (bool): `True` to keep looking for new log entries until the job completes; + wait (bool): `True` to keep looking for new log entries until the hybrid job completes; otherwise `False`. Default: `False`. poll_interval_seconds (int): The interval of time, in seconds, between polling for - new log entries and job completion (default: 5). + new log entries and hybrid job completion (default: 5). Raises: - exceptions.UnexpectedStatusException: If waiting and the training job fails. + exceptions.UnexpectedStatusException: If waiting and the training hybrid job fails. """ - # The loop below implements a state machine that alternates between checking the job status - # and reading whatever is available in the logs at this point. 
Note, that if we were - # called with wait == False, we never check the job status. + # The loop below implements a state machine that alternates between checking the hybrid job + # status and reading whatever is available in the logs at this point. Note, that if we were + # called with wait == False, we never check the hybrid job status. # - # If wait == TRUE and job is not completed, the initial state is TAILING - # If wait == FALSE, the initial state is COMPLETE (doesn't matter if the job really is - # complete). + # If wait == TRUE and hybrid job is not completed, the initial state is TAILING + # If wait == FALSE, the initial state is COMPLETE (doesn't matter if the hybrid job really + # is complete). # # The state table: # @@ -348,7 +354,7 @@ def logs(self, wait: bool = False, poll_interval_seconds: int = 5) -> None: log_state = AwsQuantumJob.LogState.JOB_COMPLETE def metadata(self, use_cached_value: bool = False) -> Dict[str, Any]: - """Gets the job metadata defined in Amazon Braket. + """Gets the hybrid job metadata defined in Amazon Braket. Args: use_cached_value (bool): If `True`, uses the value most recently retrieved @@ -356,7 +362,7 @@ def metadata(self, use_cached_value: bool = False) -> Dict[str, Any]: `GetJob` is called to retrieve the metadata. If `False`, always calls `GetJob`, which also updates the cached value. Default: `False`. Returns: - Dict[str, Any]: Dict that specifies the job metadata defined in Amazon Braket. + Dict[str, Any]: Dict that specifies the hybrid job metadata defined in Amazon Braket. """ if not use_cached_value or not self._metadata: self._metadata = self._aws_session.get_job(self._arn) @@ -413,7 +419,7 @@ def result( poll_timeout_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_TIMEOUT, poll_interval_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_INTERVAL, ) -> Dict[str, Any]: - """Retrieves the job result persisted using save_job_result() function. 
+ """Retrieves the hybrid job result persisted using save_job_result() function. Args: poll_timeout_seconds (float): The polling timeout, in seconds, for `result()`. @@ -425,8 +431,8 @@ def result( Dict[str, Any]: Dict specifying the job results. Raises: - RuntimeError: if job is in a FAILED or CANCELLED state. - TimeoutError: if job execution exceeds the polling timeout period. + RuntimeError: if hybrid job is in a FAILED or CANCELLED state. + TimeoutError: if hybrid job execution exceeds the polling timeout period. """ with tempfile.TemporaryDirectory() as temp_dir: @@ -459,13 +465,13 @@ def download_result( poll_timeout_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_TIMEOUT, poll_interval_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_INTERVAL, ) -> None: - """Downloads the results from the job output S3 bucket and extracts the tar.gz + """Downloads the results from the hybrid job output S3 bucket and extracts the tar.gz bundle to the location specified by `extract_to`. If no location is specified, the results are extracted to the current directory. Args: extract_to (str): The directory to which the results are extracted. The results - are extracted to a folder titled with the job name within this directory. + are extracted to a folder titled with the hybrid job name within this directory. Default= `Current working directory`. poll_timeout_seconds (float): The polling timeout, in seconds, for `download_result()`. Default: 10 days. @@ -473,8 +479,8 @@ def download_result( `download_result()`.Default: 5 seconds. Raises: - RuntimeError: if job is in a FAILED or CANCELLED state. - TimeoutError: if job execution exceeds the polling timeout period. + RuntimeError: if hybrid job is in a FAILED or CANCELLED state. + TimeoutError: if hybrid job execution exceeds the polling timeout period. 
""" extract_to = extract_to or Path.cwd() diff --git a/src/braket/aws/aws_quantum_task.py b/src/braket/aws/aws_quantum_task.py index 76fa3d565..a4538cdd9 100644 --- a/src/braket/aws/aws_quantum_task.py +++ b/src/braket/aws/aws_quantum_task.py @@ -73,8 +73,8 @@ class AwsQuantumTask(QuantumTask): - """Amazon Braket implementation of a quantum task. A task can be a circuit or an annealing - problem.""" + """Amazon Braket implementation of a quantum task. A quantum task can be a circuit, + an OpenQASM program or an AHS program.""" # TODO: Add API documentation that defines these states. Make it clear this is the contract. NO_RESULT_TERMINAL_STATES = {"FAILED", "CANCELLED"} @@ -116,17 +116,17 @@ def create( device_arn (str): The ARN of the quantum device. - task_specification (Union[Circuit, Problem, OpenQASMProgram, BlackbirdProgram,PulseSequence, AnalogHamiltonianSimulation]): # noqa - The specification of the task to run on device. + task_specification (Union[Circuit, Problem, OpenQASMProgram, BlackbirdProgram, PulseSequence, AnalogHamiltonianSimulation]): # noqa + The specification of the quantum task to run on device. s3_destination_folder (AwsSession.S3DestinationFolder): NamedTuple, with bucket for index 0 and key for index 1, that specifies the Amazon S3 bucket and folder - to store task results in. + to store quantum task results in. - shots (int): The number of times to run the task on the device. If the device is a - simulator, this implies the state is sampled N times, where N = `shots`. + shots (int): The number of times to run the quantum task on the device. If the device is + a simulator, this implies the state is sampled N times, where N = `shots`. `shots=0` is only available on simulators and means that the simulator - will compute the exact results based on the task specification. + will compute the exact results based on the quantum task specification. device_parameters (Dict[str, Any]): Additional parameters to send to the device. 
@@ -151,7 +151,7 @@ def create( Default: None. Returns: - AwsQuantumTask: AwsQuantumTask tracking the task execution on the device. + AwsQuantumTask: AwsQuantumTask tracking the quantum task execution on the device. Note: The following arguments are typically defined via clients of Device. @@ -209,14 +209,15 @@ def __init__( ): """ Args: - arn (str): The ARN of the task. + arn (str): The ARN of the quantum task. aws_session (AwsSession): The `AwsSession` for connecting to AWS services. Default is `None`, in which case an `AwsSession` object will be created with the - region of the task. + region of the quantum task. poll_timeout_seconds (float): The polling timeout for `result()`. Default: 5 days. poll_interval_seconds (float): The polling interval for `result()`. Default: 1 second. - logger (Logger): Logger object with which to write logs, such as task statuses - while waiting for task to be in a terminal state. Default is `getLogger(__name__)` + logger (Logger): Logger object with which to write logs, such as quantum task statuses + while waiting for quantum task to be in a terminal state. Default is + `getLogger(__name__)` Examples: >>> task = AwsQuantumTask(arn='task_arn') @@ -247,10 +248,11 @@ def __init__( @staticmethod def _aws_session_for_task_arn(task_arn: str) -> AwsSession: """ - Get an AwsSession for the Task ARN. The AWS session should be in the region of the task. + Get an AwsSession for the Quantum Task ARN. The AWS session should be in the region of the + quantum task. Returns: - AwsSession: `AwsSession` object with default `boto_session` in task's region. + AwsSession: `AwsSession` object with default `boto_session` in quantum task's region. """ task_region = task_arn.split(":")[3] boto_session = boto3.Session(region_name=task_region) @@ -270,13 +272,14 @@ def _cancel_future(self) -> None: self._future.cancel() def cancel(self) -> None: - """Cancel the quantum task. 
This cancels the future and the task in Amazon Braket.""" + """Cancel the quantum task. This cancels the future and the quantum task in Amazon + Braket.""" self._cancel_future() self._aws_session.cancel_quantum_task(self._arn) def metadata(self, use_cached_value: bool = False) -> Dict[str, Any]: """ - Get task metadata defined in Amazon Braket. + Get quantum task metadata defined in Amazon Braket. Args: use_cached_value (bool): If `True`, uses the value most recently retrieved @@ -335,7 +338,7 @@ def result( ]: """ Get the quantum task result by polling Amazon Braket to see if the task is completed. - Once the task is completed, the result is retrieved from S3 and returned as a + Once the quantum task is completed, the result is retrieved from S3 and returned as a `GateModelQuantumTaskResult` or `AnnealingQuantumTaskResult` This method is a blocking thread call and synchronously returns a result. @@ -344,8 +347,8 @@ def result( Returns: Union[GateModelQuantumTaskResult, AnnealingQuantumTaskResult, PhotonicModelQuantumTaskResult]: # noqa - The result of the task, if the task completed successfully; returns `None` if the task - did not complete successfully or the future timed out. + The result of the quantum task, if the quantum task completed successfully; returns + `None` if the quantum task did not complete successfully or the future timed out. """ if self._result or ( self._metadata and self._status(True) in self.NO_RESULT_TERMINAL_STATES diff --git a/src/braket/aws/aws_quantum_task_batch.py b/src/braket/aws/aws_quantum_task_batch.py index b0cd3a387..caa48b39c 100644 --- a/src/braket/aws/aws_quantum_task_batch.py +++ b/src/braket/aws/aws_quantum_task_batch.py @@ -31,7 +31,7 @@ class AwsQuantumTaskBatch(QuantumTaskBatch): """Executes a batch of quantum tasks in parallel. 
- Using this class can yield vast speedups over executing tasks sequentially, + Using this class can yield vast speedups over executing quantum tasks sequentially, and is particularly useful for computations that can be parallelized, such as calculating quantum gradients or statistics of terms in a Hamiltonian. @@ -71,18 +71,18 @@ def __init__( device_arn (str): The ARN of the quantum device. task_specifications (Union[Union[Circuit,Problem,OpenQasmProgram,BlackbirdProgram,AnalogHamiltonianSimulation],List[Union[Circuit,Problem,OpenQasmProgram,BlackbirdProgram,AnalogHamiltonianSimulation]]]): # noqa Single instance or list of circuits, annealing - problems, pulse sequences, or photonics program as specification of task + problems, pulse sequences, or photonics program as specification of quantum task to run on device. s3_destination_folder (AwsSession.S3DestinationFolder): NamedTuple, with bucket for index 0 and key for index 1, that specifies the Amazon S3 bucket and folder - to store task results in. - shots (int): The number of times to run the task on the device. If the device is a - simulator, this implies the state is sampled N times, where N = `shots`. + to store quantum task results in. + shots (int): The number of times to run the quantum task on the device. If the device is + a simulator, this implies the state is sampled N times, where N = `shots`. `shots=0` is only available on simulators and means that the simulator - will compute the exact results based on the task specification. - max_parallel (int): The maximum number of tasks to run on AWS in parallel. + will compute the exact results based on the quantum task specification. + max_parallel (int): The maximum number of quantum tasks to run on AWS in parallel. Batch creation will fail if this value is greater than the maximum allowed - concurrent tasks on the device. + concurrent quantum tasks on the device. max_workers (int): The maximum number of thread pool workers. 
Default: 100 poll_timeout_seconds (float): The polling timeout for `AwsQuantumTask.result()`, in seconds. Default: 5 days. @@ -224,7 +224,7 @@ def _execute( ] except KeyboardInterrupt: # If an exception is thrown before the thread pool has finished, - # clean up the tasks which have not yet been created before reraising it. + # clean up the quantum tasks which have not yet been created before reraising it. if "task_futures" in locals(): for future in task_futures: future.cancel() @@ -265,7 +265,7 @@ def _create_task( remaining.pop() - # If the task hits a terminal state before all tasks have been created, + # If the quantum task hits a terminal state before all quantum tasks have been created, # it can be returned immediately while remaining: if task.state() in AwsQuantumTask.TERMINAL_STATES: @@ -279,24 +279,24 @@ def results( max_retries: int = MAX_RETRIES, use_cached_value: bool = True, ) -> List[AwsQuantumTask]: - """Retrieves the result of every task in the batch. + """Retrieves the result of every quantum task in the batch. - Polling for results happens in parallel; this method returns when all tasks + Polling for results happens in parallel; this method returns when all quantum tasks have reached a terminal state. The result of this method is cached. Args: fail_unsuccessful (bool): If set to `True`, this method will fail - if any task in the batch fails to return a result even after + if any quantum task in the batch fails to return a result even after `max_retries` retries. - max_retries (int): Maximum number of times to retry any failed tasks, - i.e. any tasks in the `FAILED` or `CANCELLED` state or that didn't + max_retries (int): Maximum number of times to retry any failed quantum tasks, + i.e. any quantum tasks in the `FAILED` or `CANCELLED` state or that didn't complete within the timeout. Default: 3. use_cached_value (bool): If `False`, will refetch the results from S3, even when results have already been cached. Default: `True`. 
Returns: - List[AwsQuantumTask]: The results of all of the tasks in the batch. - `FAILED`, `CANCELLED`, or timed out tasks will have a result of None + List[AwsQuantumTask]: The results of all of the quantum tasks in the batch. + `FAILED`, `CANCELLED`, or timed out quantum tasks will have a result of None """ if not self._results or not use_cached_value: self._results = AwsQuantumTaskBatch._retrieve_results(self._tasks, self._max_workers) @@ -322,14 +322,14 @@ def _retrieve_results(tasks: List[AwsQuantumTask], max_workers: int) -> List[Aws return [future.result() for future in result_futures] def retry_unsuccessful_tasks(self) -> bool: - """Retries any tasks in the batch without valid results. + """Retries any quantum tasks in the batch without valid results. This method should only be called after `results()` has been called at least once. - The method will generate new tasks for any failed tasks, so `self.task` and + The method will generate new quantum tasks for any failed quantum tasks, so `self.task` and `self.results()` may return different values after a call to this method. Returns: - bool: Whether or not all retried tasks completed successfully. + bool: Whether or not all retried quantum tasks completed successfully. """ if not self._results: raise RuntimeError("results() should be called before attempting to retry") @@ -363,19 +363,20 @@ def retry_unsuccessful_tasks(self) -> bool: @property def tasks(self) -> List[AwsQuantumTask]: - """List[AwsQuantumTask]: The tasks in this batch, as a list of AwsQuantumTask objects""" + """List[AwsQuantumTask]: The quantum tasks in this batch, as a list of AwsQuantumTask + objects""" return list(self._tasks) @property def size(self) -> int: - """int: The number of tasks in the batch""" + """int: The number of quantum tasks in the batch""" return len(self._tasks) @property def unfinished(self) -> Set[str]: - """Gets all the IDs of all the tasks in teh batch that have yet to complete. 
+ """Gets all the IDs of all the quantum tasks in the batch that have yet to complete.
+ arn (str): The ARN of the hybrid job to get. Returns: Dict[str, Any]: The response from the Amazon Braket `GetQuantumJob` operation. @@ -328,10 +328,10 @@ def get_job(self, arn: str) -> Dict[str, Any]: def cancel_job(self, arn: str) -> Dict[str, Any]: """ - Cancel the quantum job. + Cancel the hybrid job. Args: - arn (str): The ARN of the quantum job to cancel. + arn (str): The ARN of the hybrid job to cancel. Returns: Dict[str, Any]: The response from the Amazon Braket `CancelJob` operation. @@ -502,8 +502,8 @@ def default_bucket(self) -> str: Returns the name of the default bucket of the AWS Session. In the following order of priority, it will return either the parameter `default_bucket` set during initialization of the AwsSession (if not None), the bucket being used by the - currently running Braket Job (if evoked inside of a Braket Job), or a default value of - "amazon-braket--. Except in the case of a user- + currently running Braket Hybrid Job (if evoked inside of a Braket Hybrid Job), or a default + value of "amazon-braket--. Except in the case of a user- specified bucket name, this method will create the default bucket if it does not exist. diff --git a/src/braket/circuits/instruction.py b/src/braket/circuits/instruction.py index 6c555e3ab..148711c79 100644 --- a/src/braket/circuits/instruction.py +++ b/src/braket/circuits/instruction.py @@ -30,7 +30,8 @@ class Instruction: """ - An instruction is a quantum directive that describes the task to perform on a quantum device. + An instruction is a quantum directive that describes the quantum task to perform on a quantum + device. """ def __init__( diff --git a/src/braket/devices/device.py b/src/braket/devices/device.py index 7223ff6b8..6ecb2ecc2 100644 --- a/src/braket/devices/device.py +++ b/src/braket/devices/device.py @@ -41,13 +41,13 @@ def run( *args, **kwargs ) -> QuantumTask: - """Run a quantum task specification on this quantum device. 
A task can be a circuit + """Run a quantum task specification on this quantum device. A quantum task can be a circuit or an annealing problem. Args: - task_specification (Union[Circuit, Problem]): Specification of a task + task_specification (Union[Circuit, Problem]): Specification of a quantum task to run on device. - shots (Optional[int]): The number of times to run the task on the device. + shots (Optional[int]): The number of times to run the quantum task on the device. Default is `None`. inputs (Optional[Dict[str, float]]): Inputs to be passed along with the IR. If IR is an OpenQASM Program, the inputs will be updated with this value. @@ -70,21 +70,21 @@ def run_batch( *args, **kwargs ) -> QuantumTaskBatch: - """Executes a batch of tasks in parallel + """Executes a batch of quantum tasks in parallel Args: task_specifications (Union[Union[Circuit, Problem], List[Union[Circuit, Problem]]]): Single instance or list of circuits or problems to run on device. shots (Optional[int]): The number of times to run the circuit or annealing problem. - max_parallel (Optional[int]): The maximum number of tasks to run in parallel. + max_parallel (Optional[int]): The maximum number of quantum tasks to run in parallel. Batch creation will fail if this value is greater than the maximum allowed - concurrent tasks on the device. + concurrent quantum tasks on the device. inputs (Optional[Union[Dict[str, float], List[Dict[str, float]]]]): Inputs to be passed along with the IR. If the IR supports inputs, the inputs will be updated with this value. 
Returns: - QuantumTaskBatch: A batch containing all of the tasks run + QuantumTaskBatch: A batch containing all of the qauntum tasks run """ @property diff --git a/src/braket/devices/devices.py b/src/braket/devices/devices.py index e179bdfd5..8f8730336 100644 --- a/src/braket/devices/devices.py +++ b/src/braket/devices/devices.py @@ -30,6 +30,7 @@ class _DWave(str, Enum): class _IonQ(str, Enum): Harmony = "arn:aws:braket:us-east-1::device/qpu/ionq/Harmony" Aria1 = "arn:aws:braket:us-east-1::device/qpu/ionq/Aria-1" + Aria2 = "arn:aws:braket:us-east-1::device/qpu/ionq/Aria-2" class _OQC(str, Enum): Lucy = "arn:aws:braket:eu-west-2::device/qpu/oqc/Lucy" diff --git a/src/braket/devices/local_simulator.py b/src/braket/devices/local_simulator.py index 5b0092e3a..96c51daef 100644 --- a/src/braket/devices/local_simulator.py +++ b/src/braket/devices/local_simulator.py @@ -82,7 +82,7 @@ def run( The task specification. shots (int): The number of times to run the circuit or annealing problem. Default is 0, which means that the simulator will compute the exact - results based on the task specification. + results based on the quantum task specification. Sampling is not supported for shots=0. inputs (Optional[Dict[str, float]]): Inputs to be passed along with the IR. If the IR supports inputs, the inputs will be updated with this @@ -118,21 +118,21 @@ def run_batch( *args, **kwargs, ) -> LocalQuantumTaskBatch: - """Executes a batch of tasks in parallel + """Executes a batch of quantum tasks in parallel Args: task_specifications (Union[Union[Circuit, Problem, Program, AnalogHamiltonianSimulation, SerializableProgram], List[Union[Circuit, Problem, Program, AnalogHamiltonianSimulation, SerializableProgram]]]): # noqa E501 Single instance or list of task specification. shots (Optional[int]): The number of times to run the task. Default: 0. - max_parallel (Optional[int]): The maximum number of tasks to run in parallel. 
Default + max_parallel (Optional[int]): The maximum number of quantum tasks to run in parallel. Default is the number of CPU. inputs (Optional[Union[Dict[str, float], List[Dict[str, float]]]]): Inputs to be passed along with the IR. If the IR supports inputs, the inputs will be updated with this value. Default: {}. Returns: - LocalQuantumTaskBatch: A batch containing all of the tasks run + LocalQuantumTaskBatch: A batch containing all of the quantum tasks run See Also: `braket.tasks.local_quantum_task_batch.LocalQuantumTaskBatch` diff --git a/src/braket/experimental/autoqasm/__init__.py b/src/braket/experimental/autoqasm/__init__.py index 23098ace2..1c9187b01 100644 --- a/src/braket/experimental/autoqasm/__init__.py +++ b/src/braket/experimental/autoqasm/__init__.py @@ -40,7 +40,7 @@ def my_program(): result[1] = measure __qubits__[1]; """ -from .api import gate, main, subroutine # noqa: F401 +from .api import gate, gate_calibration, main, subroutine # noqa: F401 from .instructions import QubitIdentifierType as Qubit # noqa: F401 from .program import Program, build_program, verbatim # noqa: F401 from .transpiler import transpiler # noqa: F401 diff --git a/src/braket/experimental/autoqasm/api.py b/src/braket/experimental/autoqasm/api.py index f0f769cb7..1073715ee 100644 --- a/src/braket/experimental/autoqasm/api.py +++ b/src/braket/experimental/autoqasm/api.py @@ -36,6 +36,9 @@ is_autograph_artifact, ) from braket.experimental.autoqasm.autograph.tf_utils import tf_decorator +from braket.experimental.autoqasm.instructions.qubits import QubitIdentifierType as Qubit +from braket.experimental.autoqasm.instructions.qubits import is_qubit_identifier_type +from braket.experimental.autoqasm.program.gate_calibrations import GateCalibration def main( @@ -88,6 +91,25 @@ def gate(*args) -> Callable[[Any], None]: return _function_wrapper(*args, converter_callback=_convert_gate) +def gate_calibration(*args, implements: Callable, **kwargs) -> Callable[[], GateCalibration]: + """A 
decorator that registers the decorated function as a gate calibration definition. The
+ """ + + def _func(*args: Any) -> None: + aq_transpiler.converted_call(f, *args, kwargs={}, options=options) + + return _func + + +def _get_gate_args(f: Callable) -> aq_program.GateArgs: + """Build a GateArgs object from the function signature of a gate. + + Args: + f (Callable): Gate function + + Returns: + aq_program.GateArgs: Object representing a list of qubit and angle arguments for + a gate definition. + """ gate_args = aq_program.GateArgs() sig = inspect.signature(f) for param in sig.parameters.values(): @@ -486,8 +535,101 @@ def _wrap_for_oqpy_gate( f'Parameter "{param.name}" for gate "{f.__name__}" ' "must have a type hint of either aq.Qubit or float." ) + return gate_args - def _func(*args: Any) -> None: - aq_transpiler.converted_call(f, *args, kwargs={}, options=options) - return _func, gate_args +def _convert_calibration( + f: Callable, + options: converter.ConversionOptions, + args: List[Any], + kwargs: Dict[str, Any], + gate_function: Callable, + **decorator_kwargs, +) -> GateCalibration: + """Convert the initial callable `f` into a GateCalibration object that will be added to + the main program as defcal. + + Args: + f (Callable): The function to be converted. + options (converter.ConversionOptions): Converter options. + args (List[Any]): Arguments passed to the program when called. + kwargs (Dict[str, Any]): Keyword arguments passed to the program when called. + gate_function (Callable): The gate function which calibration is being defined. + + Returns: + GateCalibration: Object representing the calibration definition. 
+ """ + func_args = _get_gate_args(f) + _validate_calibration_args(gate_function, decorator_kwargs, func_args) + + union_deco_func_args = {**decorator_kwargs, **{var.name: var for var in func_args._args}} + + gate_calibration_qubits = [] + gate_calibration_angles = [] + gate_args = _get_gate_args(gate_function) + for i, var in enumerate(gate_args._args): + name = var.name + value = union_deco_func_args[name] + is_qubit = i in gate_args.qubit_indices + + if is_qubit and not is_qubit_identifier_type(value): + raise errors.ParameterTypeError(f'Parameter "{name}" must have a type of aq.Qubit.') + + if not is_qubit and not isinstance(value, (float, oqpy.AngleVar)): + raise errors.ParameterTypeError(f'Parameter "{name}" must have a type of float.') + + if is_qubit: + gate_calibration_qubits.append(value) + else: + gate_calibration_angles.append(value) + + func_call_kwargs = { + **{var.name: var for var in func_args.qubits}, + **{ + var.name: oqpy.FloatVar(name=var.name, needs_declaration=False) + for var in func_args.angles + }, + } + + with aq_program.build_program() as program_conversion_context: + with program_conversion_context.calibration_definition( + gate_function.__name__, gate_calibration_qubits, gate_calibration_angles + ): + aq_transpiler.converted_call(f, [], func_call_kwargs, options=options) + + return GateCalibration( + gate_function=gate_function, + qubits=gate_calibration_qubits, + angles=gate_calibration_angles, + program=program_conversion_context.make_program(), + ) + + +def _validate_calibration_args( + gate_function: Callable, + decorator_args: Dict[str, Union[Qubit, float]], + func_args: aq_program.GateArgs, +) -> None: + """Validate the arguments passed to the calibration decorator and function. + + Args: + gate_function (Callable): The gate function which calibration is being defined. + decorator_args (Dict[str, Union[Qubit, float]]): The calibration decorator arguments. + func_args (aq_program.GateArgs): The gate function arguments. 
+ """ + gate_args = _get_gate_args(gate_function) + gate_args_names = [var.name for var in gate_args._args] + func_args_names = [var.name for var in func_args._args] + decorator_args_names = decorator_args.keys() + + # validate the name of args + if not set(gate_args_names) == set(decorator_args_names) | set(func_args_names): + raise errors.InvalidCalibrationDefinition( + "The union of calibration decorator arguments and function arguments must match the" + " gate arguments." + ) + + if any(name in decorator_args_names for name in func_args_names): + raise errors.InvalidCalibrationDefinition( + "The function arguments must not duplicate any argument in the calibration decorator." + ) diff --git a/src/braket/experimental/autoqasm/errors.py b/src/braket/experimental/autoqasm/errors.py index 5476aa690..9b7903a7b 100644 --- a/src/braket/experimental/autoqasm/errors.py +++ b/src/braket/experimental/autoqasm/errors.py @@ -41,6 +41,10 @@ class InvalidGateDefinition(AutoQasmError): """Gate definition does not meet the necessary requirements.""" +class InvalidCalibrationDefinition(AutoQasmError): + """Calibration definition does not meet the necessary requirements.""" + + class InvalidTargetQubit(AutoQasmError): """Target qubit is invalid in the current context.""" diff --git a/src/braket/experimental/autoqasm/instructions/qubits.py b/src/braket/experimental/autoqasm/instructions/qubits.py index 099f57dc8..a8def724a 100644 --- a/src/braket/experimental/autoqasm/instructions/qubits.py +++ b/src/braket/experimental/autoqasm/instructions/qubits.py @@ -22,7 +22,7 @@ from braket.experimental.autoqasm import constants, errors, program -QubitIdentifierType = Union[int, oqpy._ClassicalVar, oqpy.base.OQPyExpression, str] +QubitIdentifierType = Union[int, oqpy._ClassicalVar, oqpy.base.OQPyExpression, str, oqpy.Qubit] def is_qubit_identifier_type(qubit: Any) -> bool: diff --git a/src/braket/experimental/autoqasm/program/gate_calibrations.py 
b/src/braket/experimental/autoqasm/program/gate_calibrations.py new file mode 100644 index 000000000..ad76dcd5e --- /dev/null +++ b/src/braket/experimental/autoqasm/program/gate_calibrations.py @@ -0,0 +1,43 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + + +from __future__ import annotations + +from typing import Callable, Iterable + +from braket.experimental.autoqasm.instructions.qubits import QubitIdentifierType as Qubit +from braket.experimental.autoqasm.program import Program + + +class GateCalibration: + def __init__( + self, + gate_function: Callable, + qubits: Iterable[Qubit], + angles: Iterable[float], + program: Program, + ): + """Definition of a gate calibration, including pulse instructions and the qubits, angles + and the gate it implements. + + Args: + gate_function (Callable): The gate function which calibration is defined. + qubits (Iterable[Qubit]): The qubits on which the gate calibration is defined. + angles (Iterable[float]): The angles at which the gate calibration is defined. + program (Program): Calibration instructions as an AutoQASM program. 
+ """ + self.gate_function = gate_function + self.qubits = qubits + self.angles = angles + self.program = program diff --git a/src/braket/experimental/autoqasm/program/program.py b/src/braket/experimental/autoqasm/program/program.py index 1d41037e9..ce63dc9e7 100644 --- a/src/braket/experimental/autoqasm/program/program.py +++ b/src/braket/experimental/autoqasm/program/program.py @@ -12,12 +12,13 @@ # language governing permissions and limitations under the License. """AutoQASM Program class, context managers, and related functions.""" +from __future__ import annotations import contextlib import threading from dataclasses import dataclass from enum import Enum -from typing import Any, List, Optional, Union +from typing import Any, Callable, Iterable, List, Optional, Union import oqpy.base @@ -25,6 +26,8 @@ from braket.device_schema import DeviceActionType from braket.devices.device import Device from braket.experimental.autoqasm import constants, errors +from braket.experimental.autoqasm.instructions.qubits import QubitIdentifierType as Qubit +from braket.experimental.autoqasm.instructions.qubits import _qubit # Create the thread-local object for the program conversion context. _local = threading.local() @@ -67,6 +70,8 @@ class ProgramMode(Enum): """For general program conversion where all operations are allowed.""" UNITARY = 1 """For program conversion inside a context where only unitary operations are allowed.""" + PULSE = 2 + """For program conversion inside a context where only pulse operations are allowed.""" class Program(SerializableProgram): @@ -85,6 +90,27 @@ def __init__(self, oqpy_program: oqpy.Program, has_pulse_control: bool = False): self._oqpy_program = oqpy_program self._has_pulse_control = has_pulse_control + def with_calibrations(self, gate_calibrations: Union[Callable, List[Callable]]) -> Program: + """Add the gate calibrations to the program. The calibration added program is returned + as a new object. The original program is not modified. 
+ + Args: + gate_calibrations (Union[Callable, List[Callable]]): The gate calibrations to add to + the main program. Calibration are passed as callable without evaluation. + + Returns: + Program: The program with gate calibrations added. + """ + if isinstance(gate_calibrations, Callable): + gate_calibrations = [gate_calibrations] + assert all(isinstance(gc, Callable) for gc in gate_calibrations) + + combined_oqpy_program = oqpy.Program() + for gc in gate_calibrations: + combined_oqpy_program += gc().program._oqpy_program + combined_oqpy_program += self._oqpy_program + return Program(combined_oqpy_program, self._has_pulse_control) + def to_ir( self, ir_type: IRType = IRType.OPENQASM, @@ -154,6 +180,7 @@ def __init__(self, user_config: Optional[UserConfig] = None): self.return_variable = None self._oqpy_program_stack = [oqpy.Program()] self._gate_definitions_processing = [] + self._calibration_definitions_processing = [] self._gates_defined = set() self._gates_used = set() self._in_verbatim = False @@ -357,6 +384,9 @@ def get_oqpy_program( errors.InvalidGateDefinition: If this function is called from within a gate definition where only unitary gate operations are allowed, and the `mode` parameter is not specified as `ProgramMode.UNITARY`. + errors.InvalidCalibrationDefinition: If this function is called from within a + calibration definition where only pulse operations are allowed, and the + `mode` parameter is not specified as `ProgramMode.PULSE`. Returns: oqpy.Program: The requested oqpy program. @@ -367,6 +397,12 @@ def get_oqpy_program( f'Gate definition "{gate_name}" contains invalid operations. ' "A gate definition must only call unitary gate operations." ) + if self._calibration_definitions_processing and mode != ProgramMode.PULSE: + gate_name = self._calibration_definitions_processing[-1]["name"] + raise errors.InvalidCalibrationDefinition( + f'Calibration definition "{gate_name}" contains invalid operations. 
' + "A calibration definition must only call pulse operations." + ) if scope == ProgramScope.CURRENT: requested_index = -1 @@ -411,6 +447,32 @@ def gate_definition(self, gate_name: str, gate_args: GateArgs) -> None: finally: self._gate_definitions_processing.pop() + @contextlib.contextmanager + def calibration_definition( + self, gate_name: str, qubits: Iterable[Qubit], angles: Iterable[float] + ) -> None: + """Sets the program conversion context into a calibration definition context. + + Args: + gate_name (str): The name of the gate being defined. + qubits (Iterable[Qubit]): The list of qubits to the gate. + angles (Iterable[float]): The angles at which the gate calibration is defined. + """ + try: + qubits = [_qubit(q) for q in qubits] + self._calibration_definitions_processing.append( + {"name": gate_name, "qubits": qubits, "angles": angles} + ) + with oqpy.defcal( + self.get_oqpy_program(mode=ProgramMode.PULSE), + qubits, + gate_name, + angles, + ): + yield + finally: + self._calibration_definitions_processing.pop() + @contextlib.contextmanager def verbatim_block(self) -> None: """Sets the program conversion context into a verbatim block context. diff --git a/src/braket/experimental/autoqasm/pulse/pulse.py b/src/braket/experimental/autoqasm/pulse/pulse.py index c27da3c1f..90dff13fb 100644 --- a/src/braket/experimental/autoqasm/pulse/pulse.py +++ b/src/braket/experimental/autoqasm/pulse/pulse.py @@ -15,12 +15,13 @@ """Pulse instructions that apply to frames or qubits. """ +import re from typing import List, Union import oqpy from braket.circuits.qubit_set import QubitSet -from braket.experimental.autoqasm import program +from braket.experimental.autoqasm import program as aq_program from braket.experimental.autoqasm.instructions.qubits import ( QubitIdentifierType, is_qubit_identifier_type, @@ -37,13 +38,39 @@ def _pulse_instruction(name: str, frame: Frame, *args) -> None: name (str): Name of the pulse instruction. 
frame (Frame): Frame for which the instruction is apply to. """ - program_conversion_context = program.get_program_conversion_context() + program_conversion_context = aq_program.get_program_conversion_context() program_conversion_context._has_pulse_control = True pulse_sequence = PulseSequence() - pulse_sequence._program = program_conversion_context.get_oqpy_program() - with oqpy.Cal(pulse_sequence._program): + pulse_sequence._program = program_conversion_context.get_oqpy_program( + mode=aq_program.ProgramMode.PULSE + ) + + if program_conversion_context._calibration_definitions_processing: getattr(pulse_sequence, name)(frame, *args) + else: + with oqpy.Cal(pulse_sequence._program): + getattr(pulse_sequence, name)(frame, *args) + + +def _physical_qubit_to_braket_qubit(qids: List[str]) -> QubitSet: + """Convert a physical qubit label to a QubitSet. + + Args: + qids (List[str]): Physical qubit labels. + + Returns: + QubitSet: Represent physical qubits. + """ + braket_qubits = [] + for qid in qids: + if not (isinstance(qid, str) and re.match(r"\$\d+", qid)): + raise ValueError( + f"invalid physical qubit label: '{qid}'. Physical qubit must be labeled as a string" + "with `$` followed by an integer. For example: `$1`." 
+ ) + braket_qubits.append(int(qid[1:])) + return QubitSet(braket_qubits) def set_frequency(frame: Frame, frequency: float) -> None: @@ -130,7 +157,7 @@ def delay( if not isinstance(qubits_or_frames, List): qubits_or_frames = [qubits_or_frames] if all(is_qubit_identifier_type(q) for q in qubits_or_frames): - qubits_or_frames = QubitSet(qubits_or_frames) + qubits_or_frames = _physical_qubit_to_braket_qubit(qubits_or_frames) _pulse_instruction("delay", qubits_or_frames, duration) @@ -148,5 +175,5 @@ def barrier( if not isinstance(qubits_or_frames, List): qubits_or_frames = [qubits_or_frames] if all(is_qubit_identifier_type(q) for q in qubits_or_frames): - qubits_or_frames = QubitSet(qubits_or_frames) + qubits_or_frames = _physical_qubit_to_braket_qubit(qubits_or_frames) _pulse_instruction("barrier", qubits_or_frames) diff --git a/src/braket/jobs/config.py b/src/braket/jobs/config.py index de24ed782..73467dd3a 100644 --- a/src/braket/jobs/config.py +++ b/src/braket/jobs/config.py @@ -25,7 +25,7 @@ class CheckpointConfig: @dataclass class InstanceConfig: - """Configuration of the instances used to execute the job.""" + """Configuration of the instances used to execute the hybrid job.""" instanceType: str = "ml.m5.large" volumeSizeInGb: int = 30 @@ -34,7 +34,7 @@ class InstanceConfig: @dataclass class OutputDataConfig: - """Configuration that specifies the location for the output of the job.""" + """Configuration that specifies the location for the output of the hybrid job.""" s3Path: Optional[str] = None kmsKeyId: Optional[str] = None @@ -42,7 +42,7 @@ class OutputDataConfig: @dataclass class StoppingCondition: - """Conditions that specify when the job should be forcefully stopped.""" + """Conditions that specify when the hybrid job should be forcefully stopped.""" maxRuntimeInSeconds: int = 5 * 24 * 60 * 60 @@ -64,7 +64,7 @@ def __init__( s3_data, content_type=None, ): - """Create a definition for input data used by a Braket job. 
+ """Create a definition for input data used by a Braket Hybrid job. Args: s3_data (str): Defines the location of s3 data to train on. diff --git a/src/braket/jobs/data_persistence.py b/src/braket/jobs/data_persistence.py index 5bd44adbe..198a7e225 100644 --- a/src/braket/jobs/data_persistence.py +++ b/src/braket/jobs/data_persistence.py @@ -64,17 +64,17 @@ def save_job_checkpoint( def load_job_checkpoint(job_name: str, checkpoint_file_suffix: str = "") -> Dict[str, Any]: """ - Loads the job checkpoint data stored for the job named 'job_name', with the checkpoint - file that ends with the `checkpoint_file_suffix`. The `job_name` can refer to any job whose - checkpoint data you expect to be available in the file path specified by the `CHECKPOINT_DIR` - container environment variable. + Loads the hybrid job checkpoint data stored for the job named 'job_name', with the checkpoint + file that ends with the `checkpoint_file_suffix`. The `job_name` can refer to any hybrid job + whose checkpoint data you expect to be available in the file path specified by the + `CHECKPOINT_DIR` container environment variable. - Note: This function for loading job checkpoints is only for use inside the job container + Note: This function for loading hybrid job checkpoints is only for use inside the job container as it writes data to directories and references env variables set in the containers. Args: - job_name (str): str that specifies the name of the job whose checkpoints + job_name (str): str that specifies the name of the hybrid job whose checkpoints are to be loaded. checkpoint_file_suffix (str): str specifying the file suffix that is used to locate the checkpoint file to load. The resulting file name @@ -113,7 +113,7 @@ def save_job_result( environment variable `AMZN_BRAKET_JOB_RESULTS_DIR`, with the filename 'results.json'. The `result_data` values are serialized to the specified `data_format`. 
- Note: This function for storing the results is only for use inside the job container + Note: This function for storing the results is only for use inside the hybrid job container as it writes data to directories and references env variables set in the containers. diff --git a/src/braket/jobs/local/local_job.py b/src/braket/jobs/local/local_job.py index 6be15720f..a6fc8d279 100644 --- a/src/braket/jobs/local/local_job.py +++ b/src/braket/jobs/local/local_job.py @@ -31,7 +31,7 @@ class LocalQuantumJob(QuantumJob): - """Amazon Braket implementation of a quantum job that runs locally.""" + """Amazon Braket implementation of a hybrid job that runs locally.""" @classmethod def create( @@ -50,42 +50,42 @@ def create( aws_session: AwsSession = None, local_container_update: bool = True, ) -> LocalQuantumJob: - """Creates and runs job by setting up and running the customer script in a local + """Creates and runs hybrid job by setting up and running the customer script in a local docker container. Args: device (str): ARN for the AWS device which is primarily accessed for the execution - of this job. Alternatively, a string of the format "local:/" - for using a local simulator for the job. This string will be available as the - environment variable `AMZN_BRAKET_DEVICE_ARN` inside the job container when - using a Braket container. + of this hybrid job. Alternatively, a string of the format + "local:/" for using a local simulator for the hybrid job. This + string will be available as the environment variable `AMZN_BRAKET_DEVICE_ARN` inside + the hybrid job container when using a Braket container. source_module (str): Path (absolute, relative or an S3 URI) to a python module to be tarred and uploaded. If `source_module` is an S3 URI, it must point to a tar.gz file. Otherwise, source_module may be a file or directory. 
- entry_point (str): A str that specifies the entry point of the job, relative to + entry_point (str): A str that specifies the entry point of the hybrid job, relative to the source module. The entry point must be in the format `importable.module` or `importable.module:callable`. For example, `source_module.submodule:start_here` indicates the `start_here` function contained in `source_module.submodule`. If source_module is an S3 URI, entry point must be given. Default: source_module's name - image_uri (str): A str that specifies the ECR image to use for executing the job. + image_uri (str): A str that specifies the ECR image to use for executing the hybrid job. `image_uris.retrieve_image()` function may be used for retrieving the ECR image URIs for the containers supported by Braket. Default = ``. - job_name (str): A str that specifies the name with which the job is created. + job_name (str): A str that specifies the name with which the hybrid job is created. Default: f'{image_uri_type}-{timestamp}'. code_location (str): The S3 prefix URI where custom code will be uploaded. Default: f's3://{default_bucket_name}/jobs/{job_name}/script'. - role_arn (str): This field is currently not used for local jobs. Local jobs will use - the current role's credentials. This may be subject to change. + role_arn (str): This field is currently not used for local hybrid jobs. Local hybrid + jobs will use the current role's credentials. This may be subject to change. - hyperparameters (Dict[str, Any]): Hyperparameters accessible to the job. - The hyperparameters are made accessible as a Dict[str, str] to the job. + hyperparameters (Dict[str, Any]): Hyperparameters accessible to the hybrid job. + The hyperparameters are made accessible as a Dict[str, str] to the hybrid job. For convenience, this accepts other types for keys and values, but `str()` is called to convert them before being passed on. Default: None. @@ -97,7 +97,8 @@ def create( channel name "input". Default: {}. 
- output_data_config (OutputDataConfig): Specifies the location for the output of the job. + output_data_config (OutputDataConfig): Specifies the location for the output of the + hybrid job. Default: OutputDataConfig(s3Path=f's3://{default_bucket_name}/jobs/{job_name}/data', kmsKeyId=None). @@ -114,7 +115,7 @@ def create( Default: True. Returns: - LocalQuantumJob: The representation of a local Braket Job. + LocalQuantumJob: The representation of a local Braket Hybrid Job. """ create_job_kwargs = prepare_quantum_job( device=device, @@ -164,8 +165,8 @@ def create( def __init__(self, arn: str, run_log: str = None): """ Args: - arn (str): The ARN of the job. - run_log (str): The container output log of running the job with the given arn. + arn (str): The ARN of the hybrid job. + run_log (str): The container output log of running the hybrid job with the given arn. """ if not arn.startswith("local:job/"): raise ValueError(f"Arn {arn} is not a valid local job arn") @@ -177,20 +178,20 @@ def __init__(self, arn: str, run_log: str = None): @property def arn(self) -> str: - """str: The ARN (Amazon Resource Name) of the quantum job.""" + """str: The ARN (Amazon Resource Name) of the hybrid job.""" return self._arn @property def name(self) -> str: - """str: The name of the quantum job.""" + """str: The name of the hybrid job.""" return self._name @property def run_log(self) -> str: - """Gets the run output log from running the job. + """Gets the run output log from running the hybrid job. Returns: - str: The container output log from running the job. + str: The container output log from running the hybrid job. """ if not self._run_log: try: @@ -201,7 +202,7 @@ def run_log(self) -> str: return self._run_log def state(self, use_cached_value: bool = False) -> str: - """The state of the quantum job. + """The state of the hybrid job. Args: use_cached_value (bool): If `True`, uses the value most recently retrieved value from the Amazon Braket `GetJob` operation. 
If `False`, calls the @@ -213,7 +214,7 @@ def state(self, use_cached_value: bool = False) -> str: return "COMPLETED" def metadata(self, use_cached_value: bool = False) -> Dict[str, Any]: - """When running the quantum job in local mode, the metadata is not available. + """When running the hybrid job in local mode, the metadata is not available. Args: use_cached_value (bool): If `True`, uses the value most recently retrieved from the Amazon Braket `GetJob` operation, if it exists; if does not exist, @@ -225,7 +226,7 @@ def metadata(self, use_cached_value: bool = False) -> Dict[str, Any]: pass def cancel(self) -> str: - """When running the quantum job in local mode, the cancelling a running is not possible. + """When running the hybrid job in local mode, the cancelling a running is not possible. Returns: str: None """ @@ -237,11 +238,11 @@ def download_result( poll_timeout_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_TIMEOUT, poll_interval_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_INTERVAL, ) -> None: - """When running the quantum job in local mode, results are automatically stored locally. + """When running the hybrid job in local mode, results are automatically stored locally. Args: extract_to (str): The directory to which the results are extracted. The results - are extracted to a folder titled with the job name within this directory. + are extracted to a folder titled with the hybrid job name within this directory. Default= `Current working directory`. poll_timeout_seconds (float): The polling timeout, in seconds, for `result()`. Default: 10 days. @@ -255,7 +256,7 @@ def result( poll_timeout_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_TIMEOUT, poll_interval_seconds: float = QuantumJob.DEFAULT_RESULTS_POLL_INTERVAL, ) -> Dict[str, Any]: - """Retrieves the job result persisted using save_job_result() function. + """Retrieves the hybrid job result persisted using save_job_result() function. 
Args: poll_timeout_seconds (float): The polling timeout, in seconds, for `result()`. @@ -264,7 +265,7 @@ def result( Default: 5 seconds. Returns: - Dict[str, Any]: Dict specifying the job results. + Dict[str, Any]: Dict specifying the hybrid job results. """ try: with open(os.path.join(self.name, "results.json"), "r") as f: @@ -308,13 +309,13 @@ def metrics( return parser.get_parsed_metrics(metric_type, statistic) def logs(self, wait: bool = False, poll_interval_seconds: int = 5) -> None: - """Display container logs for a given job + """Display container logs for a given hybrid job Args: - wait (bool): `True` to keep looking for new log entries until the job completes; + wait (bool): `True` to keep looking for new log entries until the hybrid job completes; otherwise `False`. Default: `False`. poll_interval_seconds (int): The interval of time, in seconds, between polling for - new log entries and job completion (default: 5). + new log entries and hybrid job completion (default: 5). """ return print(self.run_log) diff --git a/src/braket/jobs/local/local_job_container.py b/src/braket/jobs/local/local_job_container.py index d9f1c1195..f924db47f 100644 --- a/src/braket/jobs/local/local_job_container.py +++ b/src/braket/jobs/local/local_job_container.py @@ -21,7 +21,7 @@ class _LocalJobContainer(object): - """Uses docker CLI to run Braket Jobs on a local docker container.""" + """Uses docker CLI to run Braket Hybrid Jobs on a local docker container.""" ECR_URI_PATTERN = r"^((\d+)\.dkr\.ecr\.([^.]+)\.[^/]*)/([^:]*):(.*)$" CONTAINER_CODE_PATH = "/opt/ml/code/" @@ -33,7 +33,8 @@ def __init__( logger: Logger = getLogger(__name__), force_update: bool = False, ): - """Represents and provides functions for interacting with a Braket Jobs docker container. + """Represents and provides functions for interacting with a Braket Hybrid Jobs docker + container. The function "end_session" must be called when the container is no longer needed. 
Args: @@ -71,7 +72,7 @@ def _envs_to_list(environment_variables: Dict[str, str]) -> List[str]: environment_variables (Dict[str, str]): A dictionary of environment variables and their values. Returns: - List[str]: The list of parameters to use when running a job that will include the + List[str]: The list of parameters to use when running a hybrid job that will include the provided environment variables as part of the runtime. """ env_list = [] @@ -229,11 +230,11 @@ def run_local_job( self, environment_variables: Dict[str, str], ) -> None: - """Runs a Braket job in a local container. + """Runs a Braket Hybrid job in a local container. Args: environment_variables (Dict[str, str]): The environment variables to make available - as part of running the job. + as part of running the hybrid job. """ start_program_name = self._check_output_formatted( ["docker", "exec", self._container_name, "printenv", "SAGEMAKER_PROGRAM"] diff --git a/src/braket/jobs/local/local_job_container_setup.py b/src/braket/jobs/local/local_job_container_setup.py index 6372957ea..7505dcbf5 100644 --- a/src/braket/jobs/local/local_job_container_setup.py +++ b/src/braket/jobs/local/local_job_container_setup.py @@ -24,17 +24,17 @@ def setup_container( container: _LocalJobContainer, aws_session: AwsSession, **creation_kwargs ) -> Dict[str, str]: - """Sets up a container with prerequisites for running a Braket Job. The prerequisites are - based on the options the customer has chosen for the job. Similarly, any environment variables - that are needed during runtime will be returned by this function. + """Sets up a container with prerequisites for running a Braket Hybrid Job. The prerequisites are + based on the options the customer has chosen for the hybrid job. Similarly, any environment + variables that are needed during runtime will be returned by this function. Args: - container(_LocalJobContainer): The container that will run the braket job. 
+ container(_LocalJobContainer): The container that will run the braket hybrid job. aws_session (AwsSession): AwsSession for connecting to AWS Services. Returns: - Dict[str, str]: A dictionary of environment variables that reflect Braket Jobs options - requested by the customer. + Dict[str, str]: A dictionary of environment variables that reflect Braket Hybrid Jobs + options requested by the customer. """ logger = getLogger(__name__) _create_expected_paths(container, **creation_kwargs) @@ -52,10 +52,10 @@ def setup_container( def _create_expected_paths(container: _LocalJobContainer, **creation_kwargs) -> None: - """Creates the basic paths required for Braket Jobs to run. + """Creates the basic paths required for Braket Hybrid Jobs to run. Args: - container(_LocalJobContainer): The container that will run the braket job. + container(_LocalJobContainer): The container that will run the braket hybrid job. """ container.makedir("/opt/ml/model") container.makedir(creation_kwargs["checkpointConfig"]["localPath"]) @@ -95,7 +95,7 @@ def _get_env_script_mode_config(script_mode_config: Dict[str, str]) -> Dict[str, Args: script_mode_config (Dict[str, str]): The values for scriptModeConfig in the boto3 input - parameters for running a Braket Job. + parameters for running a Braket Hybrid Job. Returns: Dict[str, str]: The set of key/value pairs that should be added as environment variables @@ -137,7 +137,7 @@ def _get_env_default_vars(aws_session: AwsSession, **creation_kwargs) -> Dict[st def _get_env_hyperparameters() -> Dict[str, str]: """Gets the env variable for hyperparameters. This should only be added if the customer has - provided hyperpameters to the job. + provided hyperpameters to the hybrid job. Returns: Dict[str, str]: The set of key/value pairs that should be added as environment variables @@ -150,7 +150,7 @@ def _get_env_hyperparameters() -> Dict[str, str]: def _get_env_input_data() -> Dict[str, str]: """Gets the env variable for input data. 
This should only be added if the customer has - provided input data to the job. + provided input data to the hybrid job. Returns: Dict[str, str]: The set of key/value pairs that should be added as environment variables @@ -187,13 +187,13 @@ def _download_input_data( download_dir: str, input_data: Dict[str, Any], ) -> None: - """Downloads input data for a job. + """Downloads input data for a hybrid job. Args: aws_session (AwsSession): AwsSession for connecting to AWS Services. download_dir (str): The directory path to download to. input_data (Dict[str, Any]): One of the input data in the boto3 input parameters for - running a Braket Job. + running a Braket Hybrid Job. """ # If s3 prefix is the full name of a directory and all keys are inside # that directory, the contents of said directory will be copied into a diff --git a/src/braket/jobs/metrics.py b/src/braket/jobs/metrics.py index cd8626282..462501cb6 100644 --- a/src/braket/jobs/metrics.py +++ b/src/braket/jobs/metrics.py @@ -22,7 +22,7 @@ def log_metric( iteration_number: Optional[int] = None, ) -> None: """ - Records Braket Job metrics. + Records Braket Hybrid Job metrics. Args: metric_name (str) : The name of the metric. diff --git a/src/braket/jobs/metrics_data/cwl_insights_metrics_fetcher.py b/src/braket/jobs/metrics_data/cwl_insights_metrics_fetcher.py index bbcc4f3c4..94ed5499d 100644 --- a/src/braket/jobs/metrics_data/cwl_insights_metrics_fetcher.py +++ b/src/braket/jobs/metrics_data/cwl_insights_metrics_fetcher.py @@ -39,8 +39,9 @@ def __init__( in seconds. Default: 10 seconds. poll_interval_seconds (float): The interval of time, in seconds, between polling for results. Default: 1 second. - logger (Logger): Logger object with which to write logs, such as task statuses - while waiting for a task to be in a terminal state. Default is `getLogger(__name__)` + logger (Logger): Logger object with which to write logs, such as quantum task statuses + while waiting for a quantum task to be in a terminal state. 
Default is + `getLogger(__name__)` """ self._poll_timeout_seconds = poll_timeout_seconds self._poll_interval_seconds = poll_interval_seconds @@ -136,18 +137,18 @@ def get_metrics_for_job( job_end_time: int = None, ) -> Dict[str, List[Union[str, float, int]]]: """ - Synchronously retrieves all the algorithm metrics logged by a given Job. + Synchronously retrieves all the algorithm metrics logged by a given Hybrid Job. Args: - job_name (str): The name of the Job. The name must be exact to ensure only the relevant - metrics are retrieved. + job_name (str): The name of the Hybrid Job. The name must be exact to ensure only the + relevant metrics are retrieved. metric_type (MetricType): The type of metrics to get. Default is MetricType.TIMESTAMP. statistic (MetricStatistic): The statistic to determine which metric value to use when there is a conflict. Default is MetricStatistic.MAX. - job_start_time (int): The time when the job started. + job_start_time (int): The time when the hybrid job started. Default: 3 hours before job_end_time. - job_end_time (int): If the job is complete, this should be the time at which the - job finished. Default: current time. + job_end_time (int): If the hybrid job is complete, this should be the time at which the + hybrid job finished. Default: current time. Returns: Dict[str, List[Union[str, float, int]]] : The metrics data, where the keys @@ -164,7 +165,8 @@ def get_metrics_for_job( query_end_time = job_end_time or int(time.time()) query_start_time = job_start_time or query_end_time - self.QUERY_DEFAULT_JOB_DURATION - # The job name needs to be unique to prevent jobs with similar names from being conflated. + # The hybrid job name needs to be unique to prevent jobs with similar names from being + # conflated. 
query = ( f"fields @timestamp, @message " f"| filter @logStream like /^{job_name}\\// " diff --git a/src/braket/jobs/metrics_data/cwl_metrics_fetcher.py b/src/braket/jobs/metrics_data/cwl_metrics_fetcher.py index c7db59daa..5e3ef28f2 100644 --- a/src/braket/jobs/metrics_data/cwl_metrics_fetcher.py +++ b/src/braket/jobs/metrics_data/cwl_metrics_fetcher.py @@ -34,8 +34,9 @@ def __init__( aws_session (AwsSession): AwsSession to connect to AWS with. poll_timeout_seconds (float): The polling timeout for retrieving the metrics, in seconds. Default: 10 seconds. - logger (Logger): Logger object with which to write logs, such as task statuses - while waiting for task to be in a terminal state. Default is `getLogger(__name__)` + logger (Logger): Logger object with which to write logs, such as quantum task statuses + while waiting for quantum task to be in a terminal state. Default is + `getLogger(__name__)` """ self._poll_timeout_seconds = poll_timeout_seconds self._logger = logger @@ -63,7 +64,7 @@ def _parse_metrics_from_log_stream( parser: LogMetricsParser, ) -> None: """ - Synchronously retrieves the algorithm metrics logged in a given job log stream. + Synchronously retrieves the algorithm metrics logged in a given hybrid job log stream. Args: stream_name (str): The name of the log stream. @@ -94,14 +95,14 @@ def _parse_metrics_from_log_stream( def _get_log_streams_for_job(self, job_name: str, timeout_time: float) -> List[str]: """ - Retrieves the list of log streams relevant to a job. + Retrieves the list of log streams relevant to a hybrid job. Args: - job_name (str): The name of the job. + job_name (str): The name of the hybrid job. timeout_time (float) : Metrics cease getting streamed if the current time exceeds the timeout time. Returns: - List[str] : A list of log stream names for the given job. + List[str] : A list of log stream names for the given hybrid job. 
""" kwargs = { "logGroupName": self.LOG_GROUP_NAME, @@ -130,11 +131,11 @@ def get_metrics_for_job( statistic: MetricStatistic = MetricStatistic.MAX, ) -> Dict[str, List[Union[str, float, int]]]: """ - Synchronously retrieves all the algorithm metrics logged by a given Job. + Synchronously retrieves all the algorithm metrics logged by a given Hybrid Job. Args: - job_name (str): The name of the Job. The name must be exact to ensure only the relevant - metrics are retrieved. + job_name (str): The name of the Hybrid Job. The name must be exact to ensure only the + relevant metrics are retrieved. metric_type (MetricType): The type of metrics to get. Default is MetricType.TIMESTAMP. statistic (MetricStatistic): The statistic to determine which metric value to use when there is a conflict. Default is MetricStatistic.MAX. diff --git a/src/braket/jobs/quantum_job.py b/src/braket/jobs/quantum_job.py index f99fe18dc..f022d28a3 100644 --- a/src/braket/jobs/quantum_job.py +++ b/src/braket/jobs/quantum_job.py @@ -23,22 +23,22 @@ class QuantumJob(ABC): @property @abstractmethod def arn(self) -> str: - """The ARN (Amazon Resource Name) of the quantum job. + """The ARN (Amazon Resource Name) of the hybrid job. Returns: - str: The ARN (Amazon Resource Name) of the quantum job. + str: The ARN (Amazon Resource Name) of the hybrid job. """ @property @abstractmethod def name(self) -> str: - """The name of the quantum job. + """The name of the hybrid job. Returns: - str: The name of the quantum job. + str: The name of the hybrid job. """ @abstractmethod def state(self, use_cached_value: bool = False) -> str: - """The state of the quantum job. + """The state of the hybrid job. 
Args: use_cached_value (bool): If `True`, uses the value most recently retrieved @@ -55,28 +55,29 @@ def state(self, use_cached_value: bool = False) -> str: @abstractmethod def logs(self, wait: bool = False, poll_interval_seconds: int = 5) -> None: - """Display logs for a given job, optionally tailing them until job is complete. + """Display logs for a given hybrid job, optionally tailing them until hybrid job is + complete. If the output is a tty or a Jupyter cell, it will be color-coded based on which instance the log entry is from. Args: - wait (bool): `True` to keep looking for new log entries until the job completes; + wait (bool): `True` to keep looking for new log entries until the hybrid job completes; otherwise `False`. Default: `False`. poll_interval_seconds (int): The interval of time, in seconds, between polling for - new log entries and job completion (default: 5). + new log entries and hybrid job completion (default: 5). Raises: - RuntimeError: If waiting and the job fails. + RuntimeError: If waiting and the hybrid job fails. """ - # The loop below implements a state machine that alternates between checking the job status - # and reading whatever is available in the logs at this point. Note, that if we were - # called with wait == False, we never check the job status. + # The loop below implements a state machine that alternates between checking the hybrid job + # status and reading whatever is available in the logs at this point. Note, that if we were + # called with wait == False, we never check the hybrid job status. # - # If wait == TRUE and job is not completed, the initial state is TAILING - # If wait == FALSE, the initial state is COMPLETE (doesn't matter if the job really is - # complete). + # If wait == TRUE and hybrid job is not completed, the initial state is TAILING + # If wait == FALSE, the initial state is COMPLETE (doesn't matter if the hybrid job really + # is complete). 
# # The state table: # @@ -101,7 +102,7 @@ def metadata(self, use_cached_value: bool = False) -> Dict[str, Any]: `GetJob` is called to retrieve the metadata. If `False`, always calls `GetJob`, which also updates the cached value. Default: `False`. Returns: - Dict[str, Any]: Dict that specifies the job metadata defined in Amazon Braket. + Dict[str, Any]: Dict that specifies the hybrid job metadata defined in Amazon Braket. """ @abstractmethod @@ -133,10 +134,10 @@ def metrics( @abstractmethod def cancel(self) -> str: - """Cancels the job. + """Cancels the hybrid job. Returns: - str: Indicates the status of the job. + str: Indicates the status of the hybrid job. Raises: ClientError: If there are errors invoking the CancelJob API. @@ -148,7 +149,7 @@ def result( poll_timeout_seconds: float = DEFAULT_RESULTS_POLL_TIMEOUT, poll_interval_seconds: float = DEFAULT_RESULTS_POLL_INTERVAL, ) -> Dict[str, Any]: - """Retrieves the job result persisted using save_job_result() function. + """Retrieves the hybrid job result persisted using save_job_result() function. Args: poll_timeout_seconds (float): The polling timeout, in seconds, for `result()`. @@ -159,11 +160,11 @@ def result( Returns: - Dict[str, Any]: Dict specifying the job results. + Dict[str, Any]: Dict specifying the hybrid job results. Raises: - RuntimeError: if job is in a FAILED or CANCELLED state. - TimeoutError: if job execution exceeds the polling timeout period. + RuntimeError: if hybrid job is in a FAILED or CANCELLED state. + TimeoutError: if hybrid job execution exceeds the polling timeout period. """ @abstractmethod @@ -173,13 +174,13 @@ def download_result( poll_timeout_seconds: float = DEFAULT_RESULTS_POLL_TIMEOUT, poll_interval_seconds: float = DEFAULT_RESULTS_POLL_INTERVAL, ) -> None: - """Downloads the results from the job output S3 bucket and extracts the tar.gz + """Downloads the results from the hybrid job output S3 bucket and extracts the tar.gz bundle to the location specified by `extract_to`. 
If no location is specified, the results are extracted to the current directory. Args: extract_to (str): The directory to which the results are extracted. The results - are extracted to a folder titled with the job name within this directory. + are extracted to a folder titled with the hybrid job name within this directory. Default= `Current working directory`. poll_timeout_seconds (float): The polling timeout, in seconds, for `download_result()`. @@ -189,6 +190,6 @@ def download_result( `download_result()`.Default: 5 seconds. Raises: - RuntimeError: if job is in a FAILED or CANCELLED state. - TimeoutError: if job execution exceeds the polling timeout period. + RuntimeError: if hybrid job is in a FAILED or CANCELLED state. + TimeoutError: if hybrid job execution exceeds the polling timeout period. """ diff --git a/src/braket/jobs/quantum_job_creation.py b/src/braket/jobs/quantum_job_creation.py index 400bb03b5..1dbc7dc4c 100644 --- a/src/braket/jobs/quantum_job_creation.py +++ b/src/braket/jobs/quantum_job_creation.py @@ -54,28 +54,29 @@ def prepare_quantum_job( aws_session: AwsSession = None, tags: Dict[str, str] = None, ) -> Dict: - """Creates a job by invoking the Braket CreateJob API. + """Creates a hybrid job by invoking the Braket CreateJob API. Args: device (str): ARN for the AWS device which is primarily - accessed for the execution of this job. + accessed for the execution of this hybrid job. source_module (str): Path (absolute, relative or an S3 URI) to a python module to be tarred and uploaded. If `source_module` is an S3 URI, it must point to a tar.gz file. Otherwise, source_module may be a file or directory. - entry_point (str): A str that specifies the entry point of the job, relative to + entry_point (str): A str that specifies the entry point of the hybrid job, relative to the source module. The entry point must be in the format `importable.module` or `importable.module:callable`. 
For example, `source_module.submodule:start_here` indicates the `start_here` function contained in `source_module.submodule`. If source_module is an S3 URI, entry point must be given. Default: source_module's name - image_uri (str): A str that specifies the ECR image to use for executing the job. + image_uri (str): A str that specifies the ECR image to use for executing the hybrid job. `image_uris.retrieve_image()` function may be used for retrieving the ECR image URIs for the containers supported by Braket. Default = ``. - job_name (str): A str that specifies the name with which the job is created. The job + job_name (str): A str that specifies the name with which the hybrid job is created. The + hybrid job name must be between 0 and 50 characters long and cannot contain underscores. Default: f'{image_uri_type}-{timestamp}'. @@ -85,8 +86,8 @@ def prepare_quantum_job( role_arn (str): A str providing the IAM role ARN used to execute the script. Default: IAM role returned by AwsSession's `get_default_jobs_role()`. - hyperparameters (Dict[str, Any]): Hyperparameters accessible to the job. - The hyperparameters are made accessible as a Dict[str, str] to the job. + hyperparameters (Dict[str, Any]): Hyperparameters accessible to the hybrid job. + The hyperparameters are made accessible as a Dict[str, str] to the hybrid job. For convenience, this accepts other types for keys and values, but `str()` is called to convert them before being passed on. Default: None. @@ -99,26 +100,27 @@ def prepare_quantum_job( Default: {}. instance_config (InstanceConfig): Configuration of the instances to be used - to execute the job. Default: InstanceConfig(instanceType='ml.m5.large', + to execute the hybrid job. Default: InstanceConfig(instanceType='ml.m5.large', instanceCount=1, volumeSizeInGB=30, volumeKmsKey=None). - distribution (str): A str that specifies how the job should be distributed. 
If set to - "data_parallel", the hyperparameters for the job will be set to use data parallelism - features for PyTorch or TensorFlow. Default: None. + distribution (str): A str that specifies how the hybrid job should be distributed. If set to + "data_parallel", the hyperparameters for the hybrid job will be set to use data + parallelism features for PyTorch or TensorFlow. Default: None. stopping_condition (StoppingCondition): The maximum length of time, in seconds, - and the maximum number of tasks that a job can run before being forcefully stopped. - Default: StoppingCondition(maxRuntimeInSeconds=5 * 24 * 60 * 60). + and the maximum number of quantum tasks that a hybrid job can run before being + forcefully stopped. Default: StoppingCondition(maxRuntimeInSeconds=5 * 24 * 60 * 60). - output_data_config (OutputDataConfig): Specifies the location for the output of the job. + output_data_config (OutputDataConfig): Specifies the location for the output of the hybrid + job. Default: OutputDataConfig(s3Path=f's3://{default_bucket_name}/jobs/{job_name}/data', kmsKeyId=None). - copy_checkpoints_from_job (str): A str that specifies the job ARN whose checkpoint you - want to use in the current job. Specifying this value will copy over the checkpoint - data from `use_checkpoints_from_job`'s checkpoint_config s3Uri to the current job's - checkpoint_config s3Uri, making it available at checkpoint_config.localPath during - the job execution. Default: None + copy_checkpoints_from_job (str): A str that specifies the hybrid job ARN whose checkpoint + you want to use in the current hybrid job. Specifying this value will copy over the + checkpoint data from `use_checkpoints_from_job`'s checkpoint_config s3Uri to the current + hybrid job's checkpoint_config s3Uri, making it available at checkpoint_config.localPath + during the hybrid job execution. Default: None checkpoint_config (CheckpointConfig): Configuration that specifies the location where checkpoint data is stored. 
@@ -128,11 +130,11 @@ def prepare_quantum_job( aws_session (AwsSession): AwsSession for connecting to AWS Services. Default: AwsSession() - tags (Dict[str, str]): Dict specifying the key-value pairs for tagging this job. + tags (Dict[str, str]): Dict specifying the key-value pairs for tagging this hybrid job. Default: {}. Returns: - Dict: Job tracking the execution on Amazon Braket. + Dict: Hybrid job tracking the execution on Amazon Braket. Raises: ValueError: Raises ValueError if the parameters are not valid. @@ -226,12 +228,12 @@ def prepare_quantum_job( def _generate_default_job_name(image_uri: Optional[str]) -> str: """ - Generate default job name using the image uri and a timestamp + Generate default hybrid job name using the image uri and a timestamp Args: image_uri (Optional[str]): URI for the image container. Returns: - str: Job name. + str: Hybrid job name. """ if not image_uri: job_type = "-default" @@ -253,7 +255,7 @@ def _process_s3_source_module( Args: source_module (str): S3 URI pointing to the tarred source module. - entry_point (str): Entry point for the job. + entry_point (str): Entry point for the hybrid job. aws_session (AwsSession): AwsSession to copy source module to code location. code_location (str): S3 URI pointing to the location where the code will be copied to. @@ -362,7 +364,7 @@ def _process_input_data( input_data (Union[str, Dict, S3DataSourceConfig]): Either a channel definition or a dictionary mapping channel names to channel definitions, where a channel definition can be an S3DataSourceConfig or a str corresponding to a local prefix or S3 prefix. - job_name (str): Job name. + job_name (str): Hybrid job name. aws_session (AwsSession): AwsSession for possibly uploading local data. Returns: @@ -383,7 +385,7 @@ def _process_channel( Convert a location to an S3DataSourceConfig, uploading local data to S3, if necessary. Args: location (str): Local prefix or S3 prefix. - job_name (str): Job name. + job_name (str): Hybrid job name. 
aws_session (AwsSession): AwsSession to be used for uploading local data. channel_name (str): Name of the channel. diff --git a/src/braket/tasks/annealing_quantum_task_result.py b/src/braket/tasks/annealing_quantum_task_result.py index 828289422..1a5fbc2cb 100644 --- a/src/braket/tasks/annealing_quantum_task_result.py +++ b/src/braket/tasks/annealing_quantum_task_result.py @@ -35,8 +35,8 @@ class AnnealingQuantumTaskResult: output or energy of the solutions. variable_count (int): the number of variables problem_type (ProblemType): the type of annealing problem - task_metadata (TaskMetadata): Task metadata. - additional_metadata (AdditionalMetadata): Additional metadata about the task + task_metadata (TaskMetadata): Quantum task metadata. + additional_metadata (AdditionalMetadata): Additional metadata about the quantum task """ record_array: numpy.recarray diff --git a/src/braket/tasks/gate_model_quantum_task_result.py b/src/braket/tasks/gate_model_quantum_task_result.py index 04b201e3e..9b21b0075 100644 --- a/src/braket/tasks/gate_model_quantum_task_result.py +++ b/src/braket/tasks/gate_model_quantum_task_result.py @@ -40,8 +40,8 @@ class GateModelQuantumTaskResult: to be initialized by a QuantumTask class. Args: - task_metadata (TaskMetadata): Task metadata. - additional_metadata (AdditionalMetadata): Additional metadata about the task + task_metadata (TaskMetadata): Quantum task metadata. + additional_metadata (AdditionalMetadata): Additional metadata about the quantum task result_types (List[Dict[str, Any]]): List of dictionaries where each dictionary has two keys: 'Type' (the result type in IR JSON form) and 'Value' (the result value for this result type). @@ -105,7 +105,7 @@ def __post_init__(self): def get_value_by_result_type(self, result_type: ResultType) -> Any: """ Get value by result type. The result type must have already been - requested in the circuit sent to the device for this task result. 
+ requested in the circuit sent to the device for this quantum task result. Args: result_type (ResultType): result type requested diff --git a/src/braket/tasks/local_quantum_task.py b/src/braket/tasks/local_quantum_task.py index 5b079f63c..69a0f1bd6 100644 --- a/src/braket/tasks/local_quantum_task.py +++ b/src/braket/tasks/local_quantum_task.py @@ -23,7 +23,7 @@ class LocalQuantumTask(QuantumTask): - """A task containing the results of a local simulation. + """A quantum task containing the results of a local simulation. Since this class is instantiated with the results, cancel() and run_async() are unsupported. """ diff --git a/src/braket/tasks/quantum_task.py b/src/braket/tasks/quantum_task.py index ccefc7f5a..47ea8fff0 100644 --- a/src/braket/tasks/quantum_task.py +++ b/src/braket/tasks/quantum_task.py @@ -26,9 +26,9 @@ class QuantumTask(ABC): @property @abstractmethod def id(self) -> str: - """Get the task ID. + """Get the quantum task ID. Returns: - str: The task ID. + str: The quantum task ID. """ @abstractmethod @@ -71,6 +71,6 @@ def metadata(self, use_cached_value: bool = False) -> Dict[str, Any]: request. Default is False. Returns: - Dict[str, Any]: The metadata regarding the task. If `use_cached_value` is True, + Dict[str, Any]: The metadata regarding the quantum task. If `use_cached_value` is True, then the value retrieved from the most recent request is used. """ diff --git a/src/braket/tracking/tracker.py b/src/braket/tracking/tracker.py index e902e1701..9558d1d57 100644 --- a/src/braket/tracking/tracker.py +++ b/src/braket/tracking/tracker.py @@ -71,7 +71,7 @@ def tracked_resources(self) -> List[str]: Resources tracked by this tracker. Returns: - List[str]: The list of task ids for tasks tracked by this tracker. + List[str]: The list of quantum task ids for quantum tasks tracked by this tracker. 
""" return list(self._resources.keys()) @@ -98,8 +98,8 @@ def simulator_tasks_cost(self) -> Decimal: """ Estimate cost of all quantum tasks tracked by this tracker using Braket simulator devices. - Note: The cost of a simulator task is not available until after the results for the task - have been fetched. Call `result()` on an `AwsQuantumTask` before estimating its cost + Note: The cost of a simulator quantum task is not available until after the results for the + task have been fetched. Call `result()` on an `AwsQuantumTask` before estimating its cost to ensure that the simulator usage is included in the cost estimate. Note: Charges shown are estimates based on your Amazon Braket simulator and quantum @@ -123,9 +123,9 @@ def quantum_tasks_statistics(self) -> Dict[str, Dict[str, Any]]: Returns: Dict[str,Dict[str,Any]] : A dictionary where each key is a device arn, and maps to - a dictionary sumarizing the tasks run on the device. The summary includes the + a dictionary sumarizing the quantum tasks run on the device. The summary includes the total shots sent to the device and the most recent status of the quantum tasks - created on this device. For finished tasks on simulator devices, the summary + created on this device. For finished quantum tasks on simulator devices, the summary also includes the duration of the simulation. Example: diff --git a/test/integ_tests/test_create_local_quantum_job.py b/test/integ_tests/test_create_local_quantum_job.py index 02ae41da0..ad91d0a03 100644 --- a/test/integ_tests/test_create_local_quantum_job.py +++ b/test/integ_tests/test_create_local_quantum_job.py @@ -23,10 +23,10 @@ def test_completed_local_job(aws_session, capsys): - """Asserts the job is completed with the respective files and folders for logs, + """Asserts the hybrid job is completed with the respective files and folders for logs, results and checkpoints. Validate the results are what we expect. 
Also, assert that logs contains all the necessary steps for setup and running - the job is displayed to the user. + the hybrid job is displayed to the user. """ absolute_source_module = str(Path("test/integ_tests/job_test_script.py").resolve()) current_dir = Path.cwd() @@ -104,7 +104,7 @@ def test_completed_local_job(aws_session, capsys): def test_failed_local_job(aws_session, capsys): - """Asserts the job is failed with the output, checkpoints not created in bucket + """Asserts the hybrid job is failed with the output, checkpoints not created in bucket and only logs are populated. Validate the calling result function raises the ValueError. Also, check if the logs displays the required error message. """ diff --git a/test/integ_tests/test_create_quantum_job.py b/test/integ_tests/test_create_quantum_job.py index 76c49cd48..568a53bc3 100644 --- a/test/integ_tests/test_create_quantum_job.py +++ b/test/integ_tests/test_create_quantum_job.py @@ -21,8 +21,8 @@ def test_failed_quantum_job(aws_session, capsys): - """Asserts the job is failed with the output, checkpoints, - tasks not created in bucket and only input is uploaded to s3. Validate the + """Asserts the hybrid job is failed with the output, checkpoints, + quantum tasks not created in bucket and only input is uploaded to s3. Validate the results/download results have the response raising RuntimeError. Also, check if the logs displays the Assertion Error. """ @@ -76,10 +76,10 @@ def test_failed_quantum_job(aws_session, capsys): def test_completed_quantum_job(aws_session, capsys): - """Asserts the job is completed with the output, checkpoints, tasks and - script folder created in S3 for respective job. Validate the results are + """Asserts the hybrid job is completed with the output, checkpoints, quantum tasks and + script folder created in S3 for respective hybrid job. Validate the results are downloaded and results are what we expect. 
Also, assert that logs contains all the - necessary steps for setup and running the job and is displayed to the user. + necessary steps for setup and running the hybrid job and is displayed to the user. """ job = AwsQuantumJob.create( diff --git a/test/unit_tests/braket/experimental/autoqasm/test_gate_calibrations.py b/test/unit_tests/braket/experimental/autoqasm/test_gate_calibrations.py new file mode 100644 index 000000000..db2e6e5ca --- /dev/null +++ b/test/unit_tests/braket/experimental/autoqasm/test_gate_calibrations.py @@ -0,0 +1,232 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+ +"""Tests for the pulse control module.""" + +import textwrap + +import pytest + +import braket.experimental.autoqasm as aq +from braket.experimental.autoqasm import errors, pulse +from braket.experimental.autoqasm.instructions import h, rx + + +def test_gate_calibrations_fixed_args(): + """test gate calibrations with fixed args""" + + @aq.gate_calibration(implements=h, target="$0") + def cal_1(): + pulse.barrier("$0") + + @aq.gate_calibration(implements=rx, target="$1", angle=1.789) + def cal_2(): + pulse.delay("$1", 0.123) + + @aq.main + def my_program(): + h("$0") + rx("$1", 1.0) + + expected = textwrap.dedent( + """ + OPENQASM 3.0; + defcal h $0 { + barrier $0; + } + defcal rx(1.789) $1 { + delay[123.0ms] $1; + } + h $0; + rx(1.0) $1; + """ + ).strip() + qasm = my_program().with_calibrations([cal_1, cal_2]).to_ir() + assert qasm == expected + + +def test_gate_calibrations_variable_args(): + """test gate calibrations with variable args""" + + @aq.gate_calibration(implements=rx, target="$1") + def cal_1(angle: float): + pulse.delay("$1", angle) + + @aq.main + def my_program(): + rx("$1", 1.0) + + expected = textwrap.dedent( + """ + OPENQASM 3.0; + defcal rx(angle[32] angle) $1 { + delay[angle * 1s] $1; + } + rx(1.0) $1; + """ + ).strip() + qasm = my_program().with_calibrations(cal_1).to_ir() + assert qasm == expected + + +def test_gate_calibrations_invalid_args(): + """test gate calibrations with invalid args name""" + + @aq.gate_calibration(implements=rx, target="$1", foo=0) + def cal_1(angle: float): + pulse.delay("$1", angle) + + @aq.main + def my_program(): + rx("$1", 1.0) + + with pytest.raises(errors.InvalidCalibrationDefinition): + _ = my_program().with_calibrations(cal_1) + + +def test_gate_calibrations_invalid_type(): + """test gate calibrations with invalid args type""" + + @aq.gate_calibration(implements=rx, target=0.123) + def cal_1(angle: float): + pulse.delay("$1", angle) + + @aq.gate_calibration(implements=rx, target={"foo": "bar"}) + def 
cal_2(angle: float): + pulse.delay("$1", angle) + + @aq.gate_calibration(implements=rx, target=0, angle="$0") + def cal_3(): + pulse.delay("$1", 0.123) + + @aq.gate_calibration(implements=rx, target=0) + def cal_4(angle: aq.Qubit): + pulse.delay("$1", angle) + + @aq.gate_calibration(implements=rx) + def cal_5(target: float, angle: aq.Qubit): + pulse.delay("$0", angle) + + @aq.main + def my_program(): + rx("$1", 1.0) + + for cal in [cal_1, cal_2, cal_3, cal_4, cal_5]: + with pytest.raises(errors.ParameterTypeError): + _ = my_program().with_calibrations(cal) + + +def test_gate_calibrations_insufficient_args(): + """test gate calibrations with insufficient args""" + + @aq.gate_calibration(implements=rx, target="$1") + def cal_1(): + pulse.delay("$1", 0.123) + + @aq.gate_calibration(implements=rx) + def cal_2(angle: float): + pulse.delay("$1", angle) + + @aq.main + def my_program(): + rx("$1", 1.0) + + with pytest.raises(errors.InvalidCalibrationDefinition): + _ = my_program().with_calibrations(cal_1) + + with pytest.raises(errors.InvalidCalibrationDefinition): + _ = my_program().with_calibrations(cal_2) + + +def test_gate_calibrations_duplicated_args(): + """test gate calibrations with duplicated args""" + + @aq.gate_calibration(implements=rx, target="$1", angle=0.123) + def cal_1(angle: float): + pulse.delay("$1", angle) + + @aq.main + def my_program(): + rx("$1", 1.0) + + with pytest.raises(errors.InvalidCalibrationDefinition): + _ = my_program().with_calibrations(cal_1) + + +def test_gate_calibrations_invalid_instructions(): + """test gate calibrations with invalid instructions that are not pulse""" + + @aq.gate_calibration(implements=rx, target="$1") + def cal_1(angle: float): + h(0) + pulse.delay("$1", angle) + + @aq.main + def my_program(): + rx("$1", 1.0) + + with pytest.raises(errors.InvalidCalibrationDefinition): + _ = my_program().with_calibrations(cal_1) + + +def test_gate_calibrations_bind_calibrations_not_inplace(): + """test that bind_calibrations does 
not modify the original program""" + + @aq.gate_calibration(implements=rx, target="$1") + def cal_1(angle: float): + pulse.delay("$1", angle) + + @aq.main + def my_program(): + rx("$1", 1.0) + + program_1 = my_program() + _ = program_1.with_calibrations(cal_1) + + program_2 = my_program() + + assert program_1.to_ir() == program_2.to_ir() + + +def test_gate_calibrations_with_gate_definition(): + """test gate calibrations on gate defined by aq.gate""" + + @aq.gate + def my_gate(q: aq.Qubit, a: float): + h(q) + + @aq.gate_calibration(implements=my_gate, q="$0") + def cal_1(a: float): + pulse.barrier("$0") + pulse.delay("$0", a) + + @aq.main + def my_program(): + my_gate(2, 0.123) + + expected = textwrap.dedent( + """ + OPENQASM 3.0; + gate my_gate(a) q { + h q; + } + defcal my_gate(angle[32] a) $0 { + barrier $0; + delay[a * 1s] $0; + } + qubit[3] __qubits__; + my_gate(0.123) __qubits__[2]; + """ + ).strip() + qasm = my_program().with_calibrations(cal_1).to_ir() + assert qasm == expected diff --git a/test/unit_tests/braket/experimental/autoqasm/test_pulse.py b/test/unit_tests/braket/experimental/autoqasm/test_pulse.py index 9e527acca..58840ce6e 100644 --- a/test/unit_tests/braket/experimental/autoqasm/test_pulse.py +++ b/test/unit_tests/braket/experimental/autoqasm/test_pulse.py @@ -73,8 +73,8 @@ def test_merge_cal_box() -> None: @aq.main def my_program(): - barrier(0) - delay([3, 4], 0.34) + barrier("$0") + delay(["$3", "$4"], 0.34) expected = textwrap.dedent( """ @@ -123,8 +123,8 @@ def my_program(): [0.12], "\ncal {\n set_scale(predefined_frame_1, 0.12);\n}", ), - (delay, 3, [0.34], "\ncal {\n delay[340.0ms] $3;\n}"), - (delay, [3, 4], [0.34], "\ncal {\n delay[340.0ms] $3, $4;\n}"), + (delay, "$3", [0.34], "\ncal {\n delay[340.0ms] $3;\n}"), + (delay, ["$3", "$4"], [0.34], "\ncal {\n delay[340.0ms] $3, $4;\n}"), ( delay, FRAME1, @@ -137,8 +137,8 @@ def my_program(): [0.34], "\ncal {\n delay[340.0ms] predefined_frame_1, predefined_frame_2;\n}", ), - (barrier, 3, [], 
"\ncal {\n barrier $3;\n}"), - (barrier, [3, 4], [], "\ncal {\n barrier $3, $4;\n}"), + (barrier, "$3", [], "\ncal {\n barrier $3;\n}"), + (barrier, ["$3", "$4"], [], "\ncal {\n barrier $3, $4;\n}"), (barrier, FRAME1, [], "\ncal {\n barrier predefined_frame_1;\n}"), ( barrier, @@ -164,3 +164,19 @@ def test_pulse_control(instruction, qubits_or_frames, params, expected_qasm) -> instruction(qubits_or_frames, *params) assert expected_qasm in program_conversion_context.make_program().to_ir() + + +@pytest.mark.parametrize( + "instruction,qubits_or_frames,params", + [ + (barrier, "1", []), + (barrier, 1, []), + (barrier, ["1", "2"], []), + (barrier, [1, 2], []), + ], +) +def test_pulse_control_invalid_physical_qubit(instruction, qubits_or_frames, params) -> None: + """Test pulse control operations with invalid lables for physical qubits.""" + with pytest.raises(ValueError): + with aq.build_program(): + instruction(qubits_or_frames, *params)