From 0afb82b68988ff5854207c2ac9efd86e2ac85cf8 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Thu, 30 Jun 2022 16:32:29 -0700
Subject: [PATCH 01/19] 2.6.0 release for azure-ai-ml

---
 .../azure-ai-ml/azure/ai/ml/.bumpversion.cfg | 3 +
 sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore | 1 +
 .../ai/ml/_artifacts/_blob_storage_helper.py | 118 +-----
 .../azure/ai/ml/_artifacts/_constants.py | 2 +
 .../ml/_artifacts/_default_storage_helper.py | 343 ------------------
 .../ai/ml/_artifacts/_gen2_storage_helper.py | 114 +-----
 .../ai/ml/_artifacts/storage_overview.md | 33 ++
 .../_artifacts/upload_process_flowchart.png | Bin 0 -> 98532 bytes
 .../azure/ai/ml/_file_utils/data_binding.py | 66 ----
 sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py | 5 +-
 .../_deployment/batch/batch_deployment.py | 3 +
 .../ai/ml/_schema/_deployment/deployment.py | 4 +-
 .../_deployment/online/online_deployment.py | 2 +
 .../ai/ml/_schema/_sweep/sweep_termination.py | 3 +
 .../ai/ml/_schema/_utils/data_binding.py | 66 ----
 .../_schema/_utils/data_binding_expression.py | 12 +-
 .../azure/ai/ml/_schema/_utils/utils.py | 23 +-
 .../ai/ml/_schema/automl/automl_vertical.py | 6 +-
 .../image_vertical/image_classification.py | 4 +-
 .../image_vertical/image_object_detection.py | 4 +-
 .../automl/image_vertical/image_vertical.py | 8 +-
 .../automl/nlp_vertical/nlp_vertical.py | 7 +-
 .../automl/table_vertical/table_vertical.py | 7 +-
 .../ml/_schema/component/command_component.py | 5 +-
 .../azure/ai/ml/_schema/core/fields.py | 205 ++++++++++-
 .../job/input_output_fields_provider.py | 39 +-
 .../ml/_schema/job/parameterized_command.py | 4 +-
 .../ai/ml/_schema/pipeline/automl_node.py | 53 ++-
 .../ai/ml/_schema/pipeline/component_job.py | 51 ++-
 .../_schema/pipeline/pipeline_command_job.py | 3 +-
 .../ai/ml/_schema/pipeline/pipeline_job.py | 25 +-
 .../_schema/pipeline/pipeline_parallel_job.py | 6 +-
 .../azure/ai/ml/_schema/schedule/schedule.py | 11 +-
 .../azure/ai/ml/_utils/_arm_id_utils.py | 4 -
 .../azure/ai/ml/_utils/_asset_utils.py | 295 ++++++++++++++-
 .../azure/ai/ml/_utils/_azureml_polling.py | 1 -
 .../azure/ai/ml/_utils/_data_utils.py | 2 +-
 .../azure/ai/ml/_utils/_endpoint_utils.py | 40 +-
 .../azure-ai-ml/azure/ai/ml/_utils/utils.py | 5 +
 sdk/ml/azure-ai-ml/azure/ai/ml/_version.py | 2 +-
 sdk/ml/azure-ai-ml/azure/ai/ml/constants.py | 23 +-
 .../ml/entities/_assets/_artifacts/model.py | 2 +-
 .../ai/ml/entities/_builders/base_node.py | 153 ++++++--
 .../azure/ai/ml/entities/_builders/command.py | 122 ++-----
 .../ai/ml/entities/_builders/parallel.py | 157 ++------
 .../azure/ai/ml/entities/_builders/sweep.py | 128 ++-----
 .../entities/_component/command_component.py | 67 +---
 .../ai/ml/entities/_component/component.py | 129 ++++---
 .../entities/_component/component_factory.py | 147 ++++++++
 .../ai/ml/entities/_component/input_output.py | 2 +-
 .../entities/_component/parallel_component.py | 27 +-
 .../azure/ai/ml/entities/_component/utils.py | 27 --
 .../azure/ai/ml/entities/_inputs_outputs.py | 54 +--
 .../ml/entities/_job/pipeline/_attr_dict.py | 2 +
 .../_job/pipeline/_component_translatable.py | 6 +-
 .../entities/_job/pipeline/_load_component.py | 156 +++++++-
 .../ml/entities/_job/pipeline/pipeline_job.py | 33 +-
 .../ai/ml/entities/_schedule/schedule.py | 26 +-
 .../azure/ai/ml/entities/_validation.py | 4 +-
 .../ai/ml/operations/_component_operations.py | 2 +-
 .../ai/ml/operations/_data_operations.py | 12 +-
 .../ml/operations/_environment_operations.py | 3 +
 .../azure/ai/ml/operations/_job_operations.py | 20 +-
 .../ai/ml/operations/_model_operations.py | 2 +
 .../ai/ml/operations/_workspace_operations.py | 1 -
 sdk/ml/azure-ai-ml/azure/ai/ml/py.typed | 1 -
 .../azure-ai-ml/azure/ai/ml/requirements.txt | 24 ++
 .../azure-ai-ml/azure/ai/ml/template_code.py | 9 -
 68 files changed, 1455 insertions(+), 1469 deletions(-)
 create mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg
 create mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_default_storage_helper.py
 create mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md
 create mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/upload_process_flowchart.png
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/_file_utils/data_binding.py
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/data_binding.py
 create mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/py.typed
 create mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/template_code.py

diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg b/sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg
new file mode 100644
index 000000000000..a7f5a7e6285b
--- /dev/null
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg
@@ -0,0 +1,3 @@
+[bumpversion]
+current_version = 0.0.139
+commit = True
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore b/sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore
new file mode 100644
index 000000000000..bee8a64b79a9
--- /dev/null
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_blob_storage_helper.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_blob_storage_helper.py
index ee60f55b8371..35488fe6cef2 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_blob_storage_helper.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_blob_storage_helper.py
@@ -6,35 +6,26 @@
 import logging
 import time
 import os
-import warnings
-from contextlib import suppress
-from typing import Optional, Dict, Any, List, TYPE_CHECKING
+from typing import Dict, List, TYPE_CHECKING
 from pathlib import PurePosixPath, Path
-from multiprocessing import cpu_count
 from colorama import Fore
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from tqdm import tqdm, TqdmWarning
-from platform import system
 import sys
 
-from azure.ai.ml._utils._exception_utils import EmptyDirectoryError
 from azure.storage.blob import BlobServiceClient, ContainerClient
 from azure.core.exceptions import ResourceNotFoundError
 from azure.ai.ml._utils._asset_utils import (
     generate_asset_id,
-    traverse_directory,
+    upload_directory,
+    upload_file,
     AssetNotChangedError,
     _build_metadata_dict,
     IgnoreFile,
-    FileUploadProgressBar,
     get_directory_size,
 )
 from azure.ai.ml._artifacts._constants import (
     UPLOAD_CONFIRMATION,
     ARTIFACT_ORIGIN,
     LEGACY_ARTIFACT_DIRECTORY,
-    EMPTY_DIRECTORY_ERROR,
-    PROCESSES_PER_CORE,
     MAX_CONCURRENCY,
     FILE_SIZE_WARNING,
     BLOB_DATASTORE_IS_HDI_FOLDER_KEY,
@@ -103,11 +94,18 @@ def upload(
         # start upload
         if os.path.isdir(source):
-            self.upload_dir(source, asset_id, msg, show_progress, ignore_file=ignore_file)
+            upload_directory(
+                storage_client=self,
+                source=source,
+                dest=asset_id,
+                msg=msg,
+                show_progress=show_progress,
+                ignore_file=ignore_file,
+            )
         else:
             self.indicator_file = dest
             self.check_blob_exists()
-            self.upload_file(source, dest, msg, show_progress)
+            upload_file(storage_client=self, source=source, dest=dest, msg=msg, show_progress=show_progress)
         print(Fore.RESET + "\n", file=sys.stderr)
 
         # upload must be completed before we try to generate confirmation file
@@ -124,98 +122,6 @@ def upload(
 
         return artifact_info
 
-    def upload_file(
-        self,
-        source: str,
-        dest: str,
-        msg: Optional[str] = None,
-        show_progress: Optional[bool] = None,
-        in_directory: bool = False,
-        callback: Any = None,
-    ) -> None:
-        """
-        Upload a single file to a path inside the container
-        """
-        validate_content = os.stat(source).st_size > 0  # don't do checksum for empty files
-
-        with open(source, "rb") as data:
-            if show_progress and not in_directory:
-                file_size, _ = get_directory_size(source)
-                file_size_in_mb = file_size / 10**6
-                if file_size_in_mb < 1:
-                    msg += Fore.GREEN + " (< 1 MB)"
-                else:
-                    msg += Fore.GREEN + f" ({round(file_size_in_mb, 2)} MBs)"
-                cntx_manager = FileUploadProgressBar(msg=msg)
-            else:
-                cntx_manager = suppress()
-
-            with cntx_manager as c:
-                callback = c.update_to if (show_progress and not in_directory) else None
-                self.container_client.upload_blob(
-                    name=dest,
-                    data=data,
-                    validate_content=validate_content,
-                    overwrite=self.overwrite,
-                    raw_response_hook=callback,
-                    max_concurrency=MAX_CONCURRENCY,
-                )
-
-        self.uploaded_file_count += 1
-
-    def upload_dir(self, source: str, dest: str, msg: str, show_progress: bool, ignore_file: IgnoreFile) -> None:
-        """
-        Upload a directory to a path inside the container
-
-        Azure Blob doesn't allow metadata setting at the directory level, so the first
-        file in the directory is designated as the file where the confirmation metadata
-        will be added at the end of the upload.
-        """
-        source_path = Path(source).resolve()
-        prefix = "" if dest == "" else dest + "/"
-        prefix += os.path.basename(source_path) + "/"
-
-        # get all paths in directory and each file's size
-        upload_paths = []
-        size_dict = {}
-        total_size = 0
-        for root, _, files in os.walk(source_path):
-            upload_paths += list(traverse_directory(root, files, source_path, prefix, ignore_file=ignore_file))
-
-        for path, _ in upload_paths:
-            path_size = os.path.getsize(path)
-            size_dict[path] = path_size
-            total_size += path_size
-
-        upload_paths = sorted(upload_paths)
-        if len(upload_paths) == 0:
-            raise EmptyDirectoryError(
-                message=EMPTY_DIRECTORY_ERROR.format(source),
-                no_personal_data_message=msg.format("[source]"),
-                target=ErrorTarget.ARTIFACT,
-                error_category=ErrorCategory.USER_ERROR,
-            )
-
-        self.indicator_file = upload_paths[0][1]
-        self.check_blob_exists()
-        self.total_file_count = len(upload_paths)
-
-        # submit paths to workers for upload
-        num_cores = int(cpu_count()) * PROCESSES_PER_CORE
-        with ThreadPoolExecutor(max_workers=num_cores) as ex:
-            futures_dict = {
-                ex.submit(self.upload_file, src, dest, in_directory=True, show_progress=show_progress): (src, dest)
-                for (src, dest) in upload_paths
-            }
-            if show_progress:
-                warnings.simplefilter("ignore", category=TqdmWarning)
-                msg += f" ({round(total_size/10**6, 2)} MBs)"
-                ascii = system() == "Windows"  # Default unicode progress bar doesn't display well on Windows
-                with tqdm(total=total_size, desc=msg, ascii=ascii) as pbar:
-                    for future in as_completed(futures_dict):
-                        file_path_name = futures_dict[future][0]
-                        pbar.update(size_dict.get(file_path_name) or 0)
-
     def check_blob_exists(self) -> None:
         """
         Throw error if blob already exists.
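Both storage helpers now delegate to shared `upload_directory` and `upload_file` helpers in `_asset_utils` instead of carrying near-identical private copies. The `_asset_utils.py` hunk itself is not part of this excerpt, so the following is only a minimal sketch of the dispatch pattern suggested by the new call sites and by the `BLOB_STORAGE_CLIENT_NAME`/`GEN2_STORAGE_CLIENT_NAME` constants added in `_constants.py` below; the body is condensed from the per-client methods this patch deletes, and anything beyond those visible calls is an assumption.

```python
# Sketch only: a shared helper that branches on the storage client's type,
# reconstructed from the deleted per-client upload_file methods. Not the
# actual _asset_utils implementation.
from typing import Any, Optional

BLOB_STORAGE_CLIENT_NAME = "BlobStorageClient"  # mirrors _constants.py
GEN2_STORAGE_CLIENT_NAME = "Gen2StorageClient"  # mirrors _constants.py


def upload_file(
    storage_client: Any,
    source: str,
    dest: Optional[str] = None,
    msg: Optional[str] = None,
    show_progress: bool = True,
    in_directory: bool = False,
) -> None:
    """Upload a single file through whichever client was passed in."""
    with open(source, "rb") as data:
        if type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME:
            # Gen2 path: create a file in the (sub)directory, then push bytes
            parent = (
                storage_client.sub_directory_client
                if in_directory
                else storage_client.directory_client
            )
            file_client = parent.create_file(source.split("/")[-1])
            file_client.upload_data(data=data.read(), overwrite=True)
        else:
            # Blob path: upload straight to the destination blob name
            storage_client.container_client.upload_blob(
                name=dest, data=data, overwrite=storage_client.overwrite
            )
    storage_client.uploaded_file_count += 1
```

Keeping one helper per operation means the progress-bar and thread-pool machinery lives in a single place instead of being duplicated across the Blob and Gen2 classes.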
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_constants.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_constants.py
index 067bae71e08b..7214dd0b855f 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_constants.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_constants.py
@@ -30,3 +30,5 @@
     "{jsonSchemaErrorPath}{jsonSchemaMessage}\n{invalidMLTableMsg}:\n{invalidSchemaSnippet}"
 )
 BLOB_DATASTORE_IS_HDI_FOLDER_KEY = "hdi_isfolder"
+BLOB_STORAGE_CLIENT_NAME = "BlobStorageClient"
+GEN2_STORAGE_CLIENT_NAME = "Gen2StorageClient"
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_default_storage_helper.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_default_storage_helper.py
deleted file mode 100644
index 79b5ec202366..000000000000
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_default_storage_helper.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# ---------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# ---------------------------------------------------------
-
-import uuid
-import logging
-import time
-import os
-import warnings
-from contextlib import suppress
-from typing import Optional, Dict, Any, List
-from pathlib import PurePosixPath, Path
-from multiprocessing import cpu_count
-from colorama import Fore
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from tqdm import tqdm, TqdmWarning
-from platform import system
-import sys
-
-from azure.ai.ml._utils._exception_utils import EmptyDirectoryError
-from azure.storage.blob import BlobServiceClient, ContainerClient
-from azure.core.exceptions import ResourceNotFoundError
-from azure.ai.ml._utils._asset_utils import (
-    generate_asset_id,
-    traverse_directory,
-    AssetNotChangedError,
-    _build_metadata_dict,
-    IgnoreFile,
-    FileUploadProgressBar,
-    get_directory_size,
-)
-from azure.ai.ml._artifacts._constants import (
-    UPLOAD_CONFIRMATION,
-    ARTIFACT_ORIGIN,
-    LEGACY_ARTIFACT_DIRECTORY,
-    EMPTY_DIRECTORY_ERROR,
-    PROCESSES_PER_CORE,
-    MAX_CONCURRENCY,
-    FILE_SIZE_WARNING,
-)
-from azure.ai.ml.constants import STORAGE_AUTH_MISMATCH_ERROR
-from azure.ai.ml._ml_exceptions import ErrorTarget, ErrorCategory, ValidationException, MlException
-
-module_logger = logging.getLogger(__name__)
-
-
-class DefaultStorageClient:
-    def __init__(self, credential: str, account_url: str, container_name: str = None):
-        self.service_client = BlobServiceClient(account_url=account_url, credential=credential)
-        self.upload_to_root_container = None
-        if container_name:
-            self.container_client = self.service_client.get_container_client(container=container_name)
-        else:
-            self.container_client = ContainerClient.from_container_url(account_url)
-            self.upload_to_root_container = True
-        self.container = container_name if container_name else self.container_client.container_name
-        self.total_file_count = 1
-        self.uploaded_file_count = 0
-        self.overwrite = False
-        self.indicator_file = None
-        self.legacy = False
-        self.name = None
-        self.version = None
-
-    def upload(
-        self,
-        source: str,
-        name: str,
-        version: str,
-        ignore_file: IgnoreFile = IgnoreFile(None),
-        asset_hash: str = None,
-        show_progress: bool = True,
-    ) -> Dict[str, str]:
-        """
-        Upload a file or directory to a path inside the container
-        """
-        if name and version is None:
-            version = str(uuid.uuid4())  # placeholder for auto-increment artifacts
-
-        asset_id = generate_asset_id(asset_hash, include_directory=True) if not self.upload_to_root_container else ""
-        source_name = Path(source).name
-        dest = str(PurePosixPath(asset_id, source_name))
-
-        try:
-            # truncate path longer than 50 chars for terminal display
-            if show_progress and len(source_name) >= 50:
-                formatted_path = "{:.47}".format(source_name) + "..."
-            else:
-                formatted_path = source_name
-
-            # configure progress bar description
-            msg = Fore.GREEN + f"Uploading {formatted_path}"
-
-            # warn if large file (> 100 MB)
-            file_size, _ = get_directory_size(source)
-            file_size_in_mb = file_size / 10**6
-            if file_size_in_mb > 100:
-                module_logger.warning(FILE_SIZE_WARNING)
-
-            # start upload
-            if os.path.isdir(source):
-                self.upload_dir(source, asset_id, msg, show_progress, ignore_file=ignore_file)
-            else:
-                self.indicator_file = dest
-                self.check_blob_exists()
-                self.upload_file(source, dest, msg, show_progress)
-            print(Fore.RESET + "\n", file=sys.stderr)
-
-            # upload must be completed before we try to generate confirmation file
-            while self.uploaded_file_count < self.total_file_count:
-                time.sleep(0.5)
-            self._set_confirmation_metadata(name, version)
-        except AssetNotChangedError:
-            name = self.name
-            version = self.version
-            if self.legacy:
-                dest = dest.replace(ARTIFACT_ORIGIN, LEGACY_ARTIFACT_DIRECTORY)
-
-        artifact_info = {"remote path": dest, "name": name, "version": version, "indicator file": self.indicator_file}
-
-        return artifact_info
-
-    def upload_file(
-        self,
-        source: str,
-        dest: str,
-        msg: Optional[str] = None,
-        show_progress: Optional[bool] = None,
-        in_directory: bool = False,
-        callback: Any = None,
-    ) -> None:
-        """
-        Upload a single file to a path inside the container
-        """
-        validate_content = os.stat(source).st_size > 0  # don't do checksum for empty files
-
-        with open(source, "rb") as data:
-            if show_progress and not in_directory:
-                file_size, _ = get_directory_size(source)
-                file_size_in_mb = file_size / 10**6
-                if file_size_in_mb < 1:
-                    msg += Fore.GREEN + " (< 1 MB)"
-                else:
-                    msg += Fore.GREEN + f" ({round(file_size_in_mb, 2)} MBs)"
-                cntx_manager = FileUploadProgressBar(msg=msg)
-            else:
-                cntx_manager = suppress()
-
-            with cntx_manager as c:
-                callback = c.update_to if (show_progress and not in_directory) else None
-                self.container_client.upload_blob(
-                    name=dest,
-                    data=data,
-                    validate_content=validate_content,
-                    overwrite=self.overwrite,
-                    raw_response_hook=callback,
-                    max_concurrency=MAX_CONCURRENCY,
-                )
-
-        self.uploaded_file_count += 1
-
-    def upload_dir(self, source: str, dest: str, msg: str, show_progress: bool, ignore_file: IgnoreFile) -> None:
-        """
-        Upload a directory to a path inside the container
-
-        Azure Blob doesn't allow metadata setting at the directory level, so the first
-        file in the directory is designated as the file where the confirmation metadata
-        will be added at the end of the upload.
- """ - source_path = Path(source).resolve() - prefix = "" if dest == "" else dest + "/" - prefix += os.path.basename(source_path) + "/" - - # get all paths in directory and each file's size - upload_paths = [] - size_dict = {} - total_size = 0 - for root, _, files in os.walk(source_path): - upload_paths += list(traverse_directory(root, files, source_path, prefix, ignore_file=ignore_file)) - - for path, _ in upload_paths: - path_size = os.path.getsize(path) - size_dict[path] = path_size - total_size += path_size - - upload_paths = sorted(upload_paths) - if len(upload_paths) == 0: - raise EmptyDirectoryError( - message=EMPTY_DIRECTORY_ERROR.format(source), - no_personal_data_message=msg.format("[source]"), - target=ErrorTarget.ARTIFACT, - error_category=ErrorCategory.USER_ERROR, - ) - - self.indicator_file = upload_paths[0][1] - self.check_blob_exists() - self.total_file_count = len(upload_paths) - - # submit paths to workers for upload - num_cores = int(cpu_count()) * PROCESSES_PER_CORE - with ThreadPoolExecutor(max_workers=num_cores) as ex: - futures_dict = { - ex.submit(self.upload_file, src, dest, in_directory=True, show_progress=show_progress): (src, dest) - for (src, dest) in upload_paths - } - if show_progress: - warnings.simplefilter("ignore", category=TqdmWarning) - msg += f" ({round(total_size/10**6, 2)} MBs)" - ascii = system() == "Windows" # Default unicode progress bar doesn't display well on Windows - with tqdm(total=total_size, desc=msg, ascii=ascii) as pbar: - for future in as_completed(futures_dict): - file_path_name = futures_dict[future][0] - pbar.update(size_dict.get(file_path_name) or 0) - - def check_blob_exists(self) -> None: - """ - Throw error if blob already exists. - - Check if blob already exists in container by checking the metadata for - existence and confirmation data. If confirmation data is missing, blob does not exist - or was only partially uploaded and the partial upload will be overwritten with a complete - upload. - """ - - try: - legacy_indicator_file = self.indicator_file.replace(ARTIFACT_ORIGIN, LEGACY_ARTIFACT_DIRECTORY) - blob_client = self.container_client.get_blob_client(blob=self.indicator_file) - legacy_blob_client = self.container_client.get_blob_client(blob=legacy_indicator_file) - - properties = blob_client.get_blob_properties() - metadata = properties.get("metadata") - - # first check legacy folder's metadata to see if artifact is stored there - try: - legacy_properties = legacy_blob_client.get_blob_properties() - legacy_metadata = legacy_properties.get("metadata") - - if ( - legacy_metadata and UPLOAD_CONFIRMATION.items() <= legacy_metadata.items() - ): # checks if metadata dictionary includes confirmation key and value - self.name = legacy_metadata.get("name") - self.version = legacy_metadata.get("version") - self.legacy = True - - raise AssetNotChangedError - except ResourceNotFoundError: - pass - - # check LocalUpload folder's metadata if not found in legacy metadata - if metadata and UPLOAD_CONFIRMATION.items() <= metadata.items(): - self.name = metadata.get("name") - self.version = metadata.get("version") - raise AssetNotChangedError - else: - self.overwrite = True # if upload never confirmed, approve overriding the partial upload - except ResourceNotFoundError: - pass - except Exception as e: - if hasattr(e, "error_code") and e.error_code == STORAGE_AUTH_MISMATCH_ERROR: - msg = "You don't have permission to alter this storage account. 
-                raise ValidationException(
-                    message=msg,
-                    no_personal_data_message=msg,
-                    target=ErrorTarget.ARTIFACT,
-                    error_category=ErrorCategory.USER_ERROR,
-                )
-            else:
-                raise e
-
-    def _set_confirmation_metadata(self, name: str, version: str) -> None:
-        blob_client = self.container_client.get_blob_client(blob=self.indicator_file)
-        metadata_dict = _build_metadata_dict(name, version)
-        blob_client.set_blob_metadata(metadata_dict)
-
-    def download(
-        self, starts_with: str, destination: str = Path.home(), max_concurrency: int = MAX_CONCURRENCY
-    ) -> None:
-        """
-        Downloads all blobs inside a specified container to the destination folder
-        :param starts_with: Indicates the blob name starts with to search.
-        :param destination: Indicates path to download in local
-        :param max_concurrency: Indicates concurrent connections to download a blob.
-        """
-        try:
-            mylist = list(self.container_client.list_blobs(name_starts_with=starts_with))
-            for item in mylist:
-                blob_name = item.name[len(starts_with) :].lstrip("/") or Path(starts_with).name
-                blob_content = self.container_client.download_blob(item)
-                blob_content = blob_content.content_as_bytes(max_concurrency)
-                target_path = Path(destination, blob_name).resolve()
-                os.makedirs(str(target_path.parent), exist_ok=True)
-                with target_path.open("wb") as file:
-                    file.write(blob_content)
-        except OSError as ex:
-            raise ex
-        except Exception as e:
-            msg = "Saving blob with prefix {} was unsuccessful. exception={}"
-            raise MlException(
-                message=msg.format(starts_with, e),
-                no_personal_data_message=msg.format("[starts_with]", "[exception]"),
-                target=ErrorTarget.ARTIFACT,
-                error_category=ErrorCategory.USER_ERROR,
-                error=e,
-            )
-
-    def list(self, starts_with: str) -> List[str]:
-        """
-        Lists all blob names in the specified container
-        :param starts_with: Indicates the blob name starts with to search.
-        :return: the list of blob paths in container
-        """
-        blobs = self.container_client.list_blobs(name_starts_with=starts_with)
-        return [blob.name for blob in blobs]
-
-    def exists(self, blobpath: str, delimeter: str = "/") -> bool:
-        """Returns whether there exists a blob named `blobpath`, or if there
-        exists a virtual directory given path delimeter `delimeter`
-
-        e.g:
-            Given blob store with blobs
-                foobar/baz.txt
-                foobar/baz.txt
-
-            self.exists("foobar")          -> True
-            self.exists("foobar/baz.txt")  -> True
-            self.exists("foobar/quux.txt") -> False
-            self.exists("foo")             -> False
-
-
-        :param str blobpath: prefix matched against blob names
-        :param str delimiter: The path delimeter (defaults to /)
-        :return bool: True if file or virtual directory exists, False otherwise
-        """
-        if self.container_client.get_blob_client(blobpath).exists():
-            return True
-
-        ensure_delimeter = delimeter if not blobpath.endswith(delimeter) else ""
-
-        # Virtual directory only exists if there is atleast one blob with it
-        result = next(
-            self.container_client.walk_blobs(name_starts_with=blobpath + ensure_delimeter, delimiter=delimeter), None
-        )
-        return result is not None
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_gen2_storage_helper.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_gen2_storage_helper.py
index 0dfa60f6e676..b8a32b302513 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_gen2_storage_helper.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_gen2_storage_helper.py
@@ -6,35 +6,23 @@
 import logging
 import time
 import os
-import warnings
-from contextlib import suppress
-from typing import Optional, Dict, Any, List
+from typing import Dict, List
 from pathlib import PurePosixPath, Path
-from multiprocessing import cpu_count
-from attr import validate
 from colorama import Fore
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from tqdm import tqdm, TqdmWarning
-from platform import system
 import sys
 
-from azure.ai.ml._utils._exception_utils import EmptyDirectoryError
 from azure.storage.filedatalake import DataLakeServiceClient
 from azure.core.exceptions import ResourceExistsError
 from azure.ai.ml._utils._asset_utils import (
     generate_asset_id,
-    traverse_directory,
+    upload_directory,
+    upload_file,
     AssetNotChangedError,
     _build_metadata_dict,
     IgnoreFile,
-    FileUploadProgressBar,
-    get_directory_size,
 )
 from azure.ai.ml._artifacts._constants import (
     UPLOAD_CONFIRMATION,
-    EMPTY_DIRECTORY_ERROR,
-    PROCESSES_PER_CORE,
-    MAX_CONCURRENCY,
 )
 from azure.ai.ml.constants import STORAGE_AUTH_MISMATCH_ERROR
 from azure.ai.ml._ml_exceptions import ErrorTarget, ErrorCategory, ValidationException, MlException
@@ -94,9 +82,16 @@ def upload(
         self.check_blob_exists()
 
         if os.path.isdir(source):
-            self.upload_dir(source, asset_id, msg, show_progress, ignore_file=ignore_file)
+            upload_directory(
+                storage_client=self,
+                source=source,
+                dest=asset_id,
+                msg=msg,
+                show_progress=show_progress,
+                ignore_file=ignore_file,
+            )
         else:
-            self.upload_file(source, msg, show_progress)
+            upload_file(storage_client=self, source=source, msg=msg, show_progress=show_progress)
         print(Fore.RESET + "\n", file=sys.stderr)
 
         # upload must be completed before we try to generate confirmation file
@@ -116,91 +111,6 @@ def upload(
 
         return artifact_info
 
-    def upload_file(
-        self,
-        source: str,
-        msg: Optional[str] = None,
-        show_progress: Optional[bool] = None,
-        in_directory: bool = False,
-        callback: Any = None,
-    ) -> None:
-        """
-        Upload a single file to a path inside the filesystem.
- """ - validate_content = os.stat(source).st_size > 0 # don't do checksum for empty files - - if in_directory: - self.file_client = self.sub_directory_client.create_file(source.split("/")[-1]) - else: - self.file_client = self.directory_client.create_file(source.split("/")[-1]) - - with open(source, "rb") as data: - if show_progress and not in_directory: - file_size, _ = get_directory_size(source) - file_size_in_mb = file_size / 10**6 - if file_size_in_mb < 1: - msg += Fore.GREEN + " (< 1 MB)" - else: - msg += Fore.GREEN + f" ({round(file_size_in_mb, 2)} MBs)" - cntx_manager = FileUploadProgressBar(msg=msg) - else: - cntx_manager = suppress() - - with cntx_manager as c: - callback = c.update_to if (show_progress and not in_directory) else None - self.file_client.upload_data( - data=data.read(), - overwrite=True, - validate_content=validate_content, - raw_response_hook=callback, - max_concurrency=MAX_CONCURRENCY, - ) - - self.uploaded_file_count += 1 - - def upload_dir(self, source: str, dest: str, msg: str, show_progress: bool, ignore_file: IgnoreFile) -> None: - """ - Upload a directory to a path inside the filesystem. - """ - source_path = Path(source).resolve() - prefix = "" if dest == "" else dest + "/" - prefix += os.path.basename(source_path) + "/" - self.sub_directory_client = self.directory_client.create_sub_directory(prefix.strip("/").split("/")[-1]) - - # get all paths in directory and each file's size - upload_paths = [] - size_dict = {} - total_size = 0 - for root, _, files in os.walk(source_path): - upload_paths += list(traverse_directory(root, files, source_path, prefix, ignore_file=ignore_file)) - - for path, _ in upload_paths: - path_size = os.path.getsize(path) - size_dict[path] = path_size - total_size += path_size - - upload_paths = sorted(upload_paths) - if len(upload_paths) == 0: - raise EmptyDirectoryError(EMPTY_DIRECTORY_ERROR.format(source)) - - self.total_file_count = len(upload_paths) - - # submit paths to workers for upload - num_cores = int(cpu_count()) * PROCESSES_PER_CORE - with ThreadPoolExecutor(max_workers=num_cores) as ex: - futures_dict = { - ex.submit(self.upload_file, src, dest, in_directory=True, show_progress=show_progress): (src, dest) - for (src, dest) in upload_paths - } - if show_progress: - warnings.simplefilter("ignore", category=TqdmWarning) - msg += f" ({round(total_size/10**6, 2)} MBs)" - ascii = system() == "Windows" # Default unicode progress bar doesn't display well on Windows - with tqdm(total=total_size, desc=msg, ascii=ascii) as pbar: - for future in as_completed(futures_dict): - file_path_name = futures_dict[future][0] - pbar.update(size_dict.get(file_path_name) or 0) - def check_blob_exists(self) -> None: """ Throw error if file or directory already exists. diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md new file mode 100644 index 000000000000..afb94db74f7b --- /dev/null +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md @@ -0,0 +1,33 @@ +# Artifact Storage + +## Overview + +The AzureML v2 artifacts module facilitates interaction with Azure datastores for artifact creation and retrieval. 
+
+#### Supported Storage Account Types
+
+Azure Storage offers four different account types (Blob, Gen1, Gen2, and File), and AzureML v2 currently supports Blob and Gen2.** Each has its own design and architecture, which adds value for different user groups but also brings its own set of challenges and requirements when building storage infrastructure. During implementation of the classes and functionality, the goal is to share as much as possible across the account types for code clarity and cleanliness while still taking advantage of their differences where possible and efficient.
+
+[_gen2_storage_helper.py](_gen2_storage_helper.py) contains the client object and methods for uploading to and downloading from ADLS Gen2 storage accounts. This implementation relies heavily on the [ADLS Gen2 Storage SDK](https://docs.microsoft.com/python/api/azure-storage-file-datalake/azure.storage.filedatalake?view=azure-python).
+
+[_blob_storage_helper.py](_blob_storage_helper.py) contains the client object and methods for uploading to and downloading from Azure Blob storage accounts. This implementation relies heavily on the [Blob Storage SDK](https://docs.microsoft.com/python/api/azure-storage-blob/azure.storage.blob?view=azure-python).
+
+**This folder includes an implementation of support for Azure File Storage; however, Azure File datastores are not yet supported for AzureML v2 due to Management Front End (MFE) restrictions.
+
+#### What are artifacts?
+
+Artifacts are the datastore representations of the files and folders that Assets are associated with. There can be a many-to-one relationship between assets and artifacts (e.g. asset _experiment-4-dataset:1_ and asset _experiment-1-dataset:1_ can both point to the same file or folder in storage). Artifact uploads are idempotent: once uploaded, an artifact is never overwritten or altered via AzureML.
+
+#### Upload Process
+![](upload_process_flowchart.png)
+
+Datastore upload functionality is triggered by calling an **Asset** or **Job** object's `create_or_update` method, which then calls `_check_and_upload_path` in [_artifact_utilities.py](_artifact_utilities.py) to do basic checks on whether the path the user provided is a local path or a reference to a remote object (e.g. a storage URI).
+
+If the path is determined to be a local path, `_upload_to_datastore` in [_artifact_utilities.py](_artifact_utilities.py) is called, which 1) checks for a .amlignore or .gitignore file at the path and creates a filter for any excluded files, 2) creates a hash for the path and its contents, and 3) determines the datastore name. It then sends everything off to `upload_artifact` in [_artifact_utilities.py](_artifact_utilities.py), which initializes a storage client in [_storage_utils.py](../_utils/_storage_utils.py) corresponding to the datastore type, either **Gen2StorageClient** or **BlobStorageClient**.
+
+The hash created in `_upload_to_datastore` is used as the name of the directory in the v2-specific LocalUpload/ directory inside the datastore where the file(s) will be stored. The storage client checks the hash against all existing directory names to see if the content has already been uploaded. If it has, the client returns the artifact's path in blob storage along with the name and version of the asset it was last registered to, and the `create_or_update` method concludes its client-side work and continues on to contact Management Front End for the service call.
+If the file or folder does not exist, it is uploaded, confirmation metadata is set, and then the asset path is returned to the `create_or_update` method.
+
+
+#### Download Process
+
+Download functionality is currently limited to **Job** and **Model** objects and does not require any of the pre-processing steps that uploading does. It simply takes in the path, finds it in the storage account, and downloads it to the user's local machine.
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/upload_process_flowchart.png b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/upload_process_flowchart.png
new file mode 100644
index 0000000000000000000000000000000000000000..a4d0ea3aa7635c957adc4725c557c6bc89627160
GIT binary patch
literal 98532
[base85-encoded PNG data omitted: the upload process flowchart image referenced above]
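The check-then-upload sequence described in `storage_overview.md` can be made concrete with plain `azure-storage-blob` calls. This is a minimal sketch, assuming a SHA-256 content hash and a simplified confirmation-metadata dict; the SDK's real helpers (`generate_asset_id`, `_build_metadata_dict`, `check_blob_exists`) differ in detail, and `upload_if_new` is an illustrative name.

```python
# Sketch only: idempotent artifact upload against a blob container,
# following the hash -> LocalUpload/<hash> -> confirmation-metadata flow.
import hashlib
from pathlib import Path

from azure.core.exceptions import ResourceNotFoundError
from azure.storage.blob import ContainerClient

UPLOAD_CONFIRMATION = {"upload_status": "completed"}  # stand-in for the SDK constant


def upload_if_new(container: ContainerClient, source: Path) -> str:
    # 1) hash the content so identical uploads map to the same directory
    digest = hashlib.sha256(source.read_bytes()).hexdigest()
    dest = f"LocalUpload/{digest}/{source.name}"

    blob = container.get_blob_client(dest)
    try:
        metadata = blob.get_blob_properties().metadata or {}
        if UPLOAD_CONFIRMATION.items() <= metadata.items():
            return dest  # already uploaded and confirmed: reuse the artifact
    except ResourceNotFoundError:
        pass  # nothing there yet (or a partial upload): proceed

    # 2) upload, then 3) set the confirmation metadata last
    with source.open("rb") as data:
        blob.upload_blob(data, overwrite=True)
    blob.set_blob_metadata(UPLOAD_CONFIRMATION)
    return dest
```

Setting the confirmation metadata only after every byte has landed is what makes a partially uploaded artifact safely overwritable: if the indicator blob lacks the confirmation key, the client treats the artifact as absent.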
zcDOWX^ezJFaZSu&J@Rm>VmPGoi@FRo8dpDBw(NotNmi!6de673md=-=`a0j(n|Aq_ zgBZ1``Vu;Lh|;;ZqL9g-ckNmgM4=6RFf6wqXO=b$XK(ye$nD%yX_Wg@wC$~XpL~6CXaP*p zMNJMH4R-}eERVE&sV_~0_wUaxT59E@vB|Y|yOEKO^_HSh?}b2WOMr4O|Cn=X%(3gw$2({l|q| zn&RW7>`qCov(?kC+*L}h&(12S93ZJGBY}HKFn6KetqeJT3J38$(%({Uol2oIRG$nM zPRfzc?SoWg@(?Vh=3t%g3yXJ(%9(pxUsB#rGJjE$4Yk-BeJ0%BpwG+N%ykhhDx@Ra z#ijINS@6rIw5mMB-?}8v6GLO~uE*?9aJ3M29C%5yd`OT0g(90YBkTHnvz!c@Z58Hv zfP7@lm(=KKVyH`R&I#T3CeBrsSu!`m;%W2NiiHQ(D@L7}C9!uC1Jv*C-I$KD1^BiS zp5q}KV_r(9ruL&jMD5{sYG^8Lyjfi+f-#q`G5Xk`aKDFxi!0WwURZO#oL~mqffLU7 z%0Q`igUoER;ytdmY#5~{#aWc}yape+dt9p?%c)pX8y;MNz~5@{lR%Yy>5|pFjV_r zqC{>6-Jx=Q{T}Rk@QZH0IPL^26R$eu*}t!0)z*Gp%S>oPqBvUJBTFVLk9 zkeQ9_43{`xLSgVy&Q$P*J^uq&H>i*J_G(sb&wIy0P&$neI4@bF_wEJ}hOuNbz zF5Lp9Reoo2tpg2<*x|n34{ulc<%_6aMkhGibho<-DpOr5=ctui@tMq8&B`QT7g3>} z`TBPM`2%%RNj(D6$(lsLS%^L3R}q$TTo=7x3&WncKsctil$|#g4P3+AXkjPU_yKm} zJWA)vyMDDEQu%4_8?IgiZfQrT#ZZsE>ZVTSb#fxb#IdERv^5T=Pu+d+{mKaecH8w1 zt`JcXW^vC1y4;SBxS`5qhMT#@FFEQV3X>!=g*;7OUyp}Pj;<)&x9Pjl;9jJL`(hko zF*NXUTbjODRD@H#4)kNqfr4=^nC^5dA#N&E!W=q=4wb~qa0W1X{J6(L}8&c z@F<=}eMYgoW|EEL@$4x>a@|rGgGrJNXWG#{Fn_Ob&Y5PZbJTWmYD7V5@YxYERUOMl zjD1tT9ifaFH+bSe3i~j|sJcVh_=d23sJ7Ti?;}K^{i>YW1z;s}-}FggYF3$d0i+X!5r>+T(7d6@-PPma&-sr0am9|rEWYC7*F|b|j{mM>2X$1hKuk8`Kr{5Su zjAzs22Da=x7)g5&qQLb> zqh@I}f1#+!bfZb<{XIsF6`5t?naTcv5tRYWYL=)1F68H!J509TP(SF)CjeROOg1RCF!_-cI1tD7j5X3<3#fSYnrt5PCj zDEX%twH*|7o3Izm_e5y+T@X_T4FchFi2A!EYoS7sURzOhJU@Cqy$OY>4jC81F6BPp(%F;NNwwR|PBGMgGr@m!S$jVSRkyHM8{k(n&pQ4oS{5!X|xKhY;srP()}_|#B{a_$&A(h~y z?dM!G(JuWtCt}dpp7W}!1U^VKl0d_Z~Qmu*7>Q5=gE&#&wZh%+5 zOfZuuUrn4S?}d1Aab2(0QkccW-H|zlI9EX_Ocie}J`ONRwiwzr|CHwXnGyOB2kn01 zh8aPqFPG{Mvf#EH`SQKPE*sU)Y&ZNe^V9g_uw6-*GEnJAUx<@{bq%)i1<$&c%(E;jH>4LcN1q|e|0=`Y(-!pyN|a8U zG4GVl(m*zf#+&|&~E79uhq8*4NE0advr083;GKwIhE zK!Z^ARof3s&+4T%v^y3TyK7Nu2ZB51zUvs_*z^%~jjJg^^>F;L^?6Nxe_noMgOu5q z;e}FOjDD*V?5K?4#*%s*H5=0Py|`i=QLyuZLB9Rjp%?49%>|U|p7!fkvC}ANHs`_| z{w0Zm&I8JI7RY8fPma>qejUN%%*__XZSPAXaoc&Jih4JN%_1Jd1q>-D2!V?cD z4jo|eg-_G(Jx*>|eyM)CzeuDUc1=`qa`wT)O8hKBgm$G|J$cP|?&Fa3>S%z$ZPI&W zp`ggo@2OTsRkbeZ&)iCXDXj?MNR6@EsXWgI<$3G0p626rVeiHnC!DCKB|w6=>=Yy0 z>KV$3QW39}2+~Oq;Mw1=PuJfjU|ZgmRxtb_?atmcXSUmg@*G|^rr&bE-(M)JbxF7K zKP|JixIidP2ynqF&S9V_Z=elTC=p{fGS0#>NS=B$42y_mg>ku&D8%p==iMBNKnPC; zend`ozs=t;sR26GIW`Y(c5jex_%Z&xB0R4MhkFIKsN-SIpu?`^tZ;1{wGT z4z4H@L+uQD028#IL?$*A>l|f(U&|brGk|j(s1GydFPOMvB$tCO$g$(ETH|G%m&n;D zYdz{8E5TBbJqx%j6WjxJ(vM&}?x4Qff@E4K#y*+#)(Y4TdRW?crz9Z^)&t&j=A^w% zvxFq}8a#1XFvJJ(7w?OX_Yjp@D$tCkUF1n?b2g`jq*GgQso5 za*}G<*N;Jp)JC56AV{yHMEnSxDl=32VR)|qs+2rW0oXZ@xyJF-J*h;&YVrN%nD>%0Ps7eWbfXMNOa3^5WsK}!A6<~U!##iCIB@uh z{q0?E6ZKF-Owu>^jor6A9HM=ls#R(^C?rHwhKe}@4XXENwx7Z~%POw}|6bJ!%YCZ$ zDbX489da%lcyx1Z@$FG~7t@8VrTpm0@&~mD{DL1~Ff|d8fx@AS=5Hfl6Yb z<42t*ldldw#d?cw2;uR?N|v%Ex($t-#~4Q0RIP}eE_av=HHg7bk7U#3UKVHyP+ zl@Dq;t49Nza&+7Fkx^1Jbtr+rn{e@mC}?~b;oM|#dQq!x!25! 
zU}=r`6((9|mszp>GF?u#9o?*hT#_~`Yj6Ffp>-VI0@}!VvnysXEF=8ST632PHTC@2 zwQuU7R94;#{4uRqA^Ay?ZtMc|)Cvfd{UR3xK)!;Z25Mk-O_8+j#)gLV0eb^Tj|`&Y z9cCU?It@jO4hn36Y--B=R4tjq6|_eX7^r7?4*Wuth(Iuti}q90z&qQr)an4K@(@I) z>*{=asp@17O?SK9RGve!pZG$usLbqscr_h-4TfFdwY#+5DQti~{%&=ZGi3770Mr6s zp>0+Z)8ksENJT^d*npcwJVol3DS6l$BCk;xQcbNWZmju@a7mK2(anCzjWnL)CbJr3 zl3kvaY@*UHk4IVehR^+=ob^9Yr6%IfNp5(4)TjP#d4>Qx9R^icuTX%ND0*T1eB+_J zG*+T2%!?jS&!Xjc$tHozB^K{yhn?f48EvB%+igDsR-S1&<#B#rwfwrwzav|RUGHnz zB&uj?Vjv1ub$e$$53&NXe?+d^hUqXI1E>%KOR|GxVb!O-AHFIU6OjR+Wx^a8UHVOX z$c)Wvfvx@{&T_4h=jPnN#3VmWq=C~}*RkZuvSR4~rexHL@{^B7Z9U8J0v4E&ho9O5 zh5JFpCc^4XD>LZZO+Xk{u0K+<5t7i z!A~v*7!--|Fbg&9VUpH&J1drjufC)qrQlIkbECgH9gRIZ6v8g>vTX)BzHrXMT?&Re zf=N4~T^a#aV>=FV?nFD?A)FNprW4 z$%p$~-KyT_*k*Ne^tOn(>Uvt9_k3^!j%*meM=Q&qcZN&XrP?$kM=xtl6}z>ZE;V>H zQTkpef;krx(C&A#nJb%x4NkHNv zsz;_1{XIWSr2zbW7cRYcxypmLuWX*RfWYo`@Qe6cBc^Q@^P9Q6Du3vQBBIcBRTAZ-_eT8(2#uy`i>KP!OgA2TYVX6txx5q%g8J)M7*6d((xz z14W|F0Z29$8?PTnWvF5aYaV3{oRPJZW*3=Po&B$)!P*(g0vp`@3}J=#9+5wG#qS0} z>L&NiA1&lY%WlnOA0AVd-l-TBkx2~yevh^Y-sJ`>TpnCAsw(g(`-H1{W)y~H43El8 zz8l0B&2X4MmT7QQnT&0|mPnvPz_!LfNU&!k)>6^wK5c@i^pbPOa$Ver#2^-{{I~#( z&Yp*PzT{|gH=KR=e)!e&0M2Du=(hAqyZZBTr>gx`dl+hIRMVy4HP0l4W1O{xy{=Vw z*=#M0hlt*%^1Od6&0@7i*=QhKJVagMxYm_JU^d7wTI^7=q<-gI-~O;%{779q9Ut1= zXejeI2}1Y_SieDh3{<4TfP`Ey&@1X9>ajmPZ!V!TWNosLvhGV(GTF?>+%(!g95=JL zV*(xa9x`+w6sntRva()@bC~#U7E*uQiK*n?QrK`2bNa{WROx{Vvwp_qz=Vf=w*;}p zX_G}U-(6;vMkx%xzc8oSiAv!0xh>WD?ukXpo9P^gLdx7BId9pc#Hla*T_0k}f_)$g zM;U}q`rHLz@PDV&1Gcewm}T(=K;3L2MR!yWm%KJ?ezeocsxyG5G$G|#vnA}bP(qUK zI}UP+(y5VkeZcrGp6eCf2{<8uYr0PwU|aXVD7c&f_OHAl3o#D?_1C$IVT~zpFnZpO7KilP&e`Mv++K z(+nB6Unm0R2vz_^6B5b^=Fj)~GS1T`t zzgGgp=M=D)s~MK$w-1*nc@JOR9MgjcFhaElNteDE>u>J`IJyI5IYdzQrOc*Br@S93 z)Ixtge^C~~5got75kuMM_rWx-O=;hGwg*OUDE@eJHs6nfZBqwcTI^(Het`Tr)$&g0 z6@}qT4Z)uhFaf9YK986Q?Nq2HfN2~mc=G(#FY#3z=wL2R$*S5_&#roQz2`p~=yttl*L!xoXV-gn zp}{V4+eL1q`*wjf`*sbq3k`Om!7en|g$BFO;C})%0IRb?4$v*m&g8(>hrv!Wz1Wef zS|nyAdsHmnNdeBmZBtF3$>$5^KR-(OKnpx0 z_=0>Verp`Cs4D#lR(mK6O%?>7{p7@s0!}O-L42oY^1w4+d)Zp)*c+;dQ;AN|+OD|@ z-jMjmKz{o}_Gd50_RzC8qyrKT*X1_3(^rnbE{4}X3AChJ;Im=p0JX^9W%Y1}%?$(= zbZeo4*B9cmN?=FRldqrOfgJ@ix?}*Dw$Y z@DLGSBT$Z)b6Qk(etE+zVRpqN@8TgoO@xiR*}@bYJ204p8NtBuz_KK8T2xkLz`O60_!*Lc?mPV3}zfIqM-E$8IpDZqPXKQ zxPvw8i#+=`Q{ilE!9T^j5d1@xI=~T8M3BI z?x25O{OvheCL$uLZo+v=PQt3Rh4G`EKAc9F{PPfRh`%-1VdxW15Om>zb9I{45iO2i`BV z0JvbkLPQRYB2&otO{u@+jD!1>N*Z{;9S>5xNrNwtB;NSt;wJw272;5k9yc^6z^u<# zWr-D%d^G+Dv-W_|3K3wI$tiXsj^+~XKf)|xFxqDXn04mR1Mi0xh6n!$vwXZsscH!@ zD~dv?s?*8_X7NDQrZ>$Z!vh+(tq`~O}~}>|HRUFt$R0)6L7cB z|HwGLYu&rny=&cnEvmQC;Vvlr*XXc+7lZ$1D!V1mZ^htls{Fk&-3=zY!Q{VlLI1DX z*)8ICF5*i_NjIwcQKK;!XSZ2OkJ*hGJSGG`iMG-Ee6;eh_k0KnTV>ka--|;}=Nq?M z;@kVzSCEt^gRFlK3%|5L`2ZV62{ss&+$G&uo<%RcWjPsJRLwVz*Hb#C&`TO@GguMI zGLD`NZ^EH4q6YYmyKipi{E5}l=4U5LT)GGWIV*x5+l?R9VN;cC;y1=iUhdKsrZav) z4n+nsR7)&%b0XJgH&!Rn&#vlC@AM^G0xXj42DDyQ(Vo|EBH{yw?rFg;tFTS8zEnA; z&`KIyS{{UC=L_(_T42muN*?`GD}e#c~k2 zLPy#T$C5U)uOnOa!%fT@NU>Odr_tlRQ6Myt9;R?YzYxVv=so|By?;Xz2^yM{1HnCg zU-F#Lzs{q30^CH2CWG>rQ}}uG+GKi_*(IJoNp|0bOR~h3q;}JUm3ciMqfEoaQ%fgq zTA|D^n(p{)4C^dwF0Qv1-@T=<5A^xB(A+*IhknbIzwjbMfZ!CMkzWIIE7|9B2E217 zA>v+Rq>08NLXNgT^~{>z6XoBm@}G|T{3u`=sn1GMfp>nbyCZPoWTrtdd6UgAHbxNJ zFFk^VV!8+38VX+|z+|DmvaMRM+bm5}3?H#pI~^z^yzKcKK&igD*~SG7g4)Ob)+gAS zd4WHs}`ZQ)R{$(_Oc?9C)N2yeDBR7^8 z7{hyZAM*M;M$18a3?&QSVy@}D|i^5#!Nt31t4WLy}x z48BI<(?o0>x2J51;ni^J=07gC!*hn~fNHKp+ztcRxjA8NTCf@~jjuf_JKVmh+!LIj zo68@T!B>TR=!uP=5xU`gm%7upX$k5v3>Wo%M0^D-ju=zid3ld?UzXzYmd%-{`(C>8hnb`RXBH5CarqK4Y1c8>O@bX~y*m1u z>#yeiYYZs43f$%C8{3m$AC2{XsKn+}8c16n?yM9;3LviwMhyp}AU>T8P#EEnTkyr( 
ze=uX%D_~wpLbmrWdY4#H9;_qyaVJfF*B=b*!bjkfDKzn=2iRfN#K_HX@x(XP=1&3x z+0J-E?llll&6|=!f{X>;8eQlAgTNT+KFbghKDDePd8=Go_#Y;6n0?L#wXifbtHp4M zH`?UXbNsLCHVqHFHx%U>XtvLKKGpD<+M7x3U1wf>g?FnrS!E)U=bSIzi5w=vg=N6H z$hUN0cze$xJW0fnVmm*6J_i<@4jWzCw>?boHF`4Hk0B?3l!a2Jw^yxv)=3xPKY{}3 zw`G>33<8jlysFlAl;S-IgLIWX=fC-w(~##_vD~)(p4s1k|IEJc{&wVldebLGwImmt z=%FrEMbtxE;}*tzwZ%Mg+k7oR%$&zMhe@{%m_ zG1LW}@<-ba`5*@vQ{hme9BSM3tM!_(|g1ZFBUA1zOGe#|d5^JFs>KO-@2?D{EiNVHiX^ zx`F04Se1P3W47(YB?l=$hnv3ivxf&6V5GsmiRRlQ5}^PiHEvl*$s`T-y!qnzcKZon z4$m$*Q*E1@2S~43Ty3|vH^Dun03*FZmlVbOk~BDqI`#hcP$=gpi_1itM20e-AV=2 zsk_zR{@pqi@MycW|1L|h%lzzeRK5{c8v8tPnDM>%(I<;vRnuMWf5a4fB=K)lR9LLs z=M+CASn#0-wH7g+19vk|(=~f1BZ8cAFf-Lu+oS!oB;9LK)*T$(7!fI+`YPDe$?#6Q zqs~hB(sTUkfzw_amJCUW>SB+y$%)!$>6L;C@Au!2B{76JK4dbjrsXFBRI8!N8+=oe zX0vdAp|CY-!5Wvjr@RK&NZgQQeqcT}!SLiMp9_F@JVSo>=JsOBt@D6}4kop4YK3`c zz>H{H508lO>L{0(pSVm9n-DMXYF~Jpxzxs+p1gPFMtmLq^B0SF+%j|SY4jnk4-r^_1vt~CusCym*0a;La z^SvCth!MdL&7vLZ@T<{qG=G3L*vWB};wwxAOrHD20IGgvgPZjNe64)MvHBuyGB1DO zvziZ@(rw~ZqPH_wR;m}+1`h9Tx|_wQqX8=c`$c{ARDs_2{X_5hPIwXla_F@^{RaJzvC^I-x3`G( zGB{uNM%|41t4B|g9(jFy8VChTLwTGZlc&IQ6%wOs9LQkzjF41^P(*jtJ}`jQ)Na-j zoX_zBC$-kLR}`|Y0F#j%($8xXr!~BqcEOhks%yUAgO0}3HjB}b9J&;vhn`%y)~ijd za4lnSiP}4~%C2kqM|`H?dVQQdzsfOx(qIcq@oAoP(qMNxpgkqFM13pc$(W(;Wq~`{ z?P#yb>;PrLDuPOz*S1AX9|Vg5b-9{pPIV;E$ETaWk&!VRDc8R%$H;6=ruR<;J@MAK z_RbYa@_fiHCc9{{_D-YDy21egAR?V=3o|!P!aaO|IQ{zzDjub5&Ac)^9Nc$XDE!`0BI6@EYHMG4h!XW{9E#yhRj(PkJ0wQ2*~sAM(@W|7`O`+1 ziC5Vo!ao{puOmo;MV&0cna7r~3lN3juR^&#pZv%&7zQqpDMp`-^wy~FTq=k&Xieob zyn5)gK2WPGbFv-3+^tVzsdeW!Cuv-5VGu=_q}%_&BpJijn9^<3)n)QeO3%+#W(`-g zH|ZeOi?0kxWBNE)JUQ4uaSPx?HQj{CRb@v$M=i(D~Y=e3J(DyLxK3)rCXfPQ_!ocQWADUMj= zr#E$|SrGHA`*L}u9ow&rcP>RYPXa*!4}Iv|Z=91g^$jO64_}H&RLmYVYMsi?uKD6z0U)Re)+BEsOyyjO9Orc$%B7>z`2WrM7;wdL9V>6rA%8I<$P zv;CtD;ihXzLu&dD>uw+~XgkksywdIwqlXP@Lj{miFQ^HDccklf&NQ<_snPhO%&-g9 zPi76?#L}F?>L3bZ~FcxNf2N@)}qA4O^XQ z{bbf6eBR>X0sf@d20V7sQZp~hnxfcb_*m?*O?Bvye6L7~Qqt>3oYSh7hEw{B9IztR3bjHj-j8Ry!O&)2UikOe|R=UM-?PcyBl@)EFFK-Rn*51x3U;rb(#tc zpvGB`r&q($iZH!`$a#Lg8QlW~)$jZ7=gvsuYpN}Jk@a;!PFcOav^wExG7F#87%{!5 zbn^2S^Uf9!$N4D-u^(WElY5ZZ!}C<+Q1Prg^G{yjB|K^Z174lpaCGW2uJ2W0dHr?y zVlUgZJhRhpOxBCbWd^-?XLRzBaWlB{IjD~lkII^5)~2<-S~RE(t=F<#e_&ArcORXG zzhv5&PE%w4%45g2m{A`q?AUr@NGGXg=tlAvY-mAxcRzx8L}O%CXyylXzJe8Ud?|}> zl_?AjLr^8%HbC_(h5b-VK#9Ia(f5VP%vt5HjZ|F;W+)eH@TtbAbH(Y~tiMRfcxM-5 zX5AOCp%|T*KC>OW=P7yCEwI??%ejgq&nbPqhCWVP)Yh{phsop`^_8I$isVNHSf%uT zq%=3{doXbEQ@MLH&=$>NmlDT|yW^PMVq7mduC%hLRBIOEFTJ1o;8Z<19@*X-FCz8P z^!o{oNC!UM$JbHcUuKQ!7li4eUHqUg-R*mmK(?WM)t+4}Y~M}tWI95|t5uv}{EWFa zVB;9{YW{FiKr_xVE+(fwCY*op(`r$U%)AHd^Tiuge>J96i^r~uTa2Gwqt;7TBUaFb560XNSScIq;*~wl>-qhwpkj9O z2C=5JniZS%(o4bXNkRN@-f5STx#PKgLltFM102>z?Pm4!R~V^#L}hI2@j4RZ@)-v? 
zA;A(Tu6fMa(N)#x>{roVwzCbZ@@^9CkLqI|)4*?Ig^&zVN0Amfd7=y7wd2WjxSg$&~9>#7oUK=LUB&*+~jD%rd7Lh_T;q91sBe2MTZp z9~Y|-%lA(2a@$X@%}?g>VuqNOlXwL=&Oi?;fBZ6mlu68eP*NQKPFXvP}%cEg#)ZAE6{8CY=Ifs z=XXHnqPWrg;dYJEe%kamQp{ob6*_8~Uc+UvQKYMp38cZ*=`y0d$OU5($pm@*`K0pX z4Qx*x4{_oXsFXW4&P*cntfN^A^y5X$?7T?nAp;romE<$kvpMJpFR$97SkmBAs#4zz z#||VXIq+#;6Jt_%Od5Rl3qx|e%Z2=@`+^dgFrH=l%-210=`K%>yyHD(N%c)ZuTBcz z;dAZekwr1{6{7J<6j~C%-03FLfK(}%-pb=v5aZW^drpuodacg7xZFC;ompP?qkf|^ zpt%eq(;O9&Sg=^vrf~}%al`C|?|kovtDVJTL~>G7VK*L2n{hCHYew1YOWamE)_m>& z>}Gb5!N!=+*Y;BLWCrAq7RTj1RUt9MCwc}YmZIaOZsgbD?BIsuGUJ)};X7`E#}0lB z$yEU0e$}zNo{ceTuB}%l?c+mT7aY1r;1D@9fX(gF>O8vV;jyD{YUtZ5l%9%D_arK& zv3j{|jJS-a^~+p8#h7d!;G1?jLa9V-@t*19?$-HamN=OeKiX?*vdn=w`%Xh4dZddr zG+IUgA%ur<(#a7k+zK(Gs?eGu4OZ?UM6pTwN?a!|TrZk6e{K9L+RqOCBLyy=el>jJ zILcuyx{RlXE#x#&VhT2N;1N$z(1{_*FU2!r^!ra}7590+GssD~j7lq*exLEst_2<{ zIKH8Wm&Wa3gNqmC+Uc0J)t{}dKsjnlV44n$;PSp?yldi$IHU;nKm;HEK)kib!drF^ zK*ERKCmFT6R!b)FPouDmvzQ z2|_()K34T`T`Wyni}H6}J4(hlMD3%QU0B@wP5-#O?$Rhj5-ySk1=}c@o@_=SJk67* zIRb|o$|9Znyh2Sc=?xxrYk4I}jem}<&^a!OtCIGMop@S@e|@c5vU#=2ohi=!WYOx^ z=mqCPar2P(+$qnkh!v=KDtM0N;07PME!Cb}8C=rrSAnn2u$=5uo7GFOu1Hq6&g|6C zBZr3FUv9;Hzc{;^*jGHuwsov^wndO63x$BaYjHH+Vrp2I(-*7Aqk{ZrmW8vlf+X4$nw` ziX8F_(HTW1q+klU1zYFdTMlM$v36O6$&bn%H?ne@Z_SHBD#ZF;#I-HOPY&RER%>JH zO`RgmndY^k#oCPz$B6Z>qWTAOxRM^SJ?;?woW92Rb_Sv1qB1(eb#GPre%fh`k$_n@ ze5S@c<1;SS&W=2ra(Y443a59`=X2I(3Y@(7-BBcKsME(3H0t~Em-R7&M+yeUdHue~ zKm6{z>^F5@AKubN6eB+4rsorjygeRw9U)N@)|-DSqnGLxU( z4qmLx`QlKJf>p~;XNq5Svm0uo(OV33H@hM>)<7bGdo@FkOKz^0zSZ~cRs^2`ro(r& za-OjySD_r~z~eh3pIGlw5qUj%NKH#1J*p)YHT5oVLl1?2*^RzGgbAfRA3q8Hk?RW{ zQyDs|g9iFNE6mEXO)WRBM~vQN3Vnrd^ydcE*-!laTpXBwt(N+_(!+amWr*}B411oW zwflvO#qBZ+aDMts@^Yq2ldP^beRB8(KFN%QWtZ8@i;%1}4D?>X+)(mMrNj4VTxRL7 z@NKMjtvNrNC>rDS>6^aKdb<)c4gV1(*!(6!b81mmi1l)6(b{-j^gt2qJ2K8kj$uoE z9+DFAvAMI^TqBPqSS-esw{^pARoA}F7=jAAg9>(fz?*&N43aLKIEGU;7t{c_ole9?AZq*yDpw?;T zM>*ovu0+Opd?Amm=R3$-)^|Ki{krF3rr9fBnkaataye(8{|cx?V=%I3So;oB-*XIG zTg&NFZb!|Ay2T3zx>tB>#*pAfFVsAso8}C?mX#$BJV03uP_$YjvTAV#TxAUgbr zPz8Dl)R?LVMR^OPtRyUNc^$Wv`(UL(JMI(bH!GKMF z$WOtFbhcBYKNyqhO(uP1z&sIfUMScbA5G~{=s1_cpUM}bB@{lvrc)q*TU}j2&W+W3 zj?~LY*)k{I#wEfoF3pmK$d}{RlhKZSh4teKxSXONw?-YF74drt2|sE5p4aijcpR0* z*)*}er`=ouRP};&2jD7>Ux$ozF8KB94O(|U?LyxtkmW$zL%7{ht9@=kU7MnezNWt>1LsgF5E6z@1)cs%%Qpw=~dSVEB=Cbqkol1V>G(kiMx zru?gX^D2!>+Wn;=cc$s&f^y-ZH|csEQR1d8^qKkPHK&Wn4>5QSK62bpRO_;6hEm}= zg)w!i>*HQzT*cy2=8TI;x-;a^GMRwZRLjPe&-0AoO0T(}&7Yu8p2-`Y7k>lg4ms|p zfj*gW_P8!?5k3>uemx!gW8z@HFP}ERgmi*hy7ns;a%M*3c+~9C{12 zI?a>#6|FiAe!<1Q$o@H8!N@ICQ27sjPh7&m_NNK2ZsVe-`VWQ3pYK08A$a>%>ubvh zW37v^LDmwXpv*}~P1`PWzJQ|xPYdlh6uBoJMaKFUzK-a?vuBMFjXfXd3_8_=e}1N~ zp=dRokB@n7(jezTxZ9CWjRzk!3fSqj7qi{;A>lvo-BzaR!*c;Bt0v!Z!p`T;$^!*7h;B{Z>&)67zx9jDf{&oh%Et9V^ko zHP&uuIk!_Sq_F{F2b`G5)qj`BRyRiP)i0%x-&cf!+YQ^8JE>KtpRe|yyycU&OE2GZ z^wco9XE471y~WLhg{2p5iQy;Albe@tI7mk*c`d}__CL{30@kHfp@H3AHhX)py9RsW z-6P`6cf6N=_@k=qhE=ZDH0q|dN@iNpUGzWuL-V;?odI#r2x`p<-9&7j_^`B zfmmP7h;-A_2gJr6^*Zn<+d|1-jc%Q2ms`MFM5PX=d_xWCSIX^f1GK8vL*k}>r2B{; zL7gGOH8Gl&+ew#HBQa%s5^1)GO6zPMok_MAnmtLftM6);o^>HUl8qy>?g?7xy@tRt z-*Pq`b=|k- zovzh9^K%IRB;>NjZP-;#h3132a|c340r-B>s|qFc@KMUW;^*V)(pP+zn0bz_ zRcEQFG?cYIPcus2kA@hrt3nvqYqp;ChT1Mw5Efx{iBdZgxc!+Frr}6X1 zoUVNr^uE#E+i~^lhh3IbQ*-k=6;AwzbzSUm@|?rMyX$xRtHAn|kAFyVC{pCg7|N+5 zD13w3B)}d{EuXGb4&Sse3Ba_z4aRV&e+w&raK1_v=bAjG!zTJxz!bLhIamK2aT`aX zOC`RTuj6$J(akzAKwn{JOa0vxvMLUfq^1B2sCCt>>pdqy+aFB-JYxhHuwX#T|2lEB z4dhuj%Xg*|&$#Wct(lGyw?L5aYGNs*4cBu-%DOBd1z4HflA{EDVhM~wKccW)_5R3U z1lKLjL&aqJSglrB0iQAEspiz8q>usg_7X(8vIWfZkf_TgbHndGKfA9~#bISUiP0Gwkefc>0BeXywBmV)Z!4xAT$8(`vm+*{Rl%XI 
zM26n7@8Jc7`42w_T+vMY)+1=+#xrIW#Vc9ZCouSF>ITL+HkwOxR>lP8)S7`%yR1>Y`VF437~h)^GS_GC0;%l)k}@y^vMrcwY!8^cb@diEw5GKi#GMKPk9=9 ze$iHn-tyZ_#LG@t*Pc7p23{@jh8~HwM4C#<&F4)mj9Z-ZE}!@Sz!xfH;t3Eo=LX3K-N(_5A~|Ya3zZFhA@Q9dpZ46u zvUu(x8r|09B9@3KRi7oS*OW1`kQI-RC;3=XciNkRm12-hG>|p^=8^irI})f;H~S)T zKV*!>Du;*EIk|vvNyOG6O5IKFbfquVqJmP}5^g+fy3&m1aWT4e{uagI2Jk_3!rz^^ zOX?*&BU=YQV7KDl;ffavIU@Rr%pG}OIe8{#5VhBL@^Dz4P}@@~ir-a11v`U|VQzC) zF1Y45;z+Yhj{BrAyfi=jCV!iYW;Pk;gHBUdpE7dsPB{^Mod zn-J(M7Ij-jl;8Lr#VAh>$6U5U_kNB}6Z^a{_a)N8=P9Y)z(;e!;pFYN`yK2dOr+VLvp`PsuO@9VqUrd>_$ttExB zUPxYWcThC%!}UAlxD4I)Pr3EOz_IHpGr{EnfZkGx5Lelnjvsn=yPX>e6TnJFIOqQ^ ziziRr*G|Oco7i%0JD4n>x6Tw6)VXv>`KH~#npgkMbkY;m6gLx#l%$;@i)*inAaXJ# zEBfr5(XvhxZ%*UxLL-wL%K|V!H!fLQ#ESLs1omw-_fBaMV~w)zkYcT`Uclk zD+N|24Y%NYa2T#pX`o9)3#>XL$3Xwe+0@7-O|R^R|t$;}`ff+*Yv z3*yG4TMOi(SJjeojscUn|9P2OlD8wpYqpIW9pR|Kdb31j@x|*2f1%rH3C=EH;i&0^ zDmR`!clRSK(iW3`NiP+qYb|L0-I3}J>FB~tV*on)zh?^__iF4(jsf9B$9;GAIv_)fVnG~oVZs`8VVq`DO_u{W89ys5*Unm zuj39^qa;oG=_$AOeSfL6-E+Kq9{s*NK`B&r)yQ)tQiA>92#|*04?m6m;`U>&vJ6es z@i`vnWyn95ud~%XXVhe}bg81YXmeO@X|9LXnMQBbGN ze(;<;ed?F*s1gpCcR#SlH$TMI*Mj32;G}rORVEHaPUlC2WD)3DByL%;!;-}=W^fI< zb1r`DxSO|)c!|~fZDNaSF{=^HvpH3f7S_IcRHngcOjDIHJ4<}!^Lm@hlPVN9N~vmA z0-=o0^2on)z9Yd46Nc-Dw0R^>@?Y4cHz8g7WXE>rczVmO>v2hLhQ)TZ#bU~BSrRgF zQ2h7jVGh_8?sM5qj-**G-zZd4zIx%>3#nE*j)@AD9N!AoUSo}mxhm3>9M!&p7A*2$8c&$R4p## zGT~j}`p=a67$4jaT2&itjvP+!kmqx_qN=!BFSkCPUozb9LlWi}iq$BSe!&jI_nK04 z<|rE{Ry{*4cLXzilqemfI#z*NAaJ`7+Bj683XIM@T)`Qrvkgc~888>VU?%vFTe)Lg zk7`57w<`Np?c4GJE3v|+ntR3-0@*KBbi6q-{>`;5G;R?S7Zjq-ZVELRaP5uv=gjCr zG!{X$2W<&#r^1f{)1eMCE%hZge3^ep!)$nS3&3;x69;gZ{ajQ?mYeEnvvryW zqq_it$?doAa$s>c;&`Oaf@udgt4C~+vz%CIj6J^fDJCgGvKViEt zCbs}oKkq!q33t-BIUM6YjikEDndJc0DX?AOg6Eun7DQhUB6#Ya-t+Zk-9)xR3tlWW z($8J7{L!O4_(zg^>`WgUi|S4;_^J52DPCb|cSKoft>9^i^+Dr!+v^yo>UHfW%`r`E2wp1(d%A@@jh2%}+Y?@leViol7{zGC!fg<-4T zYrS)*hKsuMLJHr9Aj))M;{mj~XRvG&JMe*|oVt&{q)bMAB&P441enZ(aI9~;i?`yN zkXg*8hH&UqVr=2wR%4~4tdh>v|(^; z++?AA{slFEY0AwDyXgPn9Ks1ojm9hU2uMv!kVb|JDX%M~Z|?#oSwm_R8|m!e8F9oY zK>v6)v2{LPLJ8i~@dT%Ax*8PJ0n#0!gquFx4fc=jr8IHHqZP}Qz3-gL+$WPX6qK&B zqkU$N=Z&>SdrH&ng1RjR>Y1%Z-Ek78ZQ4H7auhrsA_uuT5B>l}G~90bUOnWq03r+n z0Jx7u|I=4PAmb`u(UYJxY-(~RjG-%K%db6viecMQ^S<}{4ttKzyR^t5Uyto=UFb+O zqB=0(+u4rd=3@%Iw}9tY9QAo(0T!T(?awhUT7czVdbnXvhD}ib59ySVkdxJ| ze0HW3k2-l6X(PT8E8T=wJdh6XggpE3MZ1~C?;xhUF#Fv6jo9Pt6^n?R2XV>4CF=n2 zta6>R)Nq1E5+wQ&8Rw%k{^_E6i5&WfIB<+!lFycQGI>`78(?EA|7_8i-IJIdFj8{t zWTXDd)oEKp5LH`EGn`C)J>;B&>}0QkYD&$0-6lWvzdr>Q^`s6s+Y#k3-w)HD=W_`!Z4bZ9e{R)#PFA<6 zEua%~=QRDv)=q#v`)e7=H40koZ35yoZAjX~yyoT#iw&LCFk%@mIEYN444K~Q(1x*U zwAaKr8ljqG-|@2*IA(Vy3KoGB9_bf0VIw+jYb&8VF%rzTVJCFjr&8~7dp#mF>&nLA z2GWd@nrS-{X$QP3dm=CYvG>n%Z0KA@-3p2)Vr&o`Z{6FA!hOYTv+hl8M+&^?RY25% z-SZjhP8(k-r!OYU?!IDbaz6T=>7@Y@UY3M!RV7chE|D~Ala&~5YAp&4s@3P208xS< zn5V7!6it!Q&MSQFm_Hr@YF zgd2@i_&KsqIMZb=0}4LMbjtW8j^MH(g4kC!Wl= zwaZpOvWZNI_s>b=eM}srCq1J+6VYuVGR`XPdv-}S186c+!|i5V(*59!Q)aczEQ@rN zbIoT-&z}^F0NhfW{K=uTzUE0Rh#(KAy&~9*@q1QlxZ<8C*&^TN<4hH!a`awj6K0;> zN1W#IZoJpGhN2K(&Fu42JW@O!RSN71wh!yDkU7lr%z?U18ym#xya$mTWbfVv5ZTGI z*P1vN%ORJ^6%j9%4UG8=%5_$Uk3Jd1=2P1miaFIO$wFaw)oJO07r@@-tz*Kf=5C%H zrj9qIh+lM0h8lPE%i>?Nb<{<}e%g(sg3v~;m~$Nx;qx$is2r1L2p?0|jfRSnN8;5} zV$QfXfJh@Tz~3p1sF?0iK8mMJ;8j1p zz$#}0t4E{VSq^Tj*NQ9G>xbT-3_HV}&``y0i%!fho3Y`#B!gFiFAUmF`g}SL;t~Od z-fmMb6cZAYg^1SPrE=EKn&qQNq2zQ)fnoVFTO7e zJlJBd;}%sZfarIurn-7(%nq)2d}04|1U9l0nYc|EbgicbGJu5}xbJOI}XJ zx5ybwN9Sr_A7y_7DO?ZNMoa9K7u}p8FSb8bhoLJx66wxfd+GmBjvOsLS^4qGoS%v2 zn9`M7mFuBxT!bUf+|xZkYRemw0qkcLu0;(@hk9a|8*=4I=ck#GYgfKgK$cf*kV2Q( z4xtlAQXvR72Mw8>(-dvxb$;2Gcg~dYd0WnqhxO`ln3BA)Hj6GUEZ7=0zjmU=p@`Tf 
z^b}X;N7Fbt1?6_hPGb?E6K9nvHb=(RU))G=mzTN6|Dcnnc4q8gfiPA?fraD5D4K>O zR_oi2y?5L5v(5}Eq4}V1&naL*rd-D>d?_RwJ%NY zCh%YZr{8x^QIWzqe{^zFJTzn9>)cUCFZkjl3Orh;b|lQQ_}EpY=nL&~)e zZ;CiT?XsP$7S?)m1|#g(@#Ryb;o5zlntcz znt&~$?!^nN8UN670WPunkd-Y2EoyBv#|=Dc@R+w=k|~t%;!4*~cPM??AZWvx(WrN= zU9$a$UXMjwcmY&}6sjMZOQ5YEZ^7jW?V0!+p?ACbgZ#fzGpbs*Ws!F_E`AK*=+g6Q z>s;m*j)(NWh_~~MKEoyoeAfcKT7<%y+g5vF2e9>?g(nyL*Zof?44%1@5YU% z-eXzjYK#4Od^Dy;4ZJ_lFU-X1Jkx=>!yCY_UPSI_5`WQP|v z|M??1w!Wws%iMYfk;7qfD5#<3qH7lj#hBd~3S2Vgw^aKeIY8zx^4 zb_hvi#p69b43p|{x|HtI{MK*KrY0~DHb}+{i;19Z_2N&w`C%hhYBM|)2PZbm(4TCS zoaPL+{Sl9VUPP8yL+{k)N?-|cpF1k+6&aK%lH9N$=P{7eG~?Z4+r-SHQZ#%l+?-{Y zkj4<|=7LUB!xSbPyYW}eech^P=rdq2ijsOGuFB*UFd9OEI@+{9UR zjhA;6PFPl?AYlkryf*~3t~Z5Gl`}tIoblH8z#vF~q#C_bvkiRo7_c+&_#EIB5(>p- zQc-7V(#<+j?_1MMWv~J?S9mp=zo=BDOty}ZG`F8!xcVi(T+lS{qR(VotGaU3yrKJf z@k6N8wjXE423<@g_7H7~0-R?GV7SqPiGK5@Zv{=;n;np|cblu&ExAd`!8KPvR(8B& z%5d$pdBF*Qx8dDYv6wMEoqEKdo$KNehd)I~SbQ>NEk?=6>%N4g9F0@Odue<8-SUlcWZ2Gf%*J*Ak?tF{;QV%Ec zTjj02^T*3pMCP@pRt+oo53(5fw{Ra|!Hx+nu7uhT@XIbvE;U+4L%92@R_Za*EE8iC zuW0kz+{_;7cd<-Q1v+7BNnwXnD>)lpykXoY3f$l6Gs}+ksEW$jg~Z^pUK@e#ww-IkW#sVMSdA;7X?b-mc5brBOba)^mvOiMv9Y>()i7dAE#6$Qm22d7$ocEWr@aTTKjZ9a#z0Tanv!OuR<9-A3=rDI5>W`7zE=Tkok7kOQVx;W>CclfA(D z#%oJ_xK@qzIsUhgWkT5J%>i07ZKN(sUUQa0d_!6tM;xkjjPwn(d87&*<=vx>=YFW){s97T}=Wk_M(IA z;z?O>*r`>!Oiafz^0f8I()AgZ7MY84$YS*N;LC39HoF^p_;x7inz~u=0i4q1O2zMYP z`YP&amh^PqtND|TKT>&yq#Y*1G!MO`|K8aueFjP0duL1nyT>R%h^tSRa?PPHp)9qx z#;W82t-CybT@muyDNwdwbh>Zxr?jj{7iAKiJFC8K_kE8*<)Dl==7My~1F=S4GIwwA z<=!z^O*YTs2z6gWR7>nT;u_pHH zu{Ng@Zg_>!z~mJ7JsT>Tdly#+=5*Fu#-L|i1q5rz%zMAuD-ng z>88b_53*P9h`3x{N^Oqa)8rZU3G7JfKPhF1oq3?LNAs$}4SC*+Cn~WWO(|i9mUc1? z&Taiuag{g&uZ6YdHEB#lWv}-n<_&(hY?7Qk9TFddlBw;X< zlFBi~lgcn&e&0}mLCx7dOx4eMsszUz0C0e}^UCSdjH2))Ym%4Q zlFng=D?*i{b~dUB+W9!IG~#wqc+Llp!JPPLIEw^4$4#I3&&!pNg7wORr38-z-@K*a z1qq#;)rMnd)IUu_GhE9P7ur?~hkZ<>2&FZ}Ug#ePi2BG{<5cz_)>fg%1LytLTjf5k^8W3cGcp}PsU-dRFPD*8!tfmR z{u#_@Ru$B$UtgFwkaI|%ShYYfmQVG1e20CX0Yy6~I~qem2Bkq?-hsr=Uo)OX zH2Dm}&fR@*xSHd>CS2tJ!v$HA#qrNUhv?{oPegtB5;(me1h4$;M&Wkxds1TRc2#O+ zZv4^^^9qxh-~-r2b-Lv1#${7$glKyeb-Bf%;#MT$#S~(+%cb40O=Tx%DXKUJWAbVl zH5B>yK|fDc^AzqFMx8>y_p16R4H8(UdoP_M;na(74sT1zS+7|^5A$3(-zlSw$Jgpl#lM1^9_<&^8p~Dz0%j_AromKE@QCyIlDWhZ4UVLh~z0X-0+}F+wo`7s`*Bf|b z9MWMl`^vCWL1XlhYCXxb-ec`H9YVdR=!LTJQS6U#Ig@xG6i3J|*A8?;YC$Wk`^S^c zg>4iM7#wwXx~k(xF$S5qfFQPGo3+Z%`0qOxnjyD{wd}AJzQWJ7+*nh>W%Jel0c3c| zj4*~WDriW)UA}DuLav|ftRl|KV6Zz74h7=fE29C)`_%Xn^De|M4(QrXU&qH9C6Z9O zS>#DFdFNd0FwrY|ZjpnWUHaai`#K8cQ*?vf2tNOuh+DnT>62CaEsZA@ejOV*oWo-q z#nQG@!YqW@#a~;(#2Er7;{4CR+~DF>9XSha6iN1k2>4po`W_}TBhUV|@Fo|S&5-vz zGF(paTxhPzn%7jE>1BRbQ`ZhI;nt-Kr+o9ANWy2^S5^`Rd4_*NMl~Ba->OM1_hDz8 z)o&qn3$Y~hJ~+?XXgGRucHuT2wKo095fZ;(%Lh-p5J!Hio7&kbWzRCTfPk1s3RmYX zbxzGbPhBaJUZ4A-Zd2vfR@FMtS>?>LM93m%x*^bz*}Ztx$A;%jqr0>Eh0P#69yxE< zxjKDBd+;^|2vqs33v5S-3_0Lik-DVw)N>enxyE>rE_t2U*H-qk7K_81>Y<#(dch`Mn zHQ0#QiA1%t3{zHTdZ)71%aQYV`1gPC4sDH9lNVsn2_toI^3bpgGCWikGy6AG1%s zbkCn{6d`qHTJ<#-zT`l71Aa=$jr8629oN$UPcs!f?Vgt;?Aty}IObfBQ?z^l-^^k7 zdUAw(GPD2`BHoN?WPyoa*l~Xu1cj@=GRT^794X9eVlunf^E08-m2QA7^Kmp~4$U5& z%7wP%q5PcC6?9f&;ArX!l`AG`u5-Vor$#>u`9{del?kz8ze1sAVc>`?(^5Y~=g?(#DDB%))u%={ec??O@SxCa1 zN_V<^HA#_C^&;a26F`CPtDsRv@$3BHe*vdJqqmR(Id|!Umpt#>QP?N1Jaf~l2+-2I zEN1f%b}=<|6OQ-0!muE>botmb+Lk2{EUaJStHzckJ{-#_v;C2wh8Us;!A~gZx`UD; z4@56&aZkRt-wKbOFg&TGVrt0cb6uK{XN*7&>I;fd)+_+wV(LB02ZFmks#KGauQGW^ zk>|DSPojg`POf|yYxUP_+;01KzB4z}RTu?qxIccL(xTrSoNC%lKAVQKsQ1KvPV2Cd z^UxVs8D~Fyp&Fi%;MLBL?r3UlJAW5-bqIz#wCu^SJ!y9-C160(l~&lc@!YlcD=H2O23)-V zhB7H=eX}#?0W3nxOU#nVY<;MkG-c5flflZXkpPry$usE$jm!UlyS&57d%yT+pA0WE 
zVxiw@CX*Yr-f@^L3>5b@d;hK)k>wgu`rrdc^%hTa`raKHqSIol)>8?OT61W%u)aw8 zgj19(V!seYSKnrggb7=}EEAw-m|!ZUoXz1ApiPMzhxb=zEwbMHRWxUR8 zrv1H^QSCGRG=rnDry&@K-HQq{W_seurp@94wQP?l2Xbr#Xji*O2!iXrogiJH4*7=W z-P5S1zxPB{N z(@dKio!f_Cdh8ohn%aRSxBxu(Bii8i4^J35efOPl@uLTdx$S2~__9P*>cy;xsAftEncUO{=>8^*Asf{`q1wCe2d~ zNT=-uT1A7WKrxRdsdgXc_tmtsRSo{d6j7aOlK-a7qIMvf3Z`Mq+iY4*W?oUfYWiuj zYwR4rR*KQJ8PbTRXB>iUfB5z_?QcfrU1kS|I8$h7n!&+GbORQ>O>YY5Zc0{a(5h4O z`UAD*Y^r|23c~0he?Pff+8!5*Pc$glcWTl*9ikb1w$)$-5^0=VsT6NZRUs3tWC zDw)vkRO8KkBfse1zfWGsgod;YGVGHdT!1a~ms7q9*?Qrbp+wq4Bby$N{4)JN9x``mccyI; z8zBN2L*rSWiacG-01d1A)ol&q zcG2g@;J>SB(VF5d1R&Wojz59+!^T*4gSO9pIMDd7Mz-$~RF^4chVlCO}jB~bF32pK>ZU2{@rub30~j4gKke)|D;LY3heD7#k*X7 zzeaoT0sJBaddU1zi(l&4cb3=JT<1Sz9%Toj@TQNE-ST!Y=uvEUa?pQX9|O>L?R>M-?RQw;NRp{-vQi>`eF}mZ=?+lJfs65C+9zZ3of+$^Jj4FW)9$@saOuU zg!Ok5;NcjcC-e7BbhMA;#{vB>Md&)WyzQq6dc0}wa^PQ0`^p=EF_ubG*I>N+1GT>2v~gHlm*?{lVAGFO-4)>+LST zx>Z8w0ribXA8dvt|DegQ3UJYp6xe5j~vh1 z75?+OBM{sePHkcfof=eV`wxKr_h8KU!BCXVCC+Yi>Buc~%1Z(||A;pHCf6?om>j`p$ah;fs{umDH?8)Hb8YDo z3`pJ-aIwn5ME$qSHXbekdh`x$X#%LaUp4K{~GhZ#+FD?;T`leDSD|r-xX+n$*&XQ477MBKdtIdwa ztv#u9!>r(8OJOV~66!+3|Jr~j0fgYjd>nksMw7al%CW6Z^U1o_Ekp=|JAp`T9*;S$ zZ{s-kIS5(a$+K_^O$dekSW*`M7x+^MFrjvXhL-4mVM6I@BgvF;t>{!F8U=qbuZDh6 zbv4*y8^`Qpc10C4$cGZxdQG&cCRaWa4eI^{8rcD;M)1FoAtM7)OZ1MCgW(&cMBbYy zy85n>I`y4oh&m$OOzh28Ji7TbKuptROqR1B3+8A-)JcXZQY1cgo z3i?6U#J6A|uXF$=e6Hm4zch!QA&5tW(8?xnLe08-g;~L;3H4iRGoT_+OF=IC4;iWU=Fv$kry|pvjqqoc@0|`I4!52{d^s zoVOnSs;-dYpnOvuaqnT|8{yyJ6!k#`06MwC|6xfaQ#0XUa9HX*n*HQe_?|7F)KU}5 zk#gNz5HPBuT-t^8*$$hLJU18KgWD+fzn(B;@KhJFWBwZh%ZIcUck8K$(NRzLqzLr4kq$TGZ-4@3EAApwH!lFwXbS$k{ zcy9SXG!dZO-uA=4)4gd3-L8|OGt^H`sGtsQ1>HS>3N3ja`+ewzMtkl-zp#x_DWem% zqTRfYfjFNEf21C>+1k5?S;PkE)xz%5TmIKY&C_;L(01gKt~@`HC7Q$|-68 zkNztkikao-NPy;bB^=p`QComf)lP>C{KwKJGhw}VKTk6aK7+Zor4x+;z^oRexWrag z0o_07XQKZ3CzjbZU@IS+PXYC13uph2`p&S%EK-N1sXN$03odTT_b>=P<;fdG)6(L9 zD`SL!Xy=Bebm2Y*ZRut=2tl0914{zz@|Io5eFlBmCWfIYQ;?Ndm%F_XKn@;u4iGOzj&Qp0^q3X)v z+w%ikI?>1jl+?9|i~lLc9|x(+YmxMnd;KpqUfS)well8mxkz-E;H_#X(U;j8LkiC=5z||Y+b7v`QSqo&=?y)r)?-M)Ko~{q zX9{O&ET`%$h`Q|TpoQrCPHb0_dnL?p`@r_egWzMf*1-+Et`6Az_~yf6`^=JrSvp4nMQO<30o40jMrrP2d&{1<^##TcNaQ z#k*+v!fj}i@MQ~zXH0#V!#(>asMN}{F|^~#88}o0mBaruw$)>Rb0WiiyEb`O2|m8h zb!xwUHB`b0-?cF)O~#~oz_+CAi@y(dZSYMLkp4{kh9vdN?@y_{o_TBWx_fsY=I*ca zK7NlkKizGjqC`9YvoV-c;L|X8%EsZIbOqi4KNq2e$@WxVl6|a?ge{e6dfD=Yq-?r+ zr{19UKh1>3OW$qi2L|^~PUh6MMk<;q(-V!EH$yi+*aM1DhpN9KZU+D9pZEy?9_jYq4X3K7-xtOt zu3PM_AFqHZ*0VV!&#G+5B{`Sz1LvQbeS;e3`gJBw{vW@4fat7lwQ6YZ(M{ZPZIeAq zIR{Su88v^W1lXff-2GvL&5x+xJ#!Wat&U>Al^@;=1AZ0RZ-+SN8wPvHv1R?x*?sv{ zc7E2Gjg{*e^9|`&!f{$2z~W?XPEw7S4YuAiXTG%|w@t$bKq3LK4i`DkP!-GXD?|If z4<`*X;(bZk;+t^A#hdh+I<+9MyyuD`o7+!_aSkH1 ze}fgTOgDIQl5$jRi=p^*@DZ=Em7(fJUHIMX0dBm;m(!N0%hfijfaKqa`rV^WnlFc6 zIZ^+nEvWmc9_WLqu8qk=g_Zv*zyD7?`~IroufF;@y~PG&9Zn2^hPs){n8&A5KtZ z5%Mt04ao49*8C4;SPbk@k^E+l&`sW~+tvnb4E+~#((hFJxkTED^HYa^)`&VeM&lY! 
z0w-|q=G6GIePb%`Hk5Dt(*u^*29`WMUQ2>X;f=#U7F_)X^T32}K0A8fPEU^KaP!Y66ci$xRJO70`Ztsx1DoEn)*Q{G~PjGa2Tl zfXT?XnRf9%n~WfO{A+3)JhNdmH2WCtY{_c*bK}dKPc@8Rdmius8UE6mzXtWMLH)nH zDE>95)TQvRL8WK-Yf!0<>tBQV*F*j5q0-#)t5>S#h|4{|G^h`4z4=*|QckvUJt zr34=wQku)D&ky~fGxT=!`jX^*aH(ZK&cH<8>;e__le*2D4=~Xm3n_(O_yug=b@eo9 z2QN{iGqDE!kq;J^;Paq&=EFX4rIAZ**v>RIH5sXRH=HASI^I8}5;vUD`=J+eL`~M& z#uM*g8i7MPVD|RDfxy}`ctx`Bjs&)$UfEYnW)BlPA6)fRYfeNYm?=X&HelD+?7^d3 zVAmi3o*GD%^?87*Ov{I=IVMZU$Tr!^bI3<4@#xHDlgN-OxbLbWvTgM9Y@sr-RalAM z4~pFGPMxy}o;{9eW5f#KHg9r$qP~x9oL8%)g7mA_`<8+Xmdt&a5TUnIo^5h?iy5*I zhTMVzWyY+zvG1|AqAh0K>6pOe?XBmTPEH`y0r+jnR zW3pT{*lFT5--qKr;lpyToCQK2?L;ZDAStU+jES(tkLWCnq(j{8LoSh3Rg96``4K=k z$HNX)YWv9%YSTp4!$ZA)!cz_}7JQzEC{KTx6L;a6L4hiTof4G!sO~h47V<(OVksSn z#gtSn@*z1?+Iz5s+3cZmVFNQf?e&mX!%1*SB`S{FYx*NL3Lh6MT)J8aWxtQ@X5gqf zih{u89$v(73v9qQVujNGTT!>?ikaPi?Uf&dvSIBQ2fN*`yYkY1xeWbhX7)sBLWGK72MPe9`{4{v4x zxqtWpn0x)iInR#_JS0paL2n>zILhsV9C_7W1Yz-}t`8%ug+TSS$q>B>pxUWAWSB6L z>R_~{l*=Uz&*8g`ev_7d<+@dcW+omdjl`Fyw^~<^#IQq05uCe)dyI%wdTOOIAF{=9MWdlhxc_O z`K^BT=Y9oUkuZriah++e;nP?>G`T3`tyCAy4&YjM4aUQKKl9{B1t;AqOWTb3Qx+Dt z=?Y#5Fh>{;EyWmUuzy%xr<8e$KP+2H;_{lmm2(CC)n)XUx;kH+lgoeu#phB5f(V)G zIkupTS|iLKtIGbs{IG(0duUpP=Tz!`1`blsSAtA9UHu#sNWoCm%L+vz<+fLNrEi8Y zzK3|pZDOgvJYf|OvoYLN0PybSq{1~@?^2oO94~y*-Yv!Y?gmw^a@nMrde3I~v-6DX z4#Mq~e^prvl&{}!kKVKDwuHxvs2&w-{;PY4$fam4yttkzx!hLb%GwyU$lvf{6?5kz zAD3k3sRY!D1z|?c$APt8kLWx-x^r?sAc~d?kcu}Z*LN8^45%m4)vG+yCZC`WUOOh} zD~2$}t5LP748^q(ZSZ==u_fRiNsTrX=ABemrnBToXDRVtbH$hxjuz6@TOq)`-3TFR z@4FL5=IvDz&4qDF0IH8ZnfCe?!)(Iiykcx7b;zNtvpSq>R-M7eeB_ zeQx1(%zU6G1_>HHn9zhvx?$o1-LeO21l=mdoMGJs~>5&tdDBzg=t&(qs~fN3G}){MBI> zDzRhgcbR3#wI@5vDV+)*iBh>9&;-~*9abIY;X0FDxzy=%=$n_E`)#Swh~!H~cj%H6 z^XYEN<;ajb*s5cA+So*Q^b3u*Y=r2V^iJ1r39>u`nt$%$3`6QEU$H&iw1H~Sjjv(f z$RWV(jnfyxt+^h3hoXDsu&FLsNB_MJ3FlM}0UST;<@U)#ePjF_UfyTk{GPqTj>Vpd zq=ggO4P}$+CZ3X{WfYVTxyZwkX_8Bnm?lLd#JHT-N1yB{n=SV5U?%U8(r!<rFE#e6o& zIe*bNkGcR(KT%hmDFan3JHkULmt8~s7ohM4B_7wg_vmI&wl*!^$6E@N$=@M9@rEx^ z88VF1fKL%w(iC|-&&w=#Wfu}Bkm0s&%W^+*w{^5jv#8rt;3Clx7a*x5mLW*?BAmce zidRJU%wm)wsKrN~xR1se%aSYI(w!a-mt9w$JW=r9Ir+BwjDx{b6yeczJZY9^l_Zh& zWM99W%wpdKbgXIFX;+@tF1Ce+EL01E9q3KG!bI=AAX4?ljkNmgO=>Cm#i2o&BzF=F zW&cY58dK2E!OZN15-$v#^d3u!g~o70M_zP`_mQ4s4k&WPsmWT}NKLV;4Po0n@HR^Y zi4_m*QS%k+_$+GNYnIRNIK$u-M$7pW_N5zOG95=6&Ux{WN7>LAs^wqLX4l>6iHvQ# zSD_x?Pk4J-b02qN@ZZ6g9jSwwn)TQue3 z6TEP1w}lGGM47#*gLpTu)W}3yeNk1iO$iI^hrha76>e-;nnxdbEGHOJZ)@_Vwe05y zm!Iaba(4e1Xt-wVSP_5Ymh~Rb<1>sP7x5*QU8tlQP2xfc{zD;f56=5wW~A|b%NR&? 
z6}oSnCDHrYSN@z~Ni<4OZnox}Pu3+vIi&6^Cpqv*2U8k%JUnMmaOu7#rVKxO2)^8UzSWN!U`J)j?kw%RiCHGM zFmT|97H<)v+l-_=9#r3zL!OL5QElz0B6r2aEP>N3yHW!(;?cv^?@c>B!!HzJ-RvEGc8M#_QB`Ki_gE& zk6Oza_l?gjn?^#>$SJjbqRKFM^$F4?W;2)fK{?P-FRv?3-dR(14su>jUymO5&AWwI zvCSmPqu=HrqwH6E;$sK0D$)%7`4ZezJR7`3Zuw^tbHS<`B{v`Br3yklb+=2Mm_eCv zMoXc(bgD^q($z0ftia8pOoeiZuZp|~b*;LU77wTv6+vAME!i_aO|TygsY=p#ihh$ub)MJLGxsJ?xN*f7f??=F+>jib>0L{ zK-rKkv_C6`O>E0i=8K00)73Y_ttv#RNfgP!uE|@DYd>Xw#47`3CUTCb9jk03rhyXi zEJOsh1l5mw(jS>uj+%k>I#kKVyA#A>Et=V?rxUCnO~|3Zr9Q1lLvB5_V}-=5S_pY^ z)<&v&HsQwNe9mxG_S(RFWrO|^uPLUSTbQ*r_LS@U2A4~Y_xAR@bS0H6sGwGRDKY4g z0xuZ;n4<%9%9an|r$5#!{o`ls;p)<#FVoU;d=~3fd++0qeL^}oK-VzYLQ=kAH4gJq zb5N~SX4u+178~BSLTH)=cX-{x5VCsHbQ9K>F_rAq(+ZafLCIXVLide4ZehZ@@2)q9 z)qKo>E4LCZS*Ss_QBWCt2xG)jCrR2B+zoKtmFM2n`jLD}!fD?{U};lDNG%ZS-KTQm zA$aX)B~?=uT|TajcjY=6I25^krhg=u_3+w4NG-Tpp&g>|pEC#_GU1!nfHeu7(DQx8 zcH;it&S42q!+{z0zqR+BVNGsLyTAtBVnI5JfC@^JCMun)D6a^Ke_a1^mKq(1Dq)DQo8A?JgsoxWDzX#8Ez90MiJJ*q4xdQ9qS+i#DnYm}J zSrGeyeh6cb!@d5pAw=PHC$DSKKzu6>E=vvgmcPTr9CS5lj#$}{Bl(4E zc&=!Z>=`2}HX-Kq-vNYCb)C=j>ZR?XVkvWH)xL!+G_|YT-(An;qCgpoQK1#jH?hVN zYo6D0p&Jao6f+11KAWm;=J^lF9!S$Xl&#?pr#yDwQkh*(y!Q`w{FvD&Z2@y*@7 z-lv6P(KPa!;#o*^nN+H`{nKf3*7S?rIyJ^LgPJ@}-z?iR&vRWX3589scpiD}g47zV0%nPy@;sSclbtzE_WVW%bV!ckT1i z9e?M!%TY(*R%Yu_iTkZbrw*Y*a)4wl?_=S|!os{EGt5Y|wHy!kJ#A z_Pw;SU9^G!q6m`OCrl^=uBCNJhkJx9U|sqR#KLU&Znv5XBTjUTi?)~8o^|d_yNcGX zA40!{bX8qg-1{PsZ*-uQ+wVBf+`UKh&ZgRpL*==Uxh9I2-ud7=Cmx-ohRnB>Cc zw~6LIiyvxqGl*WC72&>jB&78(8UsF*e-l;By{y(2#*nHxi(Z}dOHf}+bUNEKq+KSq zn^mu_aoYbxtj}J>9sp01Q7e<~ z=)>2Amc9s+-de*pM)^?Z?-_WzC$(6V{IC}*yVZDgtr~Mk6N!eRORMqE4_ps+Sk&Nd zzi1u!R%FzUgls?|64a*>S3ZkdQ;gB86qPKsA0wN4Cj>|`&8dvkhChBuH(o!Lc%2K{ z-)8j9yh@w-?z>4U9*2g8R8-CHL`lr#W`&L*C7!%ByU+cpZAr<5Z8^zQ+ZZxNHyjCC z?^pKXEwBou&${q+f>PFZ$4}g}S#H&}zP0j>8sSrC55|2F)YIWo+;Fm2jGfgxD24W@LJrL!qbr$_4 zUcoPygl`1!B=u)KuOJOQ#g%#+>pxjdzi4`CI*j-le=ictP0=CMkIL))b&({6d9VJ^ zkKbWm^tDx`X#Q-)8$-QM?JLEQ;%1>r8*=X#GzgW9k@DQelrX-+ukiLH0$n3BO$lSF zIsb9xnL8%!yU2Q0Yt19Ik;!wu7Y!l7r!=ko*m#TYDh<7V#HrNU&>gp^D)%gA7k*?z zn7`o9GhtnlbkSATI|IAB5poO1ca6AbJ(msJ7jtZUlwn~9$M)2|?kIN=vOb`{uL<7+ z5_s2!$u5!<8+ig-P1^0k=ZWU$ZLgXUu4`}ni<#26WxCnGg6g0}#%RNIsodTc z@6L*n*IBrYhC>{U+*x|uy`|uzZPLwA^9SJ6d#lQvT6ntqV3BmS=A$CDT?s$>-?mr3 zVNgGjTs-v-6<}+8#v5cHayMhy6TM}cK} z*rz>u)?foZEymK+?!Fvh70iZ@2k%OqtZ5d1e_I2vAcEHG`I|=rr026BiTGuR=ygPCqoqSn0ve*lWWgdd4Fi|0Hs;AI3_!8KZNds zTiDPaIg6`vZOi9q(O2a`GZwvtZ)q|H=}TYzKS&qcvgN^%DZ_C{*(aY7;+d1P@BkQa zL+wpn$=*_L$bhtHD!nT`to<$ZA-}@~C-r3M_|`R>b_ou)y|&?iwhVdHj=%FC#E24< z^{!C+N;0iNFIL&~oN>Kw_ez}8wwONk0Hvb3JHaS)`mi!cOfJ#-{74W_>ceJDi}ylv zA1ogT;m6!!u`47qx`qfmr+9NsPr14?4OTD!NjIfx&HeE$pQ{QwD%C1uofB_){o9%_ z%%ZToMDg4m+zPiBNGPU5P(>p$g)}XM>v;b+W!9$U< z6KzNa3F=aKgxi{c(lDyiIY%_BVab`HAu`UV*LrrHHub5JKAo^uB9?qAlr+W_?R_7mQWDahLccMV4!yCCE*gPQ z`#MdH7w1ct%ED&R{{8DDVhM~}4Aw5%TGqI1V-PAn_ZD6YZL3!FlxlYEE6HbZEwY`jW35(ruPYclso2m zYR{#M4x9xJ`vTvXAA1-CvC2^yqdaApDBKD?1GtQ#-OZC+RkI!^PD>pTn=_yekFJRq zBk>Ao-Qp=k&-|Jg6z#J-W-^(sZ~ z8C$FrU%Z^&lzKj%=14jbuvf8zB-R^QugC4<6-;l13!4wohuXUJ%7RHkYbL5)$Ew-u zRxfTrY6UjdDtqF{A&ZW_e3?f|J+embS*fswkV>))*Qxk2dYJQSxGZCa z`j{fc6`0+4<%1Kfxo6fQ2?o|8)Qk!p4sgL(^=1Q#Q4Bt{ zYgD?u-XN>?bl5zwa?rUt>k~s)q?(0t|9o}3D#`I-01o6dSs}#0Dw)7T23s5Y6kv^I z@B~MtiDVah`D*z`AJ$at;JH$_qvY6TMJFL0j`NLm%VYj6>%>dmjcX=8vu1jFU{8yp z&#zc=h*EOs;5t+Tr=oyMrt3eKDQvNO8N7*!ne@c?q~@aAVd=A-)A3fs>inXOw$XVN z4?|O}#wy16&|Qh@U`IV(Nf_Adz?>D~D5~AhfDXipL%)sZ(0BW`Z_@Q4|MLBif_CmCs ziJc`s<6USk(K;Tsia{T$OW$x{93KwrWYp(MU5WN?5iJYRQ=d|ny;^v_Pvhib#>4kl znhd=55Z-8-XNHm4uG(=pm)?8E1UW*&_cKIGx<)NEapMd1gfyp8n)Vbg+#-y{hMy99 
z^OzRJFXI!(81U5fvyen_y)ZQ+}jHw%HIxxjgVCwnfLeAt7pgoF)@*8K=k@ zoh;Ibi@iA03J1%s#!=|pamjl{@yM{rk&3=gv*v?of&r%a*5PLIGNS9YN}RSZpZ>O$ zp+8JWsm7XeWNs&en$BkpUx}~h5JJ)k_oUaR=S5nY-l$k)b?oVSAY+-A#3Cmt zFFBm0X}(&!2!*`0s77AbB%jy9*cA8EgGG?8euu@*U%GO%=K))6f;P2wsD<*py=FDi zd&pdE`4A599Q~Q@S33X*>iYB2Rfh<3z;WPX&+l6#o-yzaS(gzMlHY=$h8e(Fc$4jy zi}v=LwTq|=d>i#{O7YsDH1we@urkHM0+Gpk6(4%CDitm+tzJa1IDchJz9W|aE+k$) zI~9PRwx~f~FO(5l9M1E}az72`A62;U{`?qQtnXu*1Sy(pjN(BaFoogZD-VJ)>uhL^ zO*&iHPj;UDg{V6ybnNIcEgAuc0c-MCeTo&bG(PRFn7r>kP{TFL=kVFqI43@6pr*=+ zBlfK15%cM#$J4D97b4nuxtbC);UNamT(wXGFdJ#ovm{p-35SG`GPt7qPhJ;opdKRw zz@cd0>_f{&04}A3Hd#lwf7N_|hz1<8=c#6)!IBvWDc{>BF<#B>re}k?qzT@7x(R(q z{<4W-#c>^nzu2SC$F8`MC%=s;(5V0qocbbO@-kfmk-FF$iYMi7lr{S5@tGV!1`fo| zBWGOo_8pQPKz(sRL$>Wo@Gn9wL#-C;>sASnKA~r?5p_HHoQCa28U2wl6&GmJHfZAH6uq~vN)4-6n_zHK=QfbtKmv< z-~%C|0VBd^eS1?NI2h6}TmST(_Ugg@>h6-agZ5C#u%$(~++Lf2YbO?F`0_qK*Sq)W z3yK>4%EUgGAUF^1j`P`j`=snu?=?P9=08IBovDr?F_@Jc+EAT|Ih)s#QHsdGH zMh~y+&byYNpOgjgr1t-vua}iS@n>_xlw%?*yOE#m?Yjr{; zW1+(bzpl=aImy){IcGk3lT&e~D9U1*_GY(@Hl$e2@?o4y20mJS8t`syHl)*KGk+J} zE?XoT>v2=^@T-moI2GS+3TLe&_#7s*U?KloZH`l_IrX4d>rG}b<)w^Xz4(sBc_^8G z(X+6@3ofCx*pp1ZQW>Bhfc#_4WU;6ii|z2~zH+MB(iM4U-j}uQJVm-WZ+SF*T8&e2 z>4*vfZSoD&FHi9H{HqV`Q!wJ|^+ayZ+eQE7N66XMg#I7SBvDR9K8IN4tPVfuK=1C{ z0y>#mIgN#O)fzVndWLvdlQCITSR19+#&f=Yw3rZnZ_UUXvFKGMXLN*9aq5b-I>;it z%)f0UX=kJ#-W(pz7E3UV^x>%m7bW=7&TuMrK=)aM$>yU~J_>|z#O4oZIc<(wGd8hw zSa*xQAM%fNlTI!yNbiHsKpHpJxHOXVgZLU-!15OZof)) zsbOndVF+{uv;!f5pH@N^uaLnvZw8V9A8rsM(!1oP<%Fl}mC$mW3UneejAG#v7X;Z? zx63q+%iSC4s2ei#y@+NW8K$b~R-+w6YVjXMVzbtUAtnA@MzS%McW|`zw19@qR$Kru zN2>sbzUf(86{5iL$+b2_)2wykrE)g?fzV=^WVa=ncv~I%Ydlz82P_P1)Lx;Wb}uPLX?o-GvzizQtaS;}+QxeI)$(o<{HYhMH%ywR?Or=; z=b90aAQ}}UfX?WZLl@LKMBXK_WVCUzPinm?c=E(qynFfXTWaU3a? zdu!5DnDTq;FA9V!U@#zUGZH2SuJ-XG19|2wF6gWGWg0n`>n#qqe5q)*^Ap~PQxrdH!VHyRP|5Uq8QZ|IVPzf< zK-KWO0(e3LT4yDSRJ6!WWI_UQRnGt;UKv3(> zq8Bt5>#l$V^cQO@jW5D-bMU0t!K_9C1~=h+0;8WjYnKzmv<;3xQ1L_v-M8(V1y@Rr&kMy^8tKyU|S(_x%@dju-6{#N1g16`M8? 
z>)q8o2L5szeox80iqTd_t%ck(5Cr}h-|l11D$AwH-uTa6073sq3t;k&57cvDNlyE` z=-;(y_yzvRHkreMn31KoK%fMk3w)nqV#4I)%3#ks^^W=Y1cT$3qInkQQW(Q(()jLr zgJ3G|uP6VcIPjqAg(K_Ge*d4p0@*y57#UA#p*Jthm^zX@=&m(Q^0s14K0fvet_y$qp*893aHd)!O zuOR6#tP)|_9+bmi0B|=hpm!~7+}Dz=B>gF9V&RYe0bK1J^CjqzAF3Q#rfx{Goa0wa zTbs3F)$fN*4xGc|o*4dU7tjcZUm^vA%dJ`UMLrQSldR8&)SHsLE`lAR3+zA3H2zQn zmPc#68nIwizqxzX#e?^>m!s$Ao^fkh4P*4-AsjWq?;+|AI5;cd&{~(9jyM_iJzv?qsPzii@XQ<; zz<(4~+Ee5-kz(*QN3)~=`&S3YN)n&m(`j-kBqK`28TA72K*L%6IxJ-cEXRqn0Wx;L9KQ3a)kqJWVR}p{sHT`OqKb zid+w^a2pauAhbiNy^<(%X;baWH@KqlLD9!eWS8l#`f7mg0WbM{z2P_f5^yBe55SaQ zu4VefxM`n)H|(fZz0Y#&)rTh*@}tknQk@H=#E7HF>W0t~IGNb~-b*Ufz9NTGs- z?8sAC&vpM8BzTe4%fnPL=!jomhyQR^W=6J7P`Wt2QNtSuu1aD3v7P3kb(*%c?!D1P z-#XgQ^Er3~uRU{GjkYu{f}{t3_aQv#GB=UoZA9ptd1H&7Qjd*6aG?Q$0pOC9V2{4oOcT7d4!jEto19l( zAA^_p|Ksn9+vQ<)V4brwwl3NajKHK0PQ}~%`?E8iS~nK3IQPDdezv1LQ($0Ql3CVUWEKt^qPp4^n;NE+WW`OF9X4tAJ#)!$2 zAg#fpcaMY)vFbfJ(^PeKCh9{{1Dx)<7*~p|SVx0a({uU-OSCJNmTITyIL%?kb%~qe ztfCwWE%EC(O6kVh%~PEkL8k(lhP2l#oEv$utYoc&yE7>Tbm`Eumm;|d#uU_7>5^<7 z7;_Nh8D>cRs-#PmUud~1}+ zdL}bOt)Bz}5(RH)SX&&IU9bcB!yv}cpzyj`!F#~FaMfl7hmOWBi+eRJ0jWwF?}36w z46xeQ_%#L}_2=9JD^29x4F{AC$ZcZP|F9BGQQR1@Pq*G|>TAH2ZfL-?0c1-TBMDmA zz+qN_3ozXVO2?WC)4g~{-<`)Q&CrSfr5~U^`YzcxPZV?7?dVK87 z4CLkQf@D|m=m%5k*@hFj41ZFrCk&s)2pO!C67k*mtHR5y>g;8zon7GAb+;_q+ToFj zQpimXA-}jjJY91e6w!i3JNr1iR0cqCukzDLgj>$DjhEWV-E!V~OY{b@w-i#)X-^xZ zw4URIO`#eV@#2IDq7h)XbIP_}3Ov4m0WHKnQ^2QH&6*3Mc7t(wV!1%tQ6n@ydaW$( zqmQImX5jG6TeroDNV;Ic8v_+>P>$4bvM5xm9X2+ZLyhpKYCc&ub6q@KoZ-@3>w6>e&MUjG)7~0S)OmB)i5P$qJ zj!sTr}y0-0*@U6163YKT!Gj#nO0>IVvn zE^X0Vh}u9P*KixtYtI!=P`hE?#SfnjY$6Aijr2Qnd1QjSY;s+Fj&UkxlfVU*t#Huy z&tO8fLWyzN6`ReV#Z90D1zVzAU!2JsP)-9B1)w$bQWtJ8`5`0?0N5 zZw(bZdC+?$)Isc1t&~*MfS}z-xiYZNH3a0tzo)7~UR|4+6T-bmeK-{ZM?D8uKILYu zC0*~u0^5R~0?1hy)FY^ZOtXU#;;zerT5FPu$!(ju7I{LsEaD@Fs!4%V4-8`65ZDlK zNBz!blXTNCKdvie{#nhfRkvdA+Wpp6s{5S1=fMVIHWEMfP8Vs!T$uaTaoEQ$v_P)u z0Y5FuxqC74t!yz!VZLmd_>9+iX!jqzgF=IbN7^d9gV!ck+T2O|GHhZHZ2JC%Mo2fH zG9okvYk9cxxy9P0wk;Ico5>(of90^(tlQAZfo${7i0MP;7`xyYXS;Trgvpn?r=ee+ zKa(~kn)WUOI8*1Mw&nf}MvPg>HPWB{CT;RbLfPrgelGAsvHTD6f(#lodDsXuZO z`mMbiRb=DmGQ{BG3bdJbgq7ul=U^7iAj|`q=BRT{Cn~Ms{i7)F^zkw0PQQ|_nX6x&iwA@Xs@I}skE6BcqV7dQ z(pB+wIdK3nuIXggM(O!RG3D#EC@K^h+&_9UkE)?`>aBMh zZ#9NPy79;A^+pZC4>@cAx31SH)yj$yW6=L)+Fqm}RRv!dg*@ICH^4n}1Sl7Ched7p z&@f`g3831WKdSfN3lotv_pS$&ZF54`x)0+$qUwJ$VYUfsr0y`RO*2TV1lIGi${N8F z!!1T{|C@i(M9qJ_8p6_|EQxFiM(kJR8u&mQ+%0PR9 zyenx{0pEWUY4g4ARL3asWY?s`H}(!OY;gjX+GbT*+#R zN8D?NPz~89`emB#q3$yAn*t7@Kt4iV55CdjM|iZmW~FB1hn#rBTH#p~Kw2jCB?&mJ z__rL@{sCY5UgXxPtdI+k)wx+gSW+{h-_hWdY<0ZqRO>Uh63{7WE%e8g*zxN%n(Irk zZ}O(=;e1y-YM0-^`~b>Io+r;tQ)#daV7K~o*R@hIp-XS*%BXQXQf9H zoE3*h-U2bi=y?jT_TuD`(b16cG1J_6g(mHsk9kzj&Xb&qX$!*VxNz|G7Uss77%m;B zo3YsP(M4|~iFXMan+yI+hYBcucZxSPi@h@_YA&@pUVGe}jU28-sF@=zdp^Y2WuY39 z;_Wnwq3iQpeaoia3izU-_}TdyY}VpB$Z6)L?3!#fA_^H6S*TfRene%%-L zcx5OCg(D3;EUC%R<>Q)TMAmLh1Qc8?JkBvTo4q>u{xcL5yduph z-lp|oqYUS5g3+d53Ax$+f`+R~jmaU36=Z+iKs1mG*ojr^qJ$Z5TgyUK)j{rYA=%{z ziw>8Bk)s7)3Ze4!MwQoe&ey;B<7$QVcxbcD zaF`q=T@b>OBXTW0mUY2Ma3k9cBX`vWOB71@u$D34y*op+h^V?-J{H$LSJJl z&h9_k@PQ9Up-eGGUjFZ53>IW>ZLk&6!{|7CX9vnC=|vg}p4+c>5!w_Iva?uo$yn!n z$nB>wzT(V6j`n~)i27~Ze>Q=55u>%)ca5YmcdL_Y7-YMy0ua0FxPr%PXU>P~R57h> z-0-Li>!KGrCzr%7V5h^{_CeU)(QP}*uuNP2e066A{`t_a{!gfI7Ijz(~^;cffxu|${wOWMW7wpV;R9(J6HXxK?GIeY@V+_csNQ<}&h z2)0;idk^oy)C)&z?ypE-ejH}=6&d*U>^CuLrazDEY)LZ0 z99WndC!v$X`)sj~2sS{kun0Y-XF=py9+p%zWnCm{vf7%w90%S=UAtM-yO-TJ2Ey-f zYUEn4;!abkKXLTfSkdCj875ho7|EQH=C#!)I#uC8gI(K4C%U(H^tr;~U4LoxmBE*o zNSr7Qkz%fjYaMfFvfqp-5S8I=dAGZno!QO|ayHen^>(hE{}sSKlUin8G1ctlBwO8y 
z*VgEFP+-YZf41Jbxvl9|=OX-nX=zjk6tg}COss5mwDJ`IW}Hz`)x>*{l=gFA@K1BY zaLetK{mUd^6*G>Qb8Ve(m1FM68S^z66IZ;V)98M8Tc%SSTQWKWGN}Be_nHZ?#|+|Y zskPO8tI`HoF8@xofP`?ieSX`A;3_X|D|+Y2MB>(?eoFYL1z@;!@#!$8KIx#D)AXzU z0s-XnbKt*~k3MG<#CW@@Sx@6OMompe3G7U|dW-<$GC5EXxz#Byc`pD2UTV2S?#tnh z2ezk~{Ikw5%-;9Q%|#P3FImI~ANPARSL)++<}}e?$R>b%eh&PX@@Y0?T`buI>e7JveR5pA%v8S~UnZQ&O@G#6+R?TQdroh!r`!vj`d_+m*)Snb z-Iu)8giVb(P4uu8Zch}X`{%%aDIX!}qu8^5ZNZ+-@=ut}61V#06qF>8&uXX@if%QG zvyun);?+;4*Bj`tK2+N_POFn!mY;Kz*ATMxsGoJ5!~y_lZ46A1%#}Lo!kng>^{fUU zpPvK&t$f5eS{qJo_u+H3+g841zY8%;DgFZ!KVFC5_m_+u(A{c`*U9uQHR)H^|CDN0 z+`T=`&TUMn^k5_W(u~WO|8n@_*^h*+UThPmfN83`xQ9ss^7%RNU&_bfH2@~;+bz!i zC71yAGqo*4*XP^%o@{({?3c!I#elN-im1`H`Ua*mr#bCWL<0QCKLh`zd}8?kFxhU$ z_wrwY$xD`&Bir(qLoJv?Wtx9UV9mc-8)#ji_#KFmHCn*!2Fl ztDkmmRo;4^qgMge=J|niuk9DvF}oe?)HY2r573gmnPok{ANt=!vvX+y&hWQPf2-7Q oi~0|_{Xc#94JiMA Any: from azure.ai.ml.entities import BatchDeployment + exit_if_registry_assets(data=data, caller="BatchDeployment") return BatchDeployment(base_path=self.context[BASE_PATH_CONTEXT_KEY], **data) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/deployment.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/deployment.py index 90d808fe53eb..0093e6821934 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/deployment.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/deployment.py @@ -6,7 +6,7 @@ from azure.ai.ml._schema import NestedField, PathAwareSchema from azure.ai.ml._schema.assets.environment import EnvironmentSchema, AnonymousEnvironmentSchema -from azure.ai.ml._schema.core.fields import ArmVersionedStr, UnionField +from azure.ai.ml._schema.core.fields import ArmVersionedStr, UnionField, RegistryStr from azure.ai.ml._schema.assets.model import AnonymousModelSchema from azure.ai.ml.constants import AzureMLResourceType from marshmallow import fields @@ -25,6 +25,7 @@ class DeploymentSchema(PathAwareSchema): properties = fields.Dict() model = UnionField( [ + RegistryStr(azureml_type=AzureMLResourceType.MODEL), ArmVersionedStr(azureml_type=AzureMLResourceType.MODEL, allow_default_version=True), NestedField(AnonymousModelSchema), ], @@ -35,6 +36,7 @@ class DeploymentSchema(PathAwareSchema): ) environment = UnionField( [ + RegistryStr(azureml_type=AzureMLResourceType.ENVIRONMENT), ArmVersionedStr(azureml_type=AzureMLResourceType.ENVIRONMENT, allow_default_version=True), NestedField(EnvironmentSchema), NestedField(AnonymousEnvironmentSchema), diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/online/online_deployment.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/online/online_deployment.py index 93246ae854e0..9567f5578124 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/online/online_deployment.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/online/online_deployment.py @@ -17,6 +17,7 @@ from azure.ai.ml._schema._deployment.deployment import DeploymentSchema from azure.ai.ml._schema import ExperimentalField from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, PublicNetworkAccess +from azure.ai.ml._schema._utils.utils import exit_if_registry_assets module_logger = logging.getLogger(__name__) @@ -47,6 +48,7 @@ class KubernetesOnlineDeploymentSchema(OnlineDeploymentSchema): def make(self, data: Any, **kwargs: Any) -> Any: from azure.ai.ml.entities import KubernetesOnlineDeployment + exit_if_registry_assets(data=data, caller="K8SDeployment") return KubernetesOnlineDeployment(base_path=self.context[BASE_PATH_CONTEXT_KEY], **data) diff --git 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_sweep/sweep_termination.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_sweep/sweep_termination.py
index abf4365fd2f2..ac1541f637d7 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_sweep/sweep_termination.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_sweep/sweep_termination.py
@@ -31,6 +31,7 @@ class BanditPolicySchema(EarlyTerminationPolicySchema):
     def make(self, data, **kwargs):
         from azure.ai.ml.sweep import BanditPolicy

+        data.pop("type", None)
         return BanditPolicy(**data)

@@ -43,6 +44,7 @@ class MedianStoppingPolicySchema(EarlyTerminationPolicySchema):
     def make(self, data, **kwargs):
         from azure.ai.ml.sweep import MedianStoppingPolicy

+        data.pop("type", None)
         return MedianStoppingPolicy(**data)

@@ -56,4 +58,5 @@ class TruncationSelectionPolicySchema(EarlyTerminationPolicySchema):
     def make(self, data, **kwargs):
         from azure.ai.ml.sweep import TruncationSelectionPolicy

+        data.pop("type", None)
         return TruncationSelectionPolicy(**data)
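The three hunks above apply the same pattern: the deserialized payload still carries the schema's "type" discriminator, which the policy constructors do not accept, so it is dropped before **data is splatted. A minimal illustration (the dict literal is invented for the example):

# Hypothetical deserialized payload for a bandit policy.
data = {"type": "bandit", "slack_factor": 0.2, "evaluation_interval": 1}
data.pop("type", None)
# BanditPolicy(**data) would otherwise raise TypeError for the unexpected "type" kwarg.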
- """ - if hasattr(schema, "_data_binding_supported") and schema._data_binding_supported: - return - else: - schema._data_binding_supported = True - - if attrs_to_skip is None: - attrs_to_skip = [] - if schema_stack is None: - schema_stack = [] - schema_type_name = type(schema).__name__ - if schema_type_name in schema_stack: - return - schema_stack.append(schema_type_name) - for attr, field_obj in schema.load_fields.items(): - if attr not in attrs_to_skip: - schema.load_fields[attr] = _add_data_binding_to_field(field_obj, attrs_to_skip, schema_stack=schema_stack) - for attr, field_obj in schema.dump_fields.items(): - if attr not in attrs_to_skip: - schema.dump_fields[attr] = _add_data_binding_to_field(field_obj, attrs_to_skip, schema_stack=schema_stack) - schema_stack.pop() diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/data_binding_expression.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/data_binding_expression.py index 94feafc180e6..f2e661927a8b 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/data_binding_expression.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/data_binding_expression.py @@ -6,18 +6,22 @@ from azure.ai.ml._schema import NestedField, PathAwareSchema from azure.ai.ml._schema.core.fields import DataBindingStr, UnionField +DATA_BINDING_SUPPORTED_KEY = "_data_binding_supported" + def _is_literal(field): return not isinstance(field, (NestedField, fields.List, fields.Dict, UnionField)) def _add_data_binding_to_field(field, attrs_to_skip, schema_stack): + if hasattr(field, DATA_BINDING_SUPPORTED_KEY) and getattr(field, DATA_BINDING_SUPPORTED_KEY): + return field data_binding_field = DataBindingStr() if isinstance(field, UnionField): for field_obj in field.union_fields: if not _is_literal(field_obj): _add_data_binding_to_field(field_obj, attrs_to_skip, schema_stack=schema_stack) - field.union_fields.insert(0, data_binding_field) + field.insert_union_field(data_binding_field) elif isinstance(field, fields.Dict): # handle dict, dict value can be None if field.value_field is not None: @@ -37,6 +41,8 @@ def _add_data_binding_to_field(field, attrs_to_skip, schema_stack): dump_only=field.dump_only, required=field.required, ) + + setattr(field, DATA_BINDING_SUPPORTED_KEY, True) return field @@ -46,10 +52,10 @@ def support_data_binding_expression_for_fields( """Update fields inside schema to support data binding string. Only first layer of recursive schema is supported now. 
""" - if hasattr(schema, "_data_binding_supported") and schema._data_binding_supported: + if hasattr(schema, DATA_BINDING_SUPPORTED_KEY) and getattr(schema, DATA_BINDING_SUPPORTED_KEY): return else: - schema._data_binding_supported = True + setattr(schema, DATA_BINDING_SUPPORTED_KEY, True) if attrs_to_skip is None: attrs_to_skip = [] diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/utils.py index 6d8843444f07..ab95f8d51b6c 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_utils/utils.py @@ -6,7 +6,7 @@ import re from marshmallow.exceptions import ValidationError -from typing import Any +from typing import Any, Dict from collections import OrderedDict module_logger = logging.getLogger(__name__) @@ -38,3 +38,24 @@ def replace_key_in_odict(odict: OrderedDict, old_key: Any, new_key: Any): if not odict or old_key not in odict: return odict return OrderedDict([(new_key, v) if k == old_key else (k, v) for k, v in odict.items()]) + + +# This is temporary until deployments(batch/K8S) support registry references +def exit_if_registry_assets(data: Dict, caller: str) -> None: + startswith = "azureml://registries/" + if ( + "environment" in data + and data["environment"] + and isinstance(data["environment"], str) + and data["environment"].startswith(startswith) + ): + raise ValidationError(f"Registry reference for environments is not supported for {caller}") + if "model" in data and data["model"] and isinstance(data["model"], str) and data["model"].startswith(startswith): + raise ValidationError(f"Registry reference for models is not supported for {caller}") + if ( + "code_configuration" in data + and data["code_configuration"].code + and isinstance(data["code_configuration"].code, str) + and data["code_configuration"].code.startswith(startswith) + ): + raise ValidationError(f"Registry reference for code_configuration.code is not supported for {caller}") diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py index 28b61b1f0940..07b93b816195 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py @@ -6,8 +6,8 @@ LogVerbosity, ) from azure.ai.ml._schema.automl.automl_job import AutoMLJobSchema -from azure.ai.ml._schema.core.fields import fields, StringTransformedEnum -from azure.ai.ml._schema.job.input_output_fields_provider import InputsField +from azure.ai.ml._schema.core.fields import StringTransformedEnum, NestedField, UnionField +from azure.ai.ml._schema.job.input_output_entry import InputSchema from azure.ai.ml._utils.utils import camel_to_snake @@ -17,4 +17,4 @@ class AutoMLVerticalSchema(AutoMLJobSchema): casing_transform=camel_to_snake, load_default=LogVerbosity.INFO, ) - training_data = InputsField() + training_data = UnionField([NestedField(InputSchema)]) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py index 34de753d33bc..472b5d9581bf 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py @@ -42,7 +42,7 @@ def make(self, data, **kwargs) -> "ImageClassificationJob": data.pop("task_type") loaded_data = data - 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py
index 28b61b1f0940..07b93b816195 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/automl_vertical.py
@@ -6,8 +6,8 @@
     LogVerbosity,
 )
 from azure.ai.ml._schema.automl.automl_job import AutoMLJobSchema
-from azure.ai.ml._schema.core.fields import fields, StringTransformedEnum
-from azure.ai.ml._schema.job.input_output_fields_provider import InputsField
+from azure.ai.ml._schema.core.fields import StringTransformedEnum, NestedField, UnionField
+from azure.ai.ml._schema.job.input_output_entry import InputSchema
 from azure.ai.ml._utils.utils import camel_to_snake


@@ -17,4 +17,4 @@ class AutoMLVerticalSchema(AutoMLJobSchema):
         casing_transform=camel_to_snake,
         load_default=LogVerbosity.INFO,
     )
-    training_data = InputsField()
+    training_data = UnionField([NestedField(InputSchema)])
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py
index 34de753d33bc..472b5d9581bf 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_classification.py
@@ -42,7 +42,7 @@ def make(self, data, **kwargs) -> "ImageClassificationJob":
         data.pop("task_type")
         loaded_data = data
-        search_space_val = data.pop("search_space")
+        search_space_val = data.pop("search_space", None)
         search_space = ImageClassificationJob._get_search_space_from_str(search_space_val)
         data_settings = {
             "training_data": loaded_data.pop("training_data"),
@@ -74,7 +74,7 @@ def make(self, data, **kwargs) -> "ImageClassificationMultilabelJob":
         data.pop("task_type")
         loaded_data = data
-        search_space_val = data.pop("search_space")
+        search_space_val = data.pop("search_space", None)
         search_space = ImageClassificationMultilabelJob._get_search_space_from_str(search_space_val)
         data_settings = {
             "training_data": loaded_data.pop("training_data"),
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_object_detection.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_object_detection.py
index 9a20330c39a4..fd5a0853c062 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_object_detection.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_object_detection.py
@@ -43,7 +43,7 @@ def make(self, data, **kwargs) -> "ImageObjectDetectionJob":
         data.pop("task_type")
         loaded_data = data
-        search_space_val = data.pop("search_space")
+        search_space_val = data.pop("search_space", None)
         search_space = ImageObjectDetectionJob._get_search_space_from_str(search_space_val)
         data_settings = {
             "training_data": loaded_data.pop("training_data"),
@@ -75,7 +75,7 @@ def make(self, data, **kwargs) -> "ImageInstanceSegmentationJob":
         data.pop("task_type")
         loaded_data = data
-        search_space_val = data.pop("search_space")
+        search_space_val = data.pop("search_space", None)
         search_space = ImageInstanceSegmentationJob._get_search_space_from_str(search_space_val)
         data_settings = {
             "training_data": loaded_data.pop("training_data"),
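The one-line change repeated across the four AutoML image make() methods guards against jobs defined without a search space. In plain dict terms:

data = {"training_data": {"path": "./data"}}
assert data.pop("search_space", None) is None  # absent key now yields None
# data.pop("search_space") would instead raise KeyError: 'search_space'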
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_vertical.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_vertical.py
index 4e8ff2367692..0ed846344ddd 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_vertical.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/image_vertical/image_vertical.py
@@ -2,18 +2,18 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------

-from azure.ai.ml._schema.core.fields import fields, NestedField
+from azure.ai.ml._schema.core.fields import fields, NestedField, UnionField
 from azure.ai.ml._schema.automl.automl_vertical import AutoMLVerticalSchema
 from azure.ai.ml._schema.automl.image_vertical.image_limit_settings import ImageLimitsSchema
 from azure.ai.ml._schema.automl.image_vertical.image_sweep_settings import ImageSweepSettingsSchema
-from azure.ai.ml._schema.job.input_output_fields_provider import InputsField
+from azure.ai.ml._schema.job.input_output_entry import InputSchema


 class ImageVerticalSchema(AutoMLVerticalSchema):
     limits = NestedField(ImageLimitsSchema())
     sweep = NestedField(ImageSweepSettingsSchema())
     target_column_name = fields.Str(required=True)
-    test_data = InputsField()
+    test_data = UnionField([NestedField(InputSchema)])
     test_data_size = fields.Float()
-    validation_data = InputsField()
+    validation_data = UnionField([NestedField(InputSchema)])
     validation_data_size = fields.Float()
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/nlp_vertical/nlp_vertical.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/nlp_vertical/nlp_vertical.py
index efd7e70b1b36..635a6310e50e 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/nlp_vertical/nlp_vertical.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/nlp_vertical/nlp_vertical.py
@@ -1,16 +1,15 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-
 from azure.ai.ml.constants import AutoMLConstants
 from azure.ai.ml._schema.automl.automl_vertical import AutoMLVerticalSchema
-from azure.ai.ml._schema.core.fields import NestedField
+from azure.ai.ml._schema.core.fields import NestedField, UnionField
 from azure.ai.ml._schema.automl.featurization_settings import NlpFeaturizationSettingsSchema
-from azure.ai.ml._schema.job.input_output_fields_provider import InputsField
 from azure.ai.ml._schema.automl.nlp_vertical.nlp_vertical_limit_settings import NlpLimitsSchema
+from azure.ai.ml._schema.job.input_output_entry import InputSchema


 class NlpVerticalSchema(AutoMLVerticalSchema):
     limits = NestedField(NlpLimitsSchema())
     featurization = NestedField(NlpFeaturizationSettingsSchema(), data_key=AutoMLConstants.FEATURIZATION_YAML)
-    validation_data = InputsField()
+    validation_data = UnionField([NestedField(InputSchema)])
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/table_vertical/table_vertical.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/table_vertical/table_vertical.py
index c505ef8e2cfc..2109be5d9385 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/table_vertical/table_vertical.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/table_vertical/table_vertical.py
@@ -1,13 +1,12 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-
 from azure.ai.ml.constants import AutoMLConstants
 from azure.ai.ml._schema.core.fields import fields, NestedField, StringTransformedEnum, UnionField
 from azure.ai.ml._schema.automl.automl_vertical import AutoMLVerticalSchema
 from azure.ai.ml._schema.automl.featurization_settings import TableFeaturizationSettingsSchema
 from azure.ai.ml._schema.automl.table_vertical.table_vertical_limit_settings import AutoMLTableLimitsSchema
-from azure.ai.ml._schema.job.input_output_fields_provider import InputsField
+from azure.ai.ml._schema.job.input_output_entry import InputSchema
 from azure.ai.ml._restclient.v2022_02_01_preview.models import (
     NCrossValidationsMode,
 )
@@ -17,7 +16,7 @@ class AutoMLTableVerticalSchema(AutoMLVerticalSchema):
     limits = NestedField(AutoMLTableLimitsSchema(), data_key=AutoMLConstants.LIMITS_YAML)
     featurization = NestedField(TableFeaturizationSettingsSchema(), data_key=AutoMLConstants.FEATURIZATION_YAML)
     target_column_name = fields.Str(required=True)
-    validation_data = InputsField()
+    validation_data = UnionField([NestedField(InputSchema)])
     validation_data_size = fields.Float()
     cv_split_column_names = fields.List(fields.Str())
     n_cross_validations = UnionField(
@@ -27,5 +26,5 @@
         ],
     )
     weight_column_name = fields.Str()
-    test_data = InputsField()
+    test_data = UnionField([NestedField(InputSchema)])
     test_data_size = fields.Float()
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/command_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/command_component.py
index d26a1a586a31..ce43d3bf5b56 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/command_component.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/command_component.py
@@ -5,8 +5,7 @@
 from copy import deepcopy
 from marshmallow import fields, post_load, INCLUDE
 from azure.ai.ml._schema import StringTransformedEnum, UnionField, NestedField, ArmVersionedStr
-from azure.ai.ml._schema.assets.code_asset import AnonymousCodeAssetSchema
-from azure.ai.ml._schema.core.fields import FileRefField, RegistryStr, LocalPathField, SerializeValidatedUrl
+from azure.ai.ml._schema.core.fields import FileRefField, RegistryStr, LocalPathField, SerializeValidatedUrl, GitStr
 from azure.ai.ml._schema.assets.asset import AnonymousAssetSchema
 from azure.ai.ml._schema.component.component import BaseComponentSchema
 from azure.ai.ml._schema.component.resource import ComponentResourceSchema
@@ -27,6 +26,8 @@ class CommandComponentSchema(BaseComponentSchema):
             SerializeValidatedUrl(),
             LocalPathField(),
             RegistryStr(azureml_type=AzureMLResourceType.CODE),
+            # Accept str to support git paths
+            GitStr(),
             # put arm versioned string at last order as it can deserialize any string into "azureml:"
             ArmVersionedStr(azureml_type=AzureMLResourceType.CODE),
         ],
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/fields.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/fields.py
index 677421b3d296..280cd478cd8a 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/fields.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/fields.py
@@ -27,7 +27,7 @@
     REGISTRY_URI_FORMAT,
 )

-from azure.ai.ml._schema import PathAwareSchema
+from azure.ai.ml._schema import PathAwareSchema, YamlFileSchema
 from azure.ai.ml._utils._arm_id_utils import (
     AMLVersionedArmId,
     is_ARM_id_for_resource,
@@ -42,8 +42,8 @@
 from marshmallow import RAISE, fields
 from marshmallow.exceptions import ValidationError
 from marshmallow.fields import Field, Nested, _T
-from marshmallow.utils import FieldInstanceResolutionError, resolve_field_instance
-
+from marshmallow.utils import FieldInstanceResolutionError, resolve_field_instance, from_iso_datetime
+from azure.ai.ml.entities._job.pipeline._attr_dict import try_get_non_arbitrary_attr_for_potential_attr_dict

 module_logger = logging.getLogger(__name__)

@@ -166,6 +166,28 @@ def _validate(self, value):
         raise ValidationError(f"Value passed is not a data binding string: {value}")


+class DateTimeStr(fields.Str):
+    def _jsonschema_type_mapping(self):
+        schema = {"type": "string"}
+        if self.name is not None:
+            schema["title"] = self.name
+        if self.dump_only:
+            schema["readonly"] = True
+        return schema
+
+    def _serialize(self, value, attr, obj, **kwargs):
+        if value is None:
+            return None
+        self._validate(value)
+        return super(DateTimeStr, self)._serialize(value, attr, obj, **kwargs)
+
+    def _validate(self, value):
+        try:
+            from_iso_datetime(value)
+        except Exception:
+            raise ValidationError(f"Not a valid ISO8601-formatted datetime string: {value}")
+
+
 class ArmStr(Field):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -337,18 +359,23 @@ def __init__(self, union_fields: List[fields.Field], **kwargs):

     @property
     def union_fields(self):
-        return self._union_fields
+        return iter(self._union_fields)
+
+    def insert_union_field(self, field):
+        self._union_fields.insert(0, field)

     # This sets the parent for the schema and also handles nesting.
     def _bind_to_schema(self, field_name, schema):
         super()._bind_to_schema(field_name, schema)
+        self._union_fields = self._create_bind_fields(self._union_fields, field_name)
+
+    def _create_bind_fields(self, _fields, field_name):
         new_union_fields = []
-        for field in self._union_fields:
+        for field in _fields:
             field = copy.deepcopy(field)
             field._bind_to_schema(field_name, self)
             new_union_fields.append(field)
-
-        self._union_fields = new_union_fields
+        return new_union_fields

     def _serialize(self, value, attr, obj, **kwargs):
         if value is None:
@@ -370,6 +397,10 @@ def _deserialize(self, value, attr, data, **kwargs):
             try:
                 return schema.deserialize(value, attr, data, **kwargs)
             except ValidationError as e:
+                errors.append(e.normalized_messages())
+            except (ValidationException, FileNotFoundError) as e:
+                errors.append([str(e)])
+            finally:
                 # Revert base path to the original path when a job schema fails to deserialize a job. For example, when
                 # loading a parallel job with a component file reference starting with the FILE prefix, the first
                 # CommandSchema may load the component yaml according to AnonymousCommandComponentSchema, and YamlFileSchema will update base
@@ -386,10 +417,136 @@ def _deserialize(self, value, attr, data, **kwargs):
                     schema.schema.context[BASE_PATH_CONTEXT_KEY] = schema.schema.old_base_path
                     # recover base path of parent schema
                     schema.context[BASE_PATH_CONTEXT_KEY] = schema.schema.context[BASE_PATH_CONTEXT_KEY]
-                errors.append(e.normalized_messages())
         raise ValidationError(errors, field_name=attr)
+ """ + + def __init__( + self, + type_sensitive_fields_dict: typing.Dict[str, List[fields.Field]], + *, + plain_union_fields: List[fields.Field] = None, + allow_load_from_file: bool = True, + type_field_name="type", + **kwargs, + ): + """ + param type_sensitive_fields_dict: a dict of type name to list of type sensitive fields + param plain_union_fields: list of fields that will be used if value doesn't have type field + type plain_union_fields: List[fields.Field] + param allow_load_from_file: whether to allow load from file, default to True + type allow_load_from_file: bool + param type_field_name: field name of type field, default value is "type" + type type_field_name: str + """ + self._type_sensitive_fields_dict = {} + self._allow_load_from_yaml = allow_load_from_file + + union_fields = plain_union_fields or [] + for type_name, type_sensitive_fields in type_sensitive_fields_dict.items(): + union_fields.extend(type_sensitive_fields) + self._type_sensitive_fields_dict[type_name] = [ + resolve_field_instance(cls_or_instance) for cls_or_instance in type_sensitive_fields + ] + + super(TypeSensitiveUnionField, self).__init__(union_fields, **kwargs) + self._type_field_name = type_field_name + + def _bind_to_schema(self, field_name, schema): + super()._bind_to_schema(field_name, schema) + for type_name, type_sensitive_fields in self._type_sensitive_fields_dict.items(): + self._type_sensitive_fields_dict[type_name] = self._create_bind_fields(type_sensitive_fields, field_name) + + @property + def type_field_name(self) -> str: + return self._type_field_name + + @property + def allowed_types(self) -> List[str]: + return list(self._type_sensitive_fields_dict.keys()) + + def _raise_simplified_error_base_on_type(self, e, value, attr): + """ + If value doesn't have type, raise original error; + If value has type & its type doesn't match any allowed types, raise "Value {} not in set {}"; + If value has type & its type matches at least 1 field, return the first matched error message; + """ + value_type = try_get_non_arbitrary_attr_for_potential_attr_dict(value, self.type_field_name) + if value_type is None: + # if value has no type field, raise original error + raise e + elif value_type not in self.allowed_types: + # if value has type field but its value doesn't match any allowed value, raise ValidationError directly + raise ValidationError( + message={self.type_field_name: f"Value {value_type} passed is not in set {self.allowed_types}"}, + field_name=attr, + ) + else: + # if value has type field and its value match at least 1 allowed value, raise first matched + for error in e.messages: + # for non-nested schema, their error message will be {"_schema": ["xxx"]} + if len(error) == 1 and "_schema" in error: + continue + # for nested schema, type field won't be within error only if type field value is matched + # then return first matched error message + if self.type_field_name not in error: + raise ValidationError(message=error, field_name=attr) + # shouldn't reach here + raise e + + def _serialize(self, value, attr, obj, **kwargs): + union_fields = self._union_fields[:] + value_type = try_get_non_arbitrary_attr_for_potential_attr_dict(value, self.type_field_name) + if value_type is not None and value_type in self.allowed_types: + target_fields = self._type_sensitive_fields_dict[value_type] + if len(target_fields) == 1: + return target_fields[0]._serialize(value, attr, obj, **kwargs) + else: + self._union_fields = target_fields + + try: + return super(TypeSensitiveUnionField, self)._serialize(value, 
+
+
 def ComputeField(**kwargs):
     """
     :param required : if set to True, it is not possible to pass None
@@ -507,7 +664,6 @@ class PythonFuncNameStr(fields.Str):
     @abstractmethod
     def _get_field_name(self) -> str:
         """Returns field name, used for error message."""
-        pass

     def _deserialize(self, value, attr, data, **kwargs) -> typing.Any:
         """Validate component name"""
@@ -524,7 +680,6 @@ class PipelineNodeNameStr(fields.Str):
     @abstractmethod
     def _get_field_name(self) -> str:
         """Returns field name, used for error message."""
-        pass

     def _deserialize(self, value, attr, data, **kwargs) -> typing.Any:
         """Validate component name"""
@@ -534,3 +689,33 @@ def _deserialize(self, value, attr, data, **kwargs) -> typing.Any:
             f"{self._get_field_name()} name should be a valid python identifier(lower letters, numbers, underscore and start with a letter or underscore). Currently got {name}."
         )
         return name
+
+
+class GitStr(fields.Str):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _jsonschema_type_mapping(self):
+        schema = {"type": "string", "pattern": "^git+"}
+        if self.name is not None:
+            schema["title"] = self.name
+        if self.dump_only:
+            schema["readonly"] = True
+        return schema
+
+    def _serialize(self, value, attr, obj, **kwargs):
+        if isinstance(value, str) and value.startswith("git+"):
+            return value
+        elif value is None and not self.required:
+            return None
+        else:
+            raise ValidationError(f"Non-git path passed to GitStr for {attr}")
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        if isinstance(value, str) and value.startswith("git+"):
+            return value
+        else:
+            raise ValidationError(
+                "In order to specify a git path, please provide the correct path prefixed with 'git+'.\n"
+            )
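Two of the new field types above are easiest to understand by the strings they handle. A hedged sketch (URLs and timestamps invented; assumes marshmallow 3.x, where from_iso_datetime is the validator DateTimeStr delegates to):

from marshmallow.utils import from_iso_datetime

# DateTimeStr validates but does not convert; the raw string is kept.
from_iso_datetime("2022-06-30T16:32:29+00:00")  # parses, so DateTimeStr passes the string through
# from_iso_datetime("next tuesday") raises, which DateTimeStr maps to a ValidationError

# GitStr only cares about the "git+" prefix; everything else is rejected.
assert "git+https://github.com/user/repo.git".startswith("git+")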
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/input_output_fields_provider.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/input_output_fields_provider.py
index 1c4e2980de64..9fc2a5abe9dd 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/input_output_fields_provider.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/input_output_fields_provider.py
@@ -12,28 +12,23 @@


 def InputsField(**kwargs):
-    return UnionField(
-        [
-            NestedField(InputSchema),
-            fields.Dict(
-                keys=fields.Str(),
-                values=UnionField(
-                    [
-                        # By default when strict is false, marshmallow downcasts float to int.
-                        # Setting it to true will throw a validation error and try the next types in list.
-                        # https://github.com/marshmallow-code/marshmallow/pull/755
-                        NestedField(InputSchema),
-                        NestedField(InputLiteralValueSchema),
-                        fields.Int(strict=True),
-                        fields.Str(),
-                        fields.Bool(),
-                        fields.Float(),
-                    ],
-                    metadata={"description": "Inputs to a job."},
-                    **kwargs
-                ),
-            ),
-        ]
+    return fields.Dict(
+        keys=fields.Str(),
+        values=UnionField(
+            [
+                # By default when strict is false, marshmallow downcasts float to int.
+                # Setting it to true will throw a validation error and try the next types in list.
+                # https://github.com/marshmallow-code/marshmallow/pull/755
+                NestedField(InputSchema),
+                NestedField(InputLiteralValueSchema),
+                fields.Int(strict=True),
+                fields.Str(),
+                fields.Bool(),
+                fields.Float(),
+            ],
+            metadata={"description": "Inputs to a job."},
+            **kwargs
+        ),
     )
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/parameterized_command.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/parameterized_command.py
index 7c146b9fd58f..d61b644ae01f 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/parameterized_command.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/parameterized_command.py
@@ -10,7 +10,7 @@

 from ..assets.code_asset import AnonymousCodeAssetSchema
 from ..assets.environment import AnonymousEnvironmentSchema
-from ..core.fields import ArmVersionedStr, UnionField, RegistryStr, LocalPathField, SerializeValidatedUrl
+from ..core.fields import ArmVersionedStr, UnionField, RegistryStr, LocalPathField, SerializeValidatedUrl, GitStr
 from .distribution import MPIDistributionSchema, PyTorchDistributionSchema, TensorFlowDistributionSchema


@@ -22,7 +22,7 @@ class ParameterizedCommandSchema(PathAwareSchema):
         required=True,
     )
     code = UnionField(
-        [LocalPathField, SerializeValidatedUrl(), ArmVersionedStr(azureml_type=AzureMLResourceType.CODE)],
+        [LocalPathField, SerializeValidatedUrl(), GitStr(), ArmVersionedStr(azureml_type=AzureMLResourceType.CODE)],
         metadata={"description": "A local path or http:, https:, azureml: url pointing to a remote location."},
     )
     environment = UnionField(
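After this change, InputsField is always a mapping from input name to either a nested input object or a literal; a single top-level input object is no longer accepted. An illustrative YAML fragment (names and values invented):

# inputs:
#   epochs: 10                 # int literal (strict, so 10.5 falls through to Float)
#   learning_rate: 0.01        # float literal
#   training_data:
#     path: azureml:my-data:1  # nested InputSchema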
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/automl_node.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/automl_node.py
index baaa4d094644..ec3d9e910b63 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/automl_node.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/automl_node.py
@@ -1,7 +1,7 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from marshmallow import fields, Schema, pre_dump, post_dump, INCLUDE
+from marshmallow import fields, pre_dump, post_dump
 from pydash import get

 from azure.ai.ml._schema import PathAwareSchema
@@ -18,9 +18,8 @@
 from azure.ai.ml._schema.automl.nlp_vertical.text_classification import TextClassificationSchema
 from azure.ai.ml._schema.automl.nlp_vertical.text_classification_multilabel import TextClassificationMultilabelSchema
 from azure.ai.ml._schema.automl.nlp_vertical.text_ner import TextNerSchema
-from azure.ai.ml._schema.core.fields import ComputeField, UnionField, NestedField, StringTransformedEnum, ArmStr
-from azure.ai.ml._schema.job.input_output_entry import OutputSchema
-from azure.ai.ml._schema.job.input_output_fields_provider import InputsField
+from azure.ai.ml._schema.core.fields import ComputeField, UnionField, NestedField
+from azure.ai.ml._schema.job.input_output_entry import InputSchema, OutputSchema
 from azure.ai.ml._schema.pipeline.pipeline_job_io import OutputBindingStr


@@ -87,56 +86,56 @@ def resolve_nested_data(self, job_dict: dict, job: "AutoMLJob", **kwargs):


 class AutoMLClassificationNodeSchema(AutoMLNodeMixin, AutoMLClassificationSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
-    test_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    test_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class AutoMLRegressionNodeSchema(AutoMLNodeMixin, AutoMLRegressionSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
-    test_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    test_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class AutoMLForecastingNodeSchema(AutoMLNodeMixin, AutoMLForecastingSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
-    test_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    test_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class AutoMLTextClassificationNode(AutoMLNodeMixin, TextClassificationSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class AutoMLTextClassificationMultilabelNode(AutoMLNodeMixin, TextClassificationMultilabelSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class AutoMLTextNerNode(AutoMLNodeMixin, TextNerSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class ImageClassificationMulticlassNodeSchema(AutoMLNodeMixin, ImageClassificationSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class ImageClassificationMultilabelNodeSchema(AutoMLNodeMixin, ImageClassificationMultilabelSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class ImageObjectDetectionNodeSchema(AutoMLNodeMixin, ImageObjectDetectionSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])


 class ImageInstanceSegmentationNodeSchema(AutoMLNodeMixin, ImageInstanceSegmentationSchema):
-    training_data = UnionField([fields.Str(), InputsField()])
-    validation_data = UnionField([fields.Str(), InputsField()])
+    training_data = UnionField([fields.Str(), NestedField(InputSchema)])
+    validation_data = UnionField([fields.Str(), NestedField(InputSchema)])


 def AutoMLNodeSchema(**kwargs):
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/component_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/component_job.py
index 4625d207d75a..8e81c5445e09 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/component_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/component_job.py
@@ -33,7 +33,7 @@
 from .._sweep.parameterized_sweep import ParameterizedSweepSchema
 from .._utils.data_binding_expression import support_data_binding_expression_for_fields

-from ..core.fields import ComputeField, StringTransformedEnum
+from ..core.fields import ComputeField, StringTransformedEnum, TypeSensitiveUnionField
 from ..job import ParameterizedCommandSchema, ParameterizedParallelSchema
 from ..job.distribution import PyTorchDistributionSchema, TensorFlowDistributionSchema, MPIDistributionSchema
 from ..job.job_limits import CommandJobLimitsSchema
@@ -89,16 +89,20 @@ def _resolve_inputs_outputs(job):


 class CommandSchema(BaseNodeSchema, ParameterizedCommandSchema):
-    component = UnionField(
-        [
+    component = TypeSensitiveUnionField(
+        {
+            NodeType.COMMAND: [
+                # inline component or component file reference starting with FILE prefix
+                NestedField(AnonymousCommandComponentSchema, unknown=INCLUDE),
+                # component file reference
+                ComponentFileRefField(),
+            ],
+        },
+        plain_union_fields=[
             # for registry type assets
             RegistryStr(),
             # existing component
             ArmVersionedStr(azureml_type=AzureMLResourceType.COMPONENT, allow_default_version=True),
-            # inline component or component file reference starting with FILE prefix
-            NestedField(AnonymousCommandComponentSchema, unknown=INCLUDE),
-            # component file reference
-            ComponentFileRefField(),
         ],
         required=True,
     )
@@ -113,6 +117,7 @@ class CommandSchema(BaseNodeSchema, ParameterizedCommandSchema):
     )
     environment = UnionField(
         [
+            RegistryStr(azureml_type=AzureMLResourceType.ENVIRONMENT),
             NestedField(AnonymousEnvironmentSchema),
             ArmVersionedStr(azureml_type=AzureMLResourceType.ENVIRONMENT, allow_default_version=True),
         ],
@@ -162,14 +167,18 @@ def resolve_code_path(self, data, original_data, **kwargs):


 class SweepSchema(BaseNodeSchema, ParameterizedSweepSchema):
     type = StringTransformedEnum(allowed_values=[NodeType.SWEEP])
-    trial = UnionField(
-        [
+    trial = TypeSensitiveUnionField(
+        {
+            NodeType.SWEEP: [
+                # inline component or component file reference starting with FILE prefix
+                NestedField(AnonymousCommandComponentSchema, unknown=INCLUDE),
+                # component file reference
+                ComponentFileRefField(),
+            ],
+        },
+        plain_union_fields=[
             # existing component
             ArmVersionedStr(azureml_type=AzureMLResourceType.COMPONENT, allow_default_version=True),
-            # inline component or component file reference starting with FILE prefix
-            NestedField(AnonymousCommandComponentSchema, unknown=INCLUDE),
-            # component file reference
-            ComponentFileRefField(),
         ],
         required=True,
     )
@@ -191,16 +200,20 @@ def resolve_inputs_outputs(self, job, **kwargs):


 class ParallelSchema(BaseNodeSchema, ParameterizedParallelSchema):
-    component = UnionField(
-        [
+    component = TypeSensitiveUnionField(
+        {
+            NodeType.PARALLEL: [
+                # inline component or component file reference starting with FILE prefix
+                NestedField(AnonymousParallelComponentSchema, unknown=INCLUDE),
+                # component file reference
+                ParallelComponentFileRefField(),
+            ],
+        },
+        plain_union_fields=[
             # for registry type assets
             RegistryStr(),
             # existing component
             ArmVersionedStr(azureml_type=AzureMLResourceType.COMPONENT, allow_default_version=True),
-            # inline component or component file reference starting with FILE prefix
-            NestedField(AnonymousParallelComponentSchema, unknown=INCLUDE),
-            # component file reference
-            ParallelComponentFileRefField(),
         ],
         required=True,
     )
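The practical effect of swapping UnionField for TypeSensitiveUnionField here is narrower error messages when a node fails to load. A hedged sketch of the failure mode it improves (the node dict and the exact set of allowed types are illustrative):

# A node whose type matches no registered schema now fails fast with something like
#   {"type": "Value spark passed is not in set ['command', 'sweep', 'parallel']"}
# instead of a concatenation of every union member's errors.
bad_node = {"type": "spark", "component": "azureml:my-component:1"}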
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_command_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_command_job.py
index 40d1a109b12c..b6ee23476568 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_command_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_command_job.py
@@ -8,7 +8,7 @@
 from azure.ai.ml._schema.job.command_job import CommandJobSchema
 from azure.ai.ml._schema.job.input_output_entry import OutputSchema
 from azure.ai.ml.constants import AzureMLResourceType
-from azure.ai.ml._schema.core.fields import ComputeField, ArmVersionedStr
+from azure.ai.ml._schema.core.fields import ComputeField, ArmVersionedStr, RegistryStr
 from azure.ai.ml._schema.assets.environment import AnonymousEnvironmentSchema
 from marshmallow import fields, post_load

@@ -19,6 +19,7 @@ class PipelineCommandJobSchema(CommandJobSchema):
     compute = ComputeField()
     environment = UnionField(
         [
+            RegistryStr(azureml_type=AzureMLResourceType.ENVIRONMENT),
             NestedField(AnonymousEnvironmentSchema),
             ArmVersionedStr(azureml_type=AzureMLResourceType.ENVIRONMENT, allow_default_version=True),
         ],
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_job.py
index 13fdccfbb049..4be6c8168bff 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_job.py
@@ -4,7 +4,7 @@
 import logging
 from marshmallow import INCLUDE

-from azure.ai.ml.constants import JobType
+from azure.ai.ml.constants import JobType, NodeType
 from azure.ai.ml._schema import NestedField, StringTransformedEnum, UnionField
 from azure.ai.ml._schema.job import BaseJobSchema
 from azure.ai.ml._schema.job.input_output_fields_provider import InputsField, OutputsField
@@ -17,7 +17,7 @@
     _resolve_inputs_outputs,
 )
 from marshmallow import fields, post_load, pre_dump
-from azure.ai.ml._schema.core.fields import ComputeField, PipelineNodeNameStr
+from azure.ai.ml._schema.core.fields import ComputeField, PipelineNodeNameStr, TypeSensitiveUnionField
 from azure.ai.ml._schema.pipeline.pipeline_command_job import PipelineCommandJobSchema
 from azure.ai.ml._schema.pipeline.pipeline_parallel_job import PipelineParallelJobSchema
 from azure.ai.ml._schema.schedule.schedule import CronScheduleSchema, RecurrenceScheduleSchema
@@ -35,16 +35,17 @@ class PipelineJobSchema(BaseJobSchema):
     type = StringTransformedEnum(allowed_values=[JobType.PIPELINE])
     jobs = fields.Dict(
         keys=NodeNameStr(),
-        values=UnionField(
-            [
-                NestedField(CommandSchema, unknown=INCLUDE),
-                NestedField(SweepSchema, unknown=INCLUDE),
-                # ParallelSchema support parallel pipeline yml with "component"
-                NestedField(ParallelSchema, unknown=INCLUDE),
-                NestedField(PipelineCommandJobSchema),
-                AutoMLNodeSchema(unknown=INCLUDE),
-                NestedField(PipelineParallelJobSchema, unknown=INCLUDE),
-            ]
+        values=TypeSensitiveUnionField(
+            {
+                NodeType.COMMAND: [NestedField(CommandSchema, unknown=INCLUDE), NestedField(PipelineCommandJobSchema)],
+                NodeType.SWEEP: [NestedField(SweepSchema, unknown=INCLUDE)],
+                NodeType.PARALLEL: [
+                    # ParallelSchema supports parallel pipeline yml with "component"
+                    NestedField(ParallelSchema, unknown=INCLUDE),
+                    NestedField(PipelineParallelJobSchema, unknown=INCLUDE),
+                ],
+                NodeType.AUTOML: [AutoMLNodeSchema(unknown=INCLUDE)],
+            }
         ),
     )
     compute = ComputeField()
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_parallel_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_parallel_job.py
index 85cce308e842..9cd109c6956a 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_parallel_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/pipeline/pipeline_parallel_job.py
@@ -4,13 +4,12 @@
 import logging
 from typing import Any

-from marshmallow import fields, post_load, INCLUDE
+from marshmallow import fields, post_load

 from azure.ai.ml._schema.core.fields import NestedField, UnionField
 from azure.ai.ml._schema.job.input_output_entry import OutputSchema
 from azure.ai.ml._schema.job.parallel_job import ParallelJobSchema
-from azure.ai.ml._schema._deployment.batch.batch_deployment_settings import BatchRetrySettingsSchema
-from azure.ai.ml._schema.core.fields import ComputeField, ArmVersionedStr
+from azure.ai.ml._schema.core.fields import ComputeField, ArmVersionedStr, RegistryStr
 from azure.ai.ml._schema.assets.environment import AnonymousEnvironmentSchema
 from azure.ai.ml.constants import AzureMLResourceType

@@ -21,6 +20,7 @@ class PipelineParallelJobSchema(ParallelJobSchema):
     compute = ComputeField()
     environment = UnionField(
         [
+            RegistryStr(azureml_type=AzureMLResourceType.ENVIRONMENT),
             NestedField(AnonymousEnvironmentSchema),
             ArmVersionedStr(azureml_type=AzureMLResourceType.ENVIRONMENT, allow_default_version=True),
         ],
import StringTransformedEnum, NestedField, UnionField, DumpableIntegerField +from azure.ai.ml._schema.core.fields import ( + StringTransformedEnum, + NestedField, + UnionField, + DumpableIntegerField, + DateTimeStr, +) from azure.ai.ml._restclient.v2022_02_01_preview.models import ( ScheduleStatus, ScheduleType, @@ -16,7 +22,8 @@ class ScheduleSchema(metaclass=PatchedSchemaMeta): status = StringTransformedEnum(allowed_values=[o.value for o in ScheduleStatus]) - start_time = fields.DateTime() + start_time = UnionField([fields.DateTime(), DateTimeStr()]) + end_time = UnionField([fields.DateTime(), DateTimeStr()]) time_zone = fields.Str(validate=validate.OneOf([o.value for o in TimeZone])) @post_dump(pass_original=True) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_arm_id_utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_arm_id_utils.py index fc39d5a01638..556971bd45db 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_arm_id_utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_arm_id_utils.py @@ -5,9 +5,6 @@ import logging import re from typing import Any, Optional, Tuple, Union - -from marshmallow import ValidationError - from azure.ai.ml._scope_dependent_operations import OperationScope from azure.ai.ml.constants import ( ARM_ID_PREFIX, @@ -22,7 +19,6 @@ PROVIDER_RESOURCE_ID_WITH_VERSION, LEVEL_ONE_NAMED_RESOURCE_ID_FORMAT, REGISTRY_VERSION_PATTERN, - AzureMLResourceType, ) from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_asset_utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_asset_utils.py index f0baeac170b0..6f91b83c3122 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_asset_utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_asset_utils.py @@ -8,20 +8,28 @@ from typing import TYPE_CHECKING, Tuple, Union, Optional, List, Iterable, Dict, Any, cast from pathlib import Path import hashlib -from azure.ai.ml.entities._assets.asset import Asset +from contextlib import suppress +from colorama import Fore import pathspec from tqdm import tqdm, TqdmWarning import warnings from platform import system +from multiprocessing import cpu_count +from concurrent.futures import ThreadPoolExecutor, as_completed from azure.ai.ml._artifacts._constants import ( CHUNK_SIZE, ARTIFACT_ORIGIN, UPLOAD_CONFIRMATION, - HASH_ALGORITHM_NAME, AML_IGNORE_FILE_NAME, GIT_IGNORE_FILE_NAME, + EMPTY_DIRECTORY_ERROR, + PROCESSES_PER_CORE, + MAX_CONCURRENCY, + BLOB_STORAGE_CLIENT_NAME, + GEN2_STORAGE_CLIENT_NAME, ) +from azure.ai.ml.entities._assets.asset import Asset from azure.ai.ml._restclient.v2021_10_01.models import ( DatasetVersionData, ModelVersionData, @@ -37,10 +45,14 @@ ComponentVersionsOperations, ComponentContainersOperations, ) -from azure.ai.ml.constants import OrderString, MAX_AUTOINCREMENT_ATTEMPTS +from azure.ai.ml.constants import ( + OrderString, + MAX_AUTOINCREMENT_ATTEMPTS, +) from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError -from azure.ai.ml._utils.utils import retry +from azure.ai.ml._utils.utils import retry, convert_windows_path_to_unix from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget +from azure.ai.ml._utils._exception_utils import EmptyDirectoryError if TYPE_CHECKING: from azure.ai.ml.operations import ( @@ -182,6 +194,8 @@ def _get_dir_hash(directory: Union[str, Path], hash: hash_type, ignore_file: Ign if ignore_file.is_file_excluded(path): continue hash.update(path.name.encode()) + if os.path.islink(path): # ensure we're hashing 
the contents of the linked file + path = Path(os.readlink(convert_windows_path_to_unix(path))) if path.is_file(): hash = _get_file_hash(path, hash) elif path.is_dir(): @@ -213,6 +227,8 @@ def get_object_hash(path: Union[str, Path], ignore_file: IgnoreFile = IgnoreFile if Path(path).is_dir(): object_hash = _get_dir_hash(directory=path, hash=hash, ignore_file=ignore_file) else: + if os.path.islink(path): # ensure we're hashing the contents of the linked file + path = Path(os.readlink(convert_windows_path_to_unix(path))) object_hash = _get_file_hash(filename=path, hash=hash) return str(object_hash.hexdigest()) @@ -220,18 +236,80 @@ def get_object_hash(path: Union[str, Path], ignore_file: IgnoreFile = IgnoreFile def traverse_directory( root: str, files: List[str], source: str, prefix: str, ignore_file: IgnoreFile = IgnoreFile() ) -> Iterable[Tuple[str, Union[str, Any]]]: - dir_parts = [os.path.relpath(root, source) for _ in files] - dir_parts = ["" if dir_part == "." else dir_part + "/" for dir_part in dir_parts] + """ + Enumerate all files in the given directory and compose paths for them to be uploaded to in the remote storage. + e.g. [/mnt/c/Users/dipeck/upload_files/my_file1.txt, /mnt/c/Users/dipeck/upload_files/my_file2.txt] --> + [(/mnt/c/Users/dipeck/upload_files/my_file1.txt, LocalUpload//upload_files/my_file1.txt), + (/mnt/c/Users/dipeck/upload_files/my_file2.txt, LocalUpload//upload_files/my_file2.txt))] + + :param root: Root directory path + :type root: str + :param files: List of all file paths in the directory + :type files: List[str] + :param source: Local path to project directory + :type source: str + :param prefix: Remote upload path for project directory (e.g. LocalUpload//project_dir) + :type prefix: str + :param ignore_file: The .amlignore or .gitignore file in the project directory + :type ignore_file: azure.ai.ml._utils._asset_utils.IgnoreFile + :return: Zipped list of tuples representing the local path and remote destination path for each file + :rtype: Iterable[Tuple[str, Union[str, Any]]] + """ + # Normalize Windows paths + root = convert_windows_path_to_unix(root) + source = convert_windows_path_to_unix(source) + working_dir = convert_windows_path_to_unix(os.getcwd()) + project_dir = root[len(str(working_dir)) :] + "/" + file_paths = [ + convert_windows_path_to_unix(os.path.join(root, name)) + for name in files + if not ignore_file.is_file_excluded(os.path.join(root, name)) + ] # get all files not excluded by the ignore file + file_paths_including_links = {fp: None for fp in file_paths} + + for path in file_paths: + target_prefix = "" + symlink_prefix = "" + + # check for symlinks to get their true paths + if os.path.islink(path): + target_absolute_path = os.path.join(working_dir, os.readlink(path)) + target_prefix = "/".join([root, str(os.readlink(path))]).replace(project_dir, "/") + + # follow and add child links if the directory is a symlink + if os.path.isdir(target_absolute_path): + symlink_prefix = path.replace(root + "/", "") + + for r, _, f in os.walk(target_absolute_path, followlinks=True): + target_file_paths = { + os.path.join(r, name): symlink_prefix + os.path.join(r, name).replace(target_prefix, "") + for name in f + } # for each symlink, store its target_path as key and symlink path as value + file_paths_including_links.update(target_file_paths) # Add discovered symlinks to file paths list + else: + file_path_info = { + target_absolute_path: path.replace(root + "/", "") + } # for each symlink, store its target_path as key and symlink path as value + 
file_paths_including_links.update(file_path_info) # Add discovered symlinks to file paths list + del file_paths_including_links[path] # Remove original symlink entry now that detailed entry has been added + else: + pass + file_paths = sorted( - [os.path.join(root, name) for name in files if not ignore_file.is_file_excluded(os.path.join(root, name))] - ) - blob_paths = sorted( - [ - prefix + dir_part + name - for (dir_part, name) in zip(dir_parts, files) - if not ignore_file.is_file_excluded(os.path.join(root, name)) - ] - ) + file_paths_including_links + ) # sort files to keep consistent order in case of repeat upload comparisons + dir_parts = [os.path.relpath(root, source) for _ in file_paths] + dir_parts = ["" if dir_part == "." else dir_part + "/" for dir_part in dir_parts] + blob_paths = [] + + for (dir_part, name) in zip(dir_parts, file_paths): + if file_paths_including_links.get( + name + ): # for symlinks, use symlink name and structure in directory to create remote upload path + blob_path = prefix + dir_part + file_paths_including_links.get(name) + else: + blob_path = prefix + dir_part + name.replace(root + "/", "") + blob_paths.append(blob_path) return zip(file_paths, blob_paths) @@ -247,16 +325,195 @@ def get_directory_size(root: os.PathLike) -> Tuple[int, Dict[str, int]]: """Returns total size of a directory and a dictionary itemizing each sub-path and its size.""" total_size = 0 size_list = {} - for dirpath, _, filenames in os.walk(root): + for dirpath, _, filenames in os.walk(root, followlinks=True): for name in filenames: full_path = os.path.join(dirpath, name) - if not os.path.islink(full_path): # symlinks aren't counted + if not os.path.islink(full_path): path_size = os.path.getsize(full_path) - size_list[full_path] = path_size - total_size += path_size + else: + path_size = os.path.getsize( + os.readlink(convert_windows_path_to_unix(full_path)) + ) # ensure we're counting the size of the linked file + size_list[full_path] = path_size + total_size += path_size return total_size, size_list +def upload_file( + storage_client: Union["BlobStorageClient", "Gen2StorageClient"], + source: str, + dest: str = None, + msg: Optional[str] = None, + size: int = 0, + show_progress: Optional[bool] = None, + in_directory: bool = False, + callback: Any = None, +) -> None: + """ + Upload a single file to remote storage. + + :param storage_client: Storage client object + :type storage_client: Union[azure.ai.ml._artifacts._blob_storage_helper.BlobStorageClient, azure.ai.ml._artifacts._gen2_storage_helper.Gen2StorageClient] + :param source: Local path to project directory + :type source: str + :param dest: Remote upload path for project directory (e.g. LocalUpload//project_dir) + :type dest: str + :param msg: Message to be shown with progress bar (e.g. 
"Uploading ") + :type msg: str + :param size: Size of the file in bytes + :type size: int + :param show_progress: Whether to show progress bar or not + :type show_progress: bool + :param in_directory: Whether the file is part of a directory of files + :type in_directory: bool + :param callback: Callback to progress bar + :type callback: Any + :return: None + """ + validate_content = size > 0 # don't do checksum for empty files + + if ( + type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME + ): # Only for Gen2StorageClient, Blob Storage doesn't have true directories + if in_directory: + storage_client.file_client = storage_client.sub_directory_client.create_file(source.split("/")[-1]) + else: + storage_client.file_client = storage_client.directory_client.create_file(source.split("/")[-1]) + + with open(source, "rb") as data: + if show_progress and not in_directory: + file_size, _ = get_directory_size(source) + file_size_in_mb = file_size / 10**6 + if file_size_in_mb < 1: + msg += Fore.GREEN + " (< 1 MB)" + else: + msg += Fore.GREEN + f" ({round(file_size_in_mb, 2)} MBs)" + cntx_manager = FileUploadProgressBar(msg=msg) + else: + cntx_manager = suppress() + + with cntx_manager as c: + callback = c.update_to if (show_progress and not in_directory) else None + if type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME: + storage_client.file_client.upload_data( + data=data.read(), + overwrite=True, + validate_content=validate_content, + raw_response_hook=callback, + max_concurrency=MAX_CONCURRENCY, + ) + elif type(storage_client).__name__ == BLOB_STORAGE_CLIENT_NAME: + storage_client.container_client.upload_blob( + name=dest, + data=data, + validate_content=validate_content, + overwrite=storage_client.overwrite, + raw_response_hook=callback, + max_concurrency=MAX_CONCURRENCY, + ) + + storage_client.uploaded_file_count += 1 + + +def upload_directory( + storage_client: Union["BlobStorageClient", "Gen2StorageClient"], + source: str, + dest: str, + msg: str, + show_progress: bool, + ignore_file: IgnoreFile, +) -> None: + """ + Upload directory to remote storage. + + :param storage_client: Storage client object + :type storage_client: Union[azure.ai.ml._artifacts._blob_storage_helper.BlobStorageClient, azure.ai.ml._artifacts._gen2_storage_helper.Gen2StorageClient] + :param source: Local path to project directory + :type source: str + :param dest: Remote upload path for project directory (e.g. LocalUpload//project_dir) + :type dest: str + :param msg: Message to be shown with progress bar (e.g. 
"Uploading ") + :type msg: str + :param show_progress: Whether to show progress bar or not + :type show_progress: bool + :param ignore_file: The .amlignore or .gitignore file in the project directory + :type ignore_file: azure.ai.ml._utils._asset_utils.IgnoreFile + :return: None + """ + source_path = Path(source).resolve() + prefix = "" if dest == "" else dest + "/" + prefix += os.path.basename(source_path) + "/" + + if ( + type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME + ): # Only for Gen2StorageClient, Blob Storage doesn't have true directories + storage_client.sub_directory_client = storage_client.directory_client.create_sub_directory( + prefix.strip("/").split("/")[-1] + ) + + # Enumerate all files in the given directory and compose paths for them to be uploaded to in the remote storage + upload_paths = [] + size_dict = {} + total_size = 0 + for root, _, files in os.walk(source_path, followlinks=True): + upload_paths += list(traverse_directory(root, files, source_path, prefix, ignore_file=ignore_file)) + + # Get each file's size for progress bar tracking + for path, _ in upload_paths: + if os.path.islink(path): + path_size = os.path.getsize( + os.readlink(convert_windows_path_to_unix(path)) + ) # ensure we're counting the size of the linked file + else: + path_size = os.path.getsize(path) + size_dict[path] = path_size + total_size += path_size + + upload_paths = sorted(upload_paths) + if len(upload_paths) == 0: + raise EmptyDirectoryError( + message=EMPTY_DIRECTORY_ERROR.format(source), + no_personal_data_message=msg.format("[source]"), + target=ErrorTarget.ARTIFACT, + error_category=ErrorCategory.USER_ERROR, + ) + storage_client.total_file_count = len(upload_paths) + + if ( + type(storage_client).__name__ == BLOB_STORAGE_CLIENT_NAME + ): # Only for Gen2StorageClient, Blob Storage doesn't have true directories + # Only for BlobStorageClient + # Azure Blob doesn't allow metadata setting at the directory level, so the first + # file in the directory is designated as the file where the confirmation metadata + # will be added at the end of the upload. 
+ storage_client.indicator_file = upload_paths[0][1] + storage_client.check_blob_exists() + + # Submit paths to workers for upload + num_cores = int(cpu_count()) * PROCESSES_PER_CORE + with ThreadPoolExecutor(max_workers=num_cores) as ex: + futures_dict = { + ex.submit( + upload_file, + storage_client=storage_client, + source=src, + dest=dest, + size=size_dict.get(src), + in_directory=True, + show_progress=show_progress, + ): (src, dest) + for (src, dest) in upload_paths + } + if show_progress: + warnings.simplefilter("ignore", category=TqdmWarning) + msg += f" ({round(total_size/10**6, 2)} MBs)" + ascii = system() == "Windows" # Default unicode progress bar doesn't display well on Windows + with tqdm(total=total_size, desc=msg, ascii=ascii) as pbar: + for future in as_completed(futures_dict): + file_path_name = futures_dict[future][0] + pbar.update(size_dict.get(file_path_name) or 0) + + @retry( exceptions=ResourceExistsError, failure_msg="Asset creation exceeded maximum retries.", diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_azureml_polling.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_azureml_polling.py index 960e9ad741f3..1c4cbe4da751 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_azureml_polling.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_azureml_polling.py @@ -24,7 +24,6 @@ class AzureMLPolling(ARMPolling): def update_status(self): """Update the current status of the LRO.""" super(ARMPolling, self).update_status() - print(".", end="", flush=True) def polling_wait( diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_data_utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_data_utils.py index a046b0e5defd..7306740948a0 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_data_utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_data_utils.py @@ -23,7 +23,7 @@ module_logger = logging.getLogger(__name__) -def download_mltable_schema(mltable_schema_url: str): +def download_mltable_metadata_schema(mltable_schema_url: str): response = requests.get(mltable_schema_url, stream=True) return response.json() diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_endpoint_utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_endpoint_utils.py index bb56d40d6a74..d9128b44b0d2 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_endpoint_utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_endpoint_utils.py @@ -23,7 +23,7 @@ from azure.ai.ml.constants import AzureMLResourceType, LROConfigurations from azure.ai.ml.entities import BatchDeployment -from azure.ai.ml._utils._arm_id_utils import is_ARM_id_for_resource +from azure.ai.ml._utils._arm_id_utils import is_ARM_id_for_resource, is_registry_id_for_resource from azure.ai.ml.entities._assets._artifacts.code import Code from azure.ai.ml.operations._operation_orchestrator import OperationOrchestrator @@ -47,28 +47,27 @@ def polling_wait( :param (bool, optional) is_local: If poller is for a local endpoint, so the timeout is removed. :param (int, optional) timeout: New value to overwrite the default timeout. """ - module_logger.info(f"{message}") - + module_logger.warning(f"{message}") if is_local: """We removed timeout on local endpoints in case it takes a long time to pull image or install conda env. We want user to be able to see that. 
""" while not poller.done(): - module_logger.info(".") + module_logger.warning(".") time.sleep(LROConfigurations.SLEEP_TIME) else: poller.result(timeout=timeout) if poller.done(): - module_logger.info("Done ") + module_logger.warning("Done ") else: module_logger.warning("Timeout waiting for long running operation") if start_time: end_time = time.time() duration = divmod(int(round(end_time - start_time)), 60) - module_logger.info(f"({duration[0]}m {duration[1]}s)\n") + module_logger.warning(f"({duration[0]}m {duration[1]}s)\n") def local_endpoint_polling_wrapper(func: Callable, message: str, **kwargs) -> Any: @@ -131,23 +130,28 @@ def upload_dependencies(deployment: Deployment, orchestrators: OperationOrchestr module_logger.debug(f"Uploading the dependencies for deployment {deployment.name}") # Create a code asset if code is not already an ARM ID - if deployment.code_configuration and not is_ARM_id_for_resource( - deployment.code_configuration.code, AzureMLResourceType.CODE + if ( + deployment.code_configuration + and not is_ARM_id_for_resource(deployment.code_configuration.code, AzureMLResourceType.CODE) + and not is_registry_id_for_resource(deployment.code_configuration.code) ): deployment.code_configuration.code = orchestrators.get_asset_arm_id( Code(base_path=deployment._base_path, path=deployment.code_configuration.code), azureml_type=AzureMLResourceType.CODE, ) - deployment.environment = ( - orchestrators.get_asset_arm_id(deployment.environment, azureml_type=AzureMLResourceType.ENVIRONMENT) - if deployment.environment - else None - ) - deployment.model = ( - orchestrators.get_asset_arm_id(deployment.model, azureml_type=AzureMLResourceType.MODEL) - if deployment.model - else None - ) + + if not is_registry_id_for_resource(deployment.environment): + deployment.environment = ( + orchestrators.get_asset_arm_id(deployment.environment, azureml_type=AzureMLResourceType.ENVIRONMENT) + if deployment.environment + else None + ) + if not is_registry_id_for_resource(deployment.model): + deployment.model = ( + orchestrators.get_asset_arm_id(deployment.model, azureml_type=AzureMLResourceType.MODEL) + if deployment.model + else None + ) if isinstance(deployment, BatchDeployment) and deployment.compute: deployment.compute = orchestrators.get_asset_arm_id( deployment.compute, azureml_type=AzureMLResourceType.COMPUTE diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py index 9dc0d2e33085..4ddd6b5a6e2f 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py @@ -7,6 +7,7 @@ import logging import os import re +from pathlib import PosixPath, PureWindowsPath import pydash import requests import sys @@ -636,3 +637,7 @@ def parse_args_description_from_docstring(docstring): args[arg] += " " + args_region[0] args_region.pop(0) return args + + +def convert_windows_path_to_unix(path: Union[str, PathLike]) -> PosixPath: + return PureWindowsPath(path).as_posix() diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py index 7d39ccd89bdf..f38216c2a097 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py @@ -2,4 +2,4 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -VERSION = "0.1.0b4" +VERSION = "0.0.139" diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py b/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py index 11b98af06597..966c42f41a35 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py @@ -58,6 +58,7 @@ "Only AmlCompute/KubernetesCompute cluster properties are supported, compute name {}, is {} type." ) MAX_AUTOINCREMENT_ATTEMPTS = 3 +REGISTRY_DISCOVERY_BASE_URI = "https://eastus.api.azureml.ms" REGISTRY_URI_REGEX_FORMAT = "azureml://registries/*" REGISTRY_URI_FORMAT = "azureml://registries/" REGISTRY_VERSION_PATTERN = "^azureml://registries/([^/]+)/([^/]+)/([^/]+)/versions/([^/]+)" @@ -95,7 +96,7 @@ API_URL_KEY = "api" ANONYMOUS_ENV_NAME = "CliV2AnonymousEnvironment" SKIP_VALIDATION_MESSAGE = "To skip this validation use the --skip-validation param" -MLTABLE_SCHEMA_URL_FALLBACK = "https://azuremlschemasprod.azureedge.net/latest/MLTable.schema.json" +MLTABLE_METADATA_SCHEMA_URL_FALLBACK = "https://azuremlschemasprod.azureedge.net/latest/MLTable.schema.json" INVOCATION_ZIP_FILE = "invocation.zip" INVOCATION_BAT_FILE = "Invocation.bat" INVOCATION_BASH_FILE = "Invocation.sh" @@ -110,6 +111,7 @@ } ANONYMOUS_COMPONENT_NAME = "azureml_anonymous" +GIT_PATH_PREFIX = "git+" class SearchSpace: @@ -326,6 +328,7 @@ class EndpointGetLogsFields(object): class CommonYamlFields(object): TYPE = "type" + NAME = "name" class JobComputePropertyFields(object): @@ -757,3 +760,21 @@ class TimeZone(str, Enum): TONGA__STANDARD_TIME = "Tonga Standard Time" SAMOA_STANDARD_TIME = "Samoa Standard Time" LINE_ISLANDS_STANDARD_TIME = "Line Islands Standard Time" + + +class IO_CONSTANTS: + PRIMITIVE_STR_2_TYPE = {"integer": int, "string": str, "number": float, "boolean": bool} + PRIMITIVE_TYPE_2_STR = {int: "integer", str: "string", float: "number", bool: "boolean"} + + # For validation, indicates specific parameters combination for each type + INPUT_TYPE_COMBINATION = { + "uri_folder": ["path", "mode"], + "uri_file": ["path", "mode"], + "mltable": ["path", "mode"], + "mlflow_model": ["path", "mode"], + "custom_model": ["path", "mode"], + "integer": ["default", "min", "max"], + "number": ["default", "min", "max"], + "string": ["default"], + "boolean": ["default"], + } diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py index 4eaf5ef72284..880488be0650 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py @@ -157,7 +157,7 @@ def _to_rest_object(self) -> ModelVersionData: def _update_path(self, asset_artifact: ArtifactStorageInfo) -> None: - # datastore_arm_id is nul for registry scenario, so capture the full_storage_path + # datastore_arm_id is null for registry scenario, so capture the full_storage_path if not asset_artifact.datastore_arm_id and asset_artifact.full_storage_path: self.path = asset_artifact.full_storage_path else: diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py index 0bdf2e16eff2..4b7b14956a49 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py @@ -3,28 +3,28 @@ # --------------------------------------------------------- import logging import uuid +from enum import Enum 
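The two primitive-type maps added to IO_CONSTANTS in constants.py above only work if they stay mutual inverses. A quick self-contained check of that contract, and of how a YAML default string would be coerced to its Python type:

    PRIMITIVE_STR_2_TYPE = {"integer": int, "string": str, "number": float, "boolean": bool}
    PRIMITIVE_TYPE_2_STR = {int: "integer", str: "string", float: "number", bool: "boolean"}

    for name, typ in PRIMITIVE_STR_2_TYPE.items():
        assert PRIMITIVE_TYPE_2_STR[typ] == name  # the two dicts are inverses

    value = PRIMITIVE_STR_2_TYPE["number"]("3.14")  # e.g. coerce a YAML default
    assert value == 3.14 and isinstance(value, float)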
from functools import wraps from abc import ABC, abstractmethod -from typing import Dict, Union, List -import pydash -from marshmallow import ValidationError -from azure.ai.ml._utils.utils import map_single_brackets_and_warn -from azure.ai.ml.constants import JobType, ComponentJobConstants, ComponentSource +from typing import Dict, Union, List, Optional +from azure.ai.ml._utils._arm_id_utils import get_resource_name_from_arm_id_safe +from azure.ai.ml.constants import JobType, ComponentSource from azure.ai.ml.entities._job.pipeline._attr_dict import _AttrDict -from azure.ai.ml.entities._job.pipeline._pipeline_job_helpers import process_sdk_component_job_io -from azure.ai.ml.entities._job.pipeline._io import InputsAttrDict, OutputsAttrDict, PipelineOutputBase, NodeIOMixin +from azure.ai.ml.entities._job.pipeline._io import ( + InputsAttrDict, + OutputsAttrDict, + PipelineOutputBase, + NodeIOMixin, + PipelineInput, +) from azure.ai.ml.entities._job.sweep.search_space import SweepDistribution from azure.ai.ml.entities._mixins import RestTranslatableMixin, YamlTranslatableMixin, TelemetryMixin from azure.ai.ml.entities._job._input_output_helpers import ( build_input_output, - to_rest_dataset_literal_inputs, - to_rest_data_outputs, - validate_inputs_for_command, ) -from azure.ai.ml.entities._job.pipeline._exceptions import UserErrorException -from azure.ai.ml.entities import Component, Job, CommandComponent +from azure.ai.ml.entities import Component, Job, ResourceConfiguration from azure.ai.ml.entities._inputs_outputs import Input, Output -from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory +from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget from azure.ai.ml.entities._util import convert_ordered_dict_to_dict from azure.ai.ml.entities._validation import SchemaValidatableMixin, ValidationResult @@ -66,9 +66,13 @@ class BaseNode( :type type: str :param component: Id or instance of the component version to be run for the step :type component: Union[Component, str] - :param name: Name of the command. + :param inputs: Inputs to the node. + :type inputs: Dict[str, Union[Input, SweepDistribution, str, bool, int, float, Enum, dict]] + :param outputs: Mapping of output data bindings used in the job. + :type outputs: Dict[str, Union[str, Output, dict]] + :param name: Name of the node. :type name: str - :param description: Description of the command. + :param description: Description of the node. :type description: str :param tags: Tag dictionary. Tags can be added, removed, and updated. 
:type tags: dict[str, str] @@ -87,6 +91,8 @@ def __init__( *, type: str = JobType.COMPONENT, component: Component, + inputs: Dict[str, Union[PipelineInput, PipelineOutputBase, Input, str, bool, int, float, Enum, "Input"]] = None, + outputs: Dict[str, Union[str, Output, "Output"]] = None, name: str = None, display_name: str = None, description: str = None, @@ -96,6 +102,30 @@ def __init__( experiment_name: str = None, **kwargs, ): + self._init = True + _from_component_func = kwargs.pop("_from_component_func", False) + + # initialize io + inputs, outputs = inputs or {}, outputs or {} + self._validate_io(inputs, self._get_supported_inputs_types(), Input) + self._validate_io(outputs, self._get_supported_outputs_types(), Output) + # parse empty dict to None so we won't pass default mode, type to backend + for k, v in inputs.items(): + if v == {}: + inputs[k] = None + + # TODO: get rid of self._job_inputs, self._job_outputs once we have unified Input + self._job_inputs, self._job_outputs = inputs, outputs + if isinstance(component, Component): + # Build the inputs from component input definition and given inputs, unfilled inputs will be None + self._inputs = self._build_inputs_dict(component.inputs, inputs or {}) + # Build the outputs from component output definition and given outputs, unfilled outputs will be None + self._outputs = self._build_outputs_dict(component.outputs, outputs or {}) + else: + # Build inputs/outputs dict without meta when definition not available + self._inputs = self._build_inputs_dict_without_meta(inputs or {}) + self._outputs = self._build_outputs_dict_without_meta(outputs or {}) + super(BaseNode, self).__init__(**kwargs) self.type = type self._component = component @@ -108,7 +138,52 @@ def __init__( self.experiment_name = experiment_name self.kwargs = kwargs + # Generate an id for every instance + self._instance_id = str(uuid.uuid4()) + if _from_component_func: + # add current component in pipeline stack for dsl scenario + self._register_in_current_pipeline_component_builder() + self._base_path = None # if _base_path is not + self._init = False + + @classmethod + def _get_supported_inputs_types(cls): + return None + + @classmethod + def _get_supported_outputs_types(cls): + return None + + @classmethod + def _validate_io(cls, io_dict: dict, allowed_types: Optional[tuple], parse_cls): + if allowed_types is None: + return + for key, value in io_dict.items(): + # output mode of last node should not affect input mode of next node + if isinstance(value, PipelineOutputBase): + # value = copy.deepcopy(value) + value = value._deepcopy() # Decoupled input and output + io_dict[key] = value + value.mode = None + if value is None or isinstance(value, allowed_types): + pass + elif isinstance(value, dict): + # parse dict to allowed type + io_dict[key] = parse_cls(**value) + else: + msg = "Expecting {} for input/output {}, got {} instead." 
+ raise ValidationException( + message=msg.format(allowed_types, key, type(value)), + no_personal_data_message=msg.format(allowed_types, "[key]", type(value)), + target=ErrorTarget.PIPELINE, + ) + + def _initializing(self) -> bool: + # use this to indicate ongoing init process so all attributes set during init process won't be set as + # arbitrary attribute in _AttrDict + # TODO: replace this hack + return self._init def _set_base_path(self, base_path): """ @@ -138,8 +213,7 @@ def _get_component_name(self): return self._component.name def _to_dict(self) -> Dict: - # return dict instead of OrderedDict in case it will be further used in rest request - return convert_ordered_dict_to_dict(self._dump_for_validation()) + return self._dump_for_validation() @classmethod def _get_validation_error_target(cls) -> ErrorTarget: @@ -202,16 +276,30 @@ def _to_job(self) -> Job: @classmethod def _from_rest_object(cls, obj: dict) -> "BaseNode": - pass + from azure.ai.ml.entities._job.pipeline._load_component import pipeline_node_factory - def _node_specified_pre_to_rest_operations(self, rest_obj): + return pipeline_node_factory.load_from_rest_object(obj=obj) + + @classmethod + def _rest_object_to_init_params(cls, obj: dict): """ - Override this method to add custom operations on rest_obj before return it in self._to_rest_object(). + Transfer the rest object to a dict containing items to init the node. Will be used in _from_rest_object in + subclasses. """ - pass + inputs = obj.get("inputs", {}) + outputs = obj.get("outputs", {}) + + obj["inputs"] = BaseNode._from_rest_inputs(inputs) + obj["outputs"] = BaseNode._from_rest_outputs(outputs) + + # Change computeId -> compute + compute_id = obj.pop("computeId", None) + obj["compute"] = get_resource_name_from_arm_id_safe(compute_id) + + return obj @classmethod - def _picked_fields_in_to_rest(cls) -> List[str]: + def _picked_fields_from_dict_to_rest_object(cls) -> List[str]: """ Override this method to add custom fields to be picked from self._to_dict() in self._to_rest_object(). Pick nothing by default. @@ -221,12 +309,15 @@ def _picked_fields_in_to_rest(cls) -> List[str]: def _to_rest_object(self, **kwargs) -> dict: """ Convert self to a rest object for remote call. - It's not recommended to override this method. - Instead, override self._picked_fields_in_to_rest to pick serialized fields from self._to_dict(); - and override self._node_specified_pre_to_rest_operations to add custom operations on rest_obj before return it. """ - base_dict = pydash.pick(self._to_dict(), *self._picked_fields_in_to_rest()) - base_dict.update( + base_dict, rest_obj = self._to_dict(), {} + for key in self._picked_fields_from_dict_to_rest_object(): + if key not in base_dict: + rest_obj[key] = None + else: + rest_obj[key] = base_dict.get(key) + + rest_obj.update( dict( name=self.name, display_name=self.display_name, @@ -238,12 +329,8 @@ def _to_rest_object(self, **kwargs) -> dict: **self._get_attrs(), ) ) - self._node_specified_pre_to_rest_operations(base_dict) - # Convert current parameterized inputs/outputs to Inputs/Outputs. - # Note: this step must execute after self._validate(), validation errors will be thrown then when referenced - # component has not been resolved to arm id. 
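The rewritten BaseNode._to_rest_object above replaces pydash.pick with an explicit loop, so every field named by _picked_fields_from_dict_to_rest_object appears in the REST dict, as None when absent, instead of being silently dropped. A minimal sketch of that pattern; the componentId value is hypothetical:

    def to_rest_object(node_dict: dict, picked_fields: list, **extra) -> dict:
        # Absent picked fields serialize as None rather than disappearing
        rest_obj = {key: node_dict.get(key) for key in picked_fields}
        rest_obj.update(extra)
        return rest_obj

    rest = to_rest_object(
        {"resources": {"instance_count": 2}},
        ["resources", "distribution", "limits", "environment_variables"],
        componentId="azureml:my_component:1",  # hypothetical id, for illustration only
    )
    assert rest["limits"] is None and rest["resources"] == {"instance_count": 2}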
- return convert_ordered_dict_to_dict(base_dict) + return convert_ordered_dict_to_dict(rest_obj) @property def inputs(self) -> InputsAttrDict: @@ -306,7 +393,7 @@ def _refine_optional_inputs_with_no_value(self, node, kwargs): meta = value._data if ( isinstance(meta, Input) - and meta._is_parameter_type is False + and meta._is_primitive_type is False and meta.optional is True and not meta.path and key not in kwargs diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py index fba458db219a..a2f919670f53 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py @@ -4,7 +4,6 @@ import copy import logging import os -import uuid from enum import Enum from typing import Dict, List, Optional, Union @@ -20,8 +19,9 @@ AmlToken, UserIdentity, CommandJobLimits as RestCommandJobLimits, + ResourceConfiguration as RestResourceConfiguration, ) -from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, NodeType +from azure.ai.ml.constants import NodeType from azure.ai.ml.entities._job.sweep.objective import Objective from azure.ai.ml.entities import ( @@ -33,13 +33,10 @@ CommandJob, ) from azure.ai.ml.entities._inputs_outputs import Input, Output -from azure.ai.ml._restclient.v2022_02_01_preview.models import ( - ResourceConfiguration as RestResourceConfiguration, -) from azure.ai.ml.entities._job.sweep.early_termination_policy import EarlyTerminationPolicy from azure.ai.ml.entities._job.sweep.search_space import SweepDistribution from .._job.pipeline._io import PipelineInput, PipelineOutputBase -from .._util import validate_attribute_type, get_rest_dict +from .._util import validate_attribute_type, get_rest_dict, convert_ordered_dict_to_dict from azure.ai.ml.entities._job.distribution import ( MpiDistribution, TensorFlowDistribution, @@ -49,7 +46,6 @@ from ..._schema import PathAwareSchema from ..._schema.job.distribution import PyTorchDistributionSchema, TensorFlowDistributionSchema, MPIDistributionSchema from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget -from ..._utils._arm_id_utils import get_resource_name_from_arm_id_safe module_logger = logging.getLogger(__name__) @@ -111,11 +107,13 @@ def __init__( # validate init params are valid type validate_attribute_type(attrs_to_check=locals(), attr_type_map=self._attr_type_map()) - self._init = True kwargs.pop("type", None) - _from_component_func = kwargs.pop("_from_component_func", False) - super(Command, self).__init__(type=NodeType.COMMAND, component=component, compute=compute, **kwargs) + BaseNode.__init__( + self, type=NodeType.COMMAND, inputs=inputs, outputs=outputs, component=component, compute=compute, **kwargs + ) + # init mark for _AttrDict + self._init = True # initialize command job properties self.limits = limits self.identity = identity @@ -128,10 +126,13 @@ def __init__( self.resources = self.resources or self.component.resources self.distribution = self.distribution or self.component.distribution - # initialize io - inputs, outputs = inputs or {}, outputs or {} + self._swept = False + self._init = False + + @classmethod + def _get_supported_inputs_types(cls): # when command node is constructed inside dsl.pipeline, inputs can be PipelineInput or Output of another node - supported_input_types = ( + return ( PipelineInput, PipelineOutputBase, Input, @@ -142,33 +143,10 @@ def __init__( float, Enum, ) - self._validate_io(inputs, supported_input_types, Input) - 
supported_output_types = (str, Output) - self._validate_io(outputs, supported_output_types, Output) - # parse empty dict to None so we won't pass default mode, type to backend - for k, v in inputs.items(): - if v == {}: - inputs[k] = None - # TODO: get rid of self._job_inputs, self._job_outputs once we have unified Input - self._job_inputs, self._job_outputs = inputs, outputs - if isinstance(component, Component): - # Build the inputs from component input definition and given inputs, unfilled inputs will be None - self._inputs = self._build_inputs_dict(component.inputs, inputs or {}) - # Build the outputs from component output definition and given outputs, unfilled outputs will be None - self._outputs = self._build_outputs_dict(component.outputs, outputs or {}) - else: - # Build inputs/outputs dict without meta when definition not available - self._inputs = self._build_inputs_dict_without_meta(inputs or {}) - self._outputs = self._build_outputs_dict_without_meta(outputs or {}) - - # Generate an id for every component instance - self._instance_id = str(uuid.uuid4()) - if _from_component_func: - # add current component in pipeline stack for dsl scenario - self._register_in_current_pipeline_component_builder() - self._swept = False - self._init = False + @classmethod + def _get_supported_outputs_types(cls): + return str, Output @property def distribution(self) -> Union[PyTorchDistribution, MpiDistribution, TensorFlowDistribution]: @@ -325,34 +303,6 @@ def sweep( ) return sweep_node - def _initializing(self) -> bool: - # use this to indicate ongoing init process so all attributes set during init process won't be set as - # arbitrary attribute in _AttrDict - # TODO: replace this hack - return self._init - - @classmethod - def _validate_io(cls, io_dict: dict, allowed_types: tuple, parse_cls): - for key, value in io_dict.items(): - # output mode of last node should not affect input mode of next node - if isinstance(value, PipelineOutputBase): - # value = copy.deepcopy(value) - value = value._deepcopy() # Decoupled input and output - io_dict[key] = value - value.mode = None - if value is None or isinstance(value, allowed_types): - pass - elif isinstance(value, dict): - # parse dict to allowed type - io_dict[key] = parse_cls(**value) - else: - msg = "Expecting {} for input/output {}, got {} instead." 
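The _validate_io duplicate being deleted here now lives once on BaseNode (see the earlier hunk). Its contract: None and values of the allowed types pass through, plain dicts are parsed into the io class, and anything else raises. A toy stand-in under those assumptions; the real method additionally deep-copies PipelineOutputBase values, resets their mode, and raises ValidationException rather than TypeError:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Input:  # simplified placeholder for azure.ai.ml.entities._inputs_outputs.Input
        path: Optional[str] = None
        mode: Optional[str] = None

    def validate_io(io_dict: dict, allowed_types: tuple, parse_cls) -> dict:
        for key, value in io_dict.items():
            if value is None or isinstance(value, allowed_types):
                continue  # already an allowed type, or intentionally unset
            if isinstance(value, dict):
                io_dict[key] = parse_cls(**value)  # parse plain dicts into the io class
            else:
                raise TypeError(f"Expecting {allowed_types} for input/output {key}, got {type(value)} instead.")
        return io_dict

    inputs = validate_io(
        {"data": {"path": "./data", "mode": "ro_mount"}, "lr": 0.01},
        (Input, str, bool, int, float),
        Input,
    )
    assert isinstance(inputs["data"], Input) and inputs["lr"] == 0.01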
- raise ValidationException( - message=msg.format(allowed_types, key, type(value)), - no_personal_data_message=msg.format(allowed_types, "[key]", type(value)), - target=ErrorTarget.COMMAND_JOB, - ) - @classmethod def _attr_type_map(cls) -> dict: return { @@ -387,49 +337,45 @@ def _to_job(self) -> CommandJob: ) @classmethod - def _picked_fields_in_to_rest(cls) -> List[str]: + def _picked_fields_from_dict_to_rest_object(cls) -> List[str]: return ["resources", "distribution", "limits", "environment_variables"] - def _node_specified_pre_to_rest_operations(self, rest_obj): - for key in self._picked_fields_in_to_rest(): - if key not in rest_obj: - rest_obj[key] = None - + def _to_rest_object(self, **kwargs) -> dict: + rest_obj = super()._to_rest_object(**kwargs) rest_obj.update( - dict( - componentId=self._get_component_id(), - distribution=get_rest_dict(self.distribution), - limits=get_rest_dict(self.limits), - resources=get_rest_dict(self.resources, clear_empty_value=True), + convert_ordered_dict_to_dict( + dict( + componentId=self._get_component_id(), + distribution=get_rest_dict(self.distribution), + limits=get_rest_dict(self.limits), + resources=get_rest_dict(self.resources, clear_empty_value=True), + ) ) ) + return rest_obj @classmethod def _from_rest_object(cls, obj: dict) -> "Command": - inputs = obj.get("inputs", {}) - outputs = obj.get("outputs", {}) + obj = BaseNode._rest_object_to_init_params(obj) - obj["inputs"] = cls._from_rest_inputs(inputs) - obj["outputs"] = cls._from_rest_outputs(outputs) - - # resources + # resources, sweep won't have resources if "resources" in obj and obj["resources"]: resources = RestResourceConfiguration.from_dict(obj["resources"]) obj["resources"] = ResourceConfiguration._from_rest_object(resources) - # Change componentId -> component, computeId -> compute + # Change componentId -> component component_id = obj.pop("componentId", None) - compute_id = obj.pop("computeId", None) obj["component"] = component_id - obj["compute"] = get_resource_name_from_arm_id_safe(compute_id) - # distribution + # distribution, sweep won't have distribution if "distribution" in obj and obj["distribution"]: obj["distribution"] = DistributionConfiguration._from_rest_object(obj["distribution"]) + # handle limits if "limits" in obj and obj["limits"]: rest_limits = RestCommandJobLimits.from_dict(obj["limits"]) obj["limits"] = CommandJobLimits()._from_rest_object(rest_limits) + return Command(**obj) def _build_inputs(self): diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py index 177a37010ef6..ea6edfc638a2 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py @@ -3,7 +3,6 @@ # --------------------------------------------------------- import copy import logging -import uuid from typing import Dict, List, Union from enum import Enum import re @@ -11,45 +10,22 @@ from marshmallow import Schema from .base_node import BaseNode -from azure.ai.ml._restclient.v2022_02_01_preview.models import ( - JobInput as RestJobInput, - JobOutput as RestJobOutput, -) -from azure.ai.ml.constants import ( - ComponentJobConstants, - BASE_PATH_CONTEXT_KEY, - NodeType, - ComponentSource, - ARM_ID_PREFIX, -) - -from azure.ai.ml.entities._job._input_output_helpers import ( - to_rest_dataset_literal_inputs, - from_rest_inputs_to_dataset_literal, - to_rest_data_outputs, - from_rest_data_outputs, -) -from 
azure.ai.ml.entities._job.pipeline._pipeline_job_helpers import ( - process_sdk_component_job_io, - from_dict_to_rest_io, -) +from azure.ai.ml.constants import NodeType, ARM_ID_PREFIX from azure.ai.ml.entities import ( Component, ParallelComponent, ParallelJob, ResourceConfiguration, - Environment, ) from azure.ai.ml.entities._inputs_outputs import Input, Output -from azure.ai.ml._restclient.v2022_02_01_preview.models import ResourceConfiguration as RestResourceConfiguration +from .._job.distribution import DistributionConfiguration from .._job.pipeline._io import PipelineInput, PipelineOutputBase from azure.ai.ml.entities._deployment.deployment_settings import BatchRetrySettings from azure.ai.ml.entities._job.parallel.parallel_task import ParallelTask from azure.ai.ml.entities._job.parallel.retry_settings import RetrySettings -from azure.ai.ml.entities._job.parallel.parameterized_parallel import ParameterizedParallel from .._util import validate_attribute_type, convert_ordered_dict_to_dict, get_rest_dict from ..._schema import PathAwareSchema -from ..._utils._arm_id_utils import get_resource_name_from_arm_id_safe +from azure.ai.ml._restclient.v2022_02_01_preview.models import ResourceConfiguration as RestResourceConfiguration module_logger = logging.getLogger(__name__) @@ -116,11 +92,13 @@ def __init__( ): # validate init params are valid type validate_attribute_type(attrs_to_check=locals(), attr_type_map=self._attr_type_map()) - self._init = True kwargs.pop("type", None) - _from_component_func = kwargs.pop("_from_component_func", False) - BaseNode.__init__(self, type=NodeType.PARALLEL, component=component, compute=compute, **kwargs) + BaseNode.__init__( + self, type=NodeType.PARALLEL, component=component, inputs=inputs, outputs=outputs, compute=compute, **kwargs + ) + # init mark for _AttrDict + self._init = True self._task = task @@ -165,10 +143,12 @@ def __init__( self.mini_batch_size = self.mini_batch_size or self.component.mini_batch_size self._task = self._task or self.component.task - # initialize io - inputs, outputs = inputs or {}, outputs or {} + self._init = False + + @classmethod + def _get_supported_inputs_types(cls): # when command node is constructed inside dsl.pipeline, inputs can be PipelineInput or Output of another node - supported_input_types = ( + return ( PipelineInput, PipelineOutputBase, Input, @@ -178,32 +158,10 @@ def __init__( float, Enum, ) - self._validate_io(inputs, supported_input_types, Input) - supported_output_types = (str, Output) - self._validate_io(outputs, supported_output_types, Output) - # parse empty dict to None so we won't pass default mode, type to backend - for k, v in inputs.items(): - if v == {}: - inputs[k] = None - # TODO: get rid of self._job_inputs, self._job_outputs once we have unified Input - self._job_inputs, self._job_outputs = inputs, outputs - if isinstance(component, Component): - # Build the inputs from component input definition and given inputs, unfilled inputs will be None - self._inputs = self._build_inputs_dict(component.inputs, inputs or {}) - # Build the outputs from component output definition and given outputs, unfilled outputs will be None - self._outputs = self._build_outputs_dict(component.outputs, outputs or {}) - else: - # Build inputs/outputs dict without meta when definition not available - self._inputs = self._build_inputs_dict_without_meta(inputs or {}) - self._outputs = self._build_outputs_dict_without_meta(outputs or {}) - - # Generate an id for every component instance - self._instance_id = str(uuid.uuid4()) 
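Both Command and Parallel open __init__ with validate_attribute_type(attrs_to_check=locals(), attr_type_map=self._attr_type_map()), as the context line above shows. A simplified stand-in for that helper from entities/_util.py; only the call shape is taken from the diff, the internals here are assumed:

    def validate_attribute_type(attrs_to_check: dict, attr_type_map: dict) -> None:
        # Each declared attribute must be unset or an instance of its allowed types
        for attr, expected_types in attr_type_map.items():
            value = attrs_to_check.get(attr)
            if value is not None and not isinstance(value, expected_types):
                raise TypeError(f"{attr} must be an instance of {expected_types}, got {type(value)}")

    validate_attribute_type(
        attrs_to_check={"component": "azureml:my_component:1", "retry_settings": None},
        attr_type_map={"component": (str,), "retry_settings": (dict,)},
    )  # passes: component is a str, retry_settings is unset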
- if _from_component_func: - # add current component in pipeline stack for dsl scenario - self._register_in_current_pipeline_component_builder() - self._init = False + @classmethod + def _get_supported_outputs_types(cls): + return str, Output @property def retry_settings(self) -> RetrySettings: @@ -262,29 +220,6 @@ def set_resources( if isinstance(self.component, Component): self.component.resources = self.resources - def _initializing(self) -> bool: - # use this to indicate ongoing init process so all attributes set during init process won't be set as - # arbitrary attribute in _AttrDict - # TODO: replace this hack - return self._init - - @classmethod - def _validate_io(cls, io_dict: dict, allowed_types: tuple, parse_cls): - for key, value in io_dict.items(): - # output mode of last node should not affect input mode of next node - if isinstance(value, PipelineOutputBase): - # value = copy.deepcopy(value) - value = value._deepcopy() # Decoupled input and output - io_dict[key] = value - value.mode = None - if value is None or isinstance(value, allowed_types): - pass - elif isinstance(value, dict): - # parse dict to allowed type - io_dict[key] = parse_cls(**value) - else: - raise Exception(f"Expecting {allowed_types} for input/output {key}, got {type(value)} instead.") - @classmethod def _attr_type_map(cls) -> dict: return { @@ -335,7 +270,7 @@ def _parallel_attr_to_dict(self, attr, base_type) -> dict: return convert_ordered_dict_to_dict(rest_attr) @classmethod - def _picked_fields_in_to_rest(cls) -> List[str]: + def _picked_fields_from_dict_to_rest_object(cls) -> List[str]: return [ "type", "resources", @@ -347,44 +282,24 @@ def _picked_fields_in_to_rest(cls) -> List[str]: "input_data", ] - def _node_specified_pre_to_rest_operations(self, rest_obj): - for key in self._picked_fields_in_to_rest(): - if key not in rest_obj: - rest_obj[key] = None - + def _to_rest_object(self, **kwargs) -> dict: + rest_obj = super(Parallel, self)._to_rest_object(**kwargs) rest_obj.update( - dict( - componentId=self._get_component_id(), - retry_settings=get_rest_dict(self.retry_settings), - logging_level=self.logging_level, - mini_batch_size=self.mini_batch_size, - resources=self.resources._to_rest_object().as_dict() if self.resources else None, + convert_ordered_dict_to_dict( + dict( + componentId=self._get_component_id(), + retry_settings=get_rest_dict(self.retry_settings), + logging_level=self.logging_level, + mini_batch_size=self.mini_batch_size, + resources=self.resources._to_rest_object().as_dict() if self.resources else None, + ) ) ) + return rest_obj @classmethod def _from_rest_object(cls, obj: dict) -> "Parallel": - inputs = obj.get("inputs", {}) - outputs = obj.get("outputs", {}) - - # JObject -> RestJobInput/RestJobOutput - input_bindings, rest_inputs = from_dict_to_rest_io(inputs, RestJobInput, [ComponentJobConstants.INPUT_PATTERN]) - output_bindings, rest_outputs = from_dict_to_rest_io( - outputs, RestJobOutput, [ComponentJobConstants.OUTPUT_PATTERN] - ) - - # RestJobInput/RestJobOutput -> JobInput/JobOutput - dataset_literal_inputs = from_rest_inputs_to_dataset_literal(rest_inputs) - data_outputs = from_rest_data_outputs(rest_outputs) - - obj["inputs"] = {**dataset_literal_inputs, **input_bindings} - obj["outputs"] = {**data_outputs, **output_bindings} - - # resources - if "resources" in obj and obj["resources"]: - resources = RestResourceConfiguration.from_dict(obj["resources"]) - obj["resources"] = ResourceConfiguration._from_rest_object(resources) - + obj = 
BaseNode._rest_object_to_init_params(obj) # retry_settings if "retry_settings" in obj and obj["retry_settings"]: obj["retry_settings"] = RetrySettings.from_dict(obj["retry_settings"]) @@ -399,11 +314,19 @@ def _from_rest_object(cls, obj: dict) -> "Parallel": if task_env and isinstance(task_env, str) and task_env.startswith(ARM_ID_PREFIX): obj["task"].environment = task_env[len(ARM_ID_PREFIX) :] - # Change componentId -> component, computeId -> compute + # resources, sweep won't have resources + if "resources" in obj and obj["resources"]: + resources = RestResourceConfiguration.from_dict(obj["resources"]) + obj["resources"] = ResourceConfiguration._from_rest_object(resources) + + # Change componentId -> component component_id = obj.pop("componentId", None) - compute_id = obj.pop("computeId", None) obj["component"] = component_id - obj["compute"] = get_resource_name_from_arm_id_safe(compute_id) + + # distribution, sweep won't have distribution + if "distribution" in obj and obj["distribution"]: + obj["distribution"] = DistributionConfiguration._from_rest_object(obj["distribution"]) + return Parallel(**obj) def _build_inputs(self): diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py index 72f35718f09a..d44d45dfc70a 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py @@ -2,48 +2,32 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import logging -import uuid from typing import Dict, Union, List -from marshmallow import Schema +import pydash +from marshmallow import Schema, EXCLUDE from azure.ai.ml._restclient.v2022_02_01_preview.models import ( - JobInput as RestJobInput, - JobOutput as RestJobOutput, AmlToken, ManagedIdentity, UserIdentity, ) -import pydash -from marshmallow.utils import EXCLUDE -from azure.ai.ml.constants import ComponentJobConstants, BASE_PATH_CONTEXT_KEY, NodeType -from azure.ai.ml._utils.utils import map_single_brackets_and_warn +from azure.ai.ml.constants import NodeType, BASE_PATH_CONTEXT_KEY from azure.ai.ml.entities._job.pipeline._exceptions import UserErrorException from azure.ai.ml.entities._job.pipeline._io import PipelineInputBase from azure.ai.ml.entities._job.sweep.early_termination_policy import EarlyTerminationPolicy from azure.ai.ml.entities._job.sweep.objective import Objective from azure.ai.ml.entities._job.sweep.parameterized_sweep import ParameterizedSweep from azure.ai.ml.entities._job.sweep.search_space import SweepDistribution -from azure.ai.ml.entities._job._input_output_helpers import ( - to_rest_dataset_literal_inputs, - from_rest_inputs_to_dataset_literal, - to_rest_data_outputs, - from_rest_data_outputs, - validate_inputs_for_command, -) -from azure.ai.ml.entities._job.pipeline._pipeline_job_helpers import ( - process_sdk_component_job_io, - from_dict_to_rest_io, -) -from azure.ai.ml.entities import CommandComponent, Component +from azure.ai.ml.entities import CommandComponent from azure.ai.ml.entities._inputs_outputs import Input, Output from azure.ai.ml.sweep import SweepJob from azure.ai.ml.entities._job.sweep.sampling_algorithm import SamplingAlgorithm from azure.ai.ml.entities._job.job_limits import SweepJobLimits from .base_node import BaseNode -from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget +from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget from 
..._schema import PathAwareSchema -from ..._utils._arm_id_utils import get_resource_name_from_arm_id_safe +from ..._schema._utils.data_binding_expression import support_data_binding_expression_for_fields module_logger = logging.getLogger(__name__) @@ -88,25 +72,15 @@ def __init__( identity: Union[ManagedIdentity, AmlToken, UserIdentity] = None, **kwargs, ): - self._init = True - # TODO: get rid of self._job_inputs, self._job_outputs once we have general Input self._job_inputs, self._job_outputs = inputs, outputs - # initialize io - if isinstance(trial, Component): - # Build the inputs from component input definition and given inputs, unfilled inputs will be None - self._inputs = self._build_inputs_dict(trial.inputs, inputs or {}) - # Build the outputs from component output definition and given outputs, unfilled outputs will be None - self._outputs = self._build_outputs_dict(trial.outputs, outputs or {}) - else: - # Build inputs/outputs dict without meta when definition not available - self._inputs = self._build_inputs_dict_without_meta(inputs or {}) - self._outputs = self._build_outputs_dict_without_meta(outputs or {}) - kwargs.pop("type", None) - _from_component_func = kwargs.pop("_from_component_func", False) - BaseNode.__init__(self, type=NodeType.SWEEP, component=trial, compute=compute, **kwargs) + BaseNode.__init__( + self, type=NodeType.SWEEP, component=trial, inputs=inputs, outputs=outputs, compute=compute, **kwargs + ) + # init mark for _AttrDict + self._init = True ParameterizedSweep.__init__( self, sampling_algorithm=sampling_algorithm, @@ -116,11 +90,6 @@ def __init__( search_space=search_space, ) - # Generate an id for every component instance - self._instance_id = str(uuid.uuid4()) - if _from_component_func: - # add current component in pipeline stack for dsl scenario - self._register_in_current_pipeline_component_builder() self.identity = identity self._init = False @@ -128,19 +97,16 @@ def __init__( def trial(self): return self._component - def _initializing(self) -> bool: - return self._init - @classmethod - def _picked_fields_in_to_rest(cls) -> List[str]: + def _picked_fields_from_dict_to_rest_object(cls) -> List[str]: return ["limits", "sampling_algorithm", "objective", "early_termination", "search_space"] - def _node_specified_pre_to_rest_operations(self, rest_obj): - # trial - self._override_missing_properties_from_trial() - if isinstance(self.trial, CommandComponent): - self.trial.command = map_single_brackets_and_warn(self.trial.command) - validate_inputs_for_command(self.trial.command, {**self.inputs, **self.search_space}) + def _to_rest_object(self, **kwargs) -> dict: + rest_obj = super(Sweep, self)._to_rest_object(**kwargs) + # hack: ParameterizedSweep.early_termination is not allowed to be None + for key in ["early_termination"]: + if key in rest_obj and rest_obj[key] is None: + del rest_obj[key] rest_obj.update( dict( @@ -148,6 +114,26 @@ def _node_specified_pre_to_rest_operations(self, rest_obj): trial=self._get_trial_component_rest_obj(), ) ) + return rest_obj + + @classmethod + def _from_rest_object(cls, obj: dict) -> "Sweep": + obj = BaseNode._rest_object_to_init_params(obj) + # TODO: use cls._get_schema() to load from rest object + from azure.ai.ml._schema._sweep.parameterized_sweep import ParameterizedSweepSchema + + schema = ParameterizedSweepSchema(context={BASE_PATH_CONTEXT_KEY: "./"}) + support_data_binding_expression_for_fields(schema, ["type"]) + + base_sweep = schema.load(obj, unknown=EXCLUDE, partial=True) + for key, value in base_sweep.items(): + 
obj[key] = value + + # trial + trial_component_id = pydash.get(obj, "trial.componentId", None) + obj["trial"] = trial_component_id # check this + + return Sweep(**obj) def _get_trial_component_rest_obj(self): # trial component to rest object is different from usual component @@ -194,44 +180,6 @@ def _to_job(self) -> SweepJob: def _get_component_attr_name(cls): return "trial" - @classmethod - def _from_rest_object(cls, obj: dict) -> "Sweep": - # TODO: use cls._get_schema() to load from rest object - from azure.ai.ml._schema._sweep.parameterized_sweep import ParameterizedSweepSchema - from ..._schema._utils.data_binding_expression import support_data_binding_expression_for_fields - - schema = ParameterizedSweepSchema(context={BASE_PATH_CONTEXT_KEY: "./"}) - support_data_binding_expression_for_fields(schema, ["type"]) - - base_sweep = schema.load(obj, unknown=EXCLUDE) - for key, value in base_sweep.items(): - obj[key] = value - inputs = obj.get("inputs", {}) - outputs = obj.get("outputs", {}) - - # JObject -> RestJobInput/RestJobOutput - input_bindings, rest_inputs = from_dict_to_rest_io(inputs, RestJobInput, [ComponentJobConstants.INPUT_PATTERN]) - output_bindings, rest_outputs = from_dict_to_rest_io( - outputs, RestJobOutput, [ComponentJobConstants.OUTPUT_PATTERN] - ) - - # RestJobInput/RestJobOutput -> JobInput/JobOutput - dataset_literal_inputs = from_rest_inputs_to_dataset_literal(rest_inputs) - data_outputs = from_rest_data_outputs(rest_outputs) - - obj["inputs"] = {**dataset_literal_inputs, **input_bindings} - obj["outputs"] = {**data_outputs, **output_bindings} - - # Change computeId -> compute - compute_id = obj.pop("computeId", None) - obj["compute"] = get_resource_name_from_arm_id_safe(compute_id) - - # trial - trial_component_id = pydash.get(obj, "trial.componentId", None) - obj["trial"] = trial_component_id # check this - - return Sweep(**obj) - @classmethod def _create_schema_for_validation(cls, context) -> Union[PathAwareSchema, Schema]: from azure.ai.ml._schema.pipeline.component_job import SweepSchema diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py index 8d3f536adf6b..39f3d90446a2 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -import json import os from pathlib import Path from marshmallow import INCLUDE, Schema @@ -25,7 +24,7 @@ from azure.ai.ml.constants import NodeType from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput from .component import Component -from .._util import validate_attribute_type +from .._util import validate_attribute_type, convert_ordered_dict_to_dict from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget from .._validation import ValidationResult, _ValidationResultBuilder from ..._schema import PathAwareSchema @@ -154,12 +153,7 @@ def _attr_type_map(cls) -> dict: def _to_dict(self) -> Dict: """Dump the command component content into a dictionary.""" - - # Distribution inherits from autorest generated class, use as_dist() to dump to json - # Replace the name of $schema to schema. 
- component_schema_dict = self._dump_for_validation() - component_schema_dict.pop("base_path", None) - return {**self._other_parameter, **component_schema_dict} + return convert_ordered_dict_to_dict({**self._other_parameter, **super(CommandComponent, self)._to_dict()}) def _get_environment_id(self) -> Union[str, None]: # Return environment id of environment @@ -201,63 +195,6 @@ def _is_valid_data_binding_expression(self, data_binding_expression: str) -> boo return False return True - @classmethod - def _load_from_dict(cls, data: Dict, context: Dict, **kwargs) -> "CommandComponent": - return CommandComponent( - yaml_str=kwargs.pop("yaml_str", None), - _source=kwargs.pop("_source", ComponentSource.YAML), - **(CommandComponentSchema(context=context).load(data, unknown=INCLUDE, **kwargs)), - ) - - def _to_rest_object(self) -> ComponentVersionData: - # Convert nested ordered dict to dict. - # TODO: we may need to use original dict from component YAML(only change code and environment), returning - # parsed dict might add default value for some field, eg: if we add property "optional" with default value - # to ComponentInput, it will add field "optional" to all inputs even if user doesn't specify one - component = json.loads(json.dumps(self._to_dict())) - - properties = ComponentVersionDetails( - component_spec=component, - description=self.description, - is_anonymous=self._is_anonymous, - properties=self.properties, - tags=self.tags, - ) - result = ComponentVersionData(properties=properties) - result.name = self.name - return result - - @classmethod - def _load_from_rest(cls, obj: ComponentVersionData) -> "CommandComponent": - rest_component_version = obj.properties - inputs = { - k: ComponentInput._from_rest_object(v) - for k, v in rest_component_version.component_spec.pop("inputs", {}).items() - } - outputs = { - k: ComponentOutput._from_rest_object(v) - for k, v in rest_component_version.component_spec.pop("outputs", {}).items() - } - - distribution = rest_component_version.component_spec.pop("distribution", None) - if distribution: - distribution = DistributionConfiguration._from_rest_object(distribution) - - command_component = CommandComponent( - id=obj.id, - is_anonymous=rest_component_version.is_anonymous, - creation_context=obj.system_data, - inputs=inputs, - outputs=outputs, - distribution=distribution, - # use different schema for component from rest since name may be "invalid" - **RestCommandComponentSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).load( - rest_component_version.component_spec, unknown=INCLUDE - ), - _source=ComponentSource.REST, - ) - return command_component - @classmethod def _parse_args_description_from_docstring(cls, docstring): return parse_args_description_from_docstring(docstring) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py index 66c50ca20e80..009f62d7f409 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py @@ -4,14 +4,10 @@ from os import PathLike from pathlib import Path from typing import Dict, Union -from abc import abstractmethod -from marshmallow import Schema -from marshmallow.exceptions import ValidationError -from azure.ai.ml._schema import PathAwareSchema from azure.ai.ml.entities import Asset -from azure.ai.ml.entities._component.utils import build_validate_input, build_validate_output -from azure.ai.ml._restclient.v2022_05_01.models import 
ComponentVersionData, SystemData +from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput +from azure.ai.ml._restclient.v2022_05_01.models import ComponentVersionData, SystemData, ComponentVersionDetails from azure.ai.ml.constants import ( CommonYamlFields, BASE_PATH_CONTEXT_KEY, @@ -93,6 +89,9 @@ def __init__( is_anonymous=kwargs.pop("is_anonymous", False), base_path=kwargs.pop("base_path", None), ) + # update component name to ANONYMOUS_COMPONENT_NAME if it is anonymous + if hasattr(self, "_is_anonymous"): + self._set_is_anonymous(self._is_anonymous) # TODO: check why do we dropped kwargs inputs = inputs if inputs else {} @@ -102,8 +101,8 @@ def __init__( self._type = type self._display_name = display_name self._is_deterministic = is_deterministic - self._inputs = build_validate_input(inputs) - self._outputs = build_validate_output(outputs) + self._inputs = self.build_validate_io(inputs, is_input=True) + self._outputs = self.build_validate_io(outputs, is_input=False) self._source = kwargs.pop("_source", ComponentSource.SDK) # Store original yaml self._yaml_str = yaml_str @@ -112,6 +111,30 @@ def __init__( self._func = _generate_component_function(self) + @classmethod + def build_validate_io(cls, io_dict: Dict, is_input: bool): + from azure.ai.ml import Output, Input + + component_io = {} + for name, port in io_dict.items(): + if not name.isidentifier(): + msg = "{!r} is not a valid parameter name" + raise ValidationException( + message=msg.format(name), + no_personal_data_message=msg.format("[name]"), + target=ErrorTarget.COMPONENT, + ) + else: + if is_input: + if isinstance(port, Input): + port = port._to_dict() + component_io[name] = ComponentInput(port) + else: + if isinstance(port, Output): + port = port._to_dict() + component_io[name] = ComponentOutput(port) + return component_io + @property def type(self) -> str: """Type of the component, default is 'command'. @@ -220,20 +243,6 @@ def dump(self, path: Union[PathLike, str]) -> None: def _get_validation_error_target(cls) -> ErrorTarget: return ErrorTarget.COMPONENT - def _schema_validate(self) -> ValidationResult: - """Validate the component. - - :raises: ValidationException - """ - origin_name = self.name - if hasattr(self, "_is_anonymous") and getattr(self, "_is_anonymous"): - # The name of an anonymous component is an uuid generated based on its hash. - # Can't change naming logic to avoid breaking previous component reuse, so hack here. - self.name = "dummy_" + self.name.replace("-", "_") - result = SchemaValidatableMixin._schema_validate(self) - self.name = origin_name - return result - @classmethod def _load( cls, @@ -249,54 +258,44 @@ def _load( PARAMS_OVERRIDE_KEY: params_override, } - from azure.ai.ml.entities import CommandComponent, ParallelComponent - - component_type = None type_in_override = find_type_in_override(params_override) - # override takes the priority - customized_component_type = type_in_override or data.get(CommonYamlFields.TYPE, NodeType.COMMAND) - if customized_component_type == NodeType.COMMAND: - component_type = CommandComponent - elif customized_component_type == NodeType.PARALLEL: - component_type = ParallelComponent - else: - msg = f"Unsupported component type: {customized_component_type}." 
-            raise ValidationException(
-                message=msg,
-                target=ErrorTarget.COMPONENT,
-                no_personal_data_message=msg,
-                error_category=ErrorCategory.USER_ERROR,
-            )
-        # Load yaml content
-        if yaml_path and Path(yaml_path).is_file():
-            with open(yaml_path, "r") as f:
-                kwargs["yaml_str"] = f.read()
-
-        return component_type._load_from_dict(data=data, context=context, **kwargs)
+        from azure.ai.ml.entities._component.component_factory import component_factory
+
+        return component_factory.load_from_dict(_type=type_in_override, data=data, context=context, **kwargs)
 
     @classmethod
     def _from_rest_object(cls, component_rest_object: ComponentVersionData) -> "Component":
-        from azure.ai.ml.entities import CommandComponent, ParallelComponent
-
-        # TODO: should be RestComponentType.CommandComponent, but it did not get generated
-        component_type = component_rest_object.properties.component_spec["type"]
-        if component_type == NodeType.COMMAND:
-            return CommandComponent._load_from_rest(component_rest_object)
-        elif component_type == NodeType.PARALLEL:
-            return ParallelComponent._load_from_rest(component_rest_object)
-        else:
-            msg = f"Unsupported component type {component_type}."
-            raise ComponentException(
-                message=msg,
-                target=ErrorTarget.COMPONENT,
-                no_personal_data_message=msg,
-                error_category=ErrorCategory.SYSTEM_ERROR,
-            )
+        from azure.ai.ml.entities._component.component_factory import component_factory
+
+        return component_factory.load_from_rest(obj=component_rest_object)
+
+    def _to_rest_object(self) -> ComponentVersionData:
+        # TODO: we may need to use the original dict from the component YAML (only changing code and environment);
+        # the parsed dict might add default values for some fields, e.g. if we add a property "optional" with a
+        # default value to ComponentInput, it will add the field "optional" to all inputs even if the user doesn't
+        # specify one
+        component = self._to_dict()
+
+        properties = ComponentVersionDetails(
+            component_spec=component,
+            description=self.description,
+            is_anonymous=self._is_anonymous,
+            properties=self.properties,
+            tags=self.tags,
+        )
+        result = ComponentVersionData(properties=properties)
+        result.name = self.name
+        return result
 
-    @classmethod
-    @abstractmethod
-    def _load_from_dict(cls, data: Dict, context: Dict, **kwargs) -> "Component":
-        pass
+    def _to_dict(self) -> Dict:
+        """Dump the component content into a dictionary."""
+
+        # Distribution inherits from an autorest-generated class; use as_dict() to dump it to json.
+        # Replace the name of $schema to schema.
+        component_schema_dict = self._dump_for_validation()
+        component_schema_dict.pop("base_path", None)
+
+        # TODO: handle other_parameters and remove the override from subclasses
+        return component_schema_dict
 
     def _get_telemetry_values(self):
         return {"type": self.type, "source": self._source, "is_anonymous": self._is_anonymous}
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py
new file mode 100644
index 000000000000..f2dfab14c9c6
--- /dev/null
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py
@@ -0,0 +1,147 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from typing import Callable, Dict, Tuple, Any
+from marshmallow import INCLUDE, Schema
+from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory, ComponentException
+from azure.ai.ml._restclient.v2022_05_01.models import ComponentVersionData
+from azure.ai.ml._schema.component import BaseComponentSchema
+from azure.ai.ml.constants import (
+    NodeType,
+    ComponentSource,
+    BASE_PATH_CONTEXT_KEY,
+    CommonYamlFields,
+    ANONYMOUS_COMPONENT_NAME,
+)
+from azure.ai.ml.entities import ParallelComponent, CommandComponent, Component
+from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput
+from azure.ai.ml.entities._job.distribution import DistributionConfiguration
+
+
+class _ComponentFactory:
+    """A class to create component instances from yaml dicts or rest objects without hard-coded type checks."""
+
+    def __init__(self):
+        self._create_instance_funcs = {}
+        self._create_schema_funcs = {}
+
+        self.register_type(
+            _type=NodeType.PARALLEL,
+            create_instance_func=lambda: ParallelComponent.__new__(ParallelComponent),
+            create_schema_func=ParallelComponent._create_schema_for_validation,
+        )
+        self.register_type(
+            _type=NodeType.COMMAND,
+            create_instance_func=lambda: CommandComponent.__new__(CommandComponent),
+            create_schema_func=CommandComponent._create_schema_for_validation,
+        )
+
+    def get_create_funcs(self, _type: str) -> Tuple[Callable[..., Component], Callable[[Any], Schema]]:
+        """Get the registered functions to create an instance and its corresponding schema for the given type."""
+        _type = _type.lower()
+        if _type not in self._create_instance_funcs:
+            msg = f"Unsupported component type: {_type}."
+            raise ValidationException(
+                message=msg,
+                target=ErrorTarget.COMPONENT,
+                no_personal_data_message=msg,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+        create_instance_func = self._create_instance_funcs[_type]
+        create_schema_func = self._create_schema_funcs[_type]
+        return create_instance_func, create_schema_func
+
+    def register_type(
+        self, _type: str, create_instance_func: Callable[..., Component], create_schema_func: Callable[[Any], Schema]
+    ):
+        """Register a new component type.
+
+        :param _type: the type name of the component.
+        :type _type: str
+        :param create_instance_func: a function to create an instance of the component.
+        :type create_instance_func: Callable[..., Component]
+        :param create_schema_func: a function to create a schema for the component.
+        :type create_schema_func: Callable[[Any], Schema]
+        """
+        self._create_instance_funcs[_type.lower()] = create_instance_func
+        self._create_schema_funcs[_type.lower()] = create_schema_func
+
+    def load_from_dict(self, *, data: Dict, context: Dict, _type: str = None, **kwargs) -> Component:
+        """Load a component from a yaml dict.
+
+        :param data: the yaml dict.
+        :type data: Dict
+        :param context: the context of the yaml dict.
+        :type context: Dict
+        :param _type: the type name of the component. When None, it will be inferred from the yaml dict.
+        :type _type: str
+        """
+        if _type is None:
+            _type = data.get(CommonYamlFields.TYPE, NodeType.COMMAND)
+        else:
+            data[CommonYamlFields.TYPE] = _type
+        _type = _type.lower()
+        create_instance_func, create_schema_func = self.get_create_funcs(_type)
+        new_instance = create_instance_func()
+        new_instance.__init__(
+            yaml_str=kwargs.pop("yaml_str", None),
+            _source=kwargs.pop("_source", ComponentSource.YAML),
+            **(create_schema_func(context).load(data, unknown=INCLUDE, **kwargs)),
+        )
+        return new_instance
+
+    def load_from_rest(self, *, obj: ComponentVersionData, _type: str = None) -> Component:
+        """Load a component from a rest object.
+
+        :param obj: the rest object.
+        :type obj: ComponentVersionData
+        :param _type: the type name of the component. When None, it will be inferred from the rest object.
+        :type _type: str
+        """
+        rest_component_version = obj.properties
+        # the type name in the rest object may be invalid
+        if _type is None:
+            _type = rest_component_version.component_spec[CommonYamlFields.TYPE]
+        else:
+            rest_component_version.component_spec[CommonYamlFields.TYPE] = _type
+
+        _type = _type.lower()
+        inputs = {
+            k: ComponentInput._from_rest_object(v)
+            for k, v in rest_component_version.component_spec.pop("inputs", {}).items()
+        }
+        outputs = {
+            k: ComponentOutput._from_rest_object(v)
+            for k, v in rest_component_version.component_spec.pop("outputs", {}).items()
+        }
+
+        distribution = rest_component_version.component_spec.pop("distribution", None)
+        if distribution:
+            distribution = DistributionConfiguration._from_rest_object(distribution)
+
+        # an invalid name shouldn't block loading from the rest object, so swap in a placeholder name while loading
+        # maybe override the serialization method for the name field instead?
+        create_instance_func, create_schema_func = self.get_create_funcs(_type)
+        origin_name = rest_component_version.component_spec[CommonYamlFields.NAME]
+        rest_component_version.component_spec[CommonYamlFields.NAME] = ANONYMOUS_COMPONENT_NAME
+
+        new_instance = create_instance_func()
+        new_instance.__init__(
+            id=obj.id,
+            is_anonymous=rest_component_version.is_anonymous,
+            creation_context=obj.system_data,
+            inputs=inputs,
+            outputs=outputs,
+            distribution=distribution,
+            **(
+                create_schema_func({BASE_PATH_CONTEXT_KEY: "./"}).load(
+                    rest_component_version.component_spec, unknown=INCLUDE
+                )
+            ),
+            _source=ComponentSource.REST,
+        )
+        new_instance.name = origin_name
+        return new_instance
+
+
+component_factory = _ComponentFactory()
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py
index 7e87c2f7482c..db0e51150e40 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py
@@ -48,7 +48,7 @@ class ComponentInput(ComponentIOItem):
     def __init__(self, port_dict: Dict):
         # parse value from string to its original type.
eg: "false" -> False - if port_dict["type"] in self.PARAM_PARSERS.keys(): + if isinstance(port_dict["type"], str) and port_dict["type"] in self.PARAM_PARSERS.keys(): for key in ["default", "min", "max"]: if key in port_dict.keys(): port_dict[key] = self.PARAM_PARSERS[port_dict["type"]](port_dict[key]) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py index 20b9dde338c1..a5ee6cf687d9 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -import json import re import os @@ -26,7 +25,7 @@ from azure.ai.ml.entities._job.parallel.retry_settings import RetrySettings from azure.ai.ml.entities._job.parallel.parameterized_parallel import ParameterizedParallel from azure.ai.ml.entities._job.parallel.parallel_task import ParallelTask -from .._util import validate_attribute_type +from .._util import validate_attribute_type, convert_ordered_dict_to_dict from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget from ..._schema import PathAwareSchema @@ -234,11 +233,7 @@ def _attr_type_map(cls) -> dict: def _to_dict(self) -> Dict: """Dump the parallel component content into a dictionary.""" - - # Replace the name of $schema to schema. - component_schema_dict = self._dump_for_validation() - component_schema_dict.pop("base_path", None) - return {**self._other_parameter, **component_schema_dict} + return convert_ordered_dict_to_dict({**self._other_parameter, **super(ParallelComponent, self)._to_dict()}) @classmethod def _load_from_dict(cls, data: Dict, context: Dict, **kwargs) -> "ParallelComponent": @@ -248,24 +243,6 @@ def _load_from_dict(cls, data: Dict, context: Dict, **kwargs) -> "ParallelCompon **(ParallelComponentSchema(context=context).load(data, unknown=INCLUDE, **kwargs)), ) - def _to_rest_object(self) -> ComponentVersionData: - # Convert nested ordered dict to dict. 
- # TODO: we may need to use original dict from component YAML(only change code and environment), returning - # parsed dict might add default value for some field, eg: if we add property "optional" with default value - # to ComponentInput, it will add field "optional" to all inputs even if user doesn't specify one - component = json.loads(json.dumps(self._to_dict())) - - properties = ComponentVersionDetails( - component_spec=component, - description=self.description, - is_anonymous=self._is_anonymous, - properties=self.properties, - tags=self.tags, - ) - result = ComponentVersionData(properties=properties) - result.name = self.name - return result - @classmethod def _load_from_rest(cls, obj: ComponentVersionData) -> "ParallelComponent": rest_component_version = obj.properties diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py index 2d587b61f48c..1048e4914fa8 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py @@ -4,7 +4,6 @@ from typing import Dict from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput -from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget def component_io_to_rest_obj(io_dict: Dict): @@ -31,29 +30,3 @@ def component_output_from_rest_obj(component_io: Dict): io = ComponentOutput._from_rest_object(rest_obj) component_io_dict[name] = io return component_io_dict - - -def build_validate_input(io_dict: Dict): - component_io = {} - for name, port in io_dict.items(): - if not name.isidentifier(): - msg = "{!r} is not a valid parameter name" - raise ValidationException( - message=msg.format(name), no_personal_data_message=msg.format("[name]"), target=ErrorTarget.COMPONENT - ) - else: - component_io[name] = ComponentInput(port) - return component_io - - -def build_validate_output(io_dict: Dict): - component_io = {} - for name, port in io_dict.items(): - if not name.isidentifier(): - msg = "{!r} is not a valid parameter name" - raise ValidationException( - message=msg.format(name), no_personal_data_message=msg.format("[name]"), target=ErrorTarget.COMPONENT - ) - else: - component_io[name] = ComponentOutput(port) - return component_io diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py index e31b4dda8a2c..4a91f448482f 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py @@ -63,7 +63,7 @@ def some_pipeline( from azure.ai.ml.entities._job.pipeline._exceptions import UserErrorException, MldesignerComponentDefiningError from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput -from azure.ai.ml.constants import InputOutputModes, AssetTypes +from azure.ai.ml.constants import InputOutputModes, AssetTypes, IO_CONSTANTS from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory, ComponentException from azure.ai.ml.entities._mixins import DictMixin @@ -95,25 +95,6 @@ class Input(DictMixin): :type description: str """ - # For validation, indicates specific parameters combination for each type - _TYPE_COMBINATION_MAPPING = { - "uri_folder": ["path", "mode"], - "uri_file": ["path", "mode"], - "mltable": ["path", "mode"], - "mlflow_model": ["path", "mode"], - "custom_model": ["path", "mode"], - "integer": ["default", "min", "max"], - "number": 
["default", "min", "max"], - "string": ["default"], - "boolean": ["default"], - } - _ALLOWED_TYPES = { - "integer": (int), - "string": (str), - "number": (float), - "boolean": (bool), - } - _DATA_TYPE_MAPPING = {int: "integer", str: "string", float: "number", bool: "boolean"} _EMPTY = Parameter.empty @overload @@ -270,20 +251,20 @@ def __init__( self.type = type self.description = description - self._is_parameter_type = self.type in self._ALLOWED_TYPES + self._is_primitive_type = self.type in IO_CONSTANTS.PRIMITIVE_STR_2_TYPE if path and not isinstance(path, str): # this logic will make dsl data binding expression working in the same way as yaml # it's written to handle InputOutputBase, but there will be loop import if we import InputOutputBase here self.path = str(path) else: self.path = path - self.mode = None if self._is_parameter_type else mode + self.mode = None if self._is_primitive_type else mode self.default = default self.optional = True if optional is True else None self.min = min self.max = max self.enum = enum - self._allowed_types = self._ALLOWED_TYPES.get(self.type) + self._allowed_types = IO_CONSTANTS.PRIMITIVE_STR_2_TYPE.get(self.type) self._validate_parameter_combinations() def _to_dict(self, remove_name=True): @@ -327,7 +308,7 @@ def _parse_and_validate(self, val): :param str_val: The input string value from the command line. :return: The parsed value, an exception will be raised if the value is invalid. """ - if self._is_parameter_type: + if self._is_primitive_type: val = self._parse(val) if isinstance(val, str) else val self._validate_or_throw(val) return val @@ -416,8 +397,8 @@ def _validate_parameter_combinations(self): type = parameters.pop("type") # validate parameter combination - if type in self._TYPE_COMBINATION_MAPPING: - valid_parameters = self._TYPE_COMBINATION_MAPPING[type] + if type in IO_CONSTANTS.INPUT_TYPE_COMBINATION: + valid_parameters = IO_CONSTANTS.INPUT_TYPE_COMBINATION[type] for key, value in parameters.items(): if key not in valid_parameters and value is not None: msg = "Invalid parameter for '{}' Input, parameter '{}' should be None but got '{}'" @@ -429,14 +410,14 @@ def _validate_parameter_combinations(self): ) @classmethod - def _get_input_by_type(cls, t: type): - if t in cls._DATA_TYPE_MAPPING: - return cls(type=cls._DATA_TYPE_MAPPING[t]) + def _get_input_by_type(cls, t: type, optional=None): + if t in IO_CONSTANTS.PRIMITIVE_TYPE_2_STR: + return cls(type=IO_CONSTANTS.PRIMITIVE_TYPE_2_STR[t], optional=optional) return None @classmethod - def _get_default_string_input(cls): - return cls(type="string") + def _get_default_string_input(cls, optional=None): + return cls(type="string", optional=optional) @classmethod def _get_param_with_standard_annotation(cls, func): @@ -505,6 +486,7 @@ def __init__( # The name will be updated by the annotated variable name. 
self.name = None self.type = type + self._is_primitive_type = self.type in IO_CONSTANTS.PRIMITIVE_STR_2_TYPE self.description = description self.path = path @@ -636,15 +618,15 @@ def _is_dataset(data): # Handle enum values annotation = EnumInput(enum=val.__class__) else: - annotation = _get_annotation_cls_by_type(type(val), raise_error=False) + annotation = _get_annotation_cls_by_type(type(val), raise_error=False, optional=True) if not annotation: # Fall back to default - annotation = Input._get_default_string_input() + annotation = Input._get_default_string_input(optional=True) return annotation -def _get_annotation_cls_by_type(t: type, raise_error=False): - cls = Input._get_input_by_type(t) +def _get_annotation_cls_by_type(t: type, raise_error=False, optional=None): + cls = Input._get_input_by_type(t, optional=optional) if cls is None and raise_error: raise UserErrorException(f"Can't convert type {t} to azure.ai.ml.Input") return cls @@ -708,7 +690,7 @@ def _update_annotation_with_default(anno, name, default): return complete_annotation if isinstance(complete_annotation, Input): # Non-parameter Input has no default attribute - if complete_annotation._is_parameter_type and complete_annotation.default is not None: + if complete_annotation._is_primitive_type and complete_annotation.default is not None: # logger.warning( # f"Warning: Default value of f{complete_annotation.name!r} is set twice: " # f"{complete_annotation.default!r} and {default!r}, will use {default!r}" diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_attr_dict.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_attr_dict.py index 19557f87fc66..bfa9e93a1255 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_attr_dict.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_attr_dict.py @@ -136,6 +136,8 @@ def try_get_non_arbitrary_attr_for_potential_attr_dict(obj, attr): """ if isinstance(obj, _AttrDict): has_attr = not obj._is_arbitrary_attr(attr) + elif isinstance(obj, dict): + return obj[attr] if attr in obj else None else: has_attr = hasattr(obj, attr) if has_attr: diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py index ac62fb3ae6f5..4164c9ce9cb3 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py @@ -25,7 +25,7 @@ class ComponentTranslatableMixin: def _find_source_input_output_type(cls, input: str, pipeline_job_dict: dict): from azure.ai.ml.entities._job.automl.automl_job import AutoMLJob from azure.ai.ml.entities import CommandJob, ParallelJob - from azure.ai.ml.entities._builders import Command, Parallel, Sweep + from azure.ai.ml.entities._builders import BaseNode pipeline_job_inputs = pipeline_job_dict.get("inputs", {}) pipeline_job_outputs = pipeline_job_dict.get("outputs", {}) @@ -77,8 +77,8 @@ def _find_source_input_output_type(cls, input: str, pipeline_job_dict: dict): ) _input_job_name, _io_type, _name = m.groups() _input_job = jobs_dict[_input_job_name] - if isinstance(_input_job, (Command, Parallel, Sweep)): - # If source is Command, get type from io builder + if isinstance(_input_job, BaseNode): + # If source is base node, get type from io builder _source = _input_job[_io_type][_name] try: return _source.type diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_load_component.py 
b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_load_component.py
index f9edd3088447..065899cdfd1d 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_load_component.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_load_component.py
@@ -1,12 +1,151 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from typing import Callable, Mapping, Union
+from typing import Callable, Mapping, Union, Dict, Any, Optional
 
+from azure.ai.ml import Output
+from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory
 from azure.ai.ml.dsl._component_func import to_component_func
 from azure.ai.ml.dsl._overrides_definition import OverrideDefinition
+from azure.ai.ml.entities._builders import BaseNode, Sweep
+from azure.ai.ml.constants import (
+    NodeType,
+    BASE_PATH_CONTEXT_KEY,
+    CommonYamlFields,
+)
 from azure.ai.ml.entities._builders import Command, Parallel
-from azure.ai.ml.entities import Component, CommandComponent, ParallelComponent
+from azure.ai.ml.entities import Component
+from azure.ai.ml.entities._job.automl.automl_job import AutoMLJob
+
+
+class _PipelineNodeFactory:
+    """A class to create pipeline node instances from yaml dicts or rest objects without hard-coded type checks."""
+
+    def __init__(self):
+        self._create_instance_funcs = {}
+        self._load_from_rest_object_funcs = {}
+
+        self.register_type(
+            _type=NodeType.COMMAND,
+            create_instance_func=lambda: Command.__new__(Command),
+            load_from_rest_object_func=Command._from_rest_object,
+        )
+        self.register_type(
+            _type=NodeType.PARALLEL,
+            create_instance_func=lambda: Parallel.__new__(Parallel),
+            load_from_rest_object_func=Parallel._from_rest_object,
+        )
+        self.register_type(
+            _type=NodeType.SWEEP,
+            create_instance_func=None,
+            load_from_rest_object_func=Sweep._from_rest_object,
+        )
+        self.register_type(
+            _type=NodeType.AUTOML,
+            create_instance_func=None,
+            load_from_rest_object_func=self._automl_from_rest_object,
+        )
+
+    @classmethod
+    def _get_func(cls, _type: str, funcs):
+        _type = _type.lower()
+        if _type not in funcs:
+            msg = f"Unsupported component type: {_type}."
+            raise ValidationException(
+                message=msg,
+                target=ErrorTarget.COMPONENT,
+                no_personal_data_message=msg,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+        return funcs[_type]
+
+    def get_create_instance_func(self, _type: str) -> Callable[..., BaseNode]:
+        """Get the function to create a new instance of the node.
+
+        :param _type: The type of the node.
+        :type _type: str
+        """
+        return self._get_func(_type, self._create_instance_funcs)
+
+    def get_load_from_rest_object_func(self, _type: str) -> Callable[[Any], BaseNode]:
+        """Get the function to load a node from a rest object.
+
+        :param _type: The type of the node.
+        :type _type: str
+        """
+        return self._get_func(_type, self._load_from_rest_object_funcs)
+
+    def register_type(
+        self,
+        _type: str,
+        create_instance_func: Optional[Callable[..., Union[BaseNode, AutoMLJob]]],
+        load_from_rest_object_func: Optional[Callable[[Any], Union[BaseNode, AutoMLJob]]],
+    ):
+        """Register a type of node.
+
+        :param _type: The type of the node.
+        :type _type: str
+        :param create_instance_func: A function to create a new instance of the node.
+        :type create_instance_func: Callable[..., BaseNode]
+        :param load_from_rest_object_func: A function to load a node from a rest object.
+        :type load_from_rest_object_func: Callable[[Any], BaseNode]
+        """
+        if create_instance_func is not None:
+            self._create_instance_funcs[_type.lower()] = create_instance_func
+        if load_from_rest_object_func is not None:
+            self._load_from_rest_object_funcs[_type.lower()] = load_from_rest_object_func
+
+    def load_from_dict(self, *, data: dict, _type: str = None) -> Union[BaseNode, AutoMLJob]:
+        """Load a node from a dict.
+
+        :param data: A dict containing the node's data.
+        :type data: dict
+        :param _type: The type of the node. If not specified, it will be inferred from the data.
+        :type _type: str
+        """
+        if _type is None:
+            _type = data[CommonYamlFields.TYPE] if CommonYamlFields.TYPE in data else NodeType.COMMAND
+        else:
+            data[CommonYamlFields.TYPE] = _type
+
+        _type = _type.lower()
+        new_instance = self.get_create_instance_func(_type)()
+        new_instance.__init__(**data)
+        return new_instance
+
+    def load_from_rest_object(self, *, obj: dict, _type: str = None) -> Union[BaseNode, AutoMLJob]:
+        """Load a node from a rest object.
+
+        :param obj: A rest object containing the node's data.
+        :type obj: dict
+        :param _type: The type of the node. If not specified, it will be inferred from the data.
+        :type _type: str
+        """
+        if _type is None:
+            _type = obj[CommonYamlFields.TYPE] if CommonYamlFields.TYPE in obj else NodeType.COMMAND
+        else:
+            obj[CommonYamlFields.TYPE] = _type
+        _type = _type.lower()
+
+        return self.get_load_from_rest_object_func(_type)(obj)
+
+    @classmethod
+    def _automl_from_rest_object(cls, node: Dict) -> AutoMLJob:
+        # rest dict outputs -> Output objects
+        outputs = AutoMLJob._from_rest_outputs(node.get("outputs"))
+        # Output objects -> yaml dict outputs
+        parsed_outputs = {}
+        for key, val in outputs.items():
+            if isinstance(val, Output):
+                val = val._to_dict()
+            parsed_outputs[key] = val
+        node["outputs"] = parsed_outputs
+        return AutoMLJob._load_from_dict(
+            node,
+            context={BASE_PATH_CONTEXT_KEY: "./"},
+            additional_message="Failed to load automl task from backend.",
+            inside_pipeline=True,
+        )
 
 
 def _generate_component_function(
@@ -14,11 +153,12 @@ ) -> Callable[..., Union[Command, Parallel]]:
     # Generate a function which returns a component node.
def create_component_func(**kwargs): - if isinstance(component_entity, CommandComponent): - return Command(component=component_entity, inputs=kwargs, _from_component_func=True) - elif isinstance(component_entity, ParallelComponent): - return Parallel(component=component_entity, inputs=kwargs, _from_component_func=True) - else: - raise NotImplementedError(f"Not supported component type: {type(component_entity)}.") + return pipeline_node_factory.load_from_dict( + data=dict(component=component_entity, inputs=kwargs, _from_component_func=True), + _type=component_entity.type, + ) return to_component_func(component_entity, create_component_func) + + +pipeline_node_factory = _PipelineNodeFactory() diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py index 82729d6b7f5a..f0d7ea29c470 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py @@ -18,9 +18,8 @@ OutputsAttrDict, InputsAttrDict, ) -from azure.ai.ml.entities._builders import Sweep, Command, BaseNode +from azure.ai.ml.entities._builders import Command, BaseNode from azure.ai.ml._utils.utils import ( - snake_to_camel, camel_to_snake, transform_dict_keys, is_data_binding_expression, @@ -28,13 +27,12 @@ ) from azure.ai.ml._restclient.v2022_02_01_preview.models import ( JobBaseData, - JobOutput as RestJobOutput, PipelineJob as RestPipelineJob, ManagedIdentity, UserIdentity, AmlToken, ) -from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, ComponentSource, NodeType, AZUREML_PRIVATE_FEATURES_ENV_VAR +from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, ComponentSource, AZUREML_PRIVATE_FEATURES_ENV_VAR from azure.ai.ml.entities._inputs_outputs import Input, Output from azure.ai.ml.entities._job.pipeline.pipeline_job_settings import PipelineJobSettings from azure.ai.ml.entities._job.job import Job @@ -151,7 +149,7 @@ def __init__( if isinstance(job_instance, BaseNode): job_instance._set_base_path(self.base_path) - if isinstance(job_instance, (Command, Sweep, Parallel)): + if isinstance(job_instance, BaseNode): job_instance._validate_inputs() binding_inputs = job_instance._build_inputs() if isinstance(job_instance.component, Component): @@ -253,7 +251,7 @@ def _customized_validate(self) -> ValidationResult: """Validate that all provided inputs and parameters are valid for current pipeline and components in it.""" validation_result = self._create_empty_validation_result() for node_name, node in self.jobs.items(): - if isinstance(node, (Command, Sweep, Parallel)): + if isinstance(node, BaseNode): validation_result.merge_with(node._validate(), "jobs.{}".format(node_name)) elif isinstance(node, AutoMLJob): pass @@ -389,28 +387,7 @@ def _load_from_rest(cls, obj: JobBaseData) -> "PipelineJob": if properties.jobs: sub_nodes = {} for node_name, node in properties.jobs.items(): - if "type" in node and node["type"] == NodeType.SWEEP: - sub_nodes[node_name] = Sweep._from_rest_object(node) - elif "type" in node and node["type"] == NodeType.AUTOML: - # rest dict outputs -> Output objects - outputs = AutoMLJob._from_rest_outputs(node.get("outputs")) - # Output objects -> yaml dict outputs - parsed_outputs = {} - for key, val in outputs.items(): - if isinstance(val, Output): - val = val._to_dict() - parsed_outputs[key] = val - node["outputs"] = parsed_outputs - sub_nodes[node_name] = AutoMLJob._load_from_dict( - node, - context={BASE_PATH_CONTEXT_KEY: 
"./"}, - additional_message="Failed to load automl task from backend.", - inside_pipeline=True, - ) - elif "type" in node and node["type"] == NodeType.PARALLEL: - sub_nodes[node_name] = Parallel._from_rest_object(node) - else: - sub_nodes[node_name] = Command._from_rest_object(node) + sub_nodes[node_name] = BaseNode._from_rest_object(node) else: sub_nodes = None # backend may still store Camel settings, eg: DefaultDatastore, translate them to snake when load back diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_schedule/schedule.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_schedule/schedule.py index 8679ce656c8c..4f0b1c02f118 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_schedule/schedule.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_schedule/schedule.py @@ -74,8 +74,11 @@ class CronSchedule(RestCronSchedule, Schedule): :param start_time: Specifies start time of schedule in ISO 8601 format. If no time zone offset is specified in the start_time, it will default to UTC (+0:00) :type start_time: Union[str, datetime] + :param end_time: Specifies end time of schedule in ISO 8601 format. If no time zone + offset is specified in the end_time, it will default to UTC (+0:00) + :type end_time: Union[str, datetime] + :param time_zone: Time zone in which the schedule runs. This does not apply to the start_time and end_time. :type time_zone: Optional[TimeZone] - :param time_zone: Time zone in which the schedule runs. This does not apply to the start_time. :param expression: Specifies cron expression of schedule. The expression should follow NCronTab format. :type expression: str @@ -87,14 +90,21 @@ def __init__( expression: str, status: str = None, start_time: str = None, + end_time: str = None, time_zone: TimeZone = TimeZone.UTC, ): - super().__init__(expression=expression, schedule_status=status, start_time=start_time, time_zone=time_zone) + super().__init__( + expression=expression, schedule_status=status, start_time=start_time, end_time=end_time, time_zone=time_zone + ) @classmethod def _from_rest_object(cls, obj: RestCronSchedule) -> "CronSchedule": return cls( - expression=obj.expression, status=obj.schedule_status, start_time=obj.start_time, time_zone=obj.time_zone + expression=obj.expression, + status=obj.schedule_status, + start_time=obj.start_time, + end_time=obj.end_time, + time_zone=obj.time_zone, ) @@ -107,9 +117,12 @@ class RecurrenceSchedule(RestRecurrenceSchedule, Schedule): :param start_time: Specifies start time of schedule in ISO 8601 format. If no time zone offset is specified in the start_time, it will default to UTC (+0:00) :type start_time: Union[str, datetime] - :param time_zone: Time zone in which the schedule runs. This does not apply to the start_time. + :param end_time: Specifies end time of schedule in ISO 8601 format. If no time zone + offset is specified in the end_time, it will default to UTC (+0:00) + :type end_time: Union[str, datetime] + :param time_zone: Time zone in which the schedule runs. This does not apply to the start_time and end_time. :type time_zone: Optional[TimeZone] - :param frequency: Specifies frequency with with which to trigger schedule. + :param frequency: Specifies frequency which to trigger schedule with. Possible values include: "minute", "hour", "day", "week", "month". :type frequency: str :param interval: Specifies schedule interval in conjunction with frequency. 
@@ -128,6 +141,7 @@ def __init__( pattern: RecurrencePattern = None, status: str = None, start_time: str = None, + end_time: str = None, time_zone: TimeZone = TimeZone.UTC, ): super().__init__( @@ -136,6 +150,7 @@ def __init__( pattern=pattern, schedule_status=status, start_time=start_time, + end_time=end_time, time_zone=time_zone, ) @@ -147,5 +162,6 @@ def _from_rest_object(cls, obj: RestRecurrenceSchedule) -> "RecurrenceSchedule": pattern=RecurrencePattern._from_rest_object(obj.pattern) if obj.pattern else None, status=obj.schedule_status, start_time=obj.start_time, + end_time=obj.end_time, time_zone=obj.time_zone, ) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py index 9a2577b4a38d..aab175b32b2a 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py @@ -16,7 +16,7 @@ from azure.ai.ml._schema import PathAwareSchema from azure.ai.ml.constants import OperationStatus, BASE_PATH_CONTEXT_KEY from azure.ai.ml.entities._job.pipeline._attr_dict import try_get_non_arbitrary_attr_for_potential_attr_dict - +from azure.ai.ml.entities._util import convert_ordered_dict_to_dict module_logger = logging.getLogger(__name__) @@ -300,7 +300,7 @@ def _schema_for_validation(self) -> typing.Union[PathAwareSchema, Schema]: def _dump_for_validation(self) -> typing.Dict: """Convert the resource to a dictionary.""" - return self._schema_for_validation.dump(self) + return convert_ordered_dict_to_dict(self._schema_for_validation.dump(self)) def _validate(self, raise_error=False) -> ValidationResult: """Validate the resource. If raise_error is True, raise ValidationError if validation fails and log warnings if diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py index eb6b1a51a29a..236a31581f2e 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py @@ -325,7 +325,7 @@ def _get_latest_version(self, component_name: str) -> Component: def _upload_dependencies(self, component: Component) -> None: get_arm_id_and_fill_back = OperationOrchestrator(self._all_operations, self._operation_scope).get_asset_arm_id - if isinstance(component, (CommandComponent, ParallelComponent)): + if isinstance(component, Component): if component.code is not None: if isinstance(component.code, Code) or is_registry_id_for_resource(component.code): component.code = get_arm_id_and_fill_back(component.code, azureml_type=AzureMLResourceType.CODE) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py index 322488e331eb..91024a714bd2 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py @@ -10,7 +10,7 @@ from azure.ai.ml.entities._data.mltable_metadata import MLTableMetadata from azure.core.paging import ItemPaged -from azure.ai.ml.constants import AssetTypes, MLTABLE_SCHEMA_URL_FALLBACK +from azure.ai.ml.constants import AssetTypes, MLTABLE_METADATA_SCHEMA_URL_FALLBACK from azure.ai.ml.operations import DatastoreOperations from azure.ai.ml._restclient.v2022_05_01 import ( AzureMachineLearningWorkspaces as ServiceClient052022, @@ -31,7 +31,7 @@ _archive_or_restore, ) from azure.ai.ml._utils._data_utils import ( - download_mltable_schema, + 
download_mltable_metadata_schema,
     read_local_mltable_metadata_contents,
     read_remote_mltable_metadata_contents,
     validate_mltable_metadata,
@@ -243,13 +243,15 @@ def _validate(self, data: Data) -> Union[List[str], None]:
         self._assert_local_path_matches_asset_type(abs_path, asset_type)
 
     def _try_get_mltable_metadata_jsonschema(
-        self, mltable_schema_url: str = MLTABLE_SCHEMA_URL_FALLBACK
+        self, mltable_schema_url: str = MLTABLE_METADATA_SCHEMA_URL_FALLBACK
     ) -> Union[Dict, None]:
         try:
-            return download_mltable_schema(mltable_schema_url)
+            return download_mltable_metadata_schema(mltable_schema_url)
         except Exception:
             logger.info(
-                'Failed to download MLTable jsonschema from "%s", skipping validation', mltable_schema_url, exc_info=1
+                'Failed to download MLTable metadata jsonschema from "%s", skipping validation',
+                mltable_schema_url,
+                exc_info=1,
             )
             return None
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py
index 0602ab57557b..ddccce64650c 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py
@@ -69,6 +69,9 @@ def create_or_update(self, environment: Environment) -> Environment:
 
         sas_uri = None
 
+        if not environment.version and self._registry_name:
+            raise Exception("Environment version is required for registry")
+
         if self._registry_name:
             sas_uri = get_sas_uri_for_registry_asset(
                 service_client=self._service_client,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
index 0a2c0127ea0e..c32cb12615bc 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
@@ -33,6 +33,7 @@
     PipelineConstants,
     SWEEP_JOB_BEST_CHILD_RUN_ID_PROPERTY_NAME,
     COMMON_RUNTIME_ENV_VAR,
+    GIT_PATH_PREFIX,
 )
 
 from azure.ai.ml.entities._job.job_errors import JobParsingError, PipelineChildJobError
@@ -112,6 +113,7 @@
 from ._dataset_dataplane_operations import DatasetDataplaneOperations
 from ._model_dataplane_operations import ModelDataplaneOperations
 from ._compute_operations import ComputeOperations
+from azure.ai.ml._utils.utils import is_private_preview_enabled
 
 if TYPE_CHECKING:
     from azure.ai.ml.operations import DatastoreOperations
@@ -381,6 +383,19 @@ def create_or_update(
             if job.compute == LOCAL_COMPUTE_TARGET:
                 job.environment_variables[COMMON_RUNTIME_ENV_VAR] = "true"
 
+        # If the job has a string code value that is a Git path, it is only valid when private
+        # preview features are enabled; otherwise we should throw a ValidationException saying
+        # that the code value is not a valid code value
+        if (
+            hasattr(job, "code")
+            and job.code is not None
+            and isinstance(job.code, str)
+            and job.code.startswith(GIT_PATH_PREFIX)
+            and not is_private_preview_enabled()
+        ):
+            msg = f"Invalid code value: {job.code}. Git paths are not supported."
+ raise ValidationException(message=msg, no_personal_data_message=msg) + self._validate(job, raise_on_failure=True) # Create all dependent resources @@ -987,7 +1002,7 @@ def _resolve_arm_id_for_pipeline_job(self, pipeline_job: "PipelineJob", resolver for key, job_instance in pipeline_job.jobs.items(): if isinstance(job_instance, AutoMLJob): self._resolve_arm_id_for_automl_job(job_instance, resolver, inside_pipeline=True) - elif isinstance(job_instance, (Command, Sweep, Parallel)): + elif isinstance(job_instance, BaseNode): # Get the default for the specific job type if ( isinstance(job_instance.component, (CommandComponent, ParallelComponent)) @@ -1027,7 +1042,6 @@ def _append_tid_to_studio_url(self, job: Job) -> None: studio_endpoint = job.services.get("Studio", None) studio_url = studio_endpoint.endpoint cloud_details = _get_cloud_details() - cloud_details = _get_cloud_details() default_scopes = resource_to_scopes(cloud_details.get(ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT)) module_logger.debug(f"default_scopes used: `{default_scopes}`\n") # Extract the tenant id from the credential using PyJWT @@ -1043,7 +1057,7 @@ def _append_tid_to_studio_url(self, job: Job) -> None: def _set_defaults_to_component(self, component: Union[str, Component], settings: PipelineJobSettings): """Set default code&environment to component if not specified.""" - if isinstance(component, (CommandComponent, ParallelComponent)): + if isinstance(component, Component): # TODO: do we have no place to set default code & environment? pass diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py index e81c3834bf36..6a0fb7beffa5 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py @@ -69,6 +69,8 @@ def __init__( @monitor_with_activity(logger, "Model.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update(self, model: Model) -> Model: # TODO: Are we going to implement job_name? name = model.name + if not model.version and self._registry_name: + raise Exception("Model version is required for registry") version = model.version sas_uri = None diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py index 72e24991a55a..735519ff381f 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py @@ -56,7 +56,6 @@ def __init__( ): if "app_insights_handler" in kwargs: logger.addHandler(kwargs.pop("app_insights_handler")) - kwargs.pop("base_url", None) self._subscription_id = operation_scope.subscription_id self._resource_group_name = operation_scope.resource_group_name diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed b/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed deleted file mode 100644 index e5aff4f83af8..000000000000 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt b/sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt new file mode 100644 index 000000000000..8f2d575de2dc --- /dev/null +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt @@ -0,0 +1,24 @@ +# NOTE: To avoid breaking changes in a major version bump, all dependencies should pin an upper bound if possible. 
+
+pyyaml<7.0.0,>=5.1.0
+azure-identity
+msrest>=0.6.18
+azure-core<2.0.0,>=1.8.0, !=1.22.0
+azure-mgmt-core<2.0.0,>=1.2.0
+marshmallow<4.0.0,>=3.5
+jsonschema<5.0.0,>=4.0.0
+tqdm<=4.63.0
+# Used for PR 718512
+colorama<=0.4.4
+pyjwt<3.0.0
+azure-storage-blob<13.0.0,>=12.10.0
+azure-storage-file-share<13.0.0
+azure-storage-file-datalake<=12.6.0
+pydash<=4.9.0
+pathspec==0.9.*
+isodate
+# Used for local endpoint story.
+docker
+azure-common<2.0.0,>=1.1
+typing-extensions>=4.0.1
+applicationinsights<=0.11.10
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/template_code.py b/sdk/ml/azure-ai-ml/azure/ai/ml/template_code.py
deleted file mode 100644
index 9eab23453934..000000000000
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/template_code.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#--------------------------------------------------------------------------
-
-def template_main():
-    print("Package code.")
-    return True

From df3f3e08a85e5ea955f9e1a067b7d28dcd8355cf Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Fri, 1 Jul 2022 13:53:44 -0700
Subject: [PATCH 02/19] Updated changelog for azure-ai-ml

---
 sdk/ml/azure-ai-ml/CHANGELOG.md | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/sdk/ml/azure-ai-ml/CHANGELOG.md b/sdk/ml/azure-ai-ml/CHANGELOG.md
index 918743294eaa..86ba2e85c293 100644
--- a/sdk/ml/azure-ai-ml/CHANGELOG.md
+++ b/sdk/ml/azure-ai-ml/CHANGELOG.md
@@ -1,15 +1,32 @@
-## Release History
+# Release History
 
-### 0.1.0b4 (unreleased)
+## 2.6.0 (2022-07-06)
 
-#### Features Added
+### Features Added
 
-#### Breaking Changes
+- Allow Input/Output objects to be used by CommandComponent.
+- Added MoonCake cloud support.
+- Unified inputs/outputs building and validation logic in BaseNode.
+- Allow Git repo URLs to be used as code for jobs and components.
+- Updated AutoML YAML schema to use InputSchema.
+- Added end_time to job schedule.
+- MIR and pipeline job now support registry assets.
 
-#### Bugs Fixed
+### Breaking Changes
 
-#### Other Changes
+### Bugs Fixed
 
+- Have mldesigner use argparse to parse incoming args.
+- Bumped pyjwt version to <3.0.0.
+- Reverted "upload support for symlinks".
+- Error message improvement when a YAML UnionField fails to match.
+- Reintroduced support for symlinks when uploading.
+- Hard-coded registry base URL to eastus region to support preview.
+
+## 0.1.0b4 (unreleased)
+
+## 0.1.0b3 (2022-05-24)
+
+### Features Added
-### 0.1.0b3 (2022-05-24)
 
 - First preview.
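# --- illustrative sketch (not part of the patch) ----------------------------
# A hedged sketch of the first 2.6.0 feature above, "Allow Input/Output
# objects to be used by CommandComponent": typed Input/Output objects are
# passed straight to the component instead of raw dicts. The component name,
# command, and environment reference below are hypothetical placeholders.
from azure.ai.ml import Input, Output
from azure.ai.ml.entities import CommandComponent

train = CommandComponent(
    name="train_model",  # hypothetical component name
    version="1",
    command="python train.py --data ${{inputs.training_data}} "
            "--epochs ${{inputs.epochs}} --out ${{outputs.model}}",
    environment="azureml:my-training-env:1",  # assumed environment reference
    inputs={
        "training_data": Input(type="uri_folder"),
        "epochs": Input(type="integer", default=10, min=1),
    },
    outputs={"model": Output(type="uri_folder")},
)
# -----------------------------------------------------------------------------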
From e2d206caad4028180fb8ec006b483433eaf1cf8b Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Tue, 5 Jul 2022 09:40:55 -0700 Subject: [PATCH 03/19] Applied hotfixes to pass pipelines --- .../arm_deployment_executor.py | 13 +- .../ai/ml/_artifacts/_artifact_utilities.py | 5 +- .../azure/ai/ml/_azure_environments.py | 114 +++++++++--- sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py | 77 ++++++-- .../ai/ml/_schema/component/component.py | 5 +- .../azure/ai/ml/_schema/core/resource.py | 16 +- .../azure/ai/ml/_schema/core/schema.py | 11 +- .../azure/ai/ml/_schema/job/base_job.py | 5 +- .../azure/ai/ml/_utils/_storage_utils.py | 5 +- .../azure/ai/ml/_utils/_workspace_utils.py | 3 + sdk/ml/azure-ai-ml/azure/ai/ml/constants.py | 15 +- .../azure/ai/ml/dsl/_component_func.py | 4 +- .../azure/ai/ml/dsl/_load_import.py | 97 +--------- .../ai/ml/entities/_builders/base_node.py | 44 +++-- .../azure/ai/ml/entities/_builders/command.py | 123 ++++++++++++- .../ai/ml/entities/_builders/command_func.py | 23 +-- .../_component/_pipeline_component.py | 4 +- .../entities/_component/command_component.py | 21 +-- .../ai/ml/entities/_component/component.py | 55 ++++-- .../entities/_component/component_factory.py | 11 +- .../ai/ml/entities/_component/input_output.py | 101 ----------- .../entities/_component/parallel_component.py | 13 +- .../azure/ai/ml/entities/_component/utils.py | 6 +- .../ai/ml/entities/_datastore/_constants.py | 2 - .../ml/entities/_datastore/azure_storage.py | 9 +- .../ai/ml/entities/_datastore/credentials.py | 4 +- .../azure/ai/ml/entities/_inputs_outputs.py | 61 +++++-- .../azure/ai/ml/entities/_job/command_job.py | 8 - .../azure/ai/ml/entities/_job/job.py | 13 +- .../_job/pipeline/_component_translatable.py | 29 ++- .../azure/ai/ml/entities/_job/pipeline/_io.py | 33 ++-- .../ml/entities/_job/pipeline/pipeline_job.py | 22 ++- .../ai/ml/entities/_job/to_rest_functions.py | 52 ++++++ .../azure/ai/ml/entities/_resource.py | 9 +- .../azure/ai/ml/entities/_validation.py | 165 ++++++++++++++---- .../operations/_batch_endpoint_operations.py | 5 +- .../ai/ml/operations/_component_operations.py | 5 +- .../azure/ai/ml/operations/_job_operations.py | 142 ++++++++------- .../azure-ai-ml/azure/ai/ml/requirements.txt | 2 + 39 files changed, 813 insertions(+), 519 deletions(-) delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py create mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/to_rest_functions.py diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_arm_deployments/arm_deployment_executor.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_arm_deployments/arm_deployment_executor.py index 074421d32030..d7064d56428d 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_arm_deployments/arm_deployment_executor.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_arm_deployments/arm_deployment_executor.py @@ -3,7 +3,12 @@ # --------------------------------------------------------- from typing import Dict, Any, Optional -from azure.ai.ml._azure_environments import ENDPOINT_URLS, _get_cloud_details, resource_to_scopes +from azure.ai.ml._azure_environments import ( + _get_cloud_details, + _get_base_url_from_metadata, + _resource_to_scopes, + _get_azure_portal_id_from_metadata, +) from azure.core.polling import LROPoller from azure.ai.ml._arm_deployments.arm_helper import deployment_message_mapping from azure.ai.ml._utils._arm_id_utils import get_arm_id_object_from_id @@ -39,8 +44,8 @@ def __init__( self._resource_group_name = resource_group_name self._deployment_name = deployment_name self._cloud = 
_get_cloud_details() - management_hostname = self._cloud.get(ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT).strip("/") - credential_scopes = resource_to_scopes(management_hostname) + management_hostname = _get_base_url_from_metadata() + credential_scopes = _resource_to_scopes(management_hostname) kwargs.pop("base_url", None) if credential_scopes is not None: kwargs["credential_scopes"] = credential_scopes @@ -82,7 +87,7 @@ def deploy_resource( ) module_logger.info( ENDPOINT_DEPLOYMENT_START_MSG.format( - self._cloud.get(ENDPOINT_URLS.AZURE_PORTAL_ENDPOINT), + _get_azure_portal_id_from_metadata(), self._subscription_id, self._resource_group_name, self._deployment_name, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py index 3fb1f84c4805..9b961f9218d4 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py @@ -8,7 +8,7 @@ from pathlib import Path from datetime import datetime, timedelta import uuid -from azure.ai.ml._azure_environments import ENDPOINT_URLS, _get_cloud_details +from azure.ai.ml._azure_environments import _get_storage_endpoint_from_metadata from azure.storage.blob import generate_blob_sas, BlobSasPermissions from azure.storage.filedatalake import generate_file_sas, FileSasPermissions @@ -68,8 +68,7 @@ def get_datastore_info(operations: DatastoreOperations, name: str) -> Dict[str, else: datastore = operations.get_default(include_secrets=True) - cloud_details = _get_cloud_details() - storage_endpoint = cloud_details.get(ENDPOINT_URLS.STORAGE_ENDPOINT) + storage_endpoint = _get_storage_endpoint_from_metadata() credentials = datastore.credentials datastore_info["storage_type"] = datastore.type datastore_info["storage_account"] = datastore.account_name diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_azure_environments.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_azure_environments.py index d8db1bf7f763..7820d4cf171c 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_azure_environments.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_azure_environments.py @@ -6,7 +6,9 @@ Metadata to interact with different Azure clouds """ +from typing import Dict from azure.ai.ml.constants import AZUREML_CLOUD_ENV_NAME +from azure.ai.ml._utils.utils import _get_mfe_url_override import os import logging @@ -17,9 +19,6 @@ class AZURE_ENVIRONMENTS: ENV_DEFAULT = "AzureCloud" ENV_US_GOVERNMENT = "AzureUSGovernment" ENV_CHINA = "AzureChinaCloud" - ENV_GERMAN = "AzureGermanCloud" - ENV_USNAT = "USNat" - ENV_USSEC = "USSec" class ENDPOINT_URLS: # pylint: disable=too-few-public-methods,old-style-class,no-init @@ -52,27 +51,6 @@ class ENDPOINT_URLS: # pylint: disable=too-few-public-methods,old-style-class,n ENDPOINT_URLS.AML_RESOURCE_ID: "https://ml.azure.us/", ENDPOINT_URLS.STORAGE_ENDPOINT: "core.usgovcloudapi.net", }, - AZURE_ENVIRONMENTS.ENV_GERMAN: { - ENDPOINT_URLS.AZURE_PORTAL_ENDPOINT: "https://portal.azure.de/", - ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT: "https://management.microsoftazure.de/", - ENDPOINT_URLS.ACTIVE_DIRECTORY_ENDPOINT: "https://login.microsoftonline.de/", - ENDPOINT_URLS.AML_RESOURCE_ID: "https://ml.azure.de", - ENDPOINT_URLS.STORAGE_ENDPOINT: "core.cloudapi.de", - }, - AZURE_ENVIRONMENTS.ENV_USNAT: { - ENDPOINT_URLS.AZURE_PORTAL_ENDPOINT: "https://portal.azure.eaglex.ic.gov/", - ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT: "https://management.azure.eaglex.ic.gov/", - ENDPOINT_URLS.ACTIVE_DIRECTORY_ENDPOINT: 
"https://login.microsoftonline.eaglex.ic.gov/", - ENDPOINT_URLS.AML_RESOURCE_ID: "https://ml.azure.eaglex.ic.gov", - ENDPOINT_URLS.STORAGE_ENDPOINT: "core.eaglex.ic.gov", - }, - AZURE_ENVIRONMENTS.ENV_USSEC: { - ENDPOINT_URLS.AZURE_PORTAL_ENDPOINT: "https://portal.azure.scloud/", - ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT: "https://management.azure.microsoft.scloud/", - ENDPOINT_URLS.ACTIVE_DIRECTORY_ENDPOINT: "https://login.microsoftonline.microsoft.scloud/", - ENDPOINT_URLS.AML_RESOURCE_ID: "https://ml.azure.microsoft.scloud", - ENDPOINT_URLS.STORAGE_ENDPOINT: "core.microsoft.scloud", - }, } @@ -81,7 +59,12 @@ def _get_default_cloud_name(): return os.getenv(AZUREML_CLOUD_ENV_NAME, AZURE_ENVIRONMENTS.ENV_DEFAULT) -def _get_cloud_details(cloud=None): +def _get_cloud_details(cloud: str = AZURE_ENVIRONMENTS.ENV_DEFAULT): + """Returns a Cloud endpoints object for the specified Azure Cloud + + :param cloud: cloud name + :return: azure environment endpoint. + """ if cloud is None: module_logger.debug("Using the default cloud configuration: '%s'.", AZURE_ENVIRONMENTS.ENV_DEFAULT) cloud = _get_default_cloud_name() @@ -93,7 +76,7 @@ def _get_cloud_details(cloud=None): return azure_environment -def _set_cloud(cloud=None): +def _set_cloud(cloud: str = AZURE_ENVIRONMENTS.ENV_DEFAULT): if cloud is not None: if cloud not in _environments: raise Exception('Unknown cloud environment supplied: "{0}".'.format(cloud)) @@ -102,7 +85,84 @@ def _set_cloud(cloud=None): os.environ[AZUREML_CLOUD_ENV_NAME] = cloud -def resource_to_scopes(resource): +def _get_base_url_from_metadata(cloud_name: str = None, is_local_mfe: bool = False): + """Retrieve the base url for a cloud from the metadata in SDK. + + :param cloud_name: cloud name + :return: base url for a cloud + """ + base_url = None + if is_local_mfe: + base_url = _get_mfe_url_override() + + if base_url is None: + cloud_details = _get_cloud_details(cloud_name) + base_url = cloud_details.get(ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT).strip("/") + return base_url + + +def _get_aml_resource_id_from_metadata(cloud_name: str = None): + """Retrieve the aml_resource_id for a cloud from the metadata in SDK. + + :param cloud_name: cloud name + :return: aml_resource_id for a cloud + """ + cloud_details = _get_cloud_details(cloud_name) + aml_resource_id = cloud_details.get(ENDPOINT_URLS.AML_RESOURCE_ID).strip("/") + return aml_resource_id + + +def _get_active_directory_url_from_metadata(cloud_name: str = None): + """Retrieve the active_directory_url for a cloud from the metadata in SDK. + + :param cloud_name: cloud name + :return: active_directory for a cloud + """ + cloud_details = _get_cloud_details(cloud_name) + active_directory_url = cloud_details.get(ENDPOINT_URLS.ACTIVE_DIRECTORY_ENDPOINT).strip("/") + return active_directory_url + + +def _get_storage_endpoint_from_metadata(cloud_name: str = None): + """Retrieve the storage_endpoint for a cloud from the metadata in SDK. + + :param cloud_name: cloud name + :return: storage_endpoint for a cloud + """ + cloud_details = _get_cloud_details(cloud_name) + storage_endpoint = cloud_details.get(ENDPOINT_URLS.STORAGE_ENDPOINT) + return storage_endpoint + + +def _get_azure_portal_id_from_metadata(cloud_name: str = None): + """Retrieve the azure_portal_id for a cloud from the metadata in SDK. 
+
+    :param cloud_name: cloud name
+    :return: azure_portal_id for a cloud
+    """
+    cloud_details = _get_cloud_details(cloud_name)
+    azure_portal_id = cloud_details.get(ENDPOINT_URLS.AZURE_PORTAL_ENDPOINT)
+    return azure_portal_id
+
+
+def _get_cloud_information_from_metadata(cloud_name: str = None, **kwargs) -> Dict:
+    """Retrieve the cloud information from the metadata in SDK.
+
+    :param cloud_name: cloud name
+    :return: A dictionary of additional configuration parameters required for passing in cloud information.
+    """
+    cloud_details = _get_cloud_details(cloud_name)
+    credential_scopes = _resource_to_scopes(cloud_details.get(ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT).strip("/"))
+
+    # Update the kwargs with the cloud information
+    client_kwargs = {"cloud": cloud_name}
+    if credential_scopes is not None:
+        client_kwargs["credential_scopes"] = credential_scopes
+    kwargs.update(client_kwargs)
+    return kwargs
+
+
+def _resource_to_scopes(resource):
     """Convert the resource ID to scopes by appending the /.default suffix and return a list.
     For example: 'https://management.core.windows.net/' -> ['https://management.core.windows.net//.default']
 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py
index 1ac1b2b35d4a..ac5fff0398e8 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py
@@ -8,7 +8,12 @@
 from os import PathLike
 from pathlib import Path
 from typing import Optional, Tuple, Union
-from azure.ai.ml._azure_environments import ENDPOINT_URLS, _get_cloud_details, _get_default_cloud_name, _set_cloud
+from azure.ai.ml._azure_environments import (
+    _get_default_cloud_name,
+    _set_cloud,
+    _get_cloud_information_from_metadata,
+    _get_base_url_from_metadata,
+)
 from azure.identity import ChainedTokenCredential
 from azure.core.polling import LROPoller
 
@@ -95,18 +100,60 @@ def __init__(
         :type resource_group_name: str
         :param workspace_name: Workspace to use in the client, optional for non workspace dependent operations, defaults to None
         :type workspace_name: str, optional
+        :param kwargs: A dictionary of additional configuration parameters, e.g. kwargs = {"cloud": "AzureUSGovernment"}
+        :type kwargs: dict
+
+        .. note::
+
+            The cloud parameter in kwargs is what enables MLClient
+            to work with non-standard Azure clouds,
+            e.g. AzureUSGovernment and AzureChinaCloud.
+
+        The following example shows how to get a list of workspaces using MLClient.
+
+        .. code-block:: python
+
+            from azure.identity import DefaultAzureCredential, AzureAuthorityHosts
+            from azure.ai.ml import MLClient
+            from azure.ai.ml.entities import Workspace
+
+            # Enter details of your subscription
+            subscription_id = "AZURE_SUBSCRIPTION_ID"
+            resource_group = "RESOURCE_GROUP_NAME"
+
+            # When using sovereign domains (that is, any cloud other than AZURE_PUBLIC_CLOUD),
+            # you must use an authority with DefaultAzureCredential. Default authority value: AzureAuthorityHosts.AZURE_PUBLIC_CLOUD
+            # Expected values for authority for sovereign clouds: AzureAuthorityHosts.AZURE_CHINA or AzureAuthorityHosts.AZURE_GOVERNMENT
+            credential = DefaultAzureCredential(authority=AzureAuthorityHosts.AZURE_CHINA)
+
+            # When using sovereign domains (that is, any cloud other than AZURE_PUBLIC_CLOUD),
+            # you must pass in the cloud name in kwargs. 
Default cloud is AzureCloud.
+            kwargs = {"cloud": "AzureChinaCloud"}
+            # get a handle to the subscription
+            ml_client = MLClient(credential, subscription_id, resource_group, **kwargs)
+
+            # Get a list of workspaces in a resource group
+            for ws in ml_client.workspaces.list():
+                print(ws.name, ":", ws.location, ":", ws.description)
+
         """
-        cloud_name = kwargs.get("cloud_name", _get_default_cloud_name())
-        module_logger.debug("Cloud configured in MLClient: '%s'.", cloud_name)
+        if credential is None:
+            raise ValueError("credential cannot be None")
+
+        self._credential = credential
+        cloud_name = kwargs.get("cloud", _get_default_cloud_name())
+        self._cloud = cloud_name
         _set_cloud(cloud_name)
+        if "cloud" not in kwargs:
+            module_logger.debug("Cloud input is missing. Using default Cloud setting in MLClient: '%s'.", cloud_name)
+        module_logger.debug("Cloud configured in MLClient: '%s'.", cloud_name)
 
         self._registry_name = kwargs.pop("registry_name", None)
         self._operation_scope = OperationScope(
             subscription_id, resource_group_name, workspace_name, self._registry_name
         )
 
-        self._add_user_agent(kwargs)
         # Cannot send multiple base_url as azure-cli sets the base_url automatically.
         kwargs.pop("base_url", None)
+        self._add_user_agent(kwargs)
 
         user_agent = None
         properties = {"subscription_id": subscription_id, "resource_group_name": resource_group_name}
@@ -117,10 +164,10 @@ def __init__(
             app_insights_handler = get_appinsights_log_handler(user_agent, **{"properties": properties})
             app_insights_handler_kwargs = {"app_insights_handler": app_insights_handler}
 
-        self._credential = credential
-
-        cloud_details = _get_cloud_details(cloud_name)
-        base_url = cloud_details.get(ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT).strip("/")
+        base_url = _get_base_url_from_metadata(cloud_name=cloud_name, is_local_mfe=True)
+        self._base_url = base_url
+        kwargs.update(_get_cloud_information_from_metadata(cloud_name))
+        self._kwargs = kwargs
 
         self._operation_container = OperationsContainer()
 
@@ -281,6 +328,7 @@ def __init__(
             self._service_client_02_2022_preview,
             self._operation_container,
             self._credential,
+            _service_client_kwargs=kwargs,
             **ops_kwargs,
         )
         self._operation_container.add(AzureMLResourceType.JOB, self._jobs)
@@ -304,7 +352,7 @@ def from_config(
         :type path: str
         :param _file_name: Allows overriding the config file name to search for when path is a directory path.
         :type _file_name: str
-        :param kwargs: A dictionary of additional configuration parameters.
+        :param kwargs: A dictionary of additional configuration parameters, e.g. kwargs = {"cloud": "AzureUSGovernment"}
         :type kwargs: dict
 
         :return: The workspace object for an existing Azure ML Workspace.
@@ -363,7 +411,7 @@ def from_config(
     )
 
     """
-    This method provides a way to create MLClient object for cli to levarage cli context for authentication.
+    This method provides a way to create an MLClient object for the CLI to leverage the CLI context for authentication.
     With this we do not have to use AzureCliCredentials from the azure-identity package (not meant for heavy usage).
     The credentials are passed by the CLI's get_mgmt_service_client when it creates an object of this class.
""" @@ -508,7 +556,7 @@ def workspace_name(self) -> Optional[str]: """ return self._operation_scope.workspace_name - def _get_new_client(self, workspace_name: str) -> "MLClient": + def _get_new_client(self, workspace_name: str, **kwargs) -> "MLClient": """Returns a new MLClient object with the specified arguments :param str workspace_name: AzureML workspace of the new MLClient @@ -519,6 +567,7 @@ def _get_new_client(self, workspace_name: str) -> "MLClient": subscription_id=self._operation_scope.subscription_id, resource_group_name=self._operation_scope.resource_group_name, workspace_name=workspace_name, + **kwargs, ) @classmethod @@ -606,12 +655,6 @@ def _(entity: Job, operations, **kwargs): return operations[AzureMLResourceType.JOB].create_or_update(entity, **kwargs) -@_create_or_update.register(BaseNode) -def _(entity: Job, operations): - module_logger.debug("Creating or updating job") - return operations[AzureMLResourceType.JOB].create_or_update(entity) - - @_create_or_update.register(Model) def _(entity: Model, operations): module_logger.debug("Creating or updating model") diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/component.py index b0504aa2fa82..aa1537be4bf2 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/component/component.py @@ -5,10 +5,11 @@ from azure.ai.ml._schema.core.fields import VersionField, PythonFuncNameStr from azure.ai.ml.constants import AzureMLResourceType, BASE_PATH_CONTEXT_KEY -from azure.ai.ml._schema import PathAwareSchema, UnionField, NestedField, ArmVersionedStr +from azure.ai.ml._schema import UnionField, NestedField, ArmVersionedStr from azure.ai.ml._schema.component.input_output import InputPortSchema, ParameterSchema, OutputPortSchema from azure.ai.ml._schema.job.creation_context import CreationContextSchema from ..core.fields import RegistryStr +from ..core.resource import ResourceSchema class ComponentNameStr(PythonFuncNameStr): @@ -16,7 +17,7 @@ def _get_field_name(self): return "Component" -class BaseComponentSchema(PathAwareSchema): +class BaseComponentSchema(ResourceSchema): schema = fields.Str(data_key="$schema", attribute="_schema") name = ComponentNameStr(required=True) id = UnionField( diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/resource.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/resource.py index 20bb3670662f..e5ac4b4500a4 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/resource.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/resource.py @@ -3,8 +3,9 @@ # --------------------------------------------------------- import logging -from marshmallow import fields +from marshmallow import fields, post_load, pre_load from .schema import YamlFileSchema +from ...constants import SOURCE_PATH_CONTEXT_KEY module_logger = logging.getLogger(__name__) @@ -14,3 +15,16 @@ class ResourceSchema(YamlFileSchema): id = fields.Str(attribute="id") description = fields.Str(attribute="description") tags = fields.Dict(keys=fields.Str, attribute="tags") + + @post_load + def pass_source_path(self, data, **kwargs): + from ...entities import Resource + + if isinstance(data, dict): + # data will be used in Resource.__init__ + data["source_path"] = self.context[SOURCE_PATH_CONTEXT_KEY] + elif isinstance(data, Resource): + # some resource will make dict into object in their post_load + # not sure if it's a better way to unify them + data._set_source_path(self.context[SOURCE_PATH_CONTEXT_KEY]) + 
return data diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/schema.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/schema.py index 64b01f641cd1..6f15239bacb2 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/schema.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/schema.py @@ -8,7 +8,7 @@ from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta from azure.ai.ml._utils.utils import load_yaml -from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, FILE_PREFIX, PARAMS_OVERRIDE_KEY +from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, FILE_PREFIX, PARAMS_OVERRIDE_KEY, SOURCE_PATH_CONTEXT_KEY from marshmallow import post_load, pre_load, fields from pydash import objects @@ -80,9 +80,14 @@ class YamlFileSchema(PathAwareSchema): def __init__(self, *args, **kwargs): self._previous_base_path = None super().__init__(*args, **kwargs) + self._previous_source_path = None + if SOURCE_PATH_CONTEXT_KEY not in self.context: + self.context[SOURCE_PATH_CONTEXT_KEY] = None @pre_load def load_from_file(self, data, **kwargs): + # always push update + self._previous_source_path = self.context[SOURCE_PATH_CONTEXT_KEY] if isinstance(data, str) and data.startswith(FILE_PREFIX): self._previous_base_path = Path(self.context[BASE_PATH_CONTEXT_KEY]) # Use directly if absolute path @@ -94,6 +99,8 @@ def load_from_file(self, data, **kwargs): # deepcopy self.context[BASE_PATH_CONTEXT_KEY] to update old base path self.old_base_path = copy.deepcopy(self.context[BASE_PATH_CONTEXT_KEY]) self.context[BASE_PATH_CONTEXT_KEY] = path.parent + self.context[SOURCE_PATH_CONTEXT_KEY] = path + data = load_yaml(path) return data return data @@ -104,4 +111,6 @@ def reset_base_path(self, data, **kwargs): if self._previous_base_path is not None: # pop state self.context[BASE_PATH_CONTEXT_KEY] = self._previous_base_path + # always pop state + self.context[SOURCE_PATH_CONTEXT_KEY] = self._previous_source_path return data diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/base_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/base_job.py index adba551e7b11..663eec0e6411 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/base_job.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/base_job.py @@ -4,7 +4,7 @@ import logging from azure.ai.ml.constants import AzureMLResourceType -from azure.ai.ml._schema import NestedField, PathAwareSchema +from azure.ai.ml._schema import NestedField from azure.ai.ml._schema.job.identity import ManagedIdentitySchema, AMLTokenIdentitySchema, UserIdentitySchema from marshmallow import fields @@ -12,11 +12,12 @@ from .creation_context import CreationContextSchema from .job_output import JobOutputSchema from .services import JobServiceSchema +from ..core.resource import ResourceSchema module_logger = logging.getLogger(__name__) -class BaseJobSchema(PathAwareSchema): +class BaseJobSchema(ResourceSchema): creation_context = NestedField(CreationContextSchema, dump_only=True) services = fields.Dict(keys=fields.Str(), values=NestedField(JobServiceSchema)) name = fields.Str() diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_storage_utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_storage_utils.py index 5218f21db4d0..937282cbbe61 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_storage_utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_storage_utils.py @@ -12,7 +12,7 @@ from azure.ai.ml._restclient.v2021_10_01.models import ( DatastoreType, ) -from azure.ai.ml._azure_environments import ENDPOINT_URLS, _get_cloud_details +from azure.ai.ml._azure_environments import 
_get_storage_endpoint_from_metadata from azure.ai.ml.constants import ( FILE_PREFIX, FOLDER_PREFIX, @@ -134,8 +134,7 @@ def get_storage_client( f"types for artifact upload include: {*SUPPORTED_STORAGE_TYPES,}" raise ValidationException(message=msg, no_personal_data_message=msg, target=ErrorTarget.DATASTORE) - cloud_details = _get_cloud_details() - storage_endpoint = cloud_details.get(ENDPOINT_URLS.STORAGE_ENDPOINT) + storage_endpoint = _get_storage_endpoint_from_metadata() if not account_url and storage_endpoint: account_url = STORAGE_ACCOUNT_URLS[storage_type].format(storage_account, storage_endpoint) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_workspace_utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_workspace_utils.py index 93ebf16d2896..16a4ada21a10 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_workspace_utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_workspace_utils.py @@ -9,6 +9,7 @@ from azure.ai.ml._vendor.azure_resources._resource_management_client import ResourceManagementClient from azure.ai.ml.constants import ArmConstants from azure.identity import ChainedTokenCredential +from azure.ai.ml._azure_environments import _get_base_url_from_metadata module_logger = logging.getLogger(__name__) @@ -35,6 +36,7 @@ def get_resource_group_location( client = ResourceManagementClient( credential=credentials, subscription_id=subscription_id, + base_url=_get_base_url_from_metadata(), api_version=ArmConstants.AZURE_MGMT_RESOURCE_API_VERSION, ) rg = client.resource_groups.get(resource_group_name) @@ -48,6 +50,7 @@ def delete_resource_by_arm_id( client = ResourceManagementClient( credential=credentials, subscription_id=subscription_id, + base_url=_get_base_url_from_metadata(), api_version=ArmConstants.AZURE_MGMT_RESOURCE_API_VERSION, ) client.resources.begin_delete_by_id(arm_id, api_version) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py b/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py index 966c42f41a35..3b3d8ffd694c 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/constants.py @@ -11,6 +11,7 @@ ONLINE_ENDPOINT_TYPE = "online" BATCH_ENDPOINT_TYPE = "batch" BASE_PATH_CONTEXT_KEY = "base_path" +SOURCE_PATH_CONTEXT_KEY = "source_path" PARAMS_OVERRIDE_KEY = "params_override" TYPE = "type" JOBLIMITSTYPE = "JobLimitsType" @@ -91,6 +92,7 @@ BATCH_JOB_CHILD_RUN_NAME = "batchscoring" BATCH_JOB_CHILD_RUN_OUTPUT_NAME = "score" DEFAULT_ARTIFACT_STORE_OUTPUT_NAME = "default" +DEFAULT_EXPERIMENT_NAME = "Default" CREATE_ENVIRONMENT_ERROR_MESSAGE = "It looks like you are trying to specify a conda file for the --file/-f argument. --file/-f is reserved for the Azure ML Environment definition (see schema here: {}). To specify a conda file via command-line argument, please use --conda-file/-c argument." 
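(Editor's aside on the get_storage_client hunk above: the account URL is composed from a per-datastore-type template plus the cloud's storage suffix returned by _get_storage_endpoint_from_metadata. A minimal sketch under stated assumptions: the template and account name here are hypothetical, while "core.usgovcloudapi.net" is the AzureUSGovernment suffix listed in the _environments table earlier in this patch.)

```python
# Minimal sketch of the URL composition used by get_storage_client. The
# template dict and account name are hypothetical stand-ins; the suffix is
# the AzureUSGovernment storage endpoint from the _environments metadata.
STORAGE_ACCOUNT_URLS = {"AzureBlob": "https://{}.blob.{}"}

storage_endpoint = "core.usgovcloudapi.net"  # _get_storage_endpoint_from_metadata("AzureUSGovernment")
account_url = STORAGE_ACCOUNT_URLS["AzureBlob"].format("myworkspacestore", storage_endpoint)
print(account_url)  # https://myworkspacestore.blob.core.usgovcloudapi.net
```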
API_URL_KEY = "api" @@ -765,7 +767,18 @@ class TimeZone(str, Enum): class IO_CONSTANTS: PRIMITIVE_STR_2_TYPE = {"integer": int, "string": str, "number": float, "boolean": bool} PRIMITIVE_TYPE_2_STR = {int: "integer", str: "string", float: "number", bool: "boolean"} - + TYPE_MAPPING_YAML_2_REST = { + "string": "String", + "integer": "Integer", + "number": "Number", + "boolean": "Boolean", + } + PARAM_PARSERS = { + "float": float, + "integer": lambda v: int(float(v)), # backend returns 10.0 for integer, parse it to float before int + "boolean": lambda v: str(v).lower() == "true", + "number": float, + } # For validation, indicates specific parameters combination for each type INPUT_TYPE_COMBINATION = { "uri_folder": ["path", "mode"], diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_component_func.py b/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_component_func.py index 24e38065a8f1..810e885a4209 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_component_func.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_component_func.py @@ -13,9 +13,9 @@ def get_dynamic_input_parameter(inputs: Mapping): return [ KwParameter( name=name, - annotation=input.get_python_builtin_type_str(), + annotation=input._get_python_builtin_type_str(), default=None, - _type=input.get_python_builtin_type_str(), + _type=input._get_python_builtin_type_str(), ) for name, input in inputs.items() ] diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_load_import.py b/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_load_import.py index d7fedf401e1c..d276f4944a99 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_load_import.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/dsl/_load_import.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from typing import Callable, Union +from typing import Callable from azure.ai.ml.entities._builders import Command from azure.ai.ml.constants import ( @@ -41,98 +41,3 @@ def to_component(*, job: ComponentTranslatableMixin, **kwargs) -> Callable[..., # set default base path as "./". Because if code path is relative path and base path is None, will raise error when # get arm id of Code return job._to_component(context={BASE_PATH_CONTEXT_KEY: Path("./")}) - - -def _generate_package( - *, - assets: Union[list, dict, str] = None, - package_name: str = "assets", - source_directory: str = ".", - force_regenerate: bool = False, - mode: str = "reference", - **kwargs, -) -> None: - """For a set of components, generate a python module which contains component consumption functions and import it - for use. - - :param assets: List[assets_identifier], dict[module_name, assets_identifier] or str - - * None: we will generate a module for default ml_client, not supported for now. - - * list example: specify as assets pattern list and we will generate modules - - .. code-block:: python - - # workspace assets, module name will be workspace name - assets = ["azureml://subscriptions/{subscription_id}/resourcegroups/{resource_group}/ - workspaces/{workspace_name}"] - - # feed assets, module name will be feed name - assets = ["azureml://feeds/HuggingFace"] - - # local assets, module name will be "local" - assets = ["file:components/**/module_spec.yaml"] - - * dict example: module name as key and assets_identifier as value - - .. 
code-block:: python - - # module name with an assets identifier - assets = {"module_name": "azureml://subscriptions/{subscription_id}/" - "resourcegroups/{resource_group}/workspaces/{workspace_name}"} - # module name with a list of assets identifier - assets = {"module_name": ["azureml://subscriptions/{subscription_id}/" - "resourcegroups/{resource_group}/workspaces/{workspace_name}", - "file:components/**/module_spec.yaml"]} - - * str example: specify as ``assets.yaml`` and config file which contains the modules dict - - .. remarks:: - - module_name: a string which is the name of the generated python module. - If user specify "module_name", a python file will be created: module_name.py. - components: single or list of glob string which specify a set of components. Example values: - * assets from workspace - 1. all assets - ``azureml://subscriptions/{subscription_id}/resource_group/{resource_group}/ - workspaces/{workspace_name}`` - 2. components with name filter - ``azureml://subscriptions/{subscription_id}/resource_group/{resource_group} - /workspaces/{workspace_name}/components/microsoft_samples_*`` - 3. datasets - ``azureml://subscriptions/{subscription_id}/resource_group/{resource_group} - /workspaces/{workspace_name}/datasets`` - * components from local yaml - ``file:components/**/module_spec.yaml`` - * components from feeds - For feed concept, please see: `https://aka.ms/azuremlsharing`. - azureml://feeds/HuggingFace # All assets in feed. - azureml://feeds/HuggingFace/components/Microsoft* - - :type assets: typing.Union[None, list, dict, str] - :param source_directory: parent folder to generate source code. - * If not specified, we generate the file relative to the folder of python file that triggers the - dsl.generate_module call. - * If specified, we also generate all non-exist intermediate path. - :type source_directory: str - :param package_name: name of the generated python package. Example: cool-component-package - * If not specified, we generate the module directory under {source_directory} - * If specified: we generate the module file to specified package. - * If the cool-component-package folder does not exists, we will create a new skeleton package under - {source_directory}/cool-component-package and print info in command line and ask user to do: - ``pip install -e {source_directory}/cool-component-package`` - Then next user can do: 'from cool.component.package import module_name' - * If the folder exists, we trigger the __init__.py in the folder. - :type package_name: str - :param force_regenerate: whether to force regenerate the python module file. - * If True, will always generate and re-import the newly generated file. - * If False, will reuse previous generated file. If the existing file not valid, raise import error. - :type force_regenerate: bool - :param mode: whether to retain a snapshot of assets in package. - * reference: will not build/download snapshot of asset, load by name for remote assets. - * snapshot: will build/download snapshot of asset, load from local yaml. - :type mode: str - :param kwargs: A dictionary of additional configuration parameters. 
- :type kwargs: dict - """ - pass diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py index 4b7b14956a49..0ae1a1c293da 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/base_node.py @@ -5,7 +5,7 @@ import uuid from enum import Enum from functools import wraps -from abc import ABC, abstractmethod +from abc import abstractmethod from typing import Dict, Union, List, Optional from azure.ai.ml._utils._arm_id_utils import get_resource_name_from_arm_id_safe from azure.ai.ml.constants import JobType, ComponentSource @@ -57,9 +57,7 @@ def wrapper(*args, **kwargs): return wrapper -class BaseNode( - RestTranslatableMixin, NodeIOMixin, TelemetryMixin, YamlTranslatableMixin, _AttrDict, SchemaValidatableMixin, ABC -): +class BaseNode(Job, NodeIOMixin, YamlTranslatableMixin, _AttrDict, SchemaValidatableMixin): """Base class for node in pipeline, used for component version consumption. Can't be instantiated directly. :param type: Type of pipeline node @@ -103,7 +101,19 @@ def __init__( **kwargs, ): self._init = True + _from_component_func = kwargs.pop("_from_component_func", False) + super(BaseNode, self).__init__( + type=type, + name=name, + display_name=display_name, + description=description, + tags=tags, + properties=properties, + compute=compute, + experiment_name=experiment_name, + **kwargs, + ) # initialize io inputs, outputs = inputs or {}, outputs or {} @@ -126,16 +136,7 @@ def __init__( self._inputs = self._build_inputs_dict_without_meta(inputs or {}) self._outputs = self._build_outputs_dict_without_meta(outputs or {}) - super(BaseNode, self).__init__(**kwargs) - self.type = type self._component = component - self.name = name - self.display_name = display_name - self.description = description - self.tags = dict(tags) if tags else {} - self.properties = dict(properties) if properties else {} - self.compute = compute - self.experiment_name = experiment_name self.kwargs = kwargs # Generate an id for every instance @@ -144,7 +145,7 @@ def __init__( # add current component in pipeline stack for dsl scenario self._register_in_current_pipeline_component_builder() - self._base_path = None # if _base_path is not + self._source_path = self._component._source_path if isinstance(self._component, Component) else None self._init = False @classmethod @@ -193,6 +194,12 @@ def _set_base_path(self, base_path): """ self._base_path = base_path + def _set_source_path(self, source_path): + """ + Update the source path for the node. + """ + self._source_path = source_path + def _get_component_id(self) -> Union[str, Component]: """Return component id if possible.""" if isinstance(self._component, Component) and self._component.id: @@ -230,7 +237,7 @@ def _validate_inputs(self, raise_error=True): # raise error when required input with no default value not set if ( not self._is_input_set(input_name=key) # input not provided - and meta._optional is False # and it's required + and meta.optional is not True # and it's required and meta.default is None # and it does not have default ): validation_result.append_error( @@ -272,6 +279,13 @@ def _get_component_attr_name(cls) -> str: @abstractmethod def _to_job(self) -> Job: + """ + This private function is used by the CLI to get a plain job object so that the CLI can properly serialize the object. 
+        It is needed because BaseNode._to_dict() dumps objects using the pipeline child job schema instead of the standalone job schema;
+        for example, the dump of a Command object has a nested component property, which doesn't apply to standalone command jobs.
+        BaseNode._to_dict() needs to be able to dump to both the pipeline child job dict and the standalone job dict based on context.
+        """
+
         pass
 
     @classmethod
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py
index a2f919670f53..23952db6be04 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py
@@ -19,10 +19,18 @@
     AmlToken,
     UserIdentity,
     CommandJobLimits as RestCommandJobLimits,
+    JobBaseData,
+    CommandJob as RestCommandJob,
     ResourceConfiguration as RestResourceConfiguration,
 )
-from azure.ai.ml.constants import NodeType
+from azure.ai.ml.constants import (
+    LOCAL_COMPUTE_TARGET,
+    LOCAL_COMPUTE_PROPERTY,
+    ComponentSource,
+    NodeType,
+    BASE_PATH_CONTEXT_KEY,
+)
 from azure.ai.ml.entities._job.sweep.objective import Objective
 from azure.ai.ml.entities import (
     Component,
@@ -36,7 +44,7 @@
 from azure.ai.ml.entities._job.sweep.early_termination_policy import EarlyTerminationPolicy
 from azure.ai.ml.entities._job.sweep.search_space import SweepDistribution
 from .._job.pipeline._io import PipelineInput, PipelineOutputBase
-from .._util import validate_attribute_type, get_rest_dict, convert_ordered_dict_to_dict
+from .._util import validate_attribute_type, get_rest_dict, load_from_dict, convert_ordered_dict_to_dict
 from azure.ai.ml.entities._job.distribution import (
     MpiDistribution,
     TensorFlowDistribution,
@@ -44,8 +52,13 @@
     DistributionConfiguration,
 )
 from ..._schema import PathAwareSchema
+from azure.ai.ml._schema.job.command_job import CommandJobSchema
 from ..._schema.job.distribution import PyTorchDistributionSchema, TensorFlowDistributionSchema, MPIDistributionSchema
-from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget
+from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory
+from azure.ai.ml.entities._job._input_output_helpers import (
+    from_rest_inputs_to_dataset_literal,
+    from_rest_data_outputs,
+)
 
 module_logger = logging.getLogger(__name__)
 
@@ -108,6 +121,7 @@ def __init__(
         validate_attribute_type(attrs_to_check=locals(), attr_type_map=self._attr_type_map())
 
         kwargs.pop("type", None)
+        self._parameters = kwargs.pop("parameters", {})
         BaseNode.__init__(
             self, type=NodeType.COMMAND, inputs=inputs, outputs=outputs, component=component, compute=compute, **kwargs
         )
@@ -148,12 +162,21 @@ def _get_supported_inputs_types(cls):
     def _get_supported_outputs_types(cls):
         return str, Output
 
+    @property
+    def parameters(self) -> Dict[str, str]:
+        """MLflow parameters.
+
+        :return: MLflow parameters logged in the job.
+        :rtype: Dict[str, str]
+        """
+        return self._parameters
+
     @property
     def distribution(self) -> Union[PyTorchDistribution, MpiDistribution, TensorFlowDistribution]:
         return self._distribution
 
     @distribution.setter
-    def distribution(self, value):
+    def distribution(self, value: Union[Dict, PyTorchDistribution, TensorFlowDistribution, MpiDistribution]):
         if isinstance(value, dict):
             dist_schema = UnionField(
                 [
@@ -170,7 +193,7 @@ def resources(self) -> ResourceConfiguration:
         return self._resources
 
     @resources.setter
-    def resources(self, value):
+    def resources(self, value: Union[Dict, ResourceConfiguration]):
         if isinstance(value, dict):
             value = ResourceConfiguration(**value)
         self._resources = value
@@ -184,6 +207,19 @@ def command(self) -> Optional[str]:
         # the same as code
         return self.component.command if hasattr(self.component, "command") else None
 
+    @command.setter
+    def command(self, value: str) -> None:
+        if isinstance(self.component, Component):
+            self.component.command = value
+        else:
+            msg = "Can't set command property for a registered component {}"
+            raise ValidationException(
+                message=msg.format(self.component),
+                no_personal_data_message=msg.format(self.component),
+                target=ErrorTarget.COMMAND_JOB,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+
     @property
     def code(self) -> Optional[Union[str, PathLike]]:
         # BaseNode is an _AttrDict to allow dynamic attributes, so that lower version of SDK can work with attributes
@@ -195,6 +231,19 @@ def code(self) -> Optional[Union[str, PathLike]]:
         # which is invalid in schema validation.
         return self.component.code if hasattr(self.component, "code") else None
 
+    @code.setter
+    def code(self, value: str) -> None:
+        if isinstance(self.component, Component):
+            self.component.code = value
+        else:
+            msg = "Can't set code property for a registered component {}"
+            raise ValidationException(
+                message=msg.format(self.component),
+                no_personal_data_message=msg.format(self.component),
+                target=ErrorTarget.COMMAND_JOB,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+
     def set_resources(
         self,
         *,
@@ -317,6 +366,7 @@ def _attr_type_map(cls) -> dict:
 
     def _to_job(self) -> CommandJob:
         return CommandJob(
+            id=self.id,
             name=self.name,
             display_name=self.display_name,
             description=self.description,
@@ -326,6 +376,7 @@ def _to_job(self) -> CommandJob:
             experiment_name=self.experiment_name,
             code=self.component.code,
             compute=self.compute,
+            status=self.status,
             environment=self.environment,
             distribution=self.distribution,
             identity=self.identity,
@@ -334,6 +385,9 @@ def _to_job(self) -> CommandJob:
             limits=self.limits,
             inputs=self._job_inputs,
             outputs=self._job_outputs,
+            services=self.services,
+            creation_context=self.creation_context,
+            parameters=self.parameters,
         )
 
     @classmethod
@@ -354,6 +408,22 @@ def _to_rest_object(self, **kwargs) -> dict:
         )
         return rest_obj
 
+    @classmethod
+    def _load_from_dict(cls, data: Dict, context: Dict, additional_message: str, **kwargs) -> "Command":
+        from .command_func import command
+
+        loaded_data = load_from_dict(CommandJobSchema, data, context, additional_message, **kwargs)
+
+        # The resources and limits properties are flattened in the command() function; extract them and set them separately.
+        resources = loaded_data.pop("resources", None)
+        limits = loaded_data.pop("limits", None)
+
+        command_job = command(base_path=context[BASE_PATH_CONTEXT_KEY], **loaded_data)
+
+        command_job.resources = resources
+        command_job.limits = limits
+        return command_job
+
     @classmethod
     def _from_rest_object(cls, obj: dict) -> "Command":
         obj = BaseNode._rest_object_to_init_params(obj)
@@
-378,6 +448,48 @@ def _from_rest_object(cls, obj: dict) -> "Command": return Command(**obj) + @classmethod + def _load_from_rest_job(cls, obj: JobBaseData) -> "Command": + from .command_func import command + + rest_command_job: RestCommandJob = obj.properties + + command_job = command( + name=obj.name, + display_name=rest_command_job.display_name, + description=rest_command_job.description, + tags=rest_command_job.tags, + properties=rest_command_job.properties, + command=rest_command_job.command, + experiment_name=rest_command_job.experiment_name, + services=rest_command_job.services, + status=rest_command_job.status, + creation_context=obj.system_data, + code=rest_command_job.code_id, + compute=rest_command_job.compute_id, + environment=rest_command_job.environment_id, + distribution=DistributionConfiguration._from_rest_object(rest_command_job.distribution), + parameters=rest_command_job.parameters, + identity=rest_command_job.identity, + environment_variables=rest_command_job.environment_variables, + inputs=from_rest_inputs_to_dataset_literal(rest_command_job.inputs), + outputs=from_rest_data_outputs(rest_command_job.outputs), + ) + command_job._id = obj.id + command_job.resources = ResourceConfiguration._from_rest_object(rest_command_job.resources) + command_job.limits = CommandJobLimits._from_rest_object(rest_command_job.limits) + command_job.component._source = ComponentSource.REST # This is used by pipeline job telemetries. + + # Handle special case of local job + if ( + command_job.resources is not None + and command_job.resources.properties is not None + and command_job.resources.properties.get(LOCAL_COMPUTE_PROPERTY, None) + ): + command_job.compute = LOCAL_COMPUTE_TARGET + command_job.resources.properties.pop(LOCAL_COMPUTE_PROPERTY) + return command_job + def _build_inputs(self): inputs = super(Command, self)._build_inputs() built_inputs = {} @@ -403,6 +515,7 @@ def __call__(self, *args, **kwargs) -> "Command": if name not in kwargs.keys(): # use setattr here to make sure owner of input won't change setattr(node.inputs, name, original_input._data) + node._job_inputs[name] = original_input._data # get outputs for name, original_output in self.outputs.items(): # use setattr here to make sure owner of input won't change diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command_func.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command_func.py index 2c3fb11d6938..c13740b8c7f2 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command_func.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command_func.py @@ -12,7 +12,6 @@ CommandComponent, ) from azure.ai.ml.entities._job.distribution import MpiDistribution, TensorFlowDistribution, PyTorchDistribution -from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput from azure.ai.ml.entities._job.pipeline._component_translatable import ComponentTranslatableMixin from azure.ai.ml._restclient.v2022_02_01_preview.models import ManagedIdentity, AmlToken, UserIdentity from azure.ai.ml.entities._inputs_outputs import Input, Output @@ -32,26 +31,23 @@ def _parse_input(input_value): component_input, job_input = None, None if isinstance(input_value, Input): - component_input = input_value._to_component_input() + component_input = Input(**input_value._to_dict()) input_type = input_value.type if input_type in SUPPORTED_INPUTS: job_input = Input(**input_value._to_dict()) - elif isinstance(input_value, ComponentInput): - # if user provided component input, job input will be None - 
component_input = input_value elif isinstance(input_value, dict): - # if user provided dict, we try to parse it to ComponentInput and JobInput separately. - # only parse to JobInput for path type + # if user provided dict, we try to parse it to Input. + # for job input, only parse for path type input_type = input_value.get("type", None) if input_type in SUPPORTED_INPUTS: job_input = Input(**input_value) - component_input = ComponentInput(input_value) + component_input = Input(**input_value) elif isinstance(input_value, (SweepDistribution, str, bool, int, float)): # Input bindings are not supported component_input = ComponentTranslatableMixin._to_component_input_builder_function(input_value) job_input = input_value else: - msg = f"Unsupported input type: {type(input_value)}, only Input, ComponentInput, dict, str, bool, int and float are supported." + msg = f"Unsupported input type: {type(input_value)}, only Input, dict, str, bool, int and float are supported." raise ValidationException(message=msg, no_personal_data_message=msg, target=ErrorTarget.JOB) return component_input, job_input @@ -59,10 +55,8 @@ def _parse_input(input_value): def _parse_output(output_value): component_output, job_output = None, None if isinstance(output_value, Output): - component_output = output_value._to_component_output() + component_output = Output(**output_value._to_dict()) job_output = Output(**output_value._to_dict()) - elif isinstance(output_value, ComponentOutput): - component_output = output_value elif not output_value: # output value can be None or empty dictionary # None output value will be packed into a JobOutput object with mode = ReadWriteMount & type = UriFolder @@ -70,11 +64,11 @@ def _parse_output(output_value): job_output = output_value elif isinstance(output_value, dict): # When output value is a non-empty dictionary job_output = Output(**output_value) - component_output = ComponentOutput(output_value) + component_output = Output(**output_value) elif isinstance(output_value, str): # When output is passed in from pipeline job yaml job_output = output_value else: - msg = f"Unsupported output type: {type(output_value)}, only Output, ComponentOutput, JobOutput, and dict are supported." + msg = f"Unsupported output type: {type(output_value)}, only Output and dict are supported." raise ValidationException(message=msg, no_personal_data_message=msg, target=ErrorTarget.JOB) return component_output, job_output @@ -163,7 +157,6 @@ def command( if component is None: component = CommandComponent( - base_path=os.getcwd(), # base path should be current folder name=name, tags=tags, code=code, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/_pipeline_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/_pipeline_component.py index af2668bfda99..2e35a34cf240 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/_pipeline_component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/_pipeline_component.py @@ -18,9 +18,9 @@ class _PipelineComponent(Component): :param components: Id to components dict inside pipeline definition. :type components: OrderedDict[str, Component] :param inputs: Inputs of the component. - :type inputs: ComponentInputs + :type inputs: Component inputs :param outputs: Outputs of the component. 
- :type outputs: ComponentOutputs + :type outputs: Component outputs """ def __init__(self, components: Dict[str, BaseNode], **kwargs): diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py index 39f3d90446a2..b312f1d1bf2b 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py @@ -6,13 +6,8 @@ from marshmallow import INCLUDE, Schema from typing import Dict, Union -from azure.ai.ml._restclient.v2022_05_01.models import ( - ComponentVersionData, - ComponentVersionDetails, -) -from azure.ai.ml._schema.component.command_component import CommandComponentSchema, RestCommandComponentSchema +from azure.ai.ml._schema.component.command_component import CommandComponentSchema from azure.ai.ml.entities._job.distribution import ( - DistributionConfiguration, MpiDistribution, TensorFlowDistribution, PyTorchDistribution, @@ -20,9 +15,8 @@ from azure.ai.ml.entities._job.resource_configuration import ResourceConfiguration from azure.ai.ml.entities._job.parameterized_command import ParameterizedCommand from azure.ai.ml.entities._assets import Environment -from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, COMPONENT_TYPE, ComponentSource +from azure.ai.ml.constants import COMPONENT_TYPE from azure.ai.ml.constants import NodeType -from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput from .component import Component from .._util import validate_attribute_type, convert_ordered_dict_to_dict from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget @@ -168,9 +162,10 @@ def _create_schema_for_validation(cls, context) -> Union[PathAwareSchema, Schema return CommandComponentSchema(context=context) def _customized_validate(self): - return self._validate_command() + return super(CommandComponent, self)._customized_validate().merge_with(self._validate_command()) def _validate_command(self) -> ValidationResult: + validation_result = self._create_empty_validation_result() # command if self.command: invalid_expressions = [] @@ -179,9 +174,11 @@ def _validate_command(self) -> ValidationResult: invalid_expressions.append(data_binding_expression) if invalid_expressions: - error_msg = "Invalid data binding expression: {}".format(", ".join(invalid_expressions)) - return _ValidationResultBuilder.from_single_message(error_msg, "command") - return _ValidationResultBuilder.success() + validation_result.append_error( + yaml_path="command", + message="Invalid data binding expression: {}".format(", ".join(invalid_expressions)), + ) + return validation_result def _is_valid_data_binding_expression(self, data_binding_expression: str) -> bool: current_obj = self diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py index 009f62d7f409..1e1b30b718a4 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py @@ -1,26 +1,30 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# ---------------------------------------------------------
+import typing
+from abc import abstractmethod
 from os import PathLike
 from pathlib import Path
 from typing import Dict, Union
 
+from marshmallow import Schema
+
+from azure.ai.ml._schema import PathAwareSchema
 from azure.ai.ml.entities import Asset
-from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput
 from azure.ai.ml._restclient.v2022_05_01.models import ComponentVersionData, SystemData, ComponentVersionDetails
 from azure.ai.ml.constants import (
-    CommonYamlFields,
     BASE_PATH_CONTEXT_KEY,
     PARAMS_OVERRIDE_KEY,
     ComponentSource,
     ANONYMOUS_COMPONENT_NAME,
+    SOURCE_PATH_CONTEXT_KEY,
 )
-from azure.ai.ml.constants import NodeType
 from azure.ai.ml.entities._mixins import RestTranslatableMixin, YamlTranslatableMixin, TelemetryMixin
-from azure.ai.ml._utils.utils import load_yaml, dump_yaml_to_file, hash_dict
+from azure.ai.ml._utils.utils import dump_yaml_to_file, hash_dict, is_private_preview_enabled
 from azure.ai.ml.entities._util import find_type_in_override
-from azure.ai.ml._ml_exceptions import ComponentException, ErrorCategory, ErrorTarget, ValidationException
+from azure.ai.ml._ml_exceptions import ErrorCategory, ErrorTarget, ValidationException
 from azure.ai.ml.entities._validation import ValidationResult, SchemaValidatableMixin
+from azure.ai.ml.entities._inputs_outputs import Input, Output
 
 
 class Component(Asset, RestTranslatableMixin, TelemetryMixin, YamlTranslatableMixin, SchemaValidatableMixin):
@@ -88,11 +92,12 @@ def __init__(
             creation_context=creation_context,
             is_anonymous=kwargs.pop("is_anonymous", False),
             base_path=kwargs.pop("base_path", None),
+            source_path=kwargs.pop("source_path", None),
         )
         # update component name to ANONYMOUS_COMPONENT_NAME if it is anonymous
         if hasattr(self, "_is_anonymous"):
             self._set_is_anonymous(self._is_anonymous)
-        # TODO: check why do we dropped kwargs
+        # TODO: check why we dropped kwargs; it seems _source is not a valid parameter for super().__init__
 
         inputs = inputs if inputs else {}
         outputs = outputs if outputs else {}
@@ -112,9 +117,7 @@ def __init__(
         self._func = _generate_component_function(self)
 
     @classmethod
-    def build_validate_io(cls, io_dict: Dict, is_input: bool):
-        from azure.ai.ml import Output, Input
-
+    def build_validate_io(cls, io_dict: Union[Dict, Input, Output], is_input: bool):
         component_io = {}
         for name, port in io_dict.items():
             if not name.isidentifier():
@@ -126,13 +129,9 @@ def build_validate_io(cls, io_dict: Dict, is_input: bool):
                 )
             else:
                 if is_input:
-                    if isinstance(port, Input):
-                        port = port._to_dict()
-                    component_io[name] = ComponentInput(port)
+                    component_io[name] = port if isinstance(port, Input) else Input(**port)
                 else:
-                    if isinstance(port, Output):
-                        port = port._to_dict()
-                    component_io[name] = ComponentOutput(port)
+                    component_io[name] = port if isinstance(port, Output) else Output(**port)
         return component_io
 
     @property
@@ -239,10 +238,32 @@ def dump(self, path: Union[PathLike, str]) -> None:
         yaml_serialized = self._to_dict()
         dump_yaml_to_file(path, yaml_serialized, default_flow_style=False)
 
+    @classmethod
+    @abstractmethod
+    def _create_schema_for_validation(cls, context) -> typing.Union[PathAwareSchema, Schema]:
+        pass
+
     @classmethod
     def _get_validation_error_target(cls) -> ErrorTarget:
         return ErrorTarget.COMPONENT
 
+    def _customized_validate(self) -> ValidationResult:
+        validation_result = super(Component, self)._customized_validate()
+        # If private features are enabled and the component has a code value of type str, we 
need to check + # that it is a valid git path case. Otherwise we should throw a ValidationError + # saying that the code value is not valid + if ( + hasattr(self, "code") + and self.code is not None + and isinstance(self.code, str) + and self.code.startswith("git+") + and not is_private_preview_enabled() + ): + validation_result.append_error( + message="Not a valid code value: git paths are not supported.", yaml_path="code" + ) + return validation_result + @classmethod def _load( cls, @@ -255,6 +276,7 @@ def _load( params_override = params_override or [] context = { BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"), + SOURCE_PATH_CONTEXT_KEY: Path(yaml_path) if yaml_path else None, PARAMS_OVERRIDE_KEY: params_override, } @@ -270,9 +292,6 @@ def _from_rest_object(cls, component_rest_object: ComponentVersionData) -> "Comp return component_factory.load_from_rest(obj=component_rest_object) def _to_rest_object(self) -> ComponentVersionData: - # TODO: we may need to use original dict from component YAML(only change code and environment), returning - # parsed dict might add default value for some field, eg: if we add property "optional" with default value - # to ComponentInput, it will add field "optional" to all inputs even if user doesn't specify one component = self._to_dict() properties = ComponentVersionDetails( diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py index f2dfab14c9c6..3efb959c169f 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component_factory.py @@ -3,9 +3,8 @@ # --------------------------------------------------------- from typing import Callable, Dict, Tuple, Any from marshmallow import INCLUDE, Schema -from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory, ComponentException +from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory from azure.ai.ml._restclient.v2022_05_01.models import ComponentVersionData -from azure.ai.ml._schema.component import BaseComponentSchema from azure.ai.ml.constants import ( NodeType, ComponentSource, @@ -14,7 +13,7 @@ ANONYMOUS_COMPONENT_NAME, ) from azure.ai.ml.entities import ParallelComponent, CommandComponent, Component -from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput +from azure.ai.ml.entities._inputs_outputs import Input, Output from azure.ai.ml.entities._job.distribution import DistributionConfiguration @@ -107,12 +106,10 @@ def load_from_rest(self, *, obj: ComponentVersionData, _type: str = None) -> Com _type = _type.lower() inputs = { - k: ComponentInput._from_rest_object(v) - for k, v in rest_component_version.component_spec.pop("inputs", {}).items() + k: Input._from_rest_object(v) for k, v in rest_component_version.component_spec.pop("inputs", {}).items() } outputs = { - k: ComponentOutput._from_rest_object(v) - for k, v in rest_component_version.component_spec.pop("outputs", {}).items() + k: Output._from_rest_object(v) for k, v in rest_component_version.component_spec.pop("outputs", {}).items() } distribution = rest_component_version.component_spec.pop("distribution", None) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py deleted file mode 100644 index db0e51150e40..000000000000 --- 
a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/input_output.py +++ /dev/null @@ -1,101 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -import copy -from typing import Dict, Type, Union - -from azure.ai.ml.entities._mixins import RestTranslatableMixin - - -class ComponentIOItem(dict, RestTranslatableMixin): - """Component input/output. Inherit from dictionary for flexibility.""" - - def __init__(self, port_dict: Dict): - self._type = port_dict["type"] - self._default = port_dict.get("default", None) - super().__init__(port_dict) - - @property - def type(self) -> str: - return self._type - - @property - def default(self): - return self._default - - -class ComponentInput(ComponentIOItem): - # map from yaml type to rest object type - DATA_TYPE_MAPPING = { - "string": "String", - "integer": "Integer", - "number": "Number", - "boolean": "Boolean", - } - # map from yaml type to python built in type - PYTHON_BUILT_IN_TYPE_MAPPING = { - "string": str, - "integer": int, - "number": float, - "boolean": bool, - } - PARAM_PARSERS = { - "float": float, - "integer": lambda v: int(float(v)), # backend returns 10.0 for integer, parse it to float before int - "boolean": lambda v: str(v).lower() == "true", - "number": lambda v: str(v), - } - - def __init__(self, port_dict: Dict): - # parse value from string to it's original type. eg: "false" -> False - if isinstance(port_dict["type"], str) and port_dict["type"] in self.PARAM_PARSERS.keys(): - for key in ["default", "min", "max"]: - if key in port_dict.keys(): - port_dict[key] = self.PARAM_PARSERS[port_dict["type"]](port_dict[key]) - self._optional = self.PARAM_PARSERS["boolean"](port_dict.get("optional", "false")) - super().__init__(port_dict) - - @property - def python_builtin_type(self) -> Type[Union[int, str, float, bool]]: - """Return python builtin type of the input.""" - return self.PYTHON_BUILT_IN_TYPE_MAPPING[self.type] - - def get_python_builtin_type_str(self) -> str: - """Get python builtin type for current input in string, eg: str. Return yaml type if not available.""" - try: - return self.python_builtin_type.__name__ - except KeyError: - return self._type - - def _to_rest_object(self) -> Dict: - result = copy.deepcopy(self) - # parse string -> String, integer -> Integer, etc. 
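(For reference: this yaml-to-REST type mapping survives the refactor; in the new code it is driven by IO_CONSTANTS.TYPE_MAPPING_YAML_2_REST inside Input._to_rest_object, shown later in this patch. A minimal sketch of the behavior, assuming the table keeps the four primitive entries above; to_rest_type is a hypothetical helper name, the real code inlines the lookup:

TYPE_MAPPING_YAML_2_REST = {"string": "String", "integer": "Integer", "number": "Number", "boolean": "Boolean"}

def to_rest_type(yaml_type: str) -> str:
    # "integer" -> "Integer"; non-primitive types such as "uri_folder" pass through unchanged
    return TYPE_MAPPING_YAML_2_REST.get(yaml_type, yaml_type)
)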
- if result["type"] in result.DATA_TYPE_MAPPING.keys(): - result["type"] = result.DATA_TYPE_MAPPING[result["type"]] - return result - - @classmethod - def _from_rest_object(cls, rest_dict: Dict) -> "ComponentInput": - reversed_data_type_mapping = {v: k for k, v in cls.DATA_TYPE_MAPPING.items()} - # parse String -> string, Integer -> integer, etc - if rest_dict["type"] in reversed_data_type_mapping.keys(): - rest_dict["type"] = reversed_data_type_mapping[rest_dict["type"]] - return ComponentInput(rest_dict) - - def _is_literal(self) -> bool: - """Returns True if this input is literal input.""" - return self._type in ["number", "integer", "boolean", "string"] - - def _is_path(self) -> bool: - """Returns True if this input is path input.""" - return self._type == "path" - - -class ComponentOutput(ComponentIOItem): - def _to_rest_object(self) -> Dict: - result = copy.deepcopy(self) - return result - - @classmethod - def _from_rest_object(cls, rest_dict: Dict) -> "ComponentOutput": - return ComponentOutput(rest_dict) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py index a5ee6cf687d9..56bb28448a11 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/parallel_component.py @@ -7,10 +7,7 @@ from marshmallow import INCLUDE, Schema from typing import Dict, Any, Union -from azure.ai.ml._restclient.v2021_10_01.models import ( - ComponentVersionData, - ComponentVersionDetails, -) +from azure.ai.ml._restclient.v2021_10_01.models import ComponentVersionData from azure.ai.ml._schema.component.parallel_component import ParallelComponentSchema, RestParallelComponentSchema from azure.ai.ml.constants import ( BASE_PATH_CONTEXT_KEY, @@ -18,8 +15,8 @@ NodeType, ComponentSource, ) -from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput from .component import Component +from azure.ai.ml.entities._inputs_outputs import Input, Output from azure.ai.ml.entities._job.resource_configuration import ResourceConfiguration from azure.ai.ml.entities._deployment.deployment_settings import BatchRetrySettings from azure.ai.ml.entities._job.parallel.retry_settings import RetrySettings @@ -247,12 +244,10 @@ def _load_from_dict(cls, data: Dict, context: Dict, **kwargs) -> "ParallelCompon def _load_from_rest(cls, obj: ComponentVersionData) -> "ParallelComponent": rest_component_version = obj.properties inputs = { - k: ComponentInput._from_rest_object(v) - for k, v in rest_component_version.component_spec.pop("inputs", {}).items() + k: Input._from_rest_object(v) for k, v in rest_component_version.component_spec.pop("inputs", {}).items() } outputs = { - k: ComponentOutput._from_rest_object(v) - for k, v in rest_component_version.component_spec.pop("outputs", {}).items() + k: Output._from_rest_object(v) for k, v in rest_component_version.component_spec.pop("outputs", {}).items() } parallel_component = ParallelComponent( id=obj.id, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py index 1048e4914fa8..3fd8a1fc5783 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/utils.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from typing import Dict -from azure.ai.ml.entities._component.input_output import ComponentInput, 
ComponentOutput +from azure.ai.ml.entities._inputs_outputs import Input, Output def component_io_to_rest_obj(io_dict: Dict): @@ -18,7 +18,7 @@ def component_input_from_rest_obj(component_io: Dict): """Rest component inputs/outputs to dictionary.""" component_io_dict = {} for name, rest_obj in component_io.items(): - io = ComponentInput._from_rest_object(rest_obj) + io = Input._from_rest_object(rest_obj) component_io_dict[name] = io return component_io_dict @@ -27,6 +27,6 @@ def component_output_from_rest_obj(component_io: Dict): """Rest component inputs/outputs to dictionary.""" component_io_dict = {} for name, rest_obj in component_io.items(): - io = ComponentOutput._from_rest_object(rest_obj) + io = Output._from_rest_object(rest_obj) component_io_dict[name] = io return component_io_dict diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/_constants.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/_constants.py index a90063d860f6..97a257ab9d52 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/_constants.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/_constants.py @@ -3,8 +3,6 @@ # --------------------------------------------------------- # Miscellaneous -DEFAULT_ENDPOINT = "core.windows.net" HTTPS = "https" HTTP = "http" -DEFAULT_AUTHORITY_URL = "https://login.microsoftonline.com" WORKSPACE_BLOB_STORE = "workspaceblobstore" diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py index 7eea7d6a5065..c8e166cf7baa 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py @@ -5,6 +5,7 @@ from pathlib import Path from typing import Dict, Union from azure.ai.ml.entities._datastore.datastore import Datastore +from azure.ai.ml._azure_environments import _get_storage_endpoint_from_metadata from azure.ai.ml._restclient.v2022_05_01.models import ( AzureBlobDatastore as RestAzureBlobDatastore, @@ -22,7 +23,7 @@ ) from azure.ai.ml.entities._datastore.utils import from_rest_datastore_credentials -from ._constants import DEFAULT_ENDPOINT, HTTPS +from ._constants import HTTPS from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, TYPE from azure.ai.ml.entities._util import load_from_dict @@ -60,7 +61,7 @@ def __init__( file_share_name: str, description: str = None, tags: Dict = None, - endpoint: str = DEFAULT_ENDPOINT, + endpoint: str = _get_storage_endpoint_from_metadata(), protocol: str = HTTPS, properties: Dict = None, credentials: Union[AccountKeyCredentials, SasTokenCredentials], @@ -156,7 +157,7 @@ def __init__( container_name: str, description: str = None, tags: Dict = None, - endpoint: str = DEFAULT_ENDPOINT, + endpoint: str = _get_storage_endpoint_from_metadata(), protocol: str = HTTPS, properties: Dict = None, credentials: Union[AccountKeyCredentials, SasTokenCredentials] = None, @@ -253,7 +254,7 @@ def __init__( filesystem: str, description: str = None, tags: Dict = None, - endpoint: str = DEFAULT_ENDPOINT, + endpoint: str = _get_storage_endpoint_from_metadata(), protocol: str = HTTPS, properties: Dict = None, credentials: Union[ServicePrincipalCredentials, CertificateCredentials] = None, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/credentials.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/credentials.py index 77d1dd369a07..6834a95d0e76 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/credentials.py +++ 
b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/credentials.py
@@ -2,7 +2,7 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
-from ._constants import DEFAULT_AUTHORITY_URL
+from azure.ai.ml._azure_environments import _get_active_directory_url_from_metadata
from azure.ai.ml.entities._mixins import RestTranslatableMixin
from azure.ai.ml._restclient.v2022_05_01.models import (
AccountKeyDatastoreCredentials,
@@ -100,7 +100,7 @@ def __ne__(self, other: object) -> bool:
class BaseTenantCredentials(DatastoreCredentials):
def __init__(
self,
- authority_url: str = DEFAULT_AUTHORITY_URL,
+ authority_url: str = _get_active_directory_url_from_metadata(),
resource_url: str = None,
tenant_id: str = None,
client_id: str = None,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py
index 4a91f448482f..4afa05a671c2 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs.py
@@ -57,18 +57,17 @@ def some_pipeline(
from typing import overload
from collections import OrderedDict
-from typing import Union, Sequence, Iterable
+from typing import Dict, Union, Sequence, Iterable
from enum import EnumMeta, Enum as PyEnum
from inspect import Parameter, signature
from azure.ai.ml.entities._job.pipeline._exceptions import UserErrorException, MldesignerComponentDefiningError
-from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput
from azure.ai.ml.constants import InputOutputModes, AssetTypes, IO_CONSTANTS
from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory, ComponentException
-from azure.ai.ml.entities._mixins import DictMixin
+from azure.ai.ml.entities._mixins import DictMixin, RestTranslatableMixin

-class Input(DictMixin):
+class Input(DictMixin, RestTranslatableMixin):
"""Define an input of a Component or Job.

Default to be a uri_folder Input.
@@ -260,10 +259,13 @@ def __init__(
self.path = path
self.mode = None if self._is_primitive_type else mode
self.default = default
- self.optional = True if optional is True else None
+ self.optional = optional
self.min = min
self.max = max
self.enum = enum
+ # normalize properties like ["default", "min", "max", "optional"]
+ self._normalize_self_properties()
+
self._allowed_types = IO_CONSTANTS.PRIMITIVE_STR_2_TYPE.get(self.type)
self._validate_parameter_combinations()
@@ -275,10 +277,6 @@ def _to_dict(self, remove_name=True):
result = {key: getattr(self, key) for key in keys}
return _remove_empty_values(result)
- def _to_component_input(self):
- data = self._to_dict()
- return ComponentInput(data)
-
def _parse(self, val):
"""Parse value passed from command line.
@@ -390,6 +388,10 @@ def _validate_or_throw(self, value):
target=ErrorTarget.PIPELINE,
)
+ def _get_python_builtin_type_str(self) -> str:
+ """Get the python builtin type of the current input as a string, eg: str. Return the yaml type if not available."""
+ return IO_CONSTANTS.PRIMITIVE_STR_2_TYPE[self.type].__name__ if self._is_primitive_type else self.type
+
def _validate_parameter_combinations(self):
"""Validate different parameter combinations according to type"""
parameters = ["type", "path", "mode", "default", "min", "max"]
@@ -409,6 +411,15 @@ def _validate_parameter_combinations(self):
target=ErrorTarget.PIPELINE,
)
+ def _normalize_self_properties(self):
+ # parse value from string to its original type.
eg: "false" -> False + if self.type in IO_CONSTANTS.PARAM_PARSERS: + for key in ["default", "min", "max"]: + if getattr(self, key) is not None: + self[key] = IO_CONSTANTS.PARAM_PARSERS[self.type](self[key]) + self.optional = IO_CONSTANTS.PARAM_PARSERS["boolean"](getattr(self, "optional", "false")) + self.optional = True if self.optional is True else None + @classmethod def _get_input_by_type(cls, t: type, optional=None): if t in IO_CONSTANTS.PRIMITIVE_TYPE_2_STR: @@ -423,8 +434,27 @@ def _get_default_string_input(cls, optional=None): def _get_param_with_standard_annotation(cls, func): return _get_param_with_standard_annotation(func, is_func=True) + def _to_rest_object(self) -> Dict: + # this is for component rest object when using Input as component inputs, as for job input usage, + # rest object is generated by extracting Input's properties, see details in to_rest_dataset_literal_inputs() + result = self._to_dict() + # parse string -> String, integer -> Integer, etc. + if result["type"] in IO_CONSTANTS.TYPE_MAPPING_YAML_2_REST: + result["type"] = IO_CONSTANTS.TYPE_MAPPING_YAML_2_REST[result["type"]] + return result + + @classmethod + def _from_rest_object(cls, rest_dict: Dict) -> "Input": + # this is for component rest object when using Input as component inputs + reversed_data_type_mapping = {v: k for k, v in IO_CONSTANTS.TYPE_MAPPING_YAML_2_REST.items()} + # parse String -> string, Integer -> integer, etc + if rest_dict["type"] in reversed_data_type_mapping: + rest_dict["type"] = reversed_data_type_mapping[rest_dict["type"]] + + return Input(**rest_dict) + -class Output(DictMixin): +class Output(DictMixin, RestTranslatableMixin): """Define an output of a Component or Job. :param type: The type of the data output. Possible values include: @@ -504,8 +534,15 @@ def _to_dict(self, remove_name=True): result = {key: getattr(self, key) for key in keys} return _remove_empty_values(result) - def _to_component_output(self): - return ComponentOutput(self._to_dict()) + def _to_rest_object(self) -> Dict: + # this is for component rest object when using Output as component outputs, as for job output usage, + # rest object is generated by extracting Output's properties, see details in to_rest_data_outputs() + return self._to_dict() + + @classmethod + def _from_rest_object(cls, rest_dict: Dict) -> "Output": + # this is for component rest object when using Output as component outputs + return Output(**rest_dict) class EnumInput(Input): diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py index 5b2916b2e84f..2b544356ae99 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py @@ -253,14 +253,6 @@ def _to_node(self, context: Dict = None, **kwargs): ) def _validate(self) -> None: - if self.name is None: - msg = "Job name is required" - raise ValidationException( - message=msg, - no_personal_data_message=msg, - target=ErrorTarget.JOB, - error_category=ErrorCategory.USER_ERROR, - ) if self.compute is None: msg = "compute is required" raise ValidationException( diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job.py index fa25ea57dea2..7eba233d9871 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job.py @@ -15,6 +15,7 @@ JobType, PARAMS_OVERRIDE_KEY, JobServices, + SOURCE_PATH_CONTEXT_KEY, ) from 
azure.ai.ml.entities._mixins import RestTranslatableMixin, TelemetryMixin from azure.ai.ml.entities._resource import Resource @@ -42,7 +43,7 @@ def _is_pipeline_child_job(job: JobBaseData) -> bool: return job.properties is None -class Job(Resource, RestTranslatableMixin, ComponentTranslatableMixin, TelemetryMixin): +class Job(Resource, ComponentTranslatableMixin, TelemetryMixin): """Base class for job, can't be instantiated directly. :param name: Name of the resource. @@ -202,22 +203,23 @@ def _load( data = data or {} params_override = params_override or [] context = { + SOURCE_PATH_CONTEXT_KEY: Path(yaml_path) if yaml_path else None, BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"), PARAMS_OVERRIDE_KEY: params_override, } from azure.ai.ml.entities import ( - CommandJob, PipelineJob, ) from azure.ai.ml.entities._job.automl.automl_job import AutoMLJob from azure.ai.ml.entities._job.sweep.sweep_job import SweepJob + from azure.ai.ml.entities._builders.command import Command job_type: Optional[Type["Job"]] = None type_in_override = find_type_in_override(params_override) type = type_in_override or data.get(CommonYamlFields.TYPE, JobType.COMMAND) # override takes the priority if type == JobType.COMMAND: - job_type = CommandJob + job_type = Command elif type == JobType.SWEEP: job_type = SweepJob elif type == JobType.AUTOML: @@ -241,10 +243,11 @@ def _load( @classmethod def _from_rest_object(cls, job_rest_object: Union[JobBaseData, Run]) -> "Job": - from azure.ai.ml.entities import CommandJob, PipelineJob + from azure.ai.ml.entities import PipelineJob from azure.ai.ml.entities._job.automl.automl_job import AutoMLJob from azure.ai.ml.entities._job.sweep.sweep_job import SweepJob from azure.ai.ml.entities._job.base_job import _BaseJob + from azure.ai.ml.entities._builders.command import Command try: if isinstance(job_rest_object, Run): @@ -253,7 +256,7 @@ def _from_rest_object(cls, job_rest_object: Union[JobBaseData, Run]) -> "Job": elif _is_pipeline_child_job(job_rest_object): raise PipelineChildJobError(job_id=job_rest_object.id) elif job_rest_object.properties.job_type == RestJobType.COMMAND: - return CommandJob._load_from_rest(job_rest_object) + return Command._load_from_rest_job(job_rest_object) elif job_rest_object.properties.job_type == RestJobType.SWEEP: return SweepJob._load_from_rest(job_rest_object) elif job_rest_object.properties.job_type == RestJobType.AUTO_ML: diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py index 4164c9ce9cb3..62d9c9fbc1fc 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_component_translatable.py @@ -9,7 +9,6 @@ from azure.ai.ml.entities._inputs_outputs import Input, Output from azure.ai.ml._ml_exceptions import JobException, ErrorTarget from azure.ai.ml.constants import ComponentJobConstants, AssetTypes -from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput from azure.ai.ml.entities._job.sweep.search_space import Choice, Randint, SweepDistribution @@ -122,9 +121,7 @@ def _find_source_input_output_type(cls, input: str, pipeline_job_dict: dict): raise JobException(message=msg, no_personal_data_message=msg, target=ErrorTarget.PIPELINE) @classmethod - def _to_component_input( - cls, input: Union[Input, str, bool, int, float], pipeline_job_dict=None, **kwargs - ) -> ComponentInput: 
+ def _to_component_input(cls, input: Union[Input, str, bool, int, float], pipeline_job_dict=None, **kwargs) -> Input:
pipeline_job_dict = pipeline_job_dict or {}
input_variable = {}
@@ -155,10 +152,10 @@ def _to_component_input(
no_personal_data_message=msg,
target=ErrorTarget.PIPELINE,
)
- return ComponentInput(input_variable)
+ return Input(**input_variable)

@classmethod
- def _to_component_input_builder_function(cls, input: Union[Input, str, bool, int, float]) -> ComponentInput:
+ def _to_component_input_builder_function(cls, input: Union[Input, str, bool, int, float]) -> Input:
input_variable = {}

if isinstance(input, Input):
@@ -175,14 +172,14 @@ def _to_component_input_builder_function(cls, input: Union[Input, str, bool, int
else:
input_variable["type"] = cls.PYTHON_SDK_TYPE_MAPPING[type(input)]
input_variable["default"] = input
- return ComponentInput(input_variable)
+ return Input(**input_variable)

@classmethod
def _to_component_output(
cls, output: Union[Output, str, bool, int, float], pipeline_job_dict=None, **kwargs
- ) -> ComponentOutput:
+ ) -> Output:
"""
- Translate outputs to ComponentOutputs and infer component output type from linked pipeline output, its original
+ Translate outputs to Outputs and infer component output type from linked pipeline output, its original
type or default type
"""
pipeline_job_dict = pipeline_job_dict or {}
if output is None:
# default to uri_folder if we failed to get the type
output_type = AssetTypes.URI_FOLDER
output_variable = {"type": output_type}
- return ComponentOutput(output_variable)
+ return Output(**output_variable)
else:
output_variable = {}
@@ -216,12 +213,12 @@ def _to_component_output(
no_personal_data_message=msg,
target=ErrorTarget.PIPELINE,
)
- return ComponentOutput(output_variable)
+ return Output(**output_variable)

def _to_component_inputs(
self, inputs: Dict[str, Union[Input, str, bool, int, float]], **kwargs
- ) -> Dict[str, ComponentInput]:
- """Translate inputs to ComponentInputs.
+ ) -> Dict[str, Input]:
+ """Translate inputs to Inputs.

:param inputs: mapping from input name to input object.
:return: mapping from input name to translated component input.
@@ -232,13 +229,13 @@ def _to_component_inputs(
translated_component_inputs[io_name] = self._to_component_input(io_value, pipeline_job_dict)
return translated_component_inputs

- def _to_component_outputs(self, outputs: Dict[str, Output], **kwargs) -> Dict[str, ComponentOutput]:
- """Translate outputs to ComponentOutputs
+ def _to_component_outputs(self, outputs: Dict[str, Output], **kwargs) -> Dict[str, Output]:
+ """Translate outputs to Outputs

:param outputs: mapping from output name to output object.
:return: mapping from output name to translated component output.
"""
- # Translate outputs to ComponentOutputs.
+ # Translate outputs to Outputs.
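(For reference, a rough sketch of what the input-side translation above produces, assuming PYTHON_SDK_TYPE_MAPPING maps python builtins to yaml type names; the import path is the public one this patch uses elsewhere:

from azure.ai.ml import Input

# a literal pipeline default such as 10 becomes a typed component Input
component_input = Input(type="integer", default=10)
# a binding string such as "${{parent.inputs.job_in_folder}}" instead takes the type
# of the input it is bound to, resolved via _find_source_input_output_type
)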
pipeline_job_dict = kwargs.get("pipeline_job_dict", {}) translated_component_outputs = {} for output_name, output_value in outputs.items(): diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io.py index 0807c2635e96..2ab741f79dc7 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io.py @@ -9,7 +9,6 @@ from azure.ai.ml.entities import Data from azure.ai.ml.constants import ComponentJobConstants from azure.ai.ml.entities._job.pipeline._exceptions import UserErrorException -from azure.ai.ml.entities._component.input_output import ComponentInput, ComponentOutput from azure.ai.ml.entities._job._input_output_helpers import ( to_rest_data_outputs, to_rest_dataset_literal_inputs, @@ -57,11 +56,11 @@ def _resolve_builders_2_data_bindings(data: Union[list, dict]) -> Union[list, di class InputOutputBase(ABC): - def __init__(self, meta: Union[ComponentInput, ComponentOutput], data, **kwargs): + def __init__(self, meta: Union[Input, Output], data, **kwargs): """Base class of input & output :param meta: Metadata of this input/output, eg: type, min, max, etc. - :type meta: Union[ComponentInput, ComponentOutput] + :type meta: Union[Input, Output] :param data: Actual value of input/output, None means un-configured data. :type data: Union[None, int, bool, float, str azure.ai.ml.Input, @@ -169,7 +168,7 @@ class PipelineInputBase(InputOutputBase): def __init__( self, name: str, - meta: ComponentInput, + meta: Input, *, data: Union[int, bool, float, str, Output, "PipelineInput", Input] = None, owner: Union["BaseComponent", "PipelineJob"] = None, @@ -180,7 +179,7 @@ def __init__( :param name: The name of the input. :type name: str :param meta: Metadata of this input, eg: type, min, max, etc. - :type meta: ComponentInput + :type meta: Input :param data: The input data. Valid types include int, bool, float, str, Output of another component or pipeline input and Input. Note that the output of another component or pipeline input associated should be reachable in the scope @@ -231,12 +230,12 @@ def _build_data(self, data): ) else: return data - elif isinstance(data, Input) or is_data_binding_expression(data): + # for data binding case, set is_singular=False for case like "${{parent.inputs.job_in_folder}}/sample1.csv" + elif isinstance(data, Input) or is_data_binding_expression(data, is_singular=False): return data - elif self._meta and self._meta._is_path(): - # To support passing azure.ai.ml.entities.Data for path input, we will wrap it to Input. + elif isinstance(self._meta, Input) and not self._meta._is_primitive_type: if isinstance(data, str): - return Input(path=data) + return Input(type=self._meta.type, path=data) else: msg = "only path input is supported now but get {}: {}." raise UserErrorException( @@ -246,7 +245,7 @@ def _build_data(self, data): return data def _to_job_input(self): - """convert the input to ComponentInput, this logic will change if backend contract changes""" + """convert the input to Input, this logic will change if backend contract changes""" if self._data is None: # None data means this input is not configured. 
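# (per the InputOutputBase docstring, a data value of None means the user never configured this input)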
result = None
@@ -297,7 +296,7 @@ class PipelineOutputBase(InputOutputBase):
def __init__(
self,
name: str,
- meta: ComponentOutput,
+ meta: Output,
*,
data: Union[Output, str] = None,
owner: Union["BaseComponent", "PipelineJob"] = None,
@@ -347,7 +346,7 @@ def _build_data(self, data):
return data

def _to_job_output(self):
- """Convert the output to ComponentOutput, this logic will change if backend contract changes"""
+ """Convert the output to Output, this logic will change if backend contract changes"""
if self._data is None:
# None data means this output is not configured.
result = None
@@ -389,7 +388,7 @@ def _deepcopy(self):
class PipelineInput(PipelineInputBase):
"""Define one input of a Pipeline."""

- def __init__(self, name: str, meta: ComponentInput, **kwargs):
+ def __init__(self, name: str, meta: Input, **kwargs):
super(PipelineInput, self).__init__(name=name, meta=meta, **kwargs)

def _build_data(self, data):
@@ -494,10 +493,10 @@ class NodeIOMixin:
def __init__(self, **kwargs):
super(NodeIOMixin, self).__init__(**kwargs)

- def _build_input(self, name, meta: ComponentInput, data) -> PipelineInputBase:
+ def _build_input(self, name, meta: Input, data) -> PipelineInputBase:
return PipelineInputBase(name=name, meta=meta, data=data, owner=self)

- def _build_output(self, name, meta: ComponentOutput, data) -> PipelineOutputBase:
+ def _build_output(self, name, meta: Output, data) -> PipelineOutputBase:
# For un-configured outputs, set it to None so we don't pass extra fields (eg: default mode)
return PipelineOutputBase(name=name, meta=meta, data=data, owner=self)
@@ -662,10 +661,10 @@ def _from_rest_outputs(cls, outputs) -> Dict[str, Output]:
class PipelineIOMixin(NodeIOMixin):
"""Provides the ability to wrap pipeline inputs/outputs and build data bindings dynamically."""

- def _build_input(self, name, meta: ComponentInput, data) -> "PipelineInput":
+ def _build_input(self, name, meta: Input, data) -> "PipelineInput":
return PipelineInput(name=name, meta=meta, data=data, owner=self)

- def _build_output(self, name, meta: ComponentOutput, data) -> "PipelineOutput":
+ def _build_output(self, name, meta: Output, data) -> "PipelineOutput":
+ # TODO: set data to None for un-configured outputs so we don't pass extra fields (eg: default mode)
return PipelineOutput(name=name, meta=meta, data=data, owner=self)

diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py
index f0d7ea29c470..4d470c512dec 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py
@@ -46,7 +46,6 @@
)
from azure.ai.ml.entities._job.pipeline._exceptions import UserErrorException
from azure.ai.ml.entities._mixins import YamlTranslatableMixin
-from azure.ai.ml.entities._util import load_from_dict
from azure.ai.ml.entities._schedule.schedule import CronSchedule, RecurrenceSchedule, Schedule
from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget
@@ -148,6 +147,7 @@ def __init__(
for _, job_instance in self.jobs.items():
if isinstance(job_instance, BaseNode):
job_instance._set_base_path(self.base_path)
+ job_instance._set_source_path(self._source_path)
if isinstance(job_instance, BaseNode):
job_instance._validate_inputs()
@@ -249,7 +249,19 @@ def _get_skip_fields_in_schema_validation(self) -> typing.List[str]:
def _customized_validate(self) -> ValidationResult:
"""Validate that all provided
inputs and parameters are valid for current pipeline and components in it.""" - validation_result = self._create_empty_validation_result() + validation_result = super(PipelineJob, self)._customized_validate() + + no_compute_nodes = [] + for node_name, node in self.jobs.items(): + if hasattr(node, "compute") and node.compute is None: + no_compute_nodes.append(node_name) + if not self.compute: + for node_name in no_compute_nodes: + validation_result.append_error( + yaml_path=f"jobs.{node_name}.compute", + message="Compute not set", + ) + for node_name, node in self.jobs.items(): if isinstance(node, BaseNode): validation_result.merge_with(node._validate(), "jobs.{}".format(node_name)) @@ -260,6 +272,7 @@ def _customized_validate(self) -> ValidationResult: yaml_path="jobs.{}".format(node_name), message=f"Not supported pipeline job type: {type(node)}", ) + return validation_result def _remove_pipeline_input(self): @@ -282,7 +295,7 @@ def _validate_pipeline_input(self, binding_inputs, component_definition_inputs): # todo: refine get pipeline_input_name from binding pipeline_input_name = component_binding_input[3:-2].split(".")[-1] if pipeline_input_name in self._inputs and self._inputs[pipeline_input_name]._data is None: - if component_definition_inputs[component_input_name]._optional: + if component_definition_inputs[component_input_name].optional: # todo: not remove component input in client side, backend need remove component job # optional input which is binding to a None pipeline input pass @@ -445,7 +458,8 @@ def _load_from_dict(cls, data: Dict, context: Dict, additional_message: str, **k else: path_first_occurrence[component_path] = node_name - loaded_schema = load_from_dict(PipelineJobSchema, data, context, additional_message, **kwargs) + # use this instead of azure.ai.ml.entities._util.load_from_dict to avoid parsing + loaded_schema = cls._create_schema_for_validation(context=context).load(data, **kwargs) # replace repeat component with first occurrence to reduce arm id resolution # current load yaml file logic is in azure.ai.ml._schema.core.schema.YamlFileSchema.load_from_file diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/to_rest_functions.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/to_rest_functions.py new file mode 100644 index 000000000000..6c5689cffd5a --- /dev/null +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/to_rest_functions.py @@ -0,0 +1,52 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from functools import singledispatch +from pathlib import Path +from azure.ai.ml.constants import DEFAULT_EXPERIMENT_NAME + +from azure.ai.ml.entities._builders.command import Command +from azure.ai.ml.entities._builders.sweep import Sweep +from .job import Job +from azure.ai.ml.entities._job.job_name_generator import generate_job_name +from azure.ai.ml._restclient.v2022_02_01_preview.models import JobBaseData + + +def generate_defaults(job: Job, rest_job: JobBaseData) -> None: + # Default name to a generated user friendly name. 
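+ # (note: the defaults below are written onto the REST object only; unlike the removed
+ # JobOperations._generate_job_defaults, the caller's job object is left untouched)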
+ if not job.name: + rest_job.name = generate_job_name() + + if not job.display_name: + rest_job.properties.display_name = rest_job.name + + # Default experiment to current folder name or "Default" + if not job.experiment_name: + rest_job.properties.experiment_name = Path("./").resolve().stem.replace(" ", "") or DEFAULT_EXPERIMENT_NAME + + +@singledispatch +def to_rest_job_object(something) -> JobBaseData: + raise NotImplementedError() + + +@to_rest_job_object.register(Job) +def _(job: Job) -> JobBaseData: + rest_job = job._to_rest_object() + generate_defaults(job, rest_job) + return rest_job + + +@to_rest_job_object.register(Command) +def _(command: Command) -> JobBaseData: + rest_job = command._to_job()._to_rest_object() + generate_defaults(command, rest_job) + return rest_job + + +@to_rest_job_object.register(Sweep) +def _(sweep: Sweep) -> JobBaseData: + rest_job = sweep._to_job()._to_rest_object() + generate_defaults(sweep, rest_job) + return rest_job diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_resource.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_resource.py index ff28af1d26d1..b21e10fe4044 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_resource.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_resource.py @@ -3,6 +3,7 @@ # --------------------------------------------------------- +import os from abc import ABC, abstractmethod from os import PathLike from typing import Dict, Optional, Union @@ -47,7 +48,10 @@ def __init__( # Hide read only properties in kwargs self._id = kwargs.pop("id", None) - self._base_path = kwargs.pop("base_path", "./") + # source path is added to display file location for validation error messages + # usually, base_path = Path(source_path).parent if source_path else os.getcwd() + self._source_path: Optional[str] = kwargs.pop("source_path", None) + self._base_path = kwargs.pop("base_path", os.getcwd()) self._creation_context = kwargs.pop("creation_context", None) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) @@ -140,6 +144,9 @@ def _get_arm_resource_and_params(self, **kwargs): param = self._to_arm_resource_param(**kwargs) return [(resource, param)] + def _set_source_path(self, value): + self._source_path = value + def __repr__(self) -> str: var_dict = {k.strip("_"): v for (k, v) in vars(self).items()} return f"{self.__class__.__name__}({var_dict})" diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py index aab175b32b2a..23ee9a2a1d0b 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_validation.py @@ -3,7 +3,9 @@ # --------------------------------------------------------- import copy +import json import logging +import os.path import typing from os import PathLike from pathlib import Path @@ -12,7 +14,7 @@ from marshmallow import ValidationError, Schema from azure.ai.ml._ml_exceptions import ValidationException, ErrorTarget, ErrorCategory from typing import List - +import strictyaml from azure.ai.ml._schema import PathAwareSchema from azure.ai.ml.constants import OperationStatus, BASE_PATH_CONTEXT_KEY from azure.ai.ml.entities._job.pipeline._attr_dict import try_get_non_arbitrary_attr_for_potential_attr_dict @@ -145,16 +147,17 @@ def invalid_data(self): @property def _single_message(self) -> str: - if not self.messages: + if not self._errors: return "" - if len(self.messages) == 1: - for field, message in self.messages.items(): + if 
len(self._errors) == 1:
+ for diagnostic in self._errors:
+ field, message = diagnostic.location.yaml_path, diagnostic.descriptor.message
if field == "*":
return message
else:
return field + ": " + message
else:
- return str(self.messages)
+ return json.dumps(self._to_dict(), indent=2)

@property
def passed(self):
@@ -198,6 +201,7 @@ def try_raise(
target=error_target,
error_category=error_category,
)
+ return self

def append_error(
self,
@@ -214,6 +218,14 @@ def append_error(
)
return self

+ def resolve_location_for_diagnostics(self, source_path: str):
+ """
+ Resolve the file location (path and line) for each diagnostic's yaml path.
+ """
+ resolver = YamlLocationResolver(source_path)
+ for diagnostic in self._errors + self._warnings:
+ diagnostic.location.local_path = resolver.resolve(diagnostic.location.yaml_path)
+
def append_warning(
self,
yaml_path: str = "*",
@@ -230,27 +242,31 @@ def append_warning(
return self

def _to_dict(self) -> typing.Dict[str, typing.Any]:
- messages = []
- for field, message in self.messages.items():
- messages.append(
- {
- "location": field,
- "value": pydash.get(self._target_obj, field, "NOT_FOUND"),
- "message": message,
- }
- )
result = {
"result": OperationStatus.SUCCEEDED if self.passed else OperationStatus.FAILED,
- "messages": messages,
}
- if self._warnings:
- result["warnings"] = self._warnings
+ for diagnostic_type, diagnostics in [
+ ("errors", self._errors),
+ ("warnings", self._warnings),
+ ]:
+ messages = []
+ for diagnostic in diagnostics:
+ message = {
+ "message": diagnostic.descriptor.message,
+ "path": diagnostic.location.yaml_path,
+ "value": pydash.get(self._target_obj, diagnostic.location.yaml_path, None),
+ }
+ if diagnostic.location.local_path:
+ message["location"] = str(diagnostic.location.local_path)
+ messages.append(message)
+ if messages:
+ result[diagnostic_type] = messages
return result

class SchemaValidatableMixin:
@classmethod
- def _create_empty_validation_result(cls):
+ def _create_empty_validation_result(cls) -> ValidationResult:
"""Create an empty validation result, so that callers do not need to import the private _ValidationResultBuilder class."""
return _ValidationResultBuilder.success()
@@ -340,6 +356,8 @@ def _schema_validate(self) -> ValidationResult:
class _ValidationResultBuilder:
+ UNKNOWN_MESSAGE = "Unknown field."
+
def __init__(self):
pass
@@ -348,7 +366,7 @@ def success(cls):
"""
Create a validation result with success status.
"""
- return cls.from_single_message()
+ return ValidationResult()
@classmethod
def from_single_message(cls, singular_error_message: str = None, yaml_path: str = "*", data: dict = None):
@@ -368,34 +386,107 @@ def from_single_message(cls, singular_error_message: str = None, yaml_path: str
def from_validation_error(cls, error: ValidationError):
"""
Create a validation result from a ValidationError, which will be raised in marshmallow.Schema.load.
+ Please use this function only for exceptions raised while loading a file.
+
+ param error: ValidationError raised by marshmallow.Schema.load.
"""
obj = cls.from_validation_messages(error.messages, data=error.data)
obj._valid_data = error.valid_data
return obj

@classmethod
- def from_validation_messages(cls, errors: typing.Dict, data: typing.Dict = None):
+ def from_validation_messages(cls, errors: typing.Dict, data: typing.Dict):
"""
Create a validation result from error messages, which will be returned by marshmallow.Schema.validate.
+
+ param errors: error messages returned by marshmallow.Schema.validate.
+ param data: serialized data to validate """ instance = ValidationResult(data=data) - unknown_msg = "Unknown field." errors = copy.deepcopy(errors) - for field, msgs in errors.items(): - if unknown_msg in msgs: - # Unknown field is not a real error, so we should remove it and append a warning. - msgs.remove(unknown_msg) - instance.append_warning(message=unknown_msg, yaml_path=field) - - if len(msgs) != 0: + cls._from_validation_messages_recursively(errors, [], instance) + return instance - def msg2str(msg): - if isinstance(msg, str): - return msg - elif isinstance(msg, dict) and len(msg) == 1 and "_schema" in msg and len(msg["_schema"]) == 1: - return msg["_schema"][0] - else: - return str(msg) + @classmethod + def _from_validation_messages_recursively(cls, errors, path_stack, instance: ValidationResult): + cur_path = ".".join(path_stack) if path_stack else "*" + # single error message + if isinstance(errors, dict) and "_schema" in errors: + instance.append_error( + message=";".join(errors["_schema"]), + yaml_path=cur_path, + ) + # errors on attributes + elif isinstance(errors, dict): + for field, msgs in errors.items(): + # fields.Dict + if field in ["key", "value"]: + cls._from_validation_messages_recursively(msgs, path_stack, instance) + else: + path_stack.append(field) + cls._from_validation_messages_recursively(msgs, path_stack, instance) + path_stack.pop() + # detailed error message + elif isinstance(errors, list) and all(isinstance(msg, str) for msg in errors): + if cls.UNKNOWN_MESSAGE in errors: + # Unknown field is not a real error, so we should remove it and append a warning. + errors.remove(cls.UNKNOWN_MESSAGE) + instance.append_warning(message=cls.UNKNOWN_MESSAGE, yaml_path=cur_path) + if errors: + instance.append_error(message=";".join(errors), yaml_path=cur_path) + # union field + elif isinstance(errors, list): + + def msg2str(msg): + if isinstance(msg, str): + return msg + elif isinstance(msg, dict) and len(msg) == 1 and "_schema" in msg and len(msg["_schema"]) == 1: + return msg["_schema"][0] + else: + return str(msg) - instance.append_error(message="; ".join(map(lambda x: msg2str(x), msgs)), yaml_path=field) - return instance + instance.append_error(message="; ".join(map(lambda x: msg2str(x), errors)), yaml_path=cur_path) + # unknown error + else: + instance.append_error(message=str(errors), yaml_path=cur_path) + + +class YamlLocationResolver: + def __init__(self, source_path): + self._source_path = source_path + + def resolve(self, yaml_path, source_path=None): + """Resolve the location of a yaml path starting from source_path.""" + source_path = source_path or self._source_path + if source_path is None or not os.path.isfile(source_path): + return None + if yaml_path is None or yaml_path == "*": + return source_path + + attrs = yaml_path.split(".") + attrs.reverse() + + return self._resolve_recursively(attrs, Path(source_path)) + + def _resolve_recursively(self, attrs: List[str], source_path: Path): + with open(source_path, encoding="utf-8") as f: + loaded_yaml = strictyaml.load(f.read()) + + while attrs: + attr = attrs.pop() + if attr in loaded_yaml: + loaded_yaml = loaded_yaml.get(attr) + else: + try: + # if current object is a path of a valid yaml file, try to resolve location in new source file + next_path = Path(loaded_yaml.value) + if not next_path.is_absolute(): + next_path = source_path.parent / next_path + return self._resolve_recursively(attrs, source_path=next_path) + except OSError: + pass + except TypeError: + pass + # if not, return current section + break 
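+ # loaded_yaml now points at the innermost yaml node we could resolve; the return
+ # below reports its position as "<file>#line <n>", using the start line that
+ # strictyaml records for each node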
+ return f"{source_path}#line {loaded_yaml.start_line}" diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py index ae86217a2eee..44a97aabde1a 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py @@ -8,7 +8,7 @@ from pathlib import Path import time from typing import Any, Dict, Iterable, Union, TYPE_CHECKING -from azure.ai.ml._azure_environments import ENDPOINT_URLS, _get_cloud_details, resource_to_scopes +from azure.ai.ml._azure_environments import _get_aml_resource_id_from_metadata, _resource_to_scopes from azure.core.polling import LROPoller from azure.identity import ChainedTokenCredential from azure.ai.ml._restclient.v2022_05_01 import ( @@ -266,8 +266,7 @@ def invoke( ) headers = EndpointInvokeFields.DEFAULT_HEADER - cloud_details = _get_cloud_details() - ml_audience_scopes = resource_to_scopes(cloud_details.get(ENDPOINT_URLS.AML_RESOURCE_ID)) + ml_audience_scopes = _resource_to_scopes(_get_aml_resource_id_from_metadata()) module_logger.debug(f"ml_audience_scopes used: `{ml_audience_scopes}`\n") key = self._credentials.get_token(*ml_audience_scopes).token headers[EndpointInvokeFields.AUTHORIZATION] = f"Bearer {key}" diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py index 236a31581f2e..b407f10ae261 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py @@ -209,7 +209,10 @@ def validate( component = self._refine_component(component) # local validation only for now - return component._validate(raise_error=raise_on_failure) + # TODO: use remote call to validate the entire component after MFE API is ready + result = component._validate(raise_error=raise_on_failure) + result.resolve_location_for_diagnostics(component._source_path) + return result @monitor_with_telemetry_mixin(logger, "Component.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update(self, component: Union[Component, types.FunctionType], **kwargs) -> Component: diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py index c32cb12615bc..ae0a23a82442 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py @@ -16,9 +16,14 @@ Optional, Union, ) -from azure.ai.ml._azure_environments import ENDPOINT_URLS, _get_cloud_details, resource_to_scopes +from azure.ai.ml._azure_environments import ( + _get_base_url_from_metadata, + _get_aml_resource_id_from_metadata, + _resource_to_scopes, +) from azure.ai.ml.entities._assets._artifacts.code import Code from azure.ai.ml.entities._job.job_name_generator import generate_job_name +from .._utils._experimental import experimental from ..entities._validation import ValidationResult, _ValidationResultBuilder try: @@ -96,8 +101,9 @@ from azure.ai.ml.entities._job.automl.automl_job import AutoMLJob from azure.ai.ml.sweep import SweepJob from azure.ai.ml.entities._job.base_job import _BaseJob +from azure.ai.ml.entities._job.to_rest_functions import to_rest_job_object from azure.ai.ml.entities._job.job import _is_pipeline_child_job -from azure.ai.ml.entities._inputs_outputs import Input, Output +from 
azure.ai.ml.entities._inputs_outputs import Input from azure.ai.ml.entities._builders import Command, BaseNode, Sweep, Parallel from azure.ai.ml.entities._job.pipeline.pipeline_job_settings import PipelineJobSettings from azure.ai.ml._artifacts._artifact_utilities import _upload_and_generate_remote_uri @@ -151,17 +157,20 @@ def __init__( logger.addHandler(kwargs.pop("app_insights_handler")) self._operation_2022_02_preview = service_client_02_2022_preview.jobs self._all_operations = all_operations - self._kwargs = kwargs self._stream_logs_until_completion = stream_logs_until_completion # Dataplane service clients are lazily created as they are needed self._runs_operations_client = None self._dataset_dataplane_operations_client = None self._model_dataplane_operations_client = None + # Kwargs to propagate to dataplane service clients + self._service_client_kwargs = kwargs.pop("_service_client_kwargs", {}) self._api_base_url = None self._container = "azureml" self._credential = credential self._orchestrators = OperationOrchestrator(self._all_operations, self._operation_scope) + self._kwargs = kwargs + @property def _compute_operations(self) -> ComputeOperations: return self._all_operations.get_operation( @@ -175,14 +184,18 @@ def _datastore_operations(self) -> "DatastoreOperations": @property def _runs_operations(self) -> RunOperations: if not self._runs_operations_client: - service_client_run_history = ServiceClientRunHistory(self._credential, base_url=self._api_url) + service_client_run_history = ServiceClientRunHistory( + self._credential, base_url=self._api_url, **self._service_client_kwargs + ) self._runs_operations_client = RunOperations(self._operation_scope, service_client_run_history) return self._runs_operations_client @property def _dataset_dataplane_operations(self) -> DatasetDataplaneOperations: if not self._dataset_dataplane_operations_client: - service_client_dataset_dataplane = ServiceClientDatasetDataplane(self._credential, base_url=self._api_url) + service_client_dataset_dataplane = ServiceClientDatasetDataplane( + self._credential, base_url=self._api_url, **self._service_client_kwargs + ) self._dataset_dataplane_operations_client = DatasetDataplaneOperations( self._operation_scope, service_client_dataset_dataplane ) @@ -191,7 +204,9 @@ def _dataset_dataplane_operations(self) -> DatasetDataplaneOperations: @property def _model_dataplane_operations(self) -> ModelDataplaneOperations: if not self._model_dataplane_operations_client: - service_client_model_dataplane = ServiceClientModelDataplane(self._credential, base_url=self._api_url) + service_client_model_dataplane = ServiceClientModelDataplane( + self._credential, base_url=self._api_url, **self._service_client_kwargs + ) self._model_dataplane_operations_client = ModelDataplaneOperations( self._operation_scope, service_client_model_dataplane ) @@ -313,37 +328,63 @@ def try_get_compute_arm_id(self, compute: Union[Compute, str]): raise ResourceNotFoundError("Not found compute with name {}".format(compute_name)) return None - @monitor_with_telemetry_mixin(logger, "Job.Validate", ActivityType.INTERNALCALL) - def _validate(self, job: Job, raise_on_failure: bool = False) -> ValidationResult: - """Validate a pipeline job. - if there are inline defined entities, e.g. Component, Environment & Code, they won't be created. + @experimental + @monitor_with_telemetry_mixin(logger, "Job.Validate", ActivityType.PUBLICAPI) + def validate(self, job: Job, *, raise_on_failure: bool = False, **kwargs) -> ValidationResult: + """Validate a job. 
Anonymous assets may be created if there are inline-defined entities, e.g. Component,
+ Environment & Code.
+ Only pipeline jobs are supported for now.

:param job: Job object to be validated.
:type job: Job
+ :param raise_on_failure: Whether to raise an error when there are validation errors.
+ :type raise_on_failure: bool
:return: a ValidationResult object containing all found errors.
:rtype: ValidationResult
"""
- # validation is open for PipelineJob only for now
+ git_code_validation_result = _ValidationResultBuilder.success()
+ # TODO: move this check to Job._validate after validation is supported for all job types
+ # If private features are enabled and the job has a code value of type str, we need to check
+ # that it is a valid git path. Otherwise we should throw a ValidationException
+ # saying that the code value is not valid
+ if (
+ hasattr(job, "code")
+ and job.code is not None
+ and isinstance(job.code, str)
+ and job.code.startswith(GIT_PATH_PREFIX)
+ and not is_private_preview_enabled()
+ ):
+ git_code_validation_result.append_error(
+ message=f"Invalid code value: {job.code}. Git paths are not supported.",
+ yaml_path="code",
+ )
+
if not isinstance(job, PipelineJob):
- return _ValidationResultBuilder.success()
+ return git_code_validation_result.try_raise(error_target=ErrorTarget.JOB, raise_error=raise_on_failure)

- job._validate(raise_error=True)
- try:
- job.compute = self.try_get_compute_arm_id(job.compute)
- for node in job.jobs.values():
- node.compute = self.try_get_compute_arm_id(node.compute)
- return _ValidationResultBuilder.success()
- except Exception as e:
- if raise_on_failure:
- raise
- else:
- logger.warning(f"Validation failed: {e}")
- return _ValidationResultBuilder.from_single_message(singular_error_message=str(e), yaml_path="compute")
+ validation_result = job._validate(raise_error=raise_on_failure)
+ validation_result.merge_with(git_code_validation_result)
+ # return fast to avoid a remote call if local validation did not pass
+ # TODO: use remote call to validate the entire job after MFE API is ready
+ if validation_result.passed:
+ try:
+ job.compute = self.try_get_compute_arm_id(job.compute)
+ except Exception as e:
+ validation_result.append_error(yaml_path="compute", message=str(e))
+
+ for node_name, node in job.jobs.items():
+ try:
+ node.compute = self.try_get_compute_arm_id(node.compute)
+ except Exception as e:
+ validation_result.append_error(yaml_path=f"jobs.{node_name}.compute", message=str(e))
+
+ validation_result.resolve_location_for_diagnostics(job._source_path)
+ return validation_result.try_raise(raise_error=raise_on_failure, error_target=ErrorTarget.PIPELINE)

@monitor_with_telemetry_mixin(logger, "Job.CreateOrUpdate", ActivityType.PUBLICAPI)
def create_or_update(
self,
- job: Union[Job, BaseNode],
+ job: Job,
*,
description: str = None,
compute: str = None,
@@ -353,7 +394,7 @@ def create_or_update(
) -> Job:
"""Create or update a job. If there are inline-defined entities, e.g. Environment, Code, they'll be created together with the job.

- :param Union[Job,BaseNode] job: Job definition or object which can be translate to a job.
+ :param Job job: Job definition or object which can be translated to a job.
:param description: Description to overwrite when submitting the pipeline.
:type description: str
:param compute: Compute target to overwrite when submitting the pipeline.
:type compute: str
@@ -365,11 +406,9 @@ def create_or_update(
:return: Created or updated job.
:rtype: Job """ - if isinstance(job, BaseNode): + if isinstance(job, BaseNode) and not isinstance(job, Command): # Command objects can be used directly job = job._to_job() - self._generate_job_defaults(job) - # Set job properties before submission if description is not None: job.description = description @@ -383,20 +422,7 @@ def create_or_update( if job.compute == LOCAL_COMPUTE_TARGET: job.environment_variables[COMMON_RUNTIME_ENV_VAR] = "true" - # If private features are enable and job has code value of type str we need to check - # that it is a valid git path case. Otherwise we should throw a ValidationException - # saying that the code value is not a valid code value - if ( - hasattr(job, "code") - and job.code is not None - and isinstance(job.code, str) - and job.code.startswith(GIT_PATH_PREFIX) - and not is_private_preview_enabled() - ): - msg = f"Invalid code value: {job.code}. Git paths are not supported." - raise ValidationException(message=msg, no_personal_data_message=msg) - - self._validate(job, raise_on_failure=True) + self.validate(job, raise_on_failure=True) # Create all dependent resources self._resolve_arm_id_or_upload_dependencies(job) @@ -407,7 +433,7 @@ def create_or_update( # MFE does not allow existing properties to be updated, only for new props to be added if not any(prop_name in job.properties for prop_name in git_props.keys()): job.properties = {**job.properties, **git_props} - rest_job_resource = job._to_rest_object() + rest_job_resource = to_rest_job_object(job) # Make a copy of self._kwargs instead of contaminate the original one kwargs = dict(**self._kwargs) @@ -695,17 +721,6 @@ def _get_workspace_url(self, url_key="history"): all_urls = json.loads(download_text_from_url(discovery_url, create_session_with_retry())) return all_urls[url_key] - def _generate_job_defaults(self, job: Job) -> None: - # Default name to a generated user friendly name. - if not job.name: - job.name = generate_job_name() - - # Default experiment to base path - if not job.experiment_name: - job.experiment_name = Path("./").resolve().stem.replace(" ", "") or "Default" - - job.display_name = job.display_name or job.name - def _resolve_arm_id_or_upload_dependencies(self, job: Job) -> None: """This method converts name or name:version to ARM id. Or it registers/uploads nested dependencies. @@ -733,6 +748,13 @@ def _resolve_arm_id_or_upload_dependencies(self, job: Job) -> None: self._resolve_automl_job_inputs(job_instance, job._base_path, inside_pipeline=True) elif isinstance(job, AutoMLJob): self._resolve_automl_job_inputs(job, job._base_path, inside_pipeline=False) + elif isinstance(job, Command): + # TODO: switch to use inputs of Command objects, once the inputs/outputs building logic is removed from the BaseNode constructor. + try: + self._resolve_job_inputs(job._job_inputs.values(), job._base_path) + except AttributeError: + # If the job object doesn't have "inputs" attribute, we don't need to resolve. E.g. 
AutoML jobs + pass else: try: self._resolve_job_inputs(job.inputs.values(), job._base_path) @@ -908,7 +930,7 @@ def _resolve_arm_id_or_azureml_id(self, job: Job, resolver: Callable) -> Job: if isinstance(job, _BaseJob): job.compute = self._resolve_compute_id(resolver, job.compute) - elif isinstance(job, CommandJob): + elif isinstance(job, Command): job = self._resolve_arm_id_for_command_job(job, resolver) elif isinstance(job, ParallelJob): job = self._resolve_arm_id_for_parallel_job(job, resolver) @@ -928,7 +950,7 @@ def _resolve_arm_id_or_azureml_id(self, job: Job, resolver: Callable) -> Job: ) return job - def _resolve_arm_id_for_command_job(self, job: Job, resolver: Callable) -> Job: + def _resolve_arm_id_for_command_job(self, job: Command, resolver: Callable) -> Job: """Resolve arm_id for CommandJob""" if job.code is not None and is_registry_id_for_resource(job.code): msg = f"Format not supported for code asset: {job.code}" @@ -1041,8 +1063,7 @@ def _append_tid_to_studio_url(self, job: Job) -> None: try: studio_endpoint = job.services.get("Studio", None) studio_url = studio_endpoint.endpoint - cloud_details = _get_cloud_details() - default_scopes = resource_to_scopes(cloud_details.get(ENDPOINT_URLS.RESOURCE_MANAGER_ENDPOINT)) + default_scopes = _resource_to_scopes(_get_base_url_from_metadata()) module_logger.debug(f"default_scopes used: `{default_scopes}`\n") # Extract the tenant id from the credential using PyJWT decode = jwt.decode( @@ -1062,8 +1083,7 @@ def _set_defaults_to_component(self, component: Union[str, Component], settings: pass def _set_headers_with_user_aml_token(self, kwargs) -> Dict[str, str]: - cloud_details = _get_cloud_details() - azure_ml_scopes = resource_to_scopes(cloud_details.get(ENDPOINT_URLS.AML_RESOURCE_ID)) + azure_ml_scopes = _resource_to_scopes(_get_aml_resource_id_from_metadata()) module_logger.debug(f"azure_ml_scopes used: `{azure_ml_scopes}`\n") aml_token = self._credential.get_token(*azure_ml_scopes).token headers = kwargs.pop("headers", {}) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt b/sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt index 8f2d575de2dc..239624fb0c8f 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt @@ -8,6 +8,8 @@ azure-mgmt-core<2.0.0,>=1.2.0 marshmallow<4.0.0,>=3.5 jsonschema<5.0.0,>=4.0.0 tqdm<=4.63.0 +# Used for PR 825138 +strictyaml<=1.6.1 # Used for PR 718512 colorama<=0.4.4 pyjwt<3.0.0 From 54f2ced5c2e08552af3365a4df834a06d53ba93c Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Tue, 5 Jul 2022 09:43:29 -0700 Subject: [PATCH 04/19] Revised version number --- sdk/ml/azure-ai-ml/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ml/azure-ai-ml/CHANGELOG.md b/sdk/ml/azure-ai-ml/CHANGELOG.md index 86ba2e85c293..6e8b04848ae0 100644 --- a/sdk/ml/azure-ai-ml/CHANGELOG.md +++ b/sdk/ml/azure-ai-ml/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 2.6.0 (2022-07-06) +## 0.1.0b6 (2022-07-06) ### Features Added From 85e0ee634d10b65b65c4bafcac9352cdae5e923d Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Tue, 5 Jul 2022 09:44:13 -0700 Subject: [PATCH 05/19] Typo fix --- sdk/ml/azure-ai-ml/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ml/azure-ai-ml/CHANGELOG.md b/sdk/ml/azure-ai-ml/CHANGELOG.md index 6e8b04848ae0..8d0f22bb5235 100644 --- a/sdk/ml/azure-ai-ml/CHANGELOG.md +++ b/sdk/ml/azure-ai-ml/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 0.1.0b6 (2022-07-06) +## 0.1.0b5 
(2022-07-06) ### Features Added From 27ef72bbfbcb80e5bfb52300ce7f6ed95350d462 Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Tue, 5 Jul 2022 10:48:00 -0700 Subject: [PATCH 06/19] Added missing dependency for ml --- sdk/ml/azure-ai-ml/setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/ml/azure-ai-ml/setup.py b/sdk/ml/azure-ai-ml/setup.py index 3989b5eb9da2..9d144666a6cb 100644 --- a/sdk/ml/azure-ai-ml/setup.py +++ b/sdk/ml/azure-ai-ml/setup.py @@ -75,6 +75,8 @@ "marshmallow<4.0.0,>=3.5", "jsonschema<5.0.0,>=4.0.0", "tqdm<=4.63.0", + # Used for PR 825138 + "strictyaml<=1.6.1", # Used for PR 718512 "colorama<=0.4.4", "pyjwt<=2.3.0", From 9140b303228a507817d159ddc5f53910d081e856 Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Wed, 6 Jul 2022 17:02:38 -0700 Subject: [PATCH 07/19] Replaced failing test files for ml --- .../unittests/test_command_job_entity.py | 7 +- .../unittests/test_command_job_schema.py | 82 +++++- .../unittests/test_component_schema.py | 89 ++++--- .../test_parallel_component_schema.py | 9 +- .../unittests/test_job_operations.py | 9 +- .../unittests/test_pipeline_job_schema.py | 238 ++++++++++-------- .../unittests/test_sweep_job_schema.py | 27 +- 7 files changed, 278 insertions(+), 183 deletions(-) diff --git a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_entity.py b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_entity.py index 1098db8e8f46..3d117737a977 100644 --- a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_entity.py +++ b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_entity.py @@ -14,6 +14,7 @@ from azure.ai.ml import Input from azure.ai.ml._ml_exceptions import ValidationException from azure.ai.ml import MpiDistribution +from azure.ai.ml.entities._job.to_rest_functions import to_rest_job_object from collections import OrderedDict @@ -42,17 +43,17 @@ def test_from_rest_legacy1_command(self, mock_workspace_scope: OperationScope, f resource = json.load(f) rest_job = JobBaseData.deserialize(resource) print(type(rest_job.properties)) - job = CommandJob._from_rest_object(rest_job) + job = Job._from_rest_object(rest_job) assert job.command == "echo ${{inputs.filePath}} && ls ${{inputs.dirPath}}" def test_missing_input_raises(self): with open("./tests/test_configs/command_job/rest_command_job_env_var_command.json", "r") as f: resource = json.load(f) rest_job = JobBaseData.deserialize(resource) - job = CommandJob._from_rest_object(rest_job) + job = Job._from_rest_object(rest_job) job.command = "echo ${{inputs.missing_input}}" with pytest.raises(ValidationException): - job._to_rest_object() + to_rest_job_object(job) def test_calling_command_job_constructor_with_promoted_properties(self): basic_job = CommandJob( diff --git a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py index dd43d518b2ef..e35e4c05cefe 100644 --- a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py +++ b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py @@ -1,8 +1,19 @@ from azure.ai.ml._schema import CommandJobSchema from azure.ai.ml._utils.utils import load_yaml, is_valid_uuid -from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY -from azure.ai.ml.entities import CommandJob +from azure.ai.ml.constants import InputOutputModes, BASE_PATH_CONTEXT_KEY, AssetTypes +from azure.ai.ml.entities import CommandJob, Job from azure.ai.ml.entities._inputs_outputs import Input 
+from azure.ai.ml.entities._job.to_rest_functions import to_rest_job_object +from azure.ai.ml._restclient.v2022_02_01_preview.models import ( + InputDeliveryMode, + JobInputType, + JobOutputType, + OutputDeliveryMode, + UriFolderJobOutput as RestUriFolderJobOutput, + AmlToken, + UserIdentity, + ManagedIdentity, +) from pathlib import Path from azure.ai.ml import load_job from marshmallow.exceptions import ValidationError @@ -40,7 +51,7 @@ def test_distributions_roundtrip(self): cfg = yaml.safe_load(f) internal_representation: CommandJob = CommandJob(**schema.load(cfg)) rest_intermediate = internal_representation._to_rest_object() - internal_obj = CommandJob._from_rest_object(rest_intermediate) + internal_obj = CommandJob._load_from_rest(rest_intermediate) internal_obj._id = "test-arm-id" reconstructed_yaml = schema.dump(internal_obj) assert reconstructed_yaml["distribution"]["type"].lower() == cfg["distribution"]["type"].lower() @@ -84,6 +95,19 @@ def test_deserialize_inputs_dataset(self): source = internal_representation._to_rest_object() assert source.properties.inputs["test1"].uri == target["inputs"]["test1"]["path"] + def test_deserialize_inputs_dataset_short_form(self): + test_path = "./tests/test_configs/command_job/command_job_inputs_dataset_short_form_test.yml" + with open(test_path, "r") as f: + cfg = yaml.safe_load(f) + context = {BASE_PATH_CONTEXT_KEY: Path(test_path).parent} + schema = CommandJobSchema(context=context) + internal_representation: CommandJob = CommandJob(**schema.load(cfg)) + + assert internal_representation.inputs + assert internal_representation.inputs["test1"].type == "uri_folder" + assert internal_representation.inputs["test1"].mode == "ro_mount" + assert internal_representation.inputs["test1"].path == "test1_dataset@latest" + def test_anonymous_assets(self): test_path = "./tests/test_configs/command_job/inlined_assets.yaml" with open(test_path, "r") as f: @@ -100,7 +124,7 @@ def test_anonymous_assets(self): assert internal_representation.environment.name != envName assert internal_representation.environment.name == "CliV2AnonymousEnvironment" assert internal_representation.environment._is_anonymous - assert internal_representation.environment.version == "559c904a18d86cc54f2f6a9d6ac26c0d" + assert internal_representation.environment.version == "79a6980e14dbe0dac98ed0e902413f88" assert internal_representation.inputs["test1"].path == input_path # Validate default dataset is mounted @@ -179,3 +203,53 @@ def test_input_data_path_resolution(self): internal_representation: CommandJob = CommandJob(**schema.load(cfg)) assert internal_representation.inputs["test1"].path == "../python/sample1.csv" + + def test_inputs_types_command_job(self): + original_entity = load_job(Path("./tests/test_configs/command_job/command_job_input_types.yml")) + rest_representation = to_rest_job_object(original_entity) + reconstructed_entity = Job._from_rest_object(rest_representation) + + assert original_entity.inputs["test_dataset"].mode == InputOutputModes.RO_MOUNT + assert rest_representation.properties.inputs["test_dataset"].job_input_type == JobInputType.URI_FOLDER + assert rest_representation.properties.inputs["test_dataset"].mode == InputDeliveryMode.READ_ONLY_MOUNT + assert reconstructed_entity.inputs["test_dataset"].mode == InputOutputModes.RO_MOUNT + + assert original_entity.inputs["test_url"].mode == InputOutputModes.RO_MOUNT + assert original_entity.inputs["test_url"].type == AssetTypes.URI_FILE + assert original_entity.inputs["test_url"].path == "azureml://fake/url.json" + 
assert rest_representation.properties.inputs["test_url"].job_input_type == JobInputType.URI_FILE + assert rest_representation.properties.inputs["test_url"].mode == InputDeliveryMode.READ_ONLY_MOUNT + assert rest_representation.properties.inputs["test_url"].uri == "azureml://fake/url.json" + assert reconstructed_entity.inputs["test_url"].mode == InputOutputModes.RO_MOUNT + assert reconstructed_entity.inputs["test_url"].type == AssetTypes.URI_FILE + assert reconstructed_entity.inputs["test_url"].path == "azureml://fake/url.json" + + # assert original_entity.inputs["test_string_literal"] == "literal string" + assert rest_representation.properties.inputs["test_string_literal"].job_input_type == JobInputType.LITERAL + assert rest_representation.properties.inputs["test_string_literal"].value == "literal string" + # assert reconstructed_entity.inputs["test_string_literal"] == "literal string" + + # assert original_entity.inputs["test_literal_valued_int"] == 42 + assert rest_representation.properties.inputs["test_literal_valued_int"].job_input_type == JobInputType.LITERAL + assert rest_representation.properties.inputs["test_literal_valued_int"].value == "42" + # assert reconstructed_entity.inputs["test_literal_valued_int"] == "42" + + def test_outputs_types_standalone_jobs(self): + original_entity = load_job(Path("./tests/test_configs/command_job/command_job_output_types.yml")) + rest_representation = to_rest_job_object(original_entity) + dummy_default = RestUriFolderJobOutput(uri="azureml://foo", mode=OutputDeliveryMode.READ_WRITE_MOUNT) + rest_representation.properties.outputs["default"] = dummy_default + reconstructed_entity = Job._from_rest_object(rest_representation) + + # assert original_entity.outputs["test1"] is None + assert rest_representation.properties.outputs["test1"].job_output_type == JobOutputType.URI_FOLDER + assert rest_representation.properties.outputs["test1"].mode == OutputDeliveryMode.READ_WRITE_MOUNT + + assert original_entity.outputs["test2"].mode == InputOutputModes.UPLOAD + assert rest_representation.properties.outputs["test2"].job_output_type == JobOutputType.URI_FOLDER + assert rest_representation.properties.outputs["test2"].mode == OutputDeliveryMode.UPLOAD + + assert original_entity.outputs["test3"].mode == InputOutputModes.RW_MOUNT + assert rest_representation.properties.outputs["test3"].job_output_type == JobOutputType.URI_FOLDER + assert rest_representation.properties.outputs["test3"].mode == OutputDeliveryMode.READ_WRITE_MOUNT + assert reconstructed_entity.outputs["default"].path == "azureml://foo" diff --git a/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py b/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py index 7a16e9eb0c38..aa2f0d373abf 100644 --- a/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py +++ b/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py @@ -1,3 +1,4 @@ +import copy from typing import Union from unittest import mock @@ -27,7 +28,10 @@ from azure.ai.ml.entities._assets import Code from azure.ai.ml._ml_exceptions import ValidationException, ErrorCategory, ErrorTarget -components_dir = Path("./tests/test_configs/components/") +from .._util import _COMPONENT_TIMEOUT_SECOND + +tests_root_dir = Path(__file__).parent.parent.parent.parent +components_dir = tests_root_dir / "test_configs/components/" def load_component_entity_from_yaml( @@ -94,6 +98,7 @@ def load_component_entity_from_rest_json(path) -> CommandComponent: return internal_component 
+@pytest.mark.timeout(_COMPONENT_TIMEOUT_SECOND) @pytest.mark.unittest class TestCommandComponent: def test_serialize_deserialize_basic(self, mock_machinelearning_client: MLClient): @@ -102,12 +107,6 @@ def test_serialize_deserialize_basic(self, mock_machinelearning_client: MLClient rest_path = "./tests/test_configs/components/helloworld_component_rest.json" target_entity = load_component_entity_from_rest_json(rest_path) - # backend add optional=False and port name to inputs/outputs so we add it here manually - for name, input in component_entity.inputs.items(): - input["optional"] = str(input.get("optional", False)) - input["name"] = name - for name, output in component_entity.outputs.items(): - output["name"] = name # skip check code and environment component_dict = component_entity._to_dict() assert component_dict["id"] @@ -136,12 +135,6 @@ def test_serialize_deserialize_input_types(self, mock_machinelearning_client: ML rest_path = "./tests/test_configs/components/input_types_component_rest.json" target_entity = load_component_entity_from_rest_json(rest_path) - # backend add optional=False and port name to inputs/outputs so we add it here manually - for name, input in component_entity.inputs.items(): - input["optional"] = "False" - input["name"] = name - for name, output in component_entity.outputs.items(): - output["name"] = name # skip check code and environment component_dict = pydash.omit(dict(component_entity._to_dict()), "command", "environment", "code", "id") expected_dict = pydash.omit( @@ -167,31 +160,33 @@ def test_override_params(self, mock_machinelearning_client: MLClient): ] } component_entity = load_component_entity_from_yaml(test_path, mock_machinelearning_client, context) - assert component_entity.inputs == { + inputs_dict = {k: v._to_dict() for k, v in component_entity.inputs.items()} + assert inputs_dict == { "component_in_number": { "type": "number", - "default": "10.99", + "default": 10.99, "description": "A number", "optional": True, }, "component_in_path": { "type": "uri_folder", "description": "override component_in_path", + "mode": "ro_mount", }, } override_inputs = { - "component_in_path": {"type": "uri_folder"}, - "component_in_number": {"max": "1.0", "min": "0.0", "type": "number"}, + "component_in_path": {"type": "uri_folder", "mode": "ro_mount"}, + "component_in_number": {"max": 1.0, "min": 0.0, "type": "number"}, "override_param3": {"optional": True, "type": "integer"}, "override_param4": {"default": False, "type": "boolean"}, "override_param5": {"default": "str", "type": "string"}, "override_param6": {"enum": ["enum1", "enum2", "enum3"], "type": "string"}, } - context = {PARAMS_OVERRIDE_KEY: [{"inputs": override_inputs}]} + context = {PARAMS_OVERRIDE_KEY: [{"inputs": copy.deepcopy(override_inputs)}]} component_entity = load_component_entity_from_yaml(test_path, mock_machinelearning_client, context) - - assert component_entity.inputs == override_inputs + inputs_dict = {k: v._to_dict() for k, v in component_entity.inputs.items()} + assert inputs_dict == override_inputs def test_serialize_deserialize_with_code_path(self, mock_machinelearning_client: MLClient): test_path = "./tests/test_configs/components/basic_component_code_local_path.yml" @@ -250,18 +245,18 @@ def test_anonymous_component_same_name(self, mock_machinelearning_client: MLClie # scenario 1: same component interface, same code test_path1 = "./tests/test_configs/components/basic_component_code_local_path.yml" component_entity1 = load_component_entity_from_yaml(test_path1, 
mock_machinelearning_client, is_anonymous=True) - component_name1 = component_entity1._get_anonymous_hash() + component_hash1 = component_entity1._get_anonymous_hash() component_entity2 = load_component_entity_from_yaml(test_path1, mock_machinelearning_client, is_anonymous=True) - component_name2 = component_entity2._get_anonymous_hash() - assert component_name1 == component_name2 + component_hash2 = component_entity2._get_anonymous_hash() + assert component_hash1 == component_hash2 # scenario 2: same component, no code test_path2 = "./tests/test_configs/components/helloworld_component.yml" component_entity1 = load_component_entity_from_yaml(test_path2, mock_machinelearning_client, is_anonymous=True) - component_name1 = component_entity1._get_anonymous_hash() + component_hash1 = component_entity1._get_anonymous_hash() component_entity2 = load_component_entity_from_yaml(test_path2, mock_machinelearning_client, is_anonymous=True) - component_name2 = component_entity2._get_anonymous_hash() - assert component_name1 == component_name2 + component_hash2 = component_entity2._get_anonymous_hash() + assert component_hash1 == component_hash2 # scenario 3: same component interface, different code code_path1 = "./tests/test_configs/components/basic_component_code_local_path.yml" @@ -275,15 +270,15 @@ def test_anonymous_component_same_name(self, mock_machinelearning_client: MLClie is_anonymous=True, fields_to_override=data1, ) - component_name1 = component_entity1._get_anonymous_hash() + component_hash1 = component_entity1._get_anonymous_hash() component_entity2 = load_component_entity_from_yaml( test_path1, mock_machinelearning_client, is_anonymous=True, fields_to_override=data2, ) - component_name2 = component_entity2._get_anonymous_hash() - assert component_name1 != component_name2 + component_hash2 = component_entity2._get_anonymous_hash() + assert component_hash1 != component_hash2 # scenario 4: different component interface, same code data1 = {"display_name": "CommandComponentBasic1"} @@ -295,15 +290,15 @@ def test_anonymous_component_same_name(self, mock_machinelearning_client: MLClie is_anonymous=True, fields_to_override=data1, ) - component_name1 = component_entity1._get_anonymous_hash() + component_hash1 = component_entity1._get_anonymous_hash() component_entity2 = load_component_entity_from_yaml( test_path1, mock_machinelearning_client, is_anonymous=True, fields_to_override=data2, ) - component_name2 = component_entity2._get_anonymous_hash() - assert component_name1 != component_name2 + component_hash2 = component_entity2._get_anonymous_hash() + assert component_hash1 != component_hash2 def test_component_name_validate(self): invalid_component_names = [ @@ -406,14 +401,34 @@ def _check_validation_result(new_asset, should_fail=False) -> None: # existent path _check_validation_result("../python") - def test_component_validate_multiple_invalid_fields(self) -> None: + def test_component_validate_multiple_invalid_fields(self, mock_machinelearning_client: MLClient) -> None: component_path = "./tests/test_configs/components/helloworld_component.yml" + location_str = str(Path(component_path)) component: CommandComponent = load_component(path=component_path) component.name = None component.command += " & echo ${{inputs.non_existent}} & echo ${{outputs.non_existent}}" - validation_result = component._validate() + validation_result = mock_machinelearning_client.components.validate(component) assert validation_result.passed is False - assert validation_result.messages == { - "name": "Missing data for 
required field.", - "command": "Invalid data binding expression: inputs.non_existent, outputs.non_existent", + assert validation_result._to_dict() == { + "errors": [ + { + "location": f"{location_str}#line 3", + "message": "Missing data for required field.", + "path": "name", + "value": None, + }, + { + "location": f"{location_str}#line 28", + "message": "Invalid data binding expression: inputs.non_existent, outputs.non_existent", + "path": "command", + "value": "echo Hello World & echo " + "[${{inputs.component_in_number}}] & echo " + "${{inputs.component_in_path}} & echo " + "${{outputs.component_out_path}} > " + "${{outputs.component_out_path}}/component_in_number & " + "echo ${{inputs.non_existent}} & echo " + "${{outputs.non_existent}}", + }, + ], + "result": "Failed", } diff --git a/sdk/ml/azure-ai-ml/tests/component/unittests/test_parallel_component_schema.py b/sdk/ml/azure-ai-ml/tests/component/unittests/test_parallel_component_schema.py index d0e4f5a1aaab..d01497b06e79 100644 --- a/sdk/ml/azure-ai-ml/tests/component/unittests/test_parallel_component_schema.py +++ b/sdk/ml/azure-ai-ml/tests/component/unittests/test_parallel_component_schema.py @@ -16,6 +16,8 @@ from azure.ai.ml.entities import ParallelComponent from azure.ai.ml.entities._assets import Code +from .._util import _COMPONENT_TIMEOUT_SECOND + def load_component_entity_from_yaml( path: str, @@ -81,6 +83,7 @@ def load_component_entity_from_rest_json(path) -> ParallelComponent: return internal_component +@pytest.mark.timeout(_COMPONENT_TIMEOUT_SECOND) @pytest.mark.unittest class TestParallelComponent: def test_serialize_deserialize_basic(self, mock_machinelearning_client: MLClient): @@ -89,12 +92,6 @@ def test_serialize_deserialize_basic(self, mock_machinelearning_client: MLClient rest_path = "./tests/test_configs/components/basic_parallel_component_score_rest.json" target_entity = load_component_entity_from_rest_json(rest_path) - # backend add optional=False and port name to inputs/outputs so we add it here manually - for name, input in component_entity.inputs.items(): - input["optional"] = str(input.get("optional", False)) - input["name"] = name - for name, output in component_entity.outputs.items(): - output["name"] = name # skip check code and environment component_dict = component_entity._to_dict() assert component_dict["id"] diff --git a/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py b/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py index 5e64fda9765d..f638e34e504e 100644 --- a/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py +++ b/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py @@ -10,6 +10,9 @@ import pytest import vcr + +from azure.ai.ml.operations._code_operations import CodeOperations +from azure.ai.ml.operations._run_operations import RunOperations from .test_vcr_utils import before_record_cb, vcr_header_filters from azure.identity import DefaultAzureCredential from azure.ai.ml import MLClient, load_job @@ -20,8 +23,6 @@ JobOperations, WorkspaceOperations, ) -from azure.ai.ml.operations._code_operations import CodeOperations -from azure.ai.ml.operations._run_operations import RunOperations from azure.ai.ml.operations._job_ops_helper import get_git_properties from azure.ai.ml.operations._run_history_constants import RunHistoryConstants from azure.ai.ml._scope_dependent_operations import OperationScope @@ -180,11 +181,11 @@ def test_submit_command_job(self, mock_method, mock_job_operation: JobOperations 
@pytest.mark.skip(reason="Function under test no longer returns Job as output") def test_command_job_resolver_with_virtual_cluster(self, mock_job_operation: JobOperations) -> None: expected = "/subscriptions/test_subscription/resourceGroups/test_resource_group/providers/Microsoft.MachineLearningServices/virtualclusters/testvcinmaster" - job = Job.load(path="tests/test_configs/command_job/command_job_with_virtualcluster.yaml") + job = load_job(path="tests/test_configs/command_job/command_job_with_virtualcluster.yaml") mock_job_operation._resolve_arm_id_or_upload_dependencies(job) assert job.compute == expected - job = Job.load(path="tests/test_configs/command_job/command_job_with_virtualcluster_2.yaml") + job = load_job(path="tests/test_configs/command_job/command_job_with_virtualcluster_2.yaml") mock_job_operation._resolve_arm_id_or_upload_dependencies(job) assert job.compute == expected diff --git a/sdk/ml/azure-ai-ml/tests/pipeline_job/unittests/test_pipeline_job_schema.py b/sdk/ml/azure-ai-ml/tests/pipeline_job/unittests/test_pipeline_job_schema.py index 4dabf16d7103..39ee281d0f30 100644 --- a/sdk/ml/azure-ai-ml/tests/pipeline_job/unittests/test_pipeline_job_schema.py +++ b/sdk/ml/azure-ai-ml/tests/pipeline_job/unittests/test_pipeline_job_schema.py @@ -1,6 +1,7 @@ import re import json from io import StringIO +from pathlib import Path import yaml import pydash @@ -26,7 +27,7 @@ from azure.ai.ml.constants import ( ComponentJobConstants, PipelineConstants, - ANONYMOUS_COMPONENT_NAME + ANONYMOUS_COMPONENT_NAME, ) from azure.ai.ml._utils.utils import load_yaml, is_data_binding_expression from azure.ai.ml.constants import ARM_ID_PREFIX @@ -41,16 +42,15 @@ RecurrenceSchedule as RestRecurrenceSchedule, ) -from .._util import _check_common_schedule_fields, _check_recurrence_schedule_fields +from .._util import _check_common_schedule_fields, _check_recurrence_schedule_fields, _PIPELINE_JOB_TIMEOUT_SECOND def assert_the_same_path(path1, path2): - from pathlib import Path - - assert Path(path1) == Path(path2) + assert Path(path1).resolve() == Path(path2).resolve() @pytest.mark.usefixtures("enable_pipeline_private_preview_features") +@pytest.mark.timeout(_PIPELINE_JOB_TIMEOUT_SECOND) @pytest.mark.unittest class TestPipelineJobSchema: def test_simple_deserialize(self): @@ -564,6 +564,7 @@ def assert_inline_component(self, component_job, component_dict): assert isinstance(component_job.component, (CommandComponent, ParallelComponent)) component = component_job.component or component_job.trial assert component._is_anonymous + # hash will be generated before create_or_update, so can't check it in unit tests assert list(component.inputs.keys()) == list(component_dict.get("inputs", {}).keys()) assert list(component.outputs.keys()) == list(component_dict.get("outputs", {}).keys()) @@ -685,101 +686,88 @@ def mock_get_asset_arm_id(*args, **kwargs): else: assert job.compute == "xxx" - def test_inline_command_job_with_input_bindings(self, mock_machinelearning_client: MLClient, mocker: MockFixture): - test_path = "tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml" - yaml_obj = load_yaml(test_path) - job = load_job(test_path) - - # check when top level input not exist - with pytest.raises(Exception) as e: - load_job( - test_path, - params_override=[{"jobs.hello_world_inline_commandjob_1.inputs.test1": "${{parent.inputs.not_found}}"}], - ) - assert "Failed to find top level definition for input binding" in str(e.value) - - # Check that all inputs are present and are of type 
Input or are literals - for index, input_name in enumerate(yaml_obj["inputs"].keys()): - job_obj_input = job.inputs.get(input_name, None) - assert job_obj_input - assert isinstance(job_obj_input, PipelineInput) - job_obj_input = job_obj_input._to_job_input() - if index == 0: - assert isinstance(job_obj_input, Input) - elif index == 1: - assert isinstance(job_obj_input, Input) - else: - assert isinstance(job_obj_input, int) - # Check that all inputs are present in the jobs - for job_name, job_value in yaml_obj["jobs"].items(): - job_obj = job.jobs.get(job_name, None) - assert job_obj is not None - for input_name, input_value in job_obj._build_inputs().items(): - # check for input ports or literal - if isinstance(input_value, str): - assert isinstance(job_obj.inputs[input_name]._data, str) - if isinstance(input_value, int): - assert isinstance(job_obj.inputs[input_name]._data, int) - - # "Upload" the dependencies so that the dataset serialization behavior can be verified - mocker.patch( - "azure.ai.ml.operations._operation_orchestrator.OperationOrchestrator.get_asset_arm_id", - return_value="xxx", - ) - mock_machinelearning_client.jobs._resolve_arm_id_or_upload_dependencies(job) - # Convert to REST object and check that all inputs were turned into data inputs - rest_job = job._to_rest_object() - rest_job_properties: RestPipelineJob = rest_job.properties - rest_component_jobs = rest_job_properties.jobs - - # Test that each job's inputs were serialized properly in the REST translation - expected_inputs = { - "hello_world_inline_commandjob_1": { - "literal_input": {"job_input_type": "Literal", "value": "2"}, - "test1": { - "job_input_type": "Literal", - "value": "${{parent.inputs.job_data_path}}", - }, - "test2": { - "job_input_type": "Literal", - "value": "${{parent.inputs.job_data_path}}", + @pytest.mark.parametrize( + "test_path,expected_inputs", + [ + ( + "tests/test_configs/pipeline_jobs/pipeline_job_with_sweep_job_with_input_bindings.yml", + { + "hello_world": { + "component_in_number": { + "job_input_type": "Literal", + }, + "test1": { + "job_input_type": "Literal", + "value": "${{parent.inputs.job_data_path}}", + }, + }, + "hello_world_inline_commandjob_2": { + "input_from_previous_node": { + "job_input_type": "Literal", + "value": "${{parent.jobs.hello_world.outputs.job_output}}", + }, + "test2": {"job_input_type": "Literal", "value": "${{parent.inputs.job_data_path}}"}, + }, }, - }, - "hello_world_inline_commandjob_2": { - "input_from_previous_node": { - "job_input_type": "Literal", - "value": "${{parent.jobs.hello_world_inline_commandjob_1.outputs.job_output}}", + ), + ( + "tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml", + { + "hello_world": { + "literal_input": {"job_input_type": "Literal", "value": "2"}, + "test1": { + "job_input_type": "Literal", + "value": "${{parent.inputs.job_data_path}}", + }, + "test2": { + "job_input_type": "Literal", + "value": "${{parent.inputs.job_data_path}}", + }, + }, + "hello_world_inline_commandjob_2": { + "input_from_previous_node": { + "job_input_type": "Literal", + "value": "${{parent.jobs.hello_world.outputs.job_output}}", + }, + "test2": { + "job_input_type": "Literal", + "value": "${{parent.inputs.job_data_path}}", + }, + }, }, - "test2": { - "job_input_type": "Literal", - "value": "${{parent.inputs.job_data_path}}", + ), + ( + "tests/test_configs/pipeline_jobs/pipeline_job_with_parallel_job_with_input_bindings.yml", + { + "hello_world": { + "test1": { + "job_input_type": "Literal", + "value": 
"${{parent.inputs.job_data_path}}", + } + }, }, - }, - } - for job_name, job_value in yaml_obj["jobs"].items(): - component_job = rest_component_jobs[job_name] - assert isinstance(component_job, dict) - # Check that each input in the yaml is properly serialized in the REST translation - assert component_job["inputs"] == expected_inputs[job_name] - # Test that translating from REST preserves the inputs for each job - from_rest_job = PipelineJob._from_rest_object(rest_job) - rest_job = job._to_rest_object() - for job_name, job_value in from_rest_job.jobs.items(): - rest_component = rest_job.properties.jobs[job_name] - assert expected_inputs[job_name] == rest_component["inputs"] - - def test_inline_parallel_job_with_input_bindings(self, mock_machinelearning_client: MLClient, mocker: MockFixture): - test_path = "tests/test_configs/pipeline_jobs/pipeline_job_with_parallel_job_with_input_bindings.yml" + ), + ], + ) + def test_pipeline_job_with_input_bindings( + self, + mock_machinelearning_client: MLClient, + mocker: MockFixture, + test_path: str, + expected_inputs: Dict[str, Any], + ): yaml_obj = load_yaml(test_path) job = load_job(test_path) - # check when top level input not exist - with pytest.raises(Exception) as e: - load_job( - test_path, - params_override=[{"jobs.batch_inference.inputs.score_input": "${{parent.inputs.not_found}}"}], - ) - assert "Failed to find top level definition for input binding" in str(e.value) + # no on-load check for sweep for now + if "sweep" not in test_path: + # check when top level input not exist + with pytest.raises(Exception) as e: + load_job( + test_path, + params_override=[{"jobs.hello_world.inputs.test1": "${{parent.inputs.not_found}}"}], + ) + assert "Failed to find top level definition for input binding" in str(e.value) # Check that all inputs are present and are of type Input or are literals for index, input_name in enumerate(yaml_obj["inputs"].keys()): @@ -816,21 +804,12 @@ def test_inline_parallel_job_with_input_bindings(self, mock_machinelearning_clie rest_component_jobs = rest_job_properties.jobs # Test that each job's inputs were serialized properly in the REST translation - expected_inputs = { - "batch_inference": { - "score_input": { - "job_input_type": "Literal", - "value": "${{parent.inputs.job_data_path}}", - } - }, - } for job_name, job_value in yaml_obj["jobs"].items(): component_job = rest_component_jobs[job_name] assert isinstance(component_job, dict) # Check that each input in the yaml is properly serialized in the REST translation assert component_job["inputs"] == expected_inputs[job_name] # Test that translating from REST preserves the inputs for each job - from_rest_job = PipelineJob._from_rest_object(rest_job) rest_job = job._to_rest_object() for job_name, job_value in from_rest_job.jobs.items(): @@ -1189,7 +1168,13 @@ def test_command_job_referenced_component_no_meta(self): [ ( "./tests/test_configs/pipeline_jobs/invalid/with_invalid_component.yml", - "Validation for PipelineJobSchema failed:", + # only type matched error message in "component" + r"Missing data for required field\.", + ), + ( + "./tests/test_configs/pipeline_jobs/invalid/type_sensitive_component_error.yml", + # not allowed type + "Value unsupported passed is not in set", ), ( "./tests/test_configs/pipeline_jobs/job_with_incorrect_component_content/pipeline.yml", @@ -1199,10 +1184,30 @@ def test_command_job_referenced_component_no_meta(self): ) def test_pipeline_job_validation_on_load(self, pipeline_job_path: str, expected_error: str) -> None: with 
pytest.raises(ValidationError, match=expected_error): - job = load_job( - path=pipeline_job_path, - ) - assert isinstance(job, Job) + load_job(path=pipeline_job_path) + + def test_pipeline_job_type_sensitive_error_message(self): + test_path = "./tests/test_configs/pipeline_jobs/helloworld_pipeline_job_inline_comps.yml" + pipeline_job: PipelineJob = load_job(path=test_path) + job_dict = pipeline_job._to_dict() + unsupported_node_type = "unsupported_node_type" + job_dict["jobs"]["hello_world_component_inline"]["type"] = unsupported_node_type + del job_dict["jobs"]["hello_world_component_inline_with_schema"]["component"]["environment"] + errors = pipeline_job._schema_for_validation.validate(job_dict) + type_sensitive_union_field = pipeline_job._schema_for_validation.dump_fields["jobs"].value_field + assert errors == { + "jobs": { + "hello_world_component_inline": { + "value": { + "type": f"Value {unsupported_node_type} passed is " + f"not in set {type_sensitive_union_field.allowed_types}", + } + }, + "hello_world_component_inline_with_schema": { + "value": {"component": {"environment": ["Missing data for required field."]}} + }, + } + } def test_pipeline_node_name_validate(self): invalid_node_names = ["1", "a-c", "1abc", ":::", "hello.world", "Abc", "aBc"] @@ -1279,7 +1284,9 @@ def test_automl_node_in_pipeline_load_dump( with open(test_path) as f: original_dict = yaml.safe_load(f) - mocker.patch("azure.ai.ml.operations._operation_orchestrator.OperationOrchestrator.get_asset_arm_id", return_value="xxx") + mocker.patch( + "azure.ai.ml.operations._operation_orchestrator.OperationOrchestrator.get_asset_arm_id", return_value="xxx" + ) mocker.patch("azure.ai.ml.operations._job_operations._upload_and_generate_remote_uri", return_value="yyy") mock_machinelearning_client.jobs._resolve_arm_id_or_upload_dependencies(pipeline) @@ -1388,6 +1395,19 @@ def test_pipeline_private_preview_features_not_supported(self, test_path, mocker ) assert err_msg in str(e.value) + def test_pipeline_job_source_path_resolution(self): + test_path = "./tests/test_configs/pipeline_jobs/inline_file_comp_base_path_sensitive/pipeline.yml" + pipeline_job: PipelineJob = load_job(path=test_path) + assert_the_same_path(pipeline_job._source_path, test_path) + assert_the_same_path( + pipeline_job.jobs["command_node"].component._source_path, + "./tests/test_configs/pipeline_jobs/inline_file_comp_base_path_sensitive/component/component.yml", + ) + assert_the_same_path( + pipeline_job.jobs["command_node"].component.environment._source_path, + "./tests/test_configs/environment/environment_docker_context.yml", + ) + def test_pipeline_job_node_base_path_resolution(self, mocker: MockFixture): test_path = "./tests/test_configs/pipeline_jobs/inline_file_comp_base_path_sensitive/pipeline.yml" pipeline_job: PipelineJob = load_job(path=test_path) diff --git a/sdk/ml/azure-ai-ml/tests/sweep_job/unittests/test_sweep_job_schema.py b/sdk/ml/azure-ai-ml/tests/sweep_job/unittests/test_sweep_job_schema.py index b4aed32bc859..a3c1a4a4cc56 100644 --- a/sdk/ml/azure-ai-ml/tests/sweep_job/unittests/test_sweep_job_schema.py +++ b/sdk/ml/azure-ai-ml/tests/sweep_job/unittests/test_sweep_job_schema.py @@ -33,6 +33,7 @@ ) from azure.ai.ml._schema import SweepJobSchema from azure.ai.ml import load_job +from azure.ai.ml.entities._job.to_rest_functions import to_rest_job_object @pytest.mark.unittest @@ -166,16 +167,9 @@ def test_sweep_with_string(self): assert rest.properties.search_space["ss"] == expected_rest assert vars(sweep.search_space["ss"]) == expected_ss - 
@pytest.mark.parametrize( - "yaml_path", - [ - "./tests/test_configs/command_job/command_job_input_types.yml", - "./tests/test_configs/sweep_job/sweep_job_input_types.yml", - ], - ) - def test_inputs_types_sweep_job(self, yaml_path: str): - original_entity = load_job(Path(yaml_path)) - rest_representation = original_entity._to_rest_object() + def test_inputs_types_sweep_job(self): + original_entity = load_job(Path("./tests/test_configs/sweep_job/sweep_job_input_types.yml")) + rest_representation = to_rest_job_object(original_entity) reconstructed_entity = Job._from_rest_object(rest_representation) assert original_entity.inputs["test_dataset"].mode == InputOutputModes.RO_MOUNT @@ -203,16 +197,9 @@ def test_inputs_types_sweep_job(self, yaml_path: str): assert rest_representation.properties.inputs["test_literal_valued_int"].value == "42" assert reconstructed_entity.inputs["test_literal_valued_int"] == "42" - @pytest.mark.parametrize( - "yaml_path", - [ - "./tests/test_configs/command_job/command_job_output_types.yml", - "./tests/test_configs/sweep_job/sweep_job_output_types.yml", - ], - ) - def test_outputs_types_standalone_jobs(self, yaml_path: str): - original_entity = load_job(Path(yaml_path)) - rest_representation = original_entity._to_rest_object() + def test_outputs_types_standalone_jobs(self): + original_entity = load_job(Path("./tests/test_configs/sweep_job/sweep_job_output_types.yml")) + rest_representation = to_rest_job_object(original_entity) dummy_default = RestUriFolderJobOutput(uri="azureml://foo", mode=OutputDeliveryMode.READ_WRITE_MOUNT) rest_representation.properties.outputs["default"] = dummy_default reconstructed_entity = Job._from_rest_object(rest_representation) From 391f1dab7b3ba215bfd0fcbb574d64e08b9ad598 Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Thu, 7 Jul 2022 09:55:37 -0700 Subject: [PATCH 08/19] Updated release cut to fix pipeline bugs for ml --- .../ai/ml/_artifacts/storage_overview.md | 33 ------------------- sdk/ml/azure-ai-ml/tests/component/_util.py | 1 + .../unittests/test_job_operations.py | 12 +++---- .../azure-ai-ml/tests/pipeline_job/_util.py | 3 ++ 4 files changed, 10 insertions(+), 39 deletions(-) delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md create mode 100644 sdk/ml/azure-ai-ml/tests/component/_util.py diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md deleted file mode 100644 index afb94db74f7b..000000000000 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/storage_overview.md +++ /dev/null @@ -1,33 +0,0 @@ -# Artifact Storage - -## Overview - -The AzureML v2 artifacts module facilitates interaction with Azure datastores for artifact creation and retrieval. - -#### Supported Storage Account Types - -Azure Storage offers four different account types (Blob, Gen1, Gen2, and File), and AzureML v2 currently supports Blob and Gen2.** Each has its own unique design and architecture which adds value for multiple user groups, but also provides its own set of challenges and requirements when building storage infrastructure. During implementation of the classes and functionality, the goal is to share as much as possible across the account types for code clarity and cleanliness while still taking advantage of their differences where possible and efficient. - -[_gen2_storage_helper.py](_gen2_storage_helper.py) contains the client object and methods for uploading to and downloading from ADLS Gen2 storage accounts. 
This implementation heavily relies on the [ADLS Gen2 Storage SDK](https://docs.microsoft.com/python/api/azure-storage-file-datalake/azure.storage.filedatalake?view=azure-python). - -[_blob_storage_helper.py](_blob_storage_helper.py) contains the client object and methods for uploading to and downloading from Azure Blob storage accounts. This implementation heavily relies on the [Blob Storage SDK](https://docs.microsoft.com/python/api/azure-storage-blob/azure.storage.blob?view=azure-python). - -**This folder includes an implementation of support for Azure File Storage; however, Azure File datastores are not yet supported for AzureML v2 due to Management Front End (MFE) restrictions. - -#### What are artifacts? - -Artifacts are the datastore representations of the files and folders that Assets are associated with. There can be a many-to-one relationship between assets and artifacts (e.g. asset _experiment-4-dataset:1_ and asset _experiment-1-dataset:1_ can both point to the same file or folder in storage). Artifacts are idempotent and thus are never overwritten or altered via AzureML once uploaded. - -#### Upload Process -![](upload_process_flowchart.png) - -Datastore upload functionality is triggered by calling an **Asset** or **Job** object's `create_or_update` method which then calls `_check_and_upload_path` in [_artifact_utilities.py](_artifact_utilities.py) to do basic checks to see if the path the user provided is a local path or if it is a reference to a remote object (e.g. a storage uri) - -If the path is determined to be a local path, `_upload_to_datastore` in [_artifact_utilities.py](_artifact_utilities.py) is called which 1) checks for a .amlignore or .gitignore file at the path and creates a filter for any excluded files, 2) creates a hash for the path and its contents, and 3) determines the datastore name and finally sends it off to `upload_artifact` in [_artifact_utilities.py](_artifact_utilities.py) which initializes a storage client in [_storage_utils.py](../_utils/_storage_utils.py) corresponding to the datastore type, either **Gen2StorageClient** or **BlobStorageClient**. - -The hash created in `_upload_to_datastore` will be used as a the name of the directory in the v2-specific LocalUpload/ directory inside the datastore where the file(s) will be stored. The storage client checks the hash against all existing directory names to see if the content has already been uploaded. If it has, the client returns the artifact's path in the blob storage along with the name and version of the asset is was last registered to, and the `create_or_update` method concludes its client-side work and continues onto contacting Management Front End for the service call. If the file or folder does not exist, it is uploaded, confirmation metadata is set, and then the asset path is returned to the `create_or_update` method. - - -### Download Process - -Download functionality is currently limited to **Job** and **Model** objects and does not require any of the pre-process steps that uploading does. It simply takes in the path, finds it in the storage account, and downloads it to the user's local machine. diff --git a/sdk/ml/azure-ai-ml/tests/component/_util.py b/sdk/ml/azure-ai-ml/tests/component/_util.py new file mode 100644 index 000000000000..d93cf7462afb --- /dev/null +++ b/sdk/ml/azure-ai-ml/tests/component/_util.py @@ -0,0 +1 @@ +_COMPONENT_TIMEOUT_SECOND = 20 * 60  # timeout for component tests, in seconds.
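For context: the `_COMPONENT_TIMEOUT_SECOND` constant added above is consumed through the pytest-timeout plugin's `timeout` marker, as the test_component_schema.py hunks earlier in this series show. A minimal sketch of the pattern (the test class below is a hypothetical stand-in, and pytest-timeout is assumed to be installed):

```python
import pytest

from .._util import _COMPONENT_TIMEOUT_SECOND  # shared 20 * 60 second budget


# Abort any test in this class that runs longer than the shared budget.
@pytest.mark.timeout(_COMPONENT_TIMEOUT_SECOND)
@pytest.mark.unittest
class TestExampleComponent:
    def test_finishes_within_budget(self):
        assert _COMPONENT_TIMEOUT_SECOND == 20 * 60
```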
diff --git a/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py b/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py index f638e34e504e..0a7d1c3848ea 100644 --- a/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py +++ b/sdk/ml/azure-ai-ml/tests/job_common/unittests/test_job_operations.py @@ -27,11 +27,11 @@ from azure.ai.ml.operations._run_history_constants import RunHistoryConstants from azure.ai.ml._scope_dependent_operations import OperationScope from azure.ai.ml.constants import AzureMLResourceType, AZUREML_PRIVATE_FEATURES_ENV_VAR -from azure.ai.ml.entities._job.command_job import CommandJob from azure.ai.ml.entities._job.automl.automl_job import AutoMLJob from azure.ai.ml.entities._job.sweep.sweep_job import SweepJob from azure.ai.ml.entities._job.job import Job from azure.ai.ml._restclient.v2021_10_01 import models +from azure.ai.ml.entities._builders import Command @pytest.fixture @@ -125,7 +125,7 @@ def test_list_private_preview(self, mock_job_operation: JobOperations) -> None: @patch.object(Job, "_from_rest_object") def test_get(self, mock_method, mock_job_operation: JobOperations, randstr: Callable[[], str]) -> None: - mock_method.return_value = CommandJob() + mock_method.return_value = Command(component=None) mock_job_operation.get(randstr()) mock_job_operation._operation_2022_02_preview.get.assert_called_once() @@ -134,7 +134,7 @@ def test_get(self, mock_method, mock_job_operation: JobOperations, randstr: Call def test_get_private_preview_flag_returns_latest( self, mock_method, mock_job_operation: JobOperations, randstr: Callable[[], str] ) -> None: - mock_method.return_value = CommandJob() + mock_method.return_value = Command(component=None) mock_job_operation.get(randstr()) mock_job_operation._operation_2022_02_preview.get.assert_called_once() @@ -170,7 +170,7 @@ def test_job_operations_list_schedule_defined_no_name( @pytest.mark.skip(reason="Mock Job missing properties to complete full test in Feb API") @patch.object(Job, "_from_rest_object") def test_submit_command_job(self, mock_method, mock_job_operation: JobOperations) -> None: - mock_method.return_value = CommandJob() + mock_method.return_value = Command(component=None) job = load_job(path="./tests/test_configs/command_job/command_job_test.yml") mock_job_operation.create_or_update(job=job) git_props = get_git_properties() @@ -192,7 +192,7 @@ def test_command_job_resolver_with_virtual_cluster(self, mock_job_operation: Job @patch.object(Job, "_from_rest_object") @pytest.mark.vcr() def test_archive(self, mock_method, mock_job_operation: JobOperations, randstr: Callable[[], str]) -> None: - mock_method.return_value = CommandJob() + mock_method.return_value = Command(component=None) mock_job_operation.archive(name=randstr()) mock_job_operation._operation_2022_02_preview.get.assert_called_once() mock_job_operation._operation_2022_02_preview.create_or_update.assert_called_once() @@ -200,7 +200,7 @@ def test_archive(self, mock_method, mock_job_operation: JobOperations, randstr: @patch.object(Job, "_from_rest_object") @pytest.mark.vcr() def test_restore(self, mock_method, mock_job_operation: JobOperations, randstr: Callable[[], str]) -> None: - mock_method.return_value = CommandJob() + mock_method.return_value = Command(component=None) mock_job_operation.restore(name=randstr()) mock_job_operation._operation_2022_02_preview.get.assert_called_once() mock_job_operation._operation_2022_02_preview.create_or_update.assert_called_once() diff --git 
a/sdk/ml/azure-ai-ml/tests/pipeline_job/_util.py b/sdk/ml/azure-ai-ml/tests/pipeline_job/_util.py index 7c5b726a8835..df8c102b2b17 100644 --- a/sdk/ml/azure-ai-ml/tests/pipeline_job/_util.py +++ b/sdk/ml/azure-ai-ml/tests/pipeline_job/_util.py @@ -44,3 +44,6 @@ def _check_common_schedule_fields(job_schedule: Union[CronSchedule, RecurrenceSc assert job_schedule.start_time assert job_schedule.time_zone == job_dict_schedule.get("time_zone", "UTC") assert job_schedule.status == job_dict_schedule["status"] + + +_PIPELINE_JOB_TIMEOUT_SECOND = 20 * 60  # timeout for pipeline job tests, in seconds. From 6549eb5f5b2c7091f80218d77e8c38e85a6d4b2e Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Thu, 7 Jul 2022 13:32:36 -0700 Subject: [PATCH 09/19] Added missing test configuration files --- .../unittests/test_component_schema.py | 2 +- ...and_job_inputs_dataset_short_form_test.yml | 13 ++++ .../component/component.yml | 5 +- .../type_sensitive_component_error.yml | 29 ++++++++ ...b_with_command_job_with_input_bindings.yml | 4 +- ..._with_parallel_job_with_input_bindings.yml | 4 +- ...job_with_sweep_job_with_input_bindings.yml | 72 +++++++++++++++++++ 7 files changed, 120 insertions(+), 9 deletions(-) create mode 100644 sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_inputs_dataset_short_form_test.yml create mode 100644 sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/invalid/type_sensitive_component_error.yml create mode 100644 sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_sweep_job_with_input_bindings.yml diff --git a/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py b/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py index aa2f0d373abf..1554ec8f1ebc 100644 --- a/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py +++ b/sdk/ml/azure-ai-ml/tests/component/unittests/test_component_schema.py @@ -30,7 +30,7 @@ from .._util import _COMPONENT_TIMEOUT_SECOND -tests_root_dir = Path(__file__).parent.parent.parent.parent +tests_root_dir = Path(__file__).parent.parent.parent components_dir = tests_root_dir / "test_configs/components/" def load_component_entity_from_yaml( diff --git a/sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_inputs_dataset_short_form_test.yml b/sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_inputs_dataset_short_form_test.yml new file mode 100644 index 000000000000..ab78083d219b --- /dev/null +++ b/sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_inputs_dataset_short_form_test.yml @@ -0,0 +1,13 @@ +# yaml-language-server: $schema=https://azuremlsdk2.blob.core.windows.net/latest/commandJob.schema.json +command: echo ${{inputs.test1}} +environment: azureml:AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:1 +name: "test1" +compute: "azureml:testCompute" +experiment_name: mfe-test1 +properties: + test_property: test_value +inputs: + "test1": + type: uri_folder + mode: ro_mount + path: azureml:test1_dataset@latest \ No newline at end of file diff --git a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/inline_file_comp_base_path_sensitive/component/component.yml b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/inline_file_comp_base_path_sensitive/component/component.yml index 2e1516c1f726..e93a3379261f 100644 --- a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/inline_file_comp_base_path_sensitive/component/component.yml +++ b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/inline_file_comp_base_path_sensitive/component/component.yml @@ -12,7 +12,4 @@ code: ../../../python
inputs: iris: type: uri_file -environment: - build: - path: ../../../environment/environment_files - dockerfile_path: DockerfileNonDefault +environment: file:../../../environment/environment_docker_context.yml diff --git a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/invalid/type_sensitive_component_error.yml b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/invalid/type_sensitive_component_error.yml new file mode 100644 index 000000000000..91e3ec34aa9c --- /dev/null +++ b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/invalid/type_sensitive_component_error.yml @@ -0,0 +1,29 @@ +type: pipeline + +name: simplepipelinejobnopath +description: The hello world pipeline job +tags: + tag: tagvalue + owner: sdkteam + +experiment_name: my_first_experiment + +compute: azureml:cpu-cluster + +inputs: + job_in_number: 10 + job_in_path: + path: ../../data + mode: ro_mount + +settings: + continue_on_step_failure: True + +jobs: + hello_world_unsupported_type: + type: unsupported + component: ../../components/invalid/no_environment.yml + compute: azureml:cpu-cluster + inputs: + component_in_number: ${{parent.inputs.job_in_number}} + component_in_path: ${{parent.inputs.job_in_path}} diff --git a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml index d396a3087cd4..4197c2cd97b2 100644 --- a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml +++ b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml @@ -19,7 +19,7 @@ outputs: mode: mount jobs: - hello_world_inline_commandjob_1: + hello_world: type: command command: pip freeze && echo Hello World @@ -46,7 +46,7 @@ jobs: experiment_name: test-iris-example-2 description: Train a model on the Iris dataset-2. 
diff --git a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml
index d396a3087cd4..4197c2cd97b2 100644
--- a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml
+++ b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_command_job_with_input_bindings.yml
@@ -19,7 +19,7 @@ outputs:
     mode: mount

 jobs:
-  hello_world_inline_commandjob_1:
+  hello_world:
     type: command

     command: pip freeze && echo Hello World
@@ -46,7 +46,7 @@ jobs:
     experiment_name: test-iris-example-2
     description: Train a model on the Iris dataset-2.
     inputs:
-      "input_from_previous_node": ${{parent.jobs.hello_world_inline_commandjob_1.outputs.job_output}}
+      "input_from_previous_node": ${{parent.jobs.hello_world.outputs.job_output}}
       "test2": ${{parent.inputs.job_data_path}}
     properties:
       test_property: test_value

diff --git a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_parallel_job_with_input_bindings.yml b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_parallel_job_with_input_bindings.yml
index 67793cd20f8c..bda0d34bc37d 100644
--- a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_parallel_job_with_input_bindings.yml
+++ b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_parallel_job_with_input_bindings.yml
@@ -20,12 +20,12 @@ outputs:

 # here the jobs in the pipeline
 jobs:
-  batch_inference:
+  hello_world:
     # We pass the trained model from the train step to use to parallel inference
     type: parallel
     compute: "azureml:cpu-cluster"
     inputs:
-      "score_input": ${{parent.inputs.job_data_path}}
+      test1: ${{parent.inputs.job_data_path}}
     outputs:
       scored_result: ${{parent.outputs.job_out_path_1}}

diff --git a/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_sweep_job_with_input_bindings.yml b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_sweep_job_with_input_bindings.yml
new file mode 100644
index 000000000000..2ed31fe86935
--- /dev/null
+++ b/sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/pipeline_job_with_sweep_job_with_input_bindings.yml
@@ -0,0 +1,72 @@
+type: pipeline
+
+name: simplePipelineJobWithInlineCommandJob
+description: The hello world pipeline job with inline command job
+tags:
+  tag: tagvalue
+  owner: sdkteam
+
+compute: "azureml:cpu-cluster"
+
+inputs:
+  job_data_path:
+    type: uri_file
+    path: https://azuremlexamples.blob.core.windows.net/datasets/iris.csv
+    mode: ro_mount
+
+outputs:
+  job_out_path_1:
+    mode: mount
+
+jobs:
+  hello_world:
+    type: sweep
+    search_space:
+      component_in_number:
+        type: choice
+        values: [1, 2]
+    compute: azureml:gpu-cluster
+
+    limits:
+      max_total_trials: 3
+    sampling_algorithm: random
+    objective:
+      goal: maximize
+      primary_metric: accuracy
+    inputs:
+      test1: ${{parent.inputs.job_data_path}}
+
+    trial:
+      command: >-
+        echo Hello World &
+        echo ${{inputs.component_in_number}} &
+        echo ${{inputs.test1}}
+      environment: azureml:AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:1
+      code: "./"
+      inputs:
+        test1:
+          type: uri_file
+        component_in_number:
+          description: An integer
+          type: integer
+          default: 10
+          optional: false
+      outputs:
+        job_output:
+          type: uri_folder
+
+  hello_world_inline_commandjob_2:
+    type: command
+
+    command: echo Hello World
+    environment: azureml:AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:1
+    name: "test2"
+    experiment_name: test-iris-example-2
+    description: Train a model on the Iris dataset-2.
+    inputs:
+      "input_from_previous_node": ${{parent.jobs.hello_world.outputs.job_output}}
+      "test2": ${{parent.inputs.job_data_path}}
+    properties:
+      test_property: test_value
+    identity:
+      type: AMLToken
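All three configs above exercise the `${{parent.inputs.*}}` and `${{parent.jobs.*}}` data-binding syntax that the schema changes in this release are meant to handle. As a rough illustration of the shape these bindings take (this regex is an assumption for clarity, not the SDK's actual parser):

```python
import re

# Assumed shape of a parent-scope data binding expression:
BINDING = re.compile(r"\$\{\{parent\.(inputs|jobs)\.[\w.]+\}\}")

assert BINDING.fullmatch("${{parent.inputs.job_data_path}}")
assert BINDING.fullmatch("${{parent.jobs.hello_world.outputs.job_output}}")
assert BINDING.fullmatch("literal string") is None  # plain values pass through unbound
```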
From dc69e697ec0b7c67bf65d68a0aae5dcf34489b50 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Thu, 7 Jul 2022 14:04:43 -0700
Subject: [PATCH 10/19] Fixed anonymous asset test to use proper version

---
 .../tests/command_job/unittests/test_command_job_schema.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py
index e35e4c05cefe..c6dde1d46b5a 100644
--- a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py
+++ b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py
@@ -124,7 +124,7 @@ def test_anonymous_assets(self):
         assert internal_representation.environment.name != envName
         assert internal_representation.environment.name == "CliV2AnonymousEnvironment"
         assert internal_representation.environment._is_anonymous
-        assert internal_representation.environment.version == "79a6980e14dbe0dac98ed0e902413f88"
+        assert internal_representation.environment.version == "559c904a18d86cc54f2f6a9d6ac26c0d"

         assert internal_representation.inputs["test1"].path == input_path
         # Validate default dataset is mounted
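The expected version string here is a 32-character hex digest, which suggests anonymous assets are versioned by hashing their normalized definition, so a serialization change in the SDK shifts the expected value. An illustrative sketch only; the SDK's actual hashing scheme may differ:

```python
import hashlib
import json


def anonymous_version(definition: dict) -> str:
    """Version an anonymous asset by hashing its normalized (sorted-key) definition."""
    payload = json.dumps(definition, sort_keys=True).encode("utf-8")
    return hashlib.md5(payload).hexdigest()


# Any change to the serialized shape changes the digest, as in this test update:
assert len(anonymous_version({"image": "ubuntu:18.04"})) == 32
```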
From 35e92f6cdb257d81b629782aaad9b2c1d03612fa Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Thu, 7 Jul 2022 14:58:03 -0700
Subject: [PATCH 11/19] Added py.typed file

---
 sdk/ml/azure-ai-ml/azure/py.typed | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 sdk/ml/azure-ai-ml/azure/py.typed

diff --git a/sdk/ml/azure-ai-ml/azure/py.typed b/sdk/ml/azure-ai-ml/azure/py.typed
new file mode 100644
index 000000000000..e69de29bb2d1

From b7d926d2a8873ea6b3aa6ba7776cbf5390321897 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Thu, 7 Jul 2022 15:18:07 -0700
Subject: [PATCH 12/19] Moved py.typed file to namespace root

---
 sdk/ml/azure-ai-ml/azure/py.typed | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 sdk/ml/azure-ai-ml/azure/py.typed

diff --git a/sdk/ml/azure-ai-ml/azure/py.typed b/sdk/ml/azure-ai-ml/azure/py.typed
deleted file mode 100644
index e69de29bb2d1..000000000000

From 5484b2d357da27d3b9d41141334d4479608b90e6 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Thu, 7 Jul 2022 15:19:12 -0700
Subject: [PATCH 13/19] Typo fix from previous commit

---
 sdk/ml/azure-ai-ml/py.typed | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 sdk/ml/azure-ai-ml/py.typed

diff --git a/sdk/ml/azure-ai-ml/py.typed b/sdk/ml/azure-ai-ml/py.typed
new file mode 100644
index 000000000000..e69de29bb2d1

From 4dfe2a83054b6c7492c4bf29472038035020c2a9 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Thu, 7 Jul 2022 15:38:04 -0700
Subject: [PATCH 14/19] Moved py.typed file to furthest non-code depth of ml package

---
 sdk/ml/azure-ai-ml/{ => azure/ai/ml}/py.typed | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename sdk/ml/azure-ai-ml/{ => azure/ai/ml}/py.typed (100%)

diff --git a/sdk/ml/azure-ai-ml/py.typed b/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed
similarity index 100%
rename from sdk/ml/azure-ai-ml/py.typed
rename to sdk/ml/azure-ai-ml/azure/ai/ml/py.typed
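The back-and-forth in patches 11-14 reflects a PEP 561 constraint: the `py.typed` marker must live inside the importable package (not the namespace root or the repo root) and must ship as package data for type checkers to honor the inline annotations. A hedged `setup.py` sketch; the real packaging configuration may differ:

```python
from setuptools import setup, find_packages

setup(
    name="azure-ai-ml",
    packages=find_packages(exclude=["tests", "tests.*"]),
    # Ship the PEP 561 marker with the package so type checkers pick up inline hints.
    package_data={"azure.ai.ml": ["py.typed"]},
    zip_safe=False,  # PEP 561 recommends non-zipped installs for typed packages
)
```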
From ab32084342bcf2a940c4dca2b4d4904e26034a5f Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Fri, 8 Jul 2022 10:36:37 -0700
Subject: [PATCH 15/19] Removed extra comments

---
 .../tests/command_job/unittests/test_command_job_schema.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py
index c6dde1d46b5a..d2512ef90014 100644
--- a/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py
+++ b/sdk/ml/azure-ai-ml/tests/command_job/unittests/test_command_job_schema.py
@@ -224,15 +224,11 @@ def test_inputs_types_command_job(self):
         assert reconstructed_entity.inputs["test_url"].type == AssetTypes.URI_FILE
         assert reconstructed_entity.inputs["test_url"].path == "azureml://fake/url.json"

-        # assert original_entity.inputs["test_string_literal"] == "literal string"
         assert rest_representation.properties.inputs["test_string_literal"].job_input_type == JobInputType.LITERAL
         assert rest_representation.properties.inputs["test_string_literal"].value == "literal string"
-        # assert reconstructed_entity.inputs["test_string_literal"] == "literal string"

-        # assert original_entity.inputs["test_literal_valued_int"] == 42
         assert rest_representation.properties.inputs["test_literal_valued_int"].job_input_type == JobInputType.LITERAL
         assert rest_representation.properties.inputs["test_literal_valued_int"].value == "42"
-        # assert reconstructed_entity.inputs["test_literal_valued_int"] == "42"

     def test_outputs_types_standalone_jobs(self):
         original_entity = load_job(Path("./tests/test_configs/command_job/command_job_output_types.yml"))
@@ -241,7 +237,6 @@ def test_outputs_types_standalone_jobs(self):
         rest_representation.properties.outputs["default"] = dummy_default
         reconstructed_entity = Job._from_rest_object(rest_representation)

-        # assert original_entity.outputs["test1"] is None
         assert rest_representation.properties.outputs["test1"].job_output_type == JobOutputType.URI_FOLDER
         assert rest_representation.properties.outputs["test1"].mode == OutputDeliveryMode.READ_WRITE_MOUNT

From d0c887f096dc89d5fc3c13c8978f662f152e3764 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Fri, 8 Jul 2022 13:57:43 -0700
Subject: [PATCH 16/19] Fixing version number

---
 sdk/ml/azure-ai-ml/azure/ai/ml/_version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py
index f38216c2a097..0cb735fbef9c 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_version.py
@@ -2,4 +2,4 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------

-VERSION = "0.0.139"
+VERSION = "0.1.0b5"

From b94c87f99fac9a148dc87e907d03b3e8ab5d9b26 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Tue, 12 Jul 2022 10:19:09 -0700
Subject: [PATCH 17/19] Fixes from code review

---
 sdk/ml/azure-ai-ml/CHANGELOG.md               |   2 +-
 .../azure-ai-ml/azure/ai/ml/.bumpversion.cfg  |   3 --
 .../_artifacts/upload_process_flowchart.png   | Bin 98532 -> 0 bytes
 .../azure-ai-ml/azure/ai/ml/requirements.txt  |  26 ------------------
 4 files changed, 1 insertion(+), 30 deletions(-)
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/upload_process_flowchart.png
 delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/requirements.txt

diff --git a/sdk/ml/azure-ai-ml/CHANGELOG.md b/sdk/ml/azure-ai-ml/CHANGELOG.md
index 8d0f22bb5235..13d566a53a0d 100644
--- a/sdk/ml/azure-ai-ml/CHANGELOG.md
+++ b/sdk/ml/azure-ai-ml/CHANGELOG.md
@@ -23,7 +23,7 @@
 - Reintroduced support for symlinks when uploading.
 - Hard coded registry base URL to eastus region to support preview.

-## 0.1.0b4 (unreleased)
+## 0.1.0b4 (2022-06-16)

 ## 0.1.0b3 (2022-05-24)

diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg b/sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg
deleted file mode 100644
index a7f5a7e6285b..000000000000
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/.bumpversion.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[bumpversion]
-current_version = 0.0.139
-commit = True

diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/upload_process_flowchart.png b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/upload_process_flowchart.png
deleted file mode 100644
index a4d0ea3aa7635c957adc4725c557c6bc89627160..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 98532
[98532 bytes of base85-encoded PNG data omitted]
z*VgEFP+-YZf41Jbxvl9|=OX-nX=zjk6tg}COss5mwDJ`IW}Hz`)x>*{l=gFA@K1BY zaLetK{mUd^6*G>Qb8Ve(m1FM68S^z66IZ;V)98M8Tc%SSTQWKWGN}Be_nHZ?#|+|Y zskPO8tI`HoF8@xofP`?ieSX`A;3_X|D|+Y2MB>(?eoFYL1z@;!@#!$8KIx#D)AXzU z0s-XnbKt*~k3MG<#CW@@Sx@6OMompe3G7U|dW-<$GC5EXxz#Byc`pD2UTV2S?#tnh z2ezk~{Ikw5%-;9Q%|#P3FImI~ANPARSL)++<}}e?$R>b%eh&PX@@Y0?T`buI>e7JveR5pA%v8S~UnZQ&O@G#6+R?TQdroh!r`!vj`d_+m*)Snb z-Iu)8giVb(P4uu8Zch}X`{%%aDIX!}qu8^5ZNZ+-@=ut}61V#06qF>8&uXX@if%QG zvyun);?+;4*Bj`tK2+N_POFn!mY;Kz*ATMxsGoJ5!~y_lZ46A1%#}Lo!kng>^{fUU zpPvK&t$f5eS{qJo_u+H3+g841zY8%;DgFZ!KVFC5_m_+u(A{c`*U9uQHR)H^|CDN0 z+`T=`&TUMn^k5_W(u~WO|8n@_*^h*+UThPmfN83`xQ9ss^7%RNU&_bfH2@~;+bz!i zC71yAGqo*4*XP^%o@{({?3c!I#elN-im1`H`Ua*mr#bCWL<0QCKLh`zd}8?kFxhU$ z_wrwY$xD`&Bir(qLoJv?Wtx9UV9mc-8)#ji_#KFmHCn*!2Fl ztDkmmRo;4^qgMge=J|niuk9DvF}oe?)HY2r573gmnPok{ANt=!vvX+y&hWQPf2-7Q oi~0|_{Xc#94JiMA=5.1.0 -azure-identity -msrest>=0.6.18 -azure-core<2.0.0,>=1.8.0, !=1.22.0 -azure-mgmt-core<2.0.0,>=1.2.0 -marshmallow<4.0.0,>=3.5 -jsonschema<5.0.0,>=4.0.0 -tqdm<=4.63.0 -# Used for PR 825138 -strictyaml<=1.6.1 -# Used for PR 718512 -colorama<=0.4.4 -pyjwt<3.0.0 -azure-storage-blob<13.0.0,>=12.10.0 -azure-storage-file-share<13.0.0 -azure-storage-file-datalake<=12.6.0 -pydash<=4.9.0 -pathspec==0.9.* -isodate -# Used for local endpoint story. -docker -azure-common<2.0.0,>=1.1 -typing-extensions>=4.0.1 -applicationinsights<=0.11.10 From ab008598df61ddad8f61a685319395d86f305b8e Mon Sep 17 00:00:00 2001 From: Njuguna Thande Date: Thu, 14 Jul 2022 11:41:57 -0700 Subject: [PATCH 18/19] Updated 0.1.0b5 with hotfixes --- sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore | 1 - .../ml/entities/_job/pipeline/pipeline_job.py | 28 +++++++++++++------ .../azure/ai/ml/operations/_job_operations.py | 9 +++++- sdk/ml/azure-ai-ml/azure/ai/ml/py.typed | 1 + 4 files changed, 28 insertions(+), 11 deletions(-) delete mode 100644 sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore b/sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore deleted file mode 100644 index bee8a64b79a9..000000000000 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__ diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py index 4d470c512dec..44dda0f712e9 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/pipeline_job.py @@ -247,20 +247,30 @@ def _get_skip_fields_in_schema_validation(self) -> typing.List[str]: # jobs validations are done in _customized_validate() return ["jobs"] - def _customized_validate(self) -> ValidationResult: - """Validate that all provided inputs and parameters are valid for current pipeline and components in it.""" - validation_result = super(PipelineJob, self)._customized_validate() + def _validate_compute_is_set(self): + validation_result = self._create_empty_validation_result() + if self.compute is not None: + return validation_result + if self.settings is not None and self.settings.default_compute is not None: + return validation_result no_compute_nodes = [] for node_name, node in self.jobs.items(): if hasattr(node, "compute") and node.compute is None: no_compute_nodes.append(node_name) - if not self.compute: - for node_name in no_compute_nodes: - validation_result.append_error( - yaml_path=f"jobs.{node_name}.compute", - message="Compute not set", - ) + for node_name in no_compute_nodes: + validation_result.append_error( + 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
index ae0a23a82442..e717fc6a71f4 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
@@ -342,6 +342,13 @@ def validate(self, job: Job, *, raise_on_failure: bool = False, **kwargs) -> Val
         :return: a ValidationResult object containing all found errors.
         :rtype: ValidationResult
         """
+        return self._validate(job, raise_on_failure=raise_on_failure, **kwargs)
+
+    @monitor_with_telemetry_mixin(logger, "Job.Validate", ActivityType.INTERNALCALL)
+    def _validate(self, job: Job, *, raise_on_failure: bool = False, **kwargs) -> ValidationResult:
+        """Implementation of validate. Keeping this separate avoids calling validate() directly in
+        create_or_update(), which would skew telemetry statistics and surface an experimental warning.
+        """
         git_code_validation_result = _ValidationResultBuilder.success()
         # TODO: move this check to Job._validate after validation is supported for all job types
         # If private features are enable and job has code value of type str we need to check
@@ -422,7 +429,7 @@ def create_or_update(
             if job.compute == LOCAL_COMPUTE_TARGET:
                 job.environment_variables[COMMON_RUNTIME_ENV_VAR] = "true"

-        self.validate(job, raise_on_failure=True)
+        self._validate(job, raise_on_failure=True)

         # Create all dependent resources
         self._resolve_arm_id_or_upload_dependencies(job)

diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed b/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed
index e69de29bb2d1..e5aff4f83af8 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file

From aed80ae69f4d791e6e9970208c0dcea74f0ab894 Mon Sep 17 00:00:00 2001
From: Njuguna Thande
Date: Fri, 15 Jul 2022 16:17:54 -0700
Subject: [PATCH 19/19] Updated release date on changelog

---
 sdk/ml/azure-ai-ml/CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/ml/azure-ai-ml/CHANGELOG.md b/sdk/ml/azure-ai-ml/CHANGELOG.md
index 13d566a53a0d..d0ebba8c1a25 100644
--- a/sdk/ml/azure-ai-ml/CHANGELOG.md
+++ b/sdk/ml/azure-ai-ml/CHANGELOG.md
@@ -1,6 +1,6 @@
 # Release History

-## 0.1.0b5 (2022-07-06)
+## 0.1.0b5 (2022-07-15)

 ### Features Added
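The `validate()`/`_validate()` split in patch 18 keeps user-initiated calls and internal calls distinguishable in telemetry, and lets `create_or_update()` bypass the experimental warning on the public method. A toy sketch of the pattern; the decorator here is illustrative, not the SDK's:

```python
from functools import wraps

TELEMETRY = []  # stand-in for the activity logger


def monitor(activity, activity_type="PublicApi"):
    def deco(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            TELEMETRY.append((activity, activity_type))  # one record per invocation
            return fn(*args, **kwargs)
        return wrapper
    return deco


class JobOperations:
    @monitor("Job.Validate")  # user-initiated calls are logged as public API usage
    def validate(self, job):
        return self._validate(job)

    @monitor("Job.Validate", activity_type="InternalCall")  # internal path tagged separately
    def _validate(self, job):
        return True

    def create_or_update(self, job):
        # Routing through _validate keeps public "Job.Validate" counts user-initiated
        # and avoids re-triggering warnings attached to the public method.
        return self._validate(job)
```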