diff --git a/sdk/ml/azure-ai-ml/CHANGELOG.md b/sdk/ml/azure-ai-ml/CHANGELOG.md index 87dba2d9d872..59ab796de7f6 100644 --- a/sdk/ml/azure-ai-ml/CHANGELOG.md +++ b/sdk/ml/azure-ai-ml/CHANGELOG.md @@ -3,6 +3,7 @@ ## 1.16.0 (unreleased) ### Features Added +- Add experimental support for working with Promptflow evaluators: `ml_client.evaluators`. - Many changes to the Connection entity class and its associated operations. - Workspace Connection `list`, `get`, and `create_or_update` operations now include an optional `populate_secrets` input, which causes the operations to try making a secondary call to fill in the returned connections' credential info if possible. Only works with api key-based credentials for now. - Many workspace connection subtypes added. The full list of subclasses is now: diff --git a/sdk/ml/azure-ai-ml/assets.json b/sdk/ml/azure-ai-ml/assets.json index 94a3c268347e..1a6797eed53f 100644 --- a/sdk/ml/azure-ai-ml/assets.json +++ b/sdk/ml/azure-ai-ml/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ml/azure-ai-ml", - "Tag": "python/ml/azure-ai-ml_8c61dc0136" + "Tag": "python/ml/azure-ai-ml_ce8aa03671" } diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py index d754279a38d5..9689fd22dde3 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py @@ -56,6 +56,7 @@ FeatureSetOperations, IndexOperations, ModelOperations, + EvaluatorOperations, ) from azure.ai.ml.operations._code_operations import CodeOperations @@ -463,7 +464,12 @@ def _update_gen2_metadata(name, version, indicator_file, storage_client) -> None def _check_and_upload_path( artifact: T, asset_operations: Union[ - "DataOperations", "ModelOperations", "CodeOperations", "FeatureSetOperations", "IndexOperations" + "DataOperations", + 
"ModelOperations", + "EvaluatorOperations", + "CodeOperations", + "FeatureSetOperations", + "IndexOperations", ], artifact_type: str, datastore_name: Optional[str] = None, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py index c8bdbaa59653..b588090cd6c8 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py @@ -86,6 +86,7 @@ JobOperations, MarketplaceSubscriptionOperations, ModelOperations, + EvaluatorOperations, OnlineDeploymentOperations, OnlineEndpointOperations, RegistryOperations, @@ -537,6 +538,25 @@ def __init__( registry_reference=registry_reference, **app_insights_handler_kwargs, # type: ignore[arg-type] ) + # Evaluators + self._evaluators = EvaluatorOperations( + self._operation_scope, + self._operation_config, + ( + self._service_client_10_2021_dataplanepreview + if registry_name or registry_reference + else self._service_client_08_2023_preview + ), + self._datastores, + self._operation_container, + requests_pipeline=self._requests_pipeline, + control_plane_client=self._service_client_08_2023_preview, + workspace_rg=self._ws_rg, + workspace_sub=self._ws_sub, + registry_reference=registry_reference, + **app_insights_handler_kwargs, # type: ignore[arg-type] + ) + self._operation_container.add(AzureMLResourceType.MODEL, self._models) self._code = CodeOperations( self._ws_operation_scope if registry_reference else self._operation_scope, @@ -948,6 +968,16 @@ def models(self) -> ModelOperations: """ return self._models + @property + @experimental + def evaluators(self) -> EvaluatorOperations: + """A collection of model related operations. + + :return: Model operations + :rtype: ~azure.ai.ml.operations.ModelOperations + """ + return self._evaluators + @property def online_endpoints(self) -> OnlineEndpointOperations: """A collection of online endpoint related operations. 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py index 74eaba207d74..a30d9fc3bb78 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/utils.py @@ -1419,3 +1419,11 @@ def extract_name_and_version(azureml_id: str) -> Dict[str, str]: "name": name, "version": version, } + + +def _get_evaluator_properties(): + return {"is-promptflow": "true", "is-evaluator": "true"} + + +def _is_evaluator(properties: Dict[str, str]) -> bool: + return properties.get("is-evaluator") == "true" and properties.get("is-promptflow") == "true" diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/__init__.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/__init__.py index b44a11511b14..a5373ae8d876 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/__init__.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/__init__.py @@ -30,6 +30,7 @@ from ._connections_operations import ConnectionsOperations from ._workspace_operations import WorkspaceOperations from ._workspace_outbound_rule_operations import WorkspaceOutboundRuleOperations +from ._evaluator_operations import EvaluatorOperations from ._serverless_endpoint_operations import ServerlessEndpointOperations from ._marketplace_subscription_operations import MarketplaceSubscriptionOperations @@ -38,6 +39,7 @@ "DatastoreOperations", "JobOperations", "ModelOperations", + "EvaluatorOperations", "WorkspaceOperations", "RegistryOperations", "OnlineEndpointOperations", diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_evaluator_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_evaluator_operations.py new file mode 100644 index 000000000000..f1de0457b119 --- /dev/null +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_evaluator_operations.py @@ -0,0 +1,230 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

# pylint: disable=protected-access,no-value-for-parameter,disable=docstring-missing-return,docstring-missing-param,docstring-missing-rtype,ungrouped-imports,line-too-long,too-many-statements

from os import PathLike
from typing import Any, Dict, Iterable, Optional, Union, cast

from azure.ai.ml._restclient.v2021_10_01_dataplanepreview import (
    AzureMachineLearningWorkspaces as ServiceClient102021Dataplane,
)
from azure.ai.ml._restclient.v2023_08_01_preview import (
    AzureMachineLearningWorkspaces as ServiceClient082023Preview,
)
from azure.ai.ml._restclient.v2023_08_01_preview.models import (
    ListViewType,
)
from azure.ai.ml._scope_dependent_operations import (
    OperationConfig,
    OperationsContainer,
    OperationScope,
    _ScopeDependentOperations,
)
from azure.ai.ml._telemetry import ActivityType, monitor_with_activity
from azure.ai.ml._utils._logger_utils import OpsLogger
from azure.ai.ml._utils.utils import (
    _get_evaluator_properties,
    _is_evaluator,
)
from azure.ai.ml.entities._assets import Model
from azure.ai.ml.entities._assets.workspace_asset_reference import (
    WorkspaceAssetReference,
)
from azure.ai.ml.exceptions import (
    UnsupportedOperationError,
)
from azure.ai.ml.operations._datastore_operations import DatastoreOperations
from azure.core.exceptions import ResourceNotFoundError

from azure.ai.ml.operations._model_operations import ModelOperations

ops_logger = OpsLogger(__name__)
module_logger = ops_logger.module_logger


class EvaluatorOperations(_ScopeDependentOperations):
    """EvaluatorOperations.

    You should not instantiate this class directly. Instead, you should create an MLClient instance that instantiates it
    for you and attaches it as an attribute.

    Evaluators are stored as model assets tagged with the properties returned by
    ``_get_evaluator_properties()`` (``is-promptflow``/``is-evaluator``); every
    operation here delegates to an internal :class:`ModelOperations` that is
    constructed in evaluator mode.

    :param operation_scope: Scope variables for the operations classes of an MLClient object.
    :type operation_scope: ~azure.ai.ml._scope_dependent_operations.OperationScope
    :param operation_config: Common configuration for operations classes of an MLClient object.
    :type operation_config: ~azure.ai.ml._scope_dependent_operations.OperationConfig
    :param service_client: Service client to allow end users to operate on Azure Machine Learning Workspace
        resources (ServiceClient082023Preview or ServiceClient102021Dataplane).
    :type service_client: typing.Union[
        azure.ai.ml._restclient.v2023_04_01_preview._azure_machine_learning_workspaces.AzureMachineLearningWorkspaces,
        azure.ai.ml._restclient.v2021_10_01_dataplanepreview._azure_machine_learning_workspaces.
        AzureMachineLearningWorkspaces]
    :param datastore_operations: Represents a client for performing operations on Datastores.
    :type datastore_operations: ~azure.ai.ml.operations._datastore_operations.DatastoreOperations
    :param all_operations: All operations classes of an MLClient object.
    :type all_operations: ~azure.ai.ml._scope_dependent_operations.OperationsContainer
    """

    # pylint: disable=unused-argument
    def __init__(
        self,
        operation_scope: OperationScope,
        operation_config: OperationConfig,
        service_client: Union[ServiceClient082023Preview, ServiceClient102021Dataplane],
        datastore_operations: DatastoreOperations,
        all_operations: Optional[OperationsContainer] = None,
        **kwargs,
    ):
        super(EvaluatorOperations, self).__init__(operation_scope, operation_config)

        ops_logger.update_info(kwargs)
        # Delegate all asset plumbing to ModelOperations; the _IS_EVALUATOR flag
        # lets ModelOperations.create_or_update accept evaluator-tagged assets
        # that it would otherwise reject.
        self._model_op = ModelOperations(
            operation_scope=operation_scope,
            operation_config=operation_config,
            service_client=service_client,
            datastore_operations=datastore_operations,
            all_operations=all_operations,
            **{ModelOperations._IS_EVALUATOR: True},
            **kwargs,
        )
        # Re-expose the delegate's scope/datastore handles so helpers that
        # expect them on `self` keep working.
        self._operation_scope = self._model_op._operation_scope
        self._datastore_operation = self._model_op._datastore_operation

    @monitor_with_activity(ops_logger, "Evaluator.CreateOrUpdate", ActivityType.PUBLICAPI)
    def create_or_update(  # type: ignore
        self, model: Union[Model, WorkspaceAssetReference]
    ) -> Model:  # TODO: Are we going to implement job_name?
        """Returns created or updated evaluator (model) asset.

        The evaluator marker properties are stamped onto the asset before it is
        forwarded to the underlying model create/update call.

        :param model: Model asset object.
        :type model: ~azure.ai.ml.entities.Model
        :raises ~azure.ai.ml.exceptions.AssetPathException: Raised when the Model artifact path is
            already linked to another asset
        :raises ~azure.ai.ml.exceptions.ValidationException: Raised if Model cannot be successfully validated.
            Details will be provided in the error message.
        :raises ~azure.ai.ml.exceptions.EmptyDirectoryError: Raised if local path provided points to an empty directory.
        :return: Model asset object.
        :rtype: ~azure.ai.ml.entities.Model
        """
        model.properties.update(_get_evaluator_properties())
        return self._model_op.create_or_update(model)

    def _raise_if_not_evaluator(self, properties: Optional[Dict[str, Any]], message: str) -> None:
        """Raise ResourceNotFoundError if the given properties do not mark an evaluator.

        A ``None`` properties value is deliberately allowed through (treated as
        "unknown", not "not an evaluator").

        :param properties: The properties of a model.
        :type properties: dict[str, str]
        :param message: The message to be set on exception.
        :type message: str
        :raises ~azure.core.exceptions.ResourceNotFoundError: Raised if model is not an
            evaluator.
        """
        if properties is not None and not _is_evaluator(properties):
            raise ResourceNotFoundError(
                message=message,
                response=None,
            )

    @monitor_with_activity(ops_logger, "Evaluator.Get", ActivityType.PUBLICAPI)
    def get(self, name: str, version: Optional[str] = None, label: Optional[str] = None) -> Model:
        """Returns information about the specified evaluator asset.

        :param name: Name of the model.
        :type name: str
        :param version: Version of the model.
        :type version: str
        :param label: Label of the model. (mutually exclusive with version)
        :type label: str
        :raises ~azure.ai.ml.exceptions.ValidationException: Raised if Model cannot be successfully validated.
            Details will be provided in the error message.
        :raises ~azure.core.exceptions.ResourceNotFoundError: Raised if the asset exists but is
            a regular model rather than an evaluator.
        :return: Model asset object.
        :rtype: ~azure.ai.ml.entities.Model
        """
        model = self._model_op.get(name, version, label)

        # A regular (non-evaluator) model must be reported as "not found" so
        # evaluators and models stay in separate namespaces from the caller's view.
        properties = None if model is None else model.properties
        self._raise_if_not_evaluator(
            properties,
            f"Evaluator {name} with version {version} not found.",
        )

        return model

    @monitor_with_activity(ops_logger, "Evaluator.Download", ActivityType.PUBLICAPI)
    def download(self, name: str, version: str, download_path: Union[PathLike, str] = ".") -> None:
        """Download files related to an evaluator.

        NOTE(review): unlike ``get``, this does not verify the asset carries the
        evaluator marker properties before downloading — confirm whether that is intended.

        :param name: Name of the model.
        :type name: str
        :param version: Version of the model.
        :type version: str
        :param download_path: Local path as download destination, defaults to current working directory of the current
            user. Contents will be overwritten.
        :type download_path: Union[PathLike, str]
        :raises ResourceNotFoundError: if can't find a model matching provided name.
        """
        self._model_op.download(name, version, download_path)

    @monitor_with_activity(ops_logger, "Evaluator.List", ActivityType.PUBLICAPI)
    def list(
        self,
        name: str,
        stage: Optional[str] = None,
        *,
        list_view_type: ListViewType = ListViewType.ACTIVE_ONLY,
    ) -> Iterable[Model]:
        """List all versions of the named evaluator asset.

        Listing without a name is currently unsupported and raises, because the
        container-level REST objects do not carry the evaluator marker properties
        needed for server-side filtering.

        :param name: Name of the model.
        :type name: str
        :param stage: The Model stage
        :type stage: Optional[str]
        :keyword list_view_type: View type for including/excluding (for example) archived models.
            Defaults to :attr:`ListViewType.ACTIVE_ONLY`.
        :paramtype list_view_type: ListViewType
        :raises ~azure.ai.ml.exceptions.UnsupportedOperationError: Raised if no name is provided.
        :return: An iterator like instance of Model objects
        :rtype: ~azure.core.paging.ItemPaged[~azure.ai.ml.entities.Model]
        """
        # Server-side property filter: only assets stamped as promptflow evaluators.
        properties_str = "is-promptflow=true,is-evaluator=true"
        if name:
            # NOTE(review): the registry branch tests `self._registry_name` but passes
            # `self._model_op._registry_name` — presumably equal; confirm they cannot diverge.
            return cast(
                Iterable[Model],
                (
                    self._model_op._model_versions_operation.list(
                        name=name,
                        registry_name=self._model_op._registry_name,
                        cls=lambda objs: [Model._from_rest_object(obj) for obj in objs],
                        properties=properties_str,
                        **self._model_op._scope_kwargs,
                    )
                    if self._registry_name
                    else self._model_op._model_versions_operation.list(
                        name=name,
                        workspace_name=self._model_op._workspace_name,
                        cls=lambda objs: [Model._from_rest_object(obj) for obj in objs],
                        list_view_type=list_view_type,
                        properties=properties_str,
                        stage=stage,
                        **self._model_op._scope_kwargs,
                    )
                ),
            )
        # ModelContainer object does not carry properties.
        raise UnsupportedOperationError("list on evaluation operations without name provided")
        # TODO: Implement filtering of the ModelContainerOperations list output
        # return cast(
        #     Iterable[Model], (
        #         self._model_container_operation.list(
        #             registry_name=self._registry_name,
        #             cls=lambda objs: [Model._from_container_rest_object(obj) for obj in objs],
        #             list_view_type=list_view_type,
        #             **self._scope_kwargs,
        #         )
        #         if self._registry_name
        #         else self._model_container_operation.list(
        #             workspace_name=self._workspace_name,
        #             cls=lambda objs: [Model._from_container_rest_object(obj) for obj in objs],
        #             list_view_type=list_view_type,
        #             **self._scope_kwargs,
        #         )
        #     )
        # )
azure.ai.ml._utils._storage_utils import get_ds_name_and_path_prefix, get_storage_client -from azure.ai.ml._utils.utils import resolve_short_datastore_url, validate_ml_flow_folder +from azure.ai.ml._utils.utils import resolve_short_datastore_url, validate_ml_flow_folder, _is_evaluator from azure.ai.ml.constants._common import ARM_ID_PREFIX, ASSET_ID_FORMAT, REGISTRY_URI_FORMAT, AzureMLResourceType from azure.ai.ml.entities._assets import Environment, Model, ModelPackage from azure.ai.ml.entities._assets._artifacts.code import Code @@ -72,6 +72,7 @@ module_logger = ops_logger.module_logger +# pylint: disable=too-many-instance-attributes class ModelOperations(_ScopeDependentOperations): """ModelOperations. @@ -94,6 +95,8 @@ class ModelOperations(_ScopeDependentOperations): :type all_operations: ~azure.ai.ml._scope_dependent_operations.OperationsContainer """ + _IS_EVALUATOR = "__is_evaluator" + # pylint: disable=unused-argument def __init__( self, @@ -102,7 +105,7 @@ def __init__( service_client: Union[ServiceClient082023Preview, ServiceClient102021Dataplane], datastore_operations: DatastoreOperations, all_operations: Optional[OperationsContainer] = None, - **kwargs: Dict, + **kwargs, ): super(ModelOperations, self).__init__(operation_scope, operation_config) ops_logger.update_info(kwargs) @@ -119,6 +122,7 @@ def __init__( # Maps a label to a function which given an asset name, # returns the asset associated with the label self._managed_label_resolver = {"latest": self._get_latest_version} + self.__is_evaluator = kwargs.pop(ModelOperations._IS_EVALUATOR, False) @monitor_with_activity(ops_logger, "Model.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update( # type: ignore @@ -136,6 +140,43 @@ def create_or_update( # type: ignore :return: Model asset object. :rtype: ~azure.ai.ml.entities.Model """ + # Check if we have the model with the same name and it is an + # evaluator. In this aces raise the exception do not create the model. 
+ if not self.__is_evaluator and _is_evaluator(model.properties): + msg = ( + "Unable to create the evaluator using ModelOperations. To create " + "evaluator, please use EvaluatorOperations by calling " + "ml_client.evaluators.create_or_update(model) instead." + ) + raise ValidationException( + message=msg, + no_personal_data_message=msg, + target=ErrorTarget.MODEL, + error_category=ErrorCategory.USER_ERROR, + ) + if model.name is not None: + model_properties = self._get_model_properties(model.name) + if model_properties is not None and _is_evaluator(model_properties) != _is_evaluator(model.properties): + if _is_evaluator(model.properties): + msg = ( + f"Unable to create the model with name {model.name} " + "because this version of model was marked as promptflow evaluator, but the previous " + "version is a regular model. " + "Please change the model name and try again." + ) + else: + msg = ( + f"Unable to create the model with name {model.name} " + "because previous version of model was marked as promptflow evaluator, but this " + "version is a regular model. " + "Please change the model name and try again." + ) + raise ValidationException( + message=msg, + no_personal_data_message=msg, + target=ErrorTarget.MODEL, + error_category=ErrorCategory.USER_ERROR, + ) try: name = model.name if not model.version and model._auto_increment_version: @@ -753,3 +794,24 @@ def package(self, name: str, version: str, package_request: ModelPackage, **kwar package_out = environment_operation.get(name=environment_name, version=environment_version) return package_out + + def _get_model_properties( + self, name: str, version: Optional[str] = None, label: Optional[str] = None + ) -> Optional[Dict[str, Any]]: + """ + Return the model properties if the model with this name exists. + + :param name: Model name. + :type name: str + :param version: Model version. + :type version: Optional[str] + :param label: model label. 
+ :type label: Optional[str] + :return: Model properties, if the model exists, or None. + """ + try: + if version or label: + return self.get(name, version, label).properties + return self._get_latest_version(name).properties + except (ResourceNotFoundError, ValidationException): + return None diff --git a/sdk/ml/azure-ai-ml/pyproject.toml b/sdk/ml/azure-ai-ml/pyproject.toml index 18a46824005a..e99df4291338 100644 --- a/sdk/ml/azure-ai-ml/pyproject.toml +++ b/sdk/ml/azure-ai-ml/pyproject.toml @@ -28,6 +28,7 @@ exclude = [ "azure/ai/ml/_schema/", "azure/ai/ml/_arm_deployments/", "tests", + "downloaded", "setup.py", "samples", "azure/ai/ml/_utils", diff --git a/sdk/ml/azure-ai-ml/tests/evaluator/__init__.py b/sdk/ml/azure-ai-ml/tests/evaluator/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/ml/azure-ai-ml/tests/evaluator/e2etests/__init__.py b/sdk/ml/azure-ai-ml/tests/evaluator/e2etests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/ml/azure-ai-ml/tests/evaluator/e2etests/test_evaluator.py b/sdk/ml/azure-ai-ml/tests/evaluator/e2etests/test_evaluator.py new file mode 100644 index 000000000000..b4746874a7d4 --- /dev/null +++ b/sdk/ml/azure-ai-ml/tests/evaluator/e2etests/test_evaluator.py @@ -0,0 +1,225 @@ +import os +import re +import uuid +from typing import Callable + +import pytest +from devtools_testutils import AzureRecordedTestCase, is_live +from test_utilities.utils import sleep_if_live + +from azure.ai.ml import MLClient, load_model +from azure.ai.ml._restclient.v2022_05_01.models import ListViewType +from azure.ai.ml.constants._common import LONG_URI_REGEX_FORMAT +from azure.ai.ml.entities._assets import Model +from azure.core.paging import ItemPaged +from pathlib import Path +from azure.ai.ml.exceptions import ValidationException + + +@pytest.fixture +def uuid_name() -> str: + name = str(uuid.uuid1()) + yield name + + +@pytest.fixture +def artifact_path(tmpdir_factory) -> str: # type: 
def _load_flow(name: str, version: str = "42", **kwargs) -> Model:
    """Load the flow supplied with the tests."""
    return Model(
        path="./tests/test_configs/flows/basic/",
        name=name,
        type="custom_model",
        description="This is evaluator.",
        version=version,
        properties={"is-promptflow": "true", "is-evaluator": "true"},
        **kwargs,
    )


@pytest.mark.e2etest
@pytest.mark.usefixtures("recorded_test")
@pytest.mark.production_experiences_test
class TestEvaluator(AzureRecordedTestCase):
    """End-to-end tests for MLClient.evaluators CRUD operations."""

    def test_crud_file(self, client: MLClient, randstr: Callable[[], str]) -> None:
        """Create an evaluator from a local flow, then get and list it back."""
        model_name = randstr("model_name")

        model = _load_flow(model_name, version="3")
        model.name = model_name
        model = client.evaluators.create_or_update(model)
        assert model.name == model_name
        assert model.version == "3"
        assert model.description == "This is evaluator."
        assert model.type == "custom_model"
        # create_or_update must stamp the promptflow evaluator marker properties.
        assert "is-promptflow" in model.properties and model.properties["is-promptflow"] == "true"
        assert "is-evaluator" in model.properties and model.properties["is-evaluator"] == "true"
        assert re.match(LONG_URI_REGEX_FORMAT, model.path)

        model = client.evaluators.get(model.name, "3")
        assert model.name == model_name
        assert model.version == "3"
        assert model.description == "This is evaluator."
        assert "is-promptflow" in model.properties and model.properties["is-promptflow"] == "true"
        assert "is-evaluator" in model.properties and model.properties["is-evaluator"] == "true"

        models = client.evaluators.list(name=model_name)
        assert isinstance(models, ItemPaged)
        test_model = next(iter(models), None)
        assert isinstance(test_model, Model)

        # TODO: Enable this test when listing without name will be available.
        # models = client.evaluators.list()
        # assert isinstance(models, Iterator)
        # test_model = next(iter(models), None)
        # assert isinstance(test_model, Model)

    def test_crud_evaluator_with_stage(self, client: MLClient, randstr: Callable[[], str]) -> None:
        """Round-trip an evaluator created with a Production stage, including stage filtering in list."""
        model_name = randstr("model_prod_name")
        model = _load_flow(model_name, stage="Production", version="3")

        model = client.evaluators.create_or_update(model)
        assert model.name == model_name
        assert model.version == "3"
        assert model.description == "This is evaluator."
        assert model.type == "custom_model"
        assert model.stage == "Production"
        assert re.match(LONG_URI_REGEX_FORMAT, model.path)

        model = client.evaluators.get(model.name, "3")
        assert model.name == model_name
        assert model.version == "3"
        assert model.description == "This is evaluator."
        assert model.stage == "Production"

        model_list = client.evaluators.list(name=model.name, stage="Production")
        model_stage_list = [m.stage for m in model_list if m is not None]
        assert model.stage in model_stage_list

    def test_evaluators_get_latest_label(self, client: MLClient, randstr: Callable[[], str]) -> None:
        """Verify the 'latest' label always resolves to the most recently created version."""
        model_name = f"model_{randstr('name')}"
        for version in ["1", "2", "3", "4"]:
            model = _load_flow(model_name, version=version)
            client.evaluators.create_or_update(model)
            assert client.evaluators.get(model_name, label="latest").version == version

    @pytest.mark.skip(
        "Skipping test for archive and restore as we have removed it from interface. "
        "These test will be available when the appropriate API will be enabled at "
        "GenericAssetService."
    )
    def test_evaluator_archive_restore_version(self, client: MLClient, randstr: Callable[[], str]) -> None:
        """Archive a single evaluator version and restore it (currently disabled)."""
        model_name = f"model_{randstr('name')}"

        versions = ["1", "2"]
        version_archived = versions[0]
        for version in versions:
            model = _load_flow(model_name, version=version)
            client.evaluators.create_or_update(model)

        def get_evaluator_list():
            # Wait for list index to update before calling list command
            sleep_if_live(30)
            model_list = client.evaluators.list(name=model_name, list_view_type=ListViewType.ACTIVE_ONLY)
            return [m.version for m in model_list if m is not None]

        assert version_archived in get_evaluator_list()
        client.evaluators.archive(name=model_name, version=version_archived)
        assert version_archived not in get_evaluator_list()
        client.evaluators.restore(name=model_name, version=version_archived)
        assert version_archived in get_evaluator_list()

    @pytest.mark.skip(reason="Task 1791832: Inefficient, possibly causing testing pipeline to time out.")
    def test_evaluator_archive_restore_container(self, client: MLClient, randstr: Callable[[], str]) -> None:
        """Archive a whole evaluator container and restore it (currently disabled)."""
        model_name = f"model_{randstr('name')}"
        version = "1"
        model = _load_flow(model_name, version=version)

        client.evaluators.create_or_update(model)

        def get_evaluator_list():
            # Wait for list index to update before calling list command
            sleep_if_live(30)
            model_list = client.evaluators.list(list_view_type=ListViewType.ACTIVE_ONLY)
            return [m.name for m in model_list if m is not None]

        assert model_name in get_evaluator_list()
        client.evaluators.archive(name=model_name)
        assert model_name not in get_evaluator_list()
        client.evaluators.restore(name=model_name)
        assert model_name in get_evaluator_list()

    @pytest.mark.skipif(
        condition=not is_live(),
        reason="Registry uploads do not record well. Investigate later",
    )
    def test_create_get_download_evaluator_registry(
        self, registry_client: MLClient, randstr: Callable[[], str]
    ) -> None:
        """Create, get, and download an evaluator against a registry-scoped client."""
        model_name = randstr("model_name")
        model_version = "2"

        model_entity = _load_flow(model_name, version=model_version)
        model = registry_client.evaluators.create_or_update(model_entity)
        assert model.name == model_name
        assert model.version == model_version
        assert model.description == "This is evaluator."
        assert model.type == "custom_model"

        model_get = registry_client.evaluators.get(name=model_name, version=model_version)
        assert model == model_get
        assert model_get.name == model_name
        assert model_get.version == model_version
        assert model_get.description == "This is evaluator."
        assert model_get.type == "custom_model"

        registry_client.evaluators.download(name=model_name, version=model_version, download_path="downloaded")
        # Downloaded layout: downloaded/<model_name>/basic/flow.dag.yaml
        wd = os.path.join(os.getcwd(), f"downloaded/{model_name}")
        assert os.path.exists(wd)
        assert os.path.exists(f"{wd}/basic/flow.dag.yaml")

    @pytest.mark.parametrize("use_registry", [True, False])
    @pytest.mark.skipif(
        condition=not is_live(),
        reason="Registry uploads do not record well. Investigate later",
    )
    def test_list_evaluator(
        self,
        registry_client: MLClient,
        client: MLClient,
        randstr: Callable[[], str],
        use_registry: bool,
    ) -> None:
        """Verify evaluators and regular models cannot share a name, and listing filters correctly."""
        ml_cli = registry_client if use_registry else client
        model_name = randstr("model_name")
        model_version = "1"
        model_entity = _load_flow(model_name, version=model_version)
        model = ml_cli.evaluators.create_or_update(model_entity)
        assert model.name == model_name
        assert model.version == model_version
        assert model.description == "This is evaluator."
        assert model.type == "custom_model"

        # Check that we only can create evaluators with the same name.
        model_path = Path("./tests/test_configs/model/model_full.yml")
        model_version = "2"

        model_entity = load_model(model_path)
        model_entity.name = model_name
        model_entity.version = model_version
        # Creating a regular model under an evaluator's name must be rejected.
        with pytest.raises(ValidationException) as cm:
            ml_cli.models.create_or_update(model_entity)
        assert "previous version of model was marked as promptflow evaluator" in cm.value.args[0]

        # Check that only one model was created.
        model_list = list(ml_cli.evaluators.list(model_name))
        assert len(model_list) == 1
        assert "is-promptflow" in model_list[0].properties and model_list[0].properties["is-promptflow"] == "true"
        assert "is-evaluator" in model_list[0].properties and model_list[0].properties["is-evaluator"] == "true"
azure.core.exceptions import ResourceNotFoundError + + +@pytest.fixture +def mock_datastore_operation( + mock_workspace_scope: OperationScope, + mock_operation_config: OperationConfig, + mock_aml_services_2023_04_01_preview: Mock, + mock_aml_services_2024_01_01_preview: Mock, +) -> DatastoreOperations: + yield DatastoreOperations( + operation_scope=mock_workspace_scope, + operation_config=mock_operation_config, + serviceclient_2023_04_01_preview=mock_aml_services_2023_04_01_preview, + serviceclient_2024_01_01_preview=mock_aml_services_2024_01_01_preview, + ) + + +@pytest.fixture +def mock_eval_operation( + mock_workspace_scope: OperationScope, + mock_operation_config: OperationConfig, + mock_aml_services_2022_05_01: Mock, + mock_datastore_operation: Mock, +) -> EvaluatorOperations: + yield EvaluatorOperations( + operation_scope=mock_workspace_scope, + operation_config=mock_operation_config, + service_client=mock_aml_services_2022_05_01, + datastore_operations=mock_datastore_operation, + ) + + +@pytest.fixture +def mock_model_operation_reg( + mock_registry_scope: OperationScope, + mock_operation_config: OperationConfig, + mock_aml_services_2021_10_01_dataplanepreview: Mock, + mock_datastore_operation: Mock, +) -> EvaluatorOperations: + yield EvaluatorOperations( + operation_scope=mock_registry_scope, + operation_config=mock_operation_config, + service_client=mock_aml_services_2021_10_01_dataplanepreview, + datastore_operations=mock_datastore_operation, + ) + + +def _load_flow(name: str, version: str = "42", **kwargs) -> Model: + """Load the flow supplied with the tests.""" + return Model( + path="./tests/test_configs/flows/basic/", + name=name, + description="F1 score evaluator.", + version=version, + properties={"is-promptflow": "true", "is-evaluator": "true"}, + **kwargs, + ) + + +@pytest.mark.unittest +@pytest.mark.production_experiences_test +class TestModelOperations: + def test_create_with_spec_file( + self, + mock_workspace_scope: OperationScope, + 
mock_eval_operation: EvaluatorOperations, + ) -> None: + eval_name = "simple_flow" + + with patch( + "azure.ai.ml._artifacts._artifact_utilities._upload_to_datastore", + return_value=ArtifactStorageInfo( + name=eval_name, + version="3", + relative_path="path", + datastore_arm_id="/subscriptions/mock/resourceGroups/mock/providers/Microsoft.MachineLearningServices/workspaces/mock/datastores/datastore_id", + container_name="containerName", + ), + ) as mock_upload, patch( + "azure.ai.ml.operations._model_operations.Model._from_rest_object", + return_value=Model(properties={"is-promptflow": "true", "is-evaluator": "true"}), + ): + model = _load_flow(eval_name) + path = Path(model._base_path, model.path).resolve() + mock_eval_operation.create_or_update(model) + mock_upload.assert_called_once_with( + mock_workspace_scope, + mock_eval_operation._model_op._datastore_operation, + path, + asset_name=model.name, + asset_version=model.version, + datastore_name=None, + asset_hash=None, + sas_uri=None, + artifact_type=ErrorTarget.MODEL, + show_progress=True, + ignore_file=None, + blob_uri=None, + ) + mock_eval_operation._model_op._model_versions_operation.create_or_update.assert_called_once() + assert "version='42'" in str(mock_eval_operation._model_op._model_versions_operation.create_or_update.call_args) + + def test_create_autoincrement( + self, + mock_eval_operation: EvaluatorOperations, + mock_workspace_scope: OperationScope, + ) -> None: + eval_name = "eval" + model = _load_flow(eval_name, version=None) + assert model._auto_increment_version + model.version = None + + with patch( + "azure.ai.ml.operations._model_operations.Model._from_rest_object", + return_value=None, + ), patch( + "azure.ai.ml.operations._model_operations._get_next_version_from_container", + return_value="version", + ) as mock_nextver, patch( + "azure.ai.ml.operations._model_operations._check_and_upload_path", + return_value=(model, "indicatorfile.txt"), + ), patch( + 
"azure.ai.ml.operations._model_operations.Model._from_rest_object", + return_value=model, + ), patch( + "azure.ai.ml.operations._model_operations._get_default_datastore_info", + return_value=None, + ): + mock_eval_operation.create_or_update(model) + mock_nextver.assert_called_once() + + mock_eval_operation._model_op._model_versions_operation.create_or_update.assert_called_once_with( + body=model._to_rest_object(), + name=model.name, + version=mock_nextver.return_value, + resource_group_name=mock_workspace_scope.resource_group_name, + workspace_name=mock_workspace_scope.workspace_name, + ) + + def test_get_name_and_version(self, mock_eval_operation: EvaluatorOperations) -> None: + mock_eval_operation._model_op._model_container_operation.get.return_value = None + with patch( + "azure.ai.ml.operations._evaluator_operations.Model._from_rest_object", + return_value=None, + ): + mock_eval_operation.get(name="random_string", version="1") + mock_eval_operation._model_op._model_versions_operation.get.assert_called_once() + assert mock_eval_operation._model_op._model_container_operation.get.call_count == 0 + + def test_get_no_version(self, mock_eval_operation: EvaluatorOperations) -> None: + name = "random_string" + with pytest.raises(Exception): + mock_eval_operation.get(name=name) + + @patch.object(Model, "_from_rest_object", new=Mock()) + @patch.object(Model, "_from_container_rest_object", new=Mock()) + def test_list(self, mock_eval_operation: EvaluatorOperations) -> None: + mock_eval_operation._model_op._model_versions_operation.list.return_value = [Mock(Model) for _ in range(10)] + + mock_eval_operation.list(name="random_string") + mock_eval_operation._model_op._model_versions_operation.list.assert_called_once() + + def test_list_with_no_name_raises(self, mock_eval_operation: EvaluatorOperations) -> None: + """Test that listing evaluators without values raises an unsupported exception.""" + with pytest.raises(UnsupportedOperationError) as cm: + 
mock_eval_operation.list(None) + assert "list on evaluation operations without name provided" in cm.value.args[0] + + @pytest.mark.skip( + "Skipping test for archive and restore as we have removed it from interface. " + "These test will be available when the appropriate API will be enabled at " + "GenericAssetService." + ) + def test_archive_version(self, mock_eval_operation: EvaluatorOperations) -> None: + name = "random_string" + model_version = Mock(ModelVersionData(properties=Mock(ModelVersionDetails()))) + version = "1" + mock_eval_operation._model_op._model_versions_operation.get.return_value = model_version + with patch( + "azure.ai.ml.operations._evaluator_operations.ModelOperations.get", + return_value=Model(properties={"is-promptflow": "true", "is-evaluator": "true"}), + ): + mock_eval_operation.archive(name=name, version=version) + mock_eval_operation._model_op._model_versions_operation.create_or_update.assert_called_once_with( + name=name, + version=version, + workspace_name=mock_eval_operation._model_op._workspace_name, + body=model_version, + resource_group_name=mock_eval_operation._model_op._resource_group_name, + ) + + @pytest.mark.skip( + "Skipping test for archive and restore as we have removed it from interface. " + "These test will be available when the appropriate API will be enabled at " + "GenericAssetService." 
+ ) + def test_archive_container(self, mock_eval_operation: EvaluatorOperations) -> None: + name = "random_string" + model_container = Mock(ModelContainerData(properties=Mock(ModelContainerDetails()))) + mock_eval_operation._model_op._model_container_operation.get.return_value = model_container + with patch( + "azure.ai.ml.operations._model_operations.Model._from_rest_object", + return_value=Model(properties={"is-promptflow": "true", "is-evaluator": "true"}), + ): + mock_eval_operation.archive(name=name) + mock_eval_operation._model_op._model_container_operation.create_or_update.assert_called_once_with( + name=name, + workspace_name=mock_eval_operation._model_op._workspace_name, + body=model_container, + resource_group_name=mock_eval_operation._model_op._resource_group_name, + ) + + @pytest.mark.skip( + "Skipping test for archive and restore as we have removed it from interface. " + "These test will be available when the appropriate API will be enabled at " + "GenericAssetService." + ) + def test_restore_version(self, mock_eval_operation: EvaluatorOperations) -> None: + name = "random_string" + model = Mock(ModelVersionData(properties=Mock(ModelVersionDetails()))) + version = "1" + mock_eval_operation._model_op._model_versions_operation.get.return_value = model + with patch( + "azure.ai.ml.operations._evaluator_operations.ModelOperations.get", + return_value=Model(properties={"is-promptflow": "true", "is-evaluator": "true"}), + ): + mock_eval_operation.restore(name=name, version=version) + mock_eval_operation._model_op._model_versions_operation.create_or_update.assert_called_with( + name=name, + version=version, + workspace_name=mock_eval_operation._model_op._workspace_name, + body=model, + resource_group_name=mock_eval_operation._model_op._resource_group_name, + ) + + @pytest.mark.skip( + "Skipping test for archive and restore as we have removed it from interface. 
" + "These test will be available when the appropriate API will be enabled at " + "GenericAssetService." + ) + def test_restore_container(self, mock_eval_operation: EvaluatorOperations) -> None: + name = "random_string" + model_container = Mock(ModelContainerData(properties=Mock(ModelContainerDetails()))) + mock_eval_operation._model_op._model_container_operation.get.return_value = model_container + with patch( + "azure.ai.ml.operations._evaluator_operations.ModelOperations._get_latest_version", + return_value=Model(properties={"is-promptflow": "true", "is-evaluator": "true"}), + ): + mock_eval_operation.restore(name=name) + mock_eval_operation._model_op._model_container_operation.create_or_update.assert_called_once_with( + name=name, + workspace_name=mock_eval_operation._model_op._workspace_name, + body=model_container, + resource_group_name=mock_eval_operation._model_op._resource_group_name, + ) + + def test_create_with_datastore( + self, + mock_workspace_scope: OperationScope, + mock_eval_operation: EvaluatorOperations, + ) -> None: + eval_name = "eval_string" + + with patch( + "azure.ai.ml._artifacts._artifact_utilities._upload_to_datastore", + return_value=ArtifactStorageInfo( + name=eval_name, + version="3", + relative_path="path", + datastore_arm_id="/subscriptions/mock/resourceGroups/mock/providers/Microsoft.MachineLearningServices/workspaces/mock/datastores/datastore_id", + container_name="containerName", + ), + ) as mock_upload, patch( + "azure.ai.ml.operations._model_operations.Model._from_rest_object", + return_value=Model(properties={"is-promptflow": "true", "is-evaluator": "true"}), + ): + model = _load_flow(eval_name, version="3", datastore="workspaceartifactstore") + path = Path(model._base_path, model.path).resolve() + mock_eval_operation.create_or_update(model) + mock_upload.assert_called_once_with( + mock_workspace_scope, + mock_eval_operation._model_op._datastore_operation, + path, + asset_name=model.name, + asset_version=model.version, + 
datastore_name="workspaceartifactstore",
+                asset_hash=None,
+                sas_uri=None,
+                artifact_type=ErrorTarget.MODEL,
+                show_progress=True,
+                ignore_file=None,
+                blob_uri=None,
+            )
+
+    def test_create_evaluator_model(self, mock_eval_operation: EvaluatorOperations):
+        """Test that evaluator operations add correct tags to model."""
+        p = "./tests/test_configs/model/model_with_datastore.yml"
+        plane_model = load_model(p)
+        eval_name = "eval_string"
+
+        with patch(
+            "azure.ai.ml._artifacts._artifact_utilities._upload_to_datastore",
+            return_value=ArtifactStorageInfo(
+                name=eval_name,
+                version="3",
+                relative_path="path",
+                datastore_arm_id="/subscriptions/mock/resourceGroups/mock/providers/Microsoft.MachineLearningServices/workspaces/mock/datastores/datastore_id",
+                container_name="containerName",
+            ),
+        ), patch(
+            "azure.ai.ml.operations._model_operations.Model._from_rest_object",
+            return_value=Model(properties={"is-promptflow": "true", "is-evaluator": "true"}),
+        ):
+            mock_eval_operation.create_or_update(plane_model)
+            assert plane_model.properties == {
+                "is-promptflow": "true",
+                "is-evaluator": "true",
+            }
+
+    def test_get_non_evaluator_raises(self, mock_eval_operation: EvaluatorOperations):
+        """Test that getting a model that is not an evaluator raises ResourceNotFoundError."""
+        p = "./tests/test_configs/model/model_with_datastore.yml"
+        plane_model = load_model(p)
+        mock_eval_operation._model_op._model_container_operation.get.return_value = None
+        with patch(
+            "azure.ai.ml.operations._evaluator_operations.Model._from_rest_object",
+            return_value=plane_model,
+        ):
+            with pytest.raises(ResourceNotFoundError) as cm:
+                mock_eval_operation.get(name="random_string", version="1")
+            assert f"Evaluator random_string with version 1 not found."
in cm.value.args[0] diff --git a/sdk/ml/azure-ai-ml/tests/model/e2etests/test_model.py b/sdk/ml/azure-ai-ml/tests/model/e2etests/test_model.py index e968cd33fb99..a7ba78abeb40 100644 --- a/sdk/ml/azure-ai-ml/tests/model/e2etests/test_model.py +++ b/sdk/ml/azure-ai-ml/tests/model/e2etests/test_model.py @@ -201,7 +201,7 @@ def test_list_model_registry(self, registry_client: MLClient, randstr: Callable[ model_list = [m.name for m in model_list if m is not None] assert model.name in model_list - @pytest.mark.skipif(condition=not is_live(), reason="Registry uploads do not record well. Investigate later") + @pytest.mark.skip(reason="_prepare_to_copy method was removed") def test_promote_model(self, randstr: Callable[[], str], client: MLClient, registry_client: MLClient) -> None: # Create model in workspace model_path = Path("./tests/test_configs/model/model_full.yml") @@ -210,7 +210,7 @@ def test_promote_model(self, randstr: Callable[[], str], client: MLClient, regis model_entity = load_model(model_path) model_entity.name = model_name model_entity.version = model_version - model = client.models.create_or_update(model_entity) + client.models.create_or_update(model_entity) # Start promoting to registry # 1. 
Get registered model in workspace model_in_workspace = client.models.get(name=model_name, version=model_version) diff --git a/sdk/ml/azure-ai-ml/tests/model/unittests/test_model_operations.py b/sdk/ml/azure-ai-ml/tests/model/unittests/test_model_operations.py index 49b306f4341c..f55198e5a202 100644 --- a/sdk/ml/azure-ai-ml/tests/model/unittests/test_model_operations.py +++ b/sdk/ml/azure-ai-ml/tests/model/unittests/test_model_operations.py @@ -1,5 +1,5 @@ from pathlib import Path -from typing import Iterable +from typing import Dict, Iterable, Optional from unittest.mock import Mock, patch import pytest @@ -14,7 +14,7 @@ from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope from azure.ai.ml.entities._assets import Model from azure.ai.ml.entities._assets._artifacts.artifact import ArtifactStorageInfo -from azure.ai.ml.exceptions import ErrorTarget +from azure.ai.ml.exceptions import ErrorTarget, ValidationException from azure.ai.ml.operations import DatastoreOperations, ModelOperations @@ -318,3 +318,183 @@ def test_model_entity_class_exist(self): from azure.ai.ml.entities import WorkspaceModelReference except ImportError: assert False, "WorkspaceModelReference class not found" + + @pytest.mark.parametrize( + "old_properties,new_properties", + [ + (None, {}), + ({"test": "test"}, {}), + ({}, {"test": "test"}), + ({}, {}), + ({"is-promptflow": "true", "is-evaluator": "true"}, {"is-promptflow": "true", "is-evaluator": "true"}), + (None, {"is-promptflow": "true", "is-evaluator": "true"}), + ], + ) + def test_create_success( + self, + mock_datastore_operation: DatastoreOperations, + mock_operation_config: OperationConfig, + mock_workspace_scope: OperationScope, + tmp_path: Path, + old_properties: Dict[str, str], + new_properties: Dict[str, str], + ): + mock_model_operation = ModelOperations( + operation_scope=mock_workspace_scope, + operation_config=mock_operation_config, + service_client=Mock(), + 
datastore_operations=mock_datastore_operation, + **{ModelOperations._IS_EVALUATOR: True}, + ) + """Test that new version is created if models are of the same type.""" + model_name = f"model_random_string" + p = tmp_path / "model_full.yml" + model_path = tmp_path / "model.pkl" + model_path.write_text("hello world") + p.write_text( + f""" +name: {model_name} +path: ./model.pkl +version: 3""" + ) + + with patch( + "azure.ai.ml._artifacts._artifact_utilities._upload_to_datastore", + return_value=ArtifactStorageInfo( + name=model_name, + version="3", + relative_path="path", + datastore_arm_id="/subscriptions/mock/resourceGroups/mock/providers/Microsoft.MachineLearningServices/workspaces/mock/datastores/datastore_id", + container_name="containerName", + ), + ) as mock_upload, patch( + "azure.ai.ml.operations._model_operations.Model._from_rest_object", return_value=Model() + ), patch( + "azure.ai.ml.operations._model_operations.ModelOperations._get_model_properties", + return_value=old_properties, + ): + model = load_model(source=p) + model.properties = new_properties + path = Path(model._base_path, model.path).resolve() + mock_model_operation.create_or_update(model) + mock_upload.assert_called_once_with( + mock_workspace_scope, + mock_model_operation._datastore_operation, + path, + asset_name=model.name, + asset_version=model.version, + datastore_name=None, + asset_hash=None, + sas_uri=None, + artifact_type=ErrorTarget.MODEL, + show_progress=True, + ignore_file=None, + blob_uri=None, + ) + mock_model_operation._model_versions_operation.create_or_update.assert_called_once() + + @pytest.mark.parametrize( + "old_properties,new_properties,message", + [ + # ({"is-promptflow": "true", "is-evaluator": "true"}, {}, "because previous version of model was marked"), + ({}, {"is-promptflow": "true", "is-evaluator": "true"}, "because this version of model was marked"), + ], + ) + def test_create_raises_if_wrong_type( + self, + mock_datastore_operation: DatastoreOperations, + 
mock_operation_config: OperationConfig, + mock_workspace_scope: OperationScope, + tmp_path: Path, + old_properties: Dict[str, str], + new_properties: Dict[str, str], + message: str, + ) -> None: + """Test exception if pre existing model is not of a correct type.""" + mock_model_operation = ModelOperations( + operation_scope=mock_workspace_scope, + operation_config=mock_operation_config, + service_client=Mock(), + datastore_operations=mock_datastore_operation, + **{ModelOperations._IS_EVALUATOR: True}, + ) + model_name = f"model_random_string" + p = tmp_path / "model_full.yml" + model_path = tmp_path / "model.pkl" + model_path.write_text("hello world") + p.write_text( + f""" +name: {model_name} +path: ./model.pkl""" + ) + new_model = load_model(source=p) + new_model.properties = new_properties + with patch( + "azure.ai.ml.operations._model_operations.ModelOperations._get_model_properties", + return_value=old_properties, + ): + with pytest.raises(ValidationException) as cm: + mock_model_operation.create_or_update(new_model) + assert message in cm.value.args[0] + + @pytest.mark.parametrize( + "label,version,get_raises,latest_raises,expected", + [ + ("lbl", None, False, False, {"model": "from_get"}), + ("lbl", "1", False, False, {"model": "from_get"}), + (None, "1", False, False, {"model": "from_get"}), + (None, None, False, False, {"model": "from_latest"}), + ("lbl", None, True, False, None), + (None, "1", True, False, None), + (None, None, True, False, {"model": "from_latest"}), + (None, None, True, True, None), + ], + ) + def test_return_properties( + self, + mock_model_operation: ModelOperations, + tmp_path: Path, + label: Optional[str], + version: Optional[str], + get_raises: bool, + latest_raises: bool, + expected: Optional[Dict[str, str]], + ) -> None: + model_name = f"model_random_string" + p = tmp_path / "model_full.yml" + model_path = tmp_path / "model.pkl" + model_path.write_text("hello world") + p.write_text( + f""" +name: {model_name} +path: ./model.pkl""" 
+    )
+        get_model = load_model(source=p)
+        get_model.properties = {"model": "from_get"}
+        latest_model = load_model(source=p)
+        latest_model.properties = {"model": "from_latest"}
+        get_kw = {"side_effect": ResourceNotFoundError("Mock") if get_raises else None, "return_value": get_model}
+        get_latest = {
+            "side_effect": ResourceNotFoundError("Mock") if latest_raises else None,
+            "return_value": latest_model,
+        }
+        with patch("azure.ai.ml.operations._model_operations.ModelOperations.get", **get_kw):
+            with patch("azure.ai.ml.operations._model_operations.ModelOperations._get_latest_version", **get_latest):
+                assert mock_model_operation._get_model_properties(model_name, version, label) == expected
+
+    def test_model_operation_raises_on_evaluators(self, mock_model_operation: ModelOperations, tmp_path: Path):
+        """Test model_operation raises if evaluator is being created."""
+        model_name = f"model_random_string"
+        p = tmp_path / "model_full.yml"
+        model_path = tmp_path / "model.pkl"
+        model_path.write_text("hello world")
+        p.write_text(
+            f"""
+name: {model_name}
+path: ./model.pkl"""
+        )
+        model = load_model(source=p)
+        model.properties = {"is-promptflow": "true", "is-evaluator": "true"}
+        with pytest.raises(ValidationException) as cm:
+            mock_model_operation.create_or_update(model)
+        assert "please use EvaluatorOperations" in cm.value.args[0]