diff --git a/CHANGELOG.md b/CHANGELOG.md
index 701f6961..f2a88df5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,6 +34,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Update pretrained_models_all_versions.json (2023-10-18 18:11:34) by @dhrubo-os ([#322](https://github.com/opensearch-project/opensearch-py-ml/pull/322))
 - Update model upload history - sentence-transformers/paraphrase-mpnet-base-v2 (v.1.0.0)(BOTH) by @dhrubo-os ([#321](https://github.com/opensearch-project/opensearch-py-ml/pull/321))
 - Replaced usage of `is_datetime_or_timedelta_dtype` with `is_timedelta64_dtype` and `is_datetime64_any_dtype`([#316](https://github.com/opensearch-project/opensearch-py-ml/pull/316))
+- use try-except-else block for handling unexpected exceptions during integration tests by @rawwar([#370](https://github.com/opensearch-project/opensearch-py-ml/pull/370))
 
 ### Fixed
 - Enable make_model_config_json to add model description to model config file by @thanawan-atc in ([#203](https://github.com/opensearch-project/opensearch-py-ml/pull/203))
diff --git a/tests/ml_commons/test_ml_commons_client.py b/tests/ml_commons/test_ml_commons_client.py
index bb1adcde..418fa3c8 100644
--- a/tests/ml_commons/test_ml_commons_client.py
+++ b/tests/ml_commons/test_ml_commons_client.py
@@ -165,35 +165,32 @@ def test_train(iris_index):
 
 
 def test_execute():
-    raised = False
     try:
         input_json = {"operation": "max", "input_data": [1.0, 2.0, 3.0]}
         result = ml_client.execute(
             algorithm_name="local_sample_calculator", input_json=input_json
         )
+    except Exception as ex:
+        pytest.fail(
+            f"Raised Exception during execute API testing with dictionary. Exception info: {ex}"
+        )
+    else:
         assert result["output"]["result"] == 3
-    except:  # noqa: E722
-        raised = True
-    assert (
-        raised == False
-    ), "Raised Exception during execute API testing with dictionary"
 
-    raised = False
     try:
         input_json = '{"operation": "max", "input_data": [1.0, 2.0, 3.0]}'
         result = ml_client.execute(
             algorithm_name="local_sample_calculator", input_json=input_json
         )
+    except Exception as ex:
+        pytest.fail(
+            f"Raised Exception during execute API testing with JSON string. Exception info: {ex}"
+        )
+    else:
         assert result["output"]["result"] == 3
-    except:  # noqa: E722
-        raised = True
-    assert (
-        raised == False
-    ), "Raised Exception during execute API testing with JSON string"
 
 
 def test_DEPRECATED_integration_pretrained_model_upload_unload_delete():
-    raised = False
     try:
         model_id = ml_client.upload_pretrained_model(
             model_name=PRETRAINED_MODEL_NAME,
@@ -203,43 +200,45 @@ def test_DEPRECATED_integration_pretrained_model_upload_unload_delete():
             wait_until_loaded=True,
         )
         ml_model_status = ml_client.get_model_info(model_id)
+    except Exception as ex:
+        pytest.fail(
+            f"Raised Exception during pretrained model registration and deployment. Exception info:{ex}"
+        )
+    else:
         assert ml_model_status.get("model_state") != "DEPLOY_FAILED"
-    except:  # noqa: E722
-        raised = True
-    assert (
-        raised == False
-    ), "Raised Exception during pretrained model registration and deployment"
 
     if model_id:
-        raised = False
         try:
            ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(
+                f"Raised Exception in getting pretrained model info. Exception info: {ex}"
+            )
+        else:
             assert ml_model_status.get("model_format") == "TORCH_SCRIPT"
             assert ml_model_status.get("algorithm") == "TEXT_EMBEDDING"
-        except:  # noqa: E722
-            raised = True
-        assert raised == False, "Raised Exception in getting pretrained model info"
 
-        raised = False
         try:
             ml_client.unload_model(model_id)
             ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(
+                f"Raised Exception in pretrained model undeployment. Exception info: {ex}"
+            )
+        else:
             assert ml_model_status.get("model_state") != "UNDEPLOY_FAILED"
-        except:  # noqa: E722
-            raised = True
-        assert raised == False, "Raised Exception in pretrained model undeployment"
 
-        raised = False
         try:
             delete_model_obj = ml_client.delete_model(model_id)
+        except Exception as ex:
+            pytest.fail(
+                f"Raised Exception in deleting pretrained model. Exception info: {ex}"
+            )
+        else:
             assert delete_model_obj.get("result") == "deleted"
-        except:  # noqa: E722
-            raised = True
-        assert raised == False, "Raised Exception in deleting pretrained model"
 
 
 def test_integration_pretrained_model_register_undeploy_delete():
-    raised = False
     try:
         model_id = ml_client.register_pretrained_model(
             model_name=PRETRAINED_MODEL_NAME,
@@ -249,39 +248,42 @@ def test_integration_pretrained_model_register_undeploy_delete():
             wait_until_deployed=True,
         )
         ml_model_status = ml_client.get_model_info(model_id)
+    except Exception as ex:
+        pytest.fail(
+            f"Raised Exception during pretrained model registration and deployment. Exception info:{ex}"
+        )
+    else:
         assert ml_model_status.get("model_state") != "DEPLOY_FAILED"
-    except:  # noqa: E722
-        raised = True
-    assert (
-        raised == False
-    ), "Raised Exception during pretrained model registration and deployment"
 
     if model_id:
-        raised = False
         try:
             ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(
+                f"Raised Exception in getting pretrained model info. Exception info: {ex}"
+            )
+        else:
             assert ml_model_status.get("model_format") == "TORCH_SCRIPT"
             assert ml_model_status.get("algorithm") == "TEXT_EMBEDDING"
-        except:  # noqa: E722
-            raised = True
-        assert raised == False, "Raised Exception in getting pretrained model info"
 
-        raised = False
         try:
             ml_client.undeploy_model(model_id)
             ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(
+                f"Raised Exception in pretrained model undeployment. Exception info: {ex}"
+            )
+        else:
             assert ml_model_status.get("model_state") != "UNDEPLOY_FAILED"
-        except:  # noqa: E722
-            raised = True
-        assert raised == False, "Raised Exception in pretrained model undeployment"
 
-        raised = False
         try:
             delete_model_obj = ml_client.delete_model(model_id)
+        except Exception as ex:
+            pytest.fail(
+                f"Raised Exception in deleting pretrained model. Exception info: {ex}"
+            )
+        else:
             assert delete_model_obj.get("result") == "deleted"
-        except:  # noqa: E722
-            raised = True
-        assert raised == False, "Raised Exception in deleting pretrained model"
 
 
 def test_DEPRECATED_integration_model_train_upload_full_cycle():
@@ -308,25 +310,26 @@ def test_DEPRECATED_integration_model_train_upload_full_cycle():
         )
         print("Model_id:", model_id)
     except Exception as ex:  # noqa: E722
-        assert False, f"Exception occurred when uploading model: {ex}"
+        pytest.fail(f"Exception occurred when uploading model: {ex}")
 
     if model_id:
         try:
             ml_load_status = ml_client.load_model(model_id, wait_until_loaded=False)
             task_id = ml_load_status.get("task_id")
-            assert task_id != "" or task_id is not None
-
             ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(f"Exception occurred when loading model: {ex}")
+        else:
+            assert task_id != "" or task_id is not None
             assert ml_model_status.get("model_state") != "DEPLOY_FAILED"
-        except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred when loading model: {ex}"
 
         try:
             ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(f"Exception occurred when getting model info: {ex}")
+        else:
             assert ml_model_status.get("model_format") == "TORCH_SCRIPT"
             assert ml_model_status.get("algorithm") == "TEXT_EMBEDDING"
-        except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred when getting model info: {ex}"
 
         if task_id:
             ml_task_status = None
@@ -334,11 +337,13 @@ def test_DEPRECATED_integration_model_train_upload_full_cycle():
                 ml_task_status = ml_client.get_task_info(
                     task_id, wait_until_task_done=True
                 )
-                assert ml_task_status.get("task_type") == "DEPLOY_MODEL"
                 print("State:", ml_task_status.get("state"))
-                assert ml_task_status.get("state") != "FAILED"
             except Exception as ex:  # noqa: E722
-                assert False, f"Exception occurred when getting task info: {ex}"
+                pytest.fail(f"Exception occurred when getting task info: {ex}")
+            else:
+                assert ml_task_status.get("task_type") == "DEPLOY_MODEL"
+                assert ml_task_status.get("state") != "FAILED"
+
         # This is test is being flaky. Sometimes the test is passing and sometimes showing 500 error
         # due to memory circuit breaker.
         # Todo: We need to revisit this test.
@@ -346,30 +351,33 @@ def test_DEPRECATED_integration_model_train_upload_full_cycle():
             sentences = ["First test sentence", "Second test sentence"]
             embedding_result = ml_client.generate_embedding(model_id, sentences)
             print(embedding_result)
-            assert len(embedding_result.get("inference_results")) == 2
         except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred when generating embedding: {ex}"
+            pytest.fail(f"Exception occurred when generating embedding: {ex}")
+        else:
+            assert len(embedding_result.get("inference_results")) == 2
 
         try:
             delete_task_obj = ml_client.delete_task(task_id)
-            assert delete_task_obj.get("result") == "deleted"
         except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred when deleting task: {ex}"
+            pytest.fail(f"Exception occurred when deleting task: {ex}")
+        else:
+            assert delete_task_obj.get("result") == "deleted"
 
         try:
             ml_client.unload_model(model_id)
             ml_model_status = ml_client.get_model_info(model_id)
-            assert ml_model_status.get("model_state") != "UNDEPLOY_FAILED"
         except Exception as ex:  # noqa: E722
-            assert (
-                False
-            ), f"Exception occurred when pretrained model undeployment : {ex}"
+            pytest.fail(
+                f"Exception occurred when pretrained model undeployment : {ex}"
+            )
+        else:
+            assert ml_model_status.get("model_state") != "UNDEPLOY_FAILED"
 
         try:
             delete_model_obj = ml_client.delete_model(model_id)
             assert delete_model_obj.get("result") == "deleted"
         except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred when deleting model: {ex}"
+            pytest.fail(f"Exception occurred when deleting model: {ex}")
 
 
 def test_integration_model_train_register_full_cycle():
@@ -400,8 +408,8 @@ def test_integration_model_train_register_full_cycle():
             deploy_model=True,
             isVerbose=True,
         )
-    except Exception as ex:  # noqa: E722
-        assert False, f"Exception occurred during first model registration: {ex}"
+    except Exception as ex:
+        pytest.fail(f"Exception occurred during first model registration: {ex}")
 
     try:
         model_id = ml_client.register_model(
@@ -412,27 +420,29 @@
         )
         print("Model_id:", model_id)
     except Exception as ex:  # noqa: E722
-        assert False, f"Exception occurred during second model registration: {ex}"
+        pytest.fail(f"Exception occurred during second model registration: {ex}")
 
     if model_id:
         try:
             ml_load_status = ml_client.deploy_model(
                 model_id, wait_until_deployed=False
             )
+        except Exception as ex:
+            pytest.fail(f"Exception occurred during model deployment: {ex}")
+        else:
             task_id = ml_load_status.get("task_id")
             assert task_id != "" or task_id is not None
 
             ml_model_status = ml_client.get_model_info(model_id)
             assert ml_model_status.get("model_state") != "DEPLOY_FAILED"
-        except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred during model deployment: {ex}"
 
         try:
             ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(f"Exception occurred when getting model info: {ex}")
+        else:
             assert ml_model_status.get("model_format") == "TORCH_SCRIPT"
             assert ml_model_status.get("algorithm") == "TEXT_EMBEDDING"
-        except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred when getting model info: {ex}"
 
         if task_id:
             ml_task_status = None
@@ -440,11 +450,12 @@
                 ml_task_status = ml_client.get_task_info(
                     task_id, wait_until_task_done=True
                 )
+            except Exception as ex:  # noqa: E722
+                pytest.fail(f"Exception occurred in pulling task info: {ex}")
+            else:
                 assert ml_task_status.get("task_type") == "DEPLOY_MODEL"
                 print("State:", ml_task_status.get("state"))
                 assert ml_task_status.get("state") != "FAILED"
-            except Exception as ex:  # noqa: E722
-                assert False, f"Exception occurred in pulling task info: {ex}"
 
         # This is test is being flaky. Sometimes the test is passing and sometimes showing 500 error
         # due to memory circuit breaker.
@@ -453,126 +464,120 @@
             sentences = ["First test sentence", "Second test sentence"]
             embedding_result = ml_client.generate_embedding(model_id, sentences)
             print(embedding_result)
+        except Exception as ex:
+            pytest.fail(
+                f"Exception occurred when generating sentence embedding: {ex}"
+            )
+        else:
             assert len(embedding_result.get("inference_results")) == 2
-        except Exception as ex:  # noqa: E722
-            assert (
-                False
-            ), f"Exception occurred when generating sentence embedding: {ex}"
 
         try:
             delete_task_obj = ml_client.delete_task(task_id)
+        except Exception as ex:
+            pytest.fail(f"Exception occurred when deleting task: {ex}")
+        else:
            assert delete_task_obj.get("result") == "deleted"
-        except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred when deleting task: {ex}"
 
         try:
             ml_client.undeploy_model(model_id)
             ml_model_status = ml_client.get_model_info(model_id)
+        except Exception as ex:
+            pytest.fail(f"Exception occurred during model undeployment : {ex}")
+        else:
             assert ml_model_status.get("model_state") != "UNDEPLOY_FAILED"
-        except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred during model undeployment : {ex}"
 
         try:
             delete_model_obj = ml_client.delete_model(model_id)
+        except Exception as ex:
+            pytest.fail(f"Exception occurred during model deletion : {ex}")
+        else:
             assert delete_model_obj.get("result") == "deleted"
-        except Exception as ex:  # noqa: E722
-            assert False, f"Exception occurred during model deletion : {ex}"
 
 
 def test_search():
     # Search task cases
-    raised = False
     try:
         search_task_obj = ml_client.search_task(
             input_json='{"query": {"match_all": {}},"size": 1}'
         )
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching task. Exception info: {ex}")
+    else:
         assert search_task_obj["hits"]["hits"] != []
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching task"
 
-    raised = False
     try:
         search_task_obj = ml_client.search_task(
            input_json={"query": {"match_all": {}}, "size": 1}
        )
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching task. Exception info: {ex}")
+    else:
         assert search_task_obj["hits"]["hits"] != []
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching task"
 
-    raised = False
    try:
         search_task_obj = ml_client.search_task(input_json=15)
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching task. Exception info: {ex}")
+    else:
         assert search_task_obj == "Invalid JSON object passed as argument."
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching task"
 
-    raised = False
     try:
         search_task_obj = ml_client.search_task(input_json="15")
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching task. Exception info: {ex}")
+    else:
         assert search_task_obj == "Invalid JSON object passed as argument."
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching task"
 
-    raised = False
     try:
         search_task_obj = ml_client.search_task(
             input_json='{"query": {"match_all": {}},size: 1}'
         )
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching task. Exception info: {ex}")
+    else:
         assert search_task_obj == "Invalid JSON string passed as argument."
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching task"
 
     # Search model cases
-    raised = False
     try:
         search_model_obj = ml_client.search_model(
             input_json='{"query": {"match_all": {}},"size": 1}'
         )
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching model. Exception info: {ex}")
+    else:
         assert search_model_obj["hits"]["hits"] != []
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching model"
 
-    raised = False
     try:
         search_model_obj = ml_client.search_model(
             input_json={"query": {"match_all": {}}, "size": 1}
         )
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching model. Exception info: {ex}")
+    else:
         assert search_model_obj["hits"]["hits"] != []
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching model"
 
-    raised = False
     try:
         search_model_obj = ml_client.search_model(input_json=15)
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching model. Exception info: {ex}")
+    else:
         assert search_model_obj == "Invalid JSON object passed as argument."
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching model"
 
-    raised = False
     try:
         search_model_obj = ml_client.search_model(input_json="15")
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching model. Exception info: {ex}")
+    else:
         assert search_model_obj == "Invalid JSON object passed as argument."
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching model"
 
-    raised = False
     try:
         search_model_obj = ml_client.search_model(
             input_json='{"query": {"match_all": {}},size: 1}'
         )
+    except Exception as ex:
+        pytest.fail(f"Raised Exception in searching model. Exception info: {ex}")
+    else:
         assert search_model_obj == "Invalid JSON string passed as argument."
-    except:  # noqa: E722
-        raised = True
-    assert raised == False, "Raised Exception in searching model"
 
 
 # Model Profile Tests. These tests will need some model train/predict run data. Hence, need