diff --git a/qa/L0_trt_error_propagation/trt_error_propagation_test.py b/qa/L0_trt_error_propagation/trt_error_propagation_test.py
index 43a3b1cf78..300f8b9705 100644
--- a/qa/L0_trt_error_propagation/trt_error_propagation_test.py
+++ b/qa/L0_trt_error_propagation/trt_error_propagation_test.py
@@ -40,12 +40,17 @@ def test_invalid_trt_model(self):
         with self.assertRaises(InferenceServerException) as cm:
             self.__triton.load_model("invalid_plan_file")
         err_msg = str(cm.exception)
-        self.assertIn("Internal: unable to create TensorRT engine", err_msg,
-                      "Caught an unexpected exception")
-        self.assertIn(
-            "Error Code 4: Internal Error (Engine deserialization failed.)",
-            err_msg,
-            "Detailed error message not propagated back to triton client")
+        # All 'expected_msg_parts' should be present in the 'err_msg' in order
+        expected_msg_parts = [
+            "load failed for model", "version 1 is at UNAVAILABLE state: ",
+            "Internal: unable to create TensorRT engine: ", "Error Code ",
+            "Internal Error "
+        ]
+        for expected_msg_part in expected_msg_parts:
+            self.assertIn(
+                expected_msg_part, err_msg,
+                "Cannot find an expected part of error message")
+            _, err_msg = err_msg.split(expected_msg_part, 1)
 
     def test_invalid_trt_model_autocomplete(self):
         with self.assertRaises(InferenceServerException) as cm: