Check TensorRT error message more granularly (#5719)
* Check TRT err msg more granularly

* Clarify source of error messages

* Consolidate tests for message parts
kthui authored May 1, 2023
1 parent 23172b2 commit 5b4bbe9
Showing 1 changed file with 11 additions and 6 deletions.
qa/L0_trt_error_propagation/trt_error_propagation_test.py (11 additions, 6 deletions)
@@ -40,12 +40,17 @@ def test_invalid_trt_model(self):
         with self.assertRaises(InferenceServerException) as cm:
             self.__triton.load_model("invalid_plan_file")
         err_msg = str(cm.exception)
-        self.assertIn("Internal: unable to create TensorRT engine", err_msg,
-                      "Caught an unexpected exception")
-        self.assertIn(
-            "Error Code 4: Internal Error (Engine deserialization failed.)",
-            err_msg,
-            "Detailed error message not propagated back to triton client")
+        # All 'expected_msg_parts' should be present in the 'err_msg' in order
+        expected_msg_parts = [
+            "load failed for model", "version 1 is at UNAVAILABLE state: ",
+            "Internal: unable to create TensorRT engine: ", "Error Code ",
+            "Internal Error "
+        ]
+        for expected_msg_part in expected_msg_parts:
+            self.assertIn(
+                expected_msg_part, err_msg,
+                "Cannot find an expected part of error message")
+            _, err_msg = err_msg.split(expected_msg_part)
 
     def test_invalid_trt_model_autocomplete(self):
         with self.assertRaises(InferenceServerException) as cm:
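
The new loop checks ordering as well as presence: after each assertIn, err_msg is split on the matched part, so the next part is only searched for in the text that follows. Below is a minimal standalone sketch of that ordered-substring check. The helper name assert_parts_in_order and the sample message string are hypothetical (the real server error text may differ), and the sketch uses split(part, 1) rather than the bare split used in the test.

def assert_parts_in_order(message, parts):
    """Assert every part appears in 'message', in the given order."""
    remaining = message
    for part in parts:
        assert part in remaining, f"missing expected part: {part!r}"
        # Keep only the text after the first occurrence, so later parts
        # must appear further along in the message.
        _, remaining = remaining.split(part, 1)


if __name__ == "__main__":
    # Hypothetical message shaped like the error the test expects.
    msg = ("load failed for model 'invalid_plan_file': "
           "version 1 is at UNAVAILABLE state: "
           "Internal: unable to create TensorRT engine: "
           "Error Code 4: Internal Error (Engine deserialization failed.)")
    assert_parts_in_order(msg, [
        "load failed for model", "version 1 is at UNAVAILABLE state: ",
        "Internal: unable to create TensorRT engine: ", "Error Code ",
        "Internal Error "
    ])
    print("all expected parts found in order")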
