diff --git a/bentoml/service/__init__.py b/bentoml/service/__init__.py
index 362e22e8558..244cc3a0b1c 100644
--- a/bentoml/service/__init__.py
+++ b/bentoml/service/__init__.py
@@ -66,14 +66,14 @@ def validate_inference_api_name(api_name: str):
         )
 
 
-def validate_inference_api_route(api_route: str):
-    if re.findall(r"[?#]+|^(//)|^:", api_route):
+def validate_inference_api_route(route: str):
+    if re.findall(r"[?#]+|^(//)|^:", route):
         raise InvalidArgument(
-            "The path {} contains illegal url characters".format(api_route)
+            "The path {} contains illegal url characters".format(route)
         )
-    if api_route in BENTOML_RESERVED_API_NAMES:
+    if route in BENTOML_RESERVED_API_NAMES:
         raise InvalidArgument(
-            "Reserved API route: '{}' is reserved for infra endpoints".format(api_route)
+            "Reserved API route: '{}' is reserved for infra endpoints".format(route)
         )
 
 
@@ -473,7 +473,7 @@ def _config_inference_apis(self):
         ):
             if hasattr(function, "_is_api"):
                 api_name = getattr(function, "_api_name")
-                api_route = getattr(function, "_api_route", None)
+                route = getattr(function, "_api_route", None)
                 api_doc = getattr(function, "_api_doc")
                 input_adapter = getattr(function, "_input_adapter")
                 output_adapter = getattr(function, "_output_adapter")
@@ -495,7 +495,7 @@ def _config_inference_apis(self):
                         mb_max_latency=mb_max_latency,
                         mb_max_batch_size=mb_max_batch_size,
                         batch=batch,
-                        api_route=api_route,
+                        route=route,
                     )
                 )
 
diff --git a/bentoml/service/inference_api.py b/bentoml/service/inference_api.py
index 6a11653f384..fa6f244ca0d 100644
--- a/bentoml/service/inference_api.py
+++ b/bentoml/service/inference_api.py
@@ -50,7 +50,7 @@ def __init__(
         mb_max_latency=10000,
         mb_max_batch_size=1000,
         batch=False,
-        api_route=None,
+        route=None,
     ):
         """
         :param service: ref to service containing this API
@@ -82,7 +82,7 @@ def __init__(
         self.mb_max_latency = mb_max_latency
         self.mb_max_batch_size = mb_max_batch_size
         self.batch = batch
-        self.route = name if api_route is None else api_route
+        self.route = name if route is None else route
 
         if not self.input_adapter.BATCH_MODE_SUPPORTED and batch:
             raise BentoMLConfigException(
diff --git a/tests/server/test_model_api_server.py b/tests/server/test_model_api_server.py
index ae3e05a7155..b01c6dc44b7 100644
--- a/tests/server/test_model_api_server.py
+++ b/tests/server/test_model_api_server.py
@@ -34,7 +34,7 @@ def test_api_function_route(bento_service, img_file):
     response = test_client.post("/v1!@$%^&*()_-+=[]\\|;:,./predict", data='{"a": 1}',)
     assert 200 == response.status_code
-    assert '{"a": 1}' == str(response.data)
+    assert '{"a": 1}' == response.data.decode()
     assert "predict_dataframe" in index_list
 
     data = [{"col1": 10}, {"col1": 20}]