Commit 4055586

style

bojiang committed Dec 24, 2020
1 parent f3c6e7d commit 4055586

Showing 3 changed files with 10 additions and 10 deletions.

14 changes: 7 additions & 7 deletions bentoml/service/__init__.py
@@ -66,14 +66,14 @@ def validate_inference_api_name(api_name: str):
         )
 
 
-def validate_inference_api_route(api_route: str):
-    if re.findall(r"[?#]+|^(//)|^:", api_route):
+def validate_inference_api_route(route: str):
+    if re.findall(r"[?#]+|^(//)|^:", route):
         raise InvalidArgument(
-            "The path {} contains illegal url characters".format(api_route)
+            "The path {} contains illegal url characters".format(route)
         )
-    if api_route in BENTOML_RESERVED_API_NAMES:
+    if route in BENTOML_RESERVED_API_NAMES:
         raise InvalidArgument(
-            "Reserved API route: '{}' is reserved for infra endpoints".format(api_route)
+            "Reserved API route: '{}' is reserved for infra endpoints".format(route)
         )


@@ -473,7 +473,7 @@ def _config_inference_apis(self):
         ):
             if hasattr(function, "_is_api"):
                 api_name = getattr(function, "_api_name")
-                api_route = getattr(function, "_api_route", None)
+                route = getattr(function, "_api_route", None)
                 api_doc = getattr(function, "_api_doc")
                 input_adapter = getattr(function, "_input_adapter")
                 output_adapter = getattr(function, "_output_adapter")
@@ -495,7 +495,7 @@ def _config_inference_apis(self):
                         mb_max_latency=mb_max_latency,
                         mb_max_batch_size=mb_max_batch_size,
                         batch=batch,
-                        api_route=api_route,
+                        route=route,
                     )
                 )

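For reference, a quick standalone check of what the renamed validator's regex accepts and rejects (the pattern comes from the diff above; the sample routes are chosen for illustration):

import re

# The validator's pattern: '?' or '#' anywhere, a leading "//", or a
# leading ":" all mark a route as illegal.
pattern = r"[?#]+|^(//)|^:"

for route in ["predict", "v1/predict", "predict?x=1", "//predict", ":predict", "pre#dict"]:
    # re.findall returns a non-empty list on any match (the captured group may
    # be ''), which is the same truthiness check the validator relies on.
    illegal = bool(re.findall(pattern, route))
    print(f"{route!r} -> {'rejected' if illegal else 'accepted'}")

# predict and v1/predict are accepted; the other four are rejected.
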
4 changes: 2 additions & 2 deletions bentoml/service/inference_api.py
@@ -50,7 +50,7 @@ def __init__(
         mb_max_latency=10000,
         mb_max_batch_size=1000,
         batch=False,
-        api_route=None,
+        route=None,
     ):
         """
         :param service: ref to service containing this API
@@ -82,7 +82,7 @@ def __init__(
         self.mb_max_latency = mb_max_latency
         self.mb_max_batch_size = mb_max_batch_size
         self.batch = batch
-        self.route = name if api_route is None else api_route
+        self.route = name if route is None else route
 
         if not self.input_adapter.BATCH_MODE_SUPPORTED and batch:
             raise BentoMLConfigException(
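The fallback self.route = name if route is None else route means an inference API is served under its function name unless an explicit route is given. A minimal usage sketch of the two cases, assuming a typical BentoML 0.x service (the JsonInput import path and the exact @api signature are assumptions, not part of this diff):

from bentoml import BentoService, api
from bentoml.adapters import JsonInput

class MyService(BentoService):
    @api(input=JsonInput(), batch=False)
    def predict(self, parsed_json):
        # No route argument: InferenceAPI falls back to the api name,
        # so this handler is served at /predict.
        return parsed_json

    @api(input=JsonInput(), batch=False, route="v2/predict")
    def predict_v2(self, parsed_json):
        # Explicit route: served at /v2/predict rather than /predict_v2.
        return parsed_json
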
2 changes: 1 addition & 1 deletion tests/server/test_model_api_server.py
@@ -34,7 +34,7 @@ def test_api_function_route(bento_service, img_file):
 
     response = test_client.post("/v1!@$%^&*()_-+=[]\\|;:,./predict", data='{"a": 1}',)
     assert 200 == response.status_code
-    assert '{"a": 1}' == str(response.data)
+    assert '{"a": 1}' == response.data.decode()
 
     assert "predict_dataframe" in index_list
     data = [{"col1": 10}, {"col1": 20}]
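The test fix addresses a Python 3 bytes-vs-str mismatch: the test client's response.data is a bytes object, so str() yields its repr (b-prefix and quotes included) and the comparison checks the repr rather than the payload text. A quick illustration:

data = b'{"a": 1}'

str(data)      # 'b\'{"a": 1}\'' : the repr of the bytes, so the comparison fails
data.decode()  # '{"a": 1}'      : the decoded text the assertion expects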
