feat: adding serving_container_grpc_ports parameter to Model.upload() method

PiperOrigin-RevId: 590350209
vertex-sdk-bot authored and copybara-github committed Dec 12, 2023
1 parent 4347c9c commit 6a00ed7
Showing 4 changed files with 65 additions and 0 deletions.
google/cloud/aiplatform/models.py (16 additions, 0 deletions)
@@ -2977,6 +2977,7 @@ def upload(
        serving_container_args: Optional[Sequence[str]] = None,
        serving_container_environment_variables: Optional[Dict[str, str]] = None,
        serving_container_ports: Optional[Sequence[int]] = None,
        serving_container_grpc_ports: Optional[Sequence[int]] = None,
        local_model: Optional["LocalModel"] = None,
        instance_schema_uri: Optional[str] = None,
        parameters_schema_uri: Optional[str] = None,
@@ -3083,6 +3084,14 @@ def upload(
                no impact on whether the port is actually exposed, any port listening on
                the default "0.0.0.0" address inside a container will be accessible from
                the network.
            serving_container_grpc_ports (Optional[Sequence[int]]):
                Optional. Declaration of ports that are exposed by the container. Vertex
                AI sends gRPC prediction requests that it receives to the first port on
                this list. Vertex AI also sends liveness and health checks to this port.
                If you do not specify this field, gRPC requests to the container will be
                disabled.
                Vertex AI does not use ports other than the first one listed. This field
                corresponds to the `ports` field of the Kubernetes Containers v1 core API.
            local_model (Optional[LocalModel]):
                Optional. A LocalModel instance that includes a `serving_container_spec`.
                If provided, the `serving_container_spec` of the LocalModel instance
@@ -3238,6 +3247,7 @@ def upload(

        env = None
        ports = None
        grpc_ports = None
        deployment_timeout = (
            duration_pb2.Duration(seconds=serving_container_deployment_timeout)
            if serving_container_deployment_timeout
@@ -3256,6 +3266,11 @@
                gca_model_compat.Port(container_port=port)
                for port in serving_container_ports
            ]
        if serving_container_grpc_ports:
            grpc_ports = [
                gca_model_compat.Port(container_port=port)
                for port in serving_container_grpc_ports
            ]
        if (
            serving_container_startup_probe_exec
            or serving_container_startup_probe_period_seconds
@@ -3293,6 +3308,7 @@
            args=serving_container_args,
            env=env,
            ports=ports,
            grpc_ports=grpc_ports,
            predict_route=serving_container_predict_route,
            health_route=serving_container_health_route,
            deployment_timeout=deployment_timeout,
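For context, here is a minimal usage sketch of the new parameter. The project, location, display name, image URI, and port number below are illustrative assumptions, not values from this commit:

```python
from google.cloud import aiplatform

# Assumed setup; any existing project/location works.
aiplatform.init(project="my-project", location="us-central1")

# Vertex AI routes gRPC prediction traffic to the first port listed;
# omitting the parameter leaves gRPC requests to the container disabled.
model = aiplatform.Model.upload(
    display_name="grpc-serving-model",  # hypothetical
    serving_container_image_uri="gcr.io/my-project/grpc-server:latest",  # hypothetical
    serving_container_grpc_ports=[8500],
)
```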
google/cloud/aiplatform/prediction/local_model.py (16 additions, 0 deletions)
@@ -60,6 +60,7 @@ def __init__(
        serving_container_args: Optional[Sequence[str]] = None,
        serving_container_environment_variables: Optional[Dict[str, str]] = None,
        serving_container_ports: Optional[Sequence[int]] = None,
        serving_container_grpc_ports: Optional[Sequence[int]] = None,
        serving_container_deployment_timeout: Optional[int] = None,
        serving_container_shared_memory_size_mb: Optional[int] = None,
        serving_container_startup_probe_exec: Optional[Sequence[str]] = None,
@@ -110,6 +111,14 @@ def __init__(
                no impact on whether the port is actually exposed, any port listening on
                the default "0.0.0.0" address inside a container will be accessible from
                the network.
            serving_container_grpc_ports (Optional[Sequence[int]]):
                Optional. Declaration of ports that are exposed by the container. Vertex
                AI sends gRPC prediction requests that it receives to the first port on
                this list. Vertex AI also sends liveness and health checks to this port.
                If you do not specify this field, gRPC requests to the container will be
                disabled.
                Vertex AI does not use ports other than the first one listed. This field
                corresponds to the `ports` field of the Kubernetes Containers v1 core API.
            serving_container_deployment_timeout (int):
                Optional. Deployment timeout in seconds.
            serving_container_shared_memory_size_mb (int):
@@ -156,6 +165,7 @@ def __init__(

        env = None
        ports = None
        grpc_ports = None
        deployment_timeout = (
            duration_pb2.Duration(seconds=serving_container_deployment_timeout)
            if serving_container_deployment_timeout
@@ -174,6 +184,11 @@ def __init__(
                gca_model_compat.Port(container_port=port)
                for port in serving_container_ports
            ]
        if serving_container_grpc_ports:
            grpc_ports = [
                gca_model_compat.Port(container_port=port)
                for port in serving_container_grpc_ports
            ]
        if (
            serving_container_startup_probe_exec
            or serving_container_startup_probe_period_seconds
@@ -211,6 +226,7 @@ def __init__(
            args=serving_container_args,
            env=env,
            ports=ports,
            grpc_ports=grpc_ports,
            predict_route=serving_container_predict_route,
            health_route=serving_container_health_route,
            deployment_timeout=deployment_timeout,
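The same parameter is mirrored on `LocalModel.__init__`; a sketch under the same assumptions (hypothetical image URI and port):

```python
from google.cloud.aiplatform.prediction import LocalModel

local_model = LocalModel(
    serving_container_image_uri="gcr.io/my-project/grpc-server:latest",  # hypothetical
    serving_container_grpc_ports=[8500],
)

# Each int is converted to a Port proto and stored on the spec's
# grpc_ports field, i.e. [Port(container_port=8500)] here.
print(local_model.serving_container_spec.grpc_ports)
```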
tests/unit/aiplatform/test_models.py (8 additions, 0 deletions)
@@ -113,6 +113,7 @@
"loss_fn": "mse",
}
_TEST_SERVING_CONTAINER_PORTS = [8888, 10000]
_TEST_SERVING_CONTAINER_GRPC_PORTS = [7777, 7000]
_TEST_SERVING_CONTAINER_DEPLOYMENT_TIMEOUT = 100
_TEST_SERVING_CONTAINER_SHARED_MEMORY_SIZE_MB = 1000
_TEST_SERVING_CONTAINER_STARTUP_PROBE_EXEC = ["a", "b"]
@@ -1606,6 +1607,7 @@ def test_upload_uploads_and_gets_model_with_all_args(
            serving_container_args=_TEST_SERVING_CONTAINER_ARGS,
            serving_container_environment_variables=_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
            serving_container_ports=_TEST_SERVING_CONTAINER_PORTS,
            serving_container_grpc_ports=_TEST_SERVING_CONTAINER_GRPC_PORTS,
            explanation_metadata=_TEST_EXPLANATION_METADATA,
            explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
            labels=_TEST_LABEL,
@@ -1634,6 +1636,11 @@ def test_upload_uploads_and_gets_model_with_all_args(
            for port in _TEST_SERVING_CONTAINER_PORTS
        ]

        grpc_ports = [
            gca_model.Port(container_port=port)
            for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
        ]

        deployment_timeout = duration_pb2.Duration(
            seconds=_TEST_SERVING_CONTAINER_DEPLOYMENT_TIMEOUT
        )
@@ -1662,6 +1669,7 @@ def test_upload_uploads_and_gets_model_with_all_args(
            args=_TEST_SERVING_CONTAINER_ARGS,
            env=env,
            ports=ports,
            grpc_ports=grpc_ports,
            deployment_timeout=deployment_timeout,
            shared_memory_size_mb=_TEST_SERVING_CONTAINER_SHARED_MEMORY_SIZE_MB,
            startup_probe=startup_probe,
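As the test asserts, each integer in `serving_container_grpc_ports` is expanded into a `Port` message on the resulting container spec. Roughly, using the compat types these tests import (the image URI is a hypothetical placeholder):

```python
from google.cloud.aiplatform.compat.types import model as gca_model

# [7777, 7000] becomes two Port protos; only the first is used for routing.
grpc_ports = [
    gca_model.Port(container_port=port) for port in [7777, 7000]
]
container_spec = gca_model.ModelContainerSpec(
    image_uri="gcr.io/my-project/server:latest",  # hypothetical
    grpc_ports=grpc_ports,
)
```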
tests/unit/aiplatform/test_prediction.py (25 additions, 0 deletions)
@@ -111,6 +111,7 @@
"loss_fn": "mse",
}
_TEST_SERVING_CONTAINER_PORTS = [8888, 10000]
_TEST_SERVING_CONTAINER_GRPC_PORTS = [7777, 7000]
_TEST_ID = "1028944691210842416"
_TEST_LABEL = {"team": "experimentation", "trial_id": "x435"}
_TEST_APPENDED_USER_AGENT = ["fake_user_agent"]
@@ -1112,6 +1113,10 @@ def test_init_with_serving_container_spec(self):
            gca_model_compat.Port(container_port=port)
            for port in _TEST_SERVING_CONTAINER_PORTS
        ]
        grpc_ports = [
            gca_model_compat.Port(container_port=port)
            for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
        ]
        container_spec = gca_model_compat.ModelContainerSpec(
            image_uri=_TEST_SERVING_CONTAINER_IMAGE,
            predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
@@ -1120,6 +1125,7 @@
            args=_TEST_SERVING_CONTAINER_ARGS,
            env=env,
            ports=ports,
            grpc_ports=grpc_ports,
        )

        local_model = LocalModel(
@@ -1139,6 +1145,9 @@ def test_init_with_serving_container_spec(self):
        assert local_model.serving_container_spec.args == container_spec.args
        assert local_model.serving_container_spec.env == container_spec.env
        assert local_model.serving_container_spec.ports == container_spec.ports
        assert (
            local_model.serving_container_spec.grpc_ports == container_spec.grpc_ports
        )

    def test_init_with_serving_container_spec_but_not_image_uri_throws_exception(self):
        env = [
@@ -1149,13 +1158,18 @@ def test_init_with_serving_container_spec_but_not_image_uri_throws_exception(self):
            gca_model_compat.Port(container_port=port)
            for port in _TEST_SERVING_CONTAINER_PORTS
        ]
        grpc_ports = [
            gca_model_compat.Port(container_port=port)
            for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
        ]
        container_spec = gca_model_compat.ModelContainerSpec(
            predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
            health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
            command=_TEST_SERVING_CONTAINER_COMMAND,
            args=_TEST_SERVING_CONTAINER_ARGS,
            env=env,
            ports=ports,
            grpc_ports=grpc_ports,
        )
        expected_message = "Image uri is required for the serving container spec to initialize a LocalModel instance."

@@ -1175,6 +1189,7 @@ def test_init_with_separate_args(self):
            serving_container_args=_TEST_SERVING_CONTAINER_ARGS,
            serving_container_environment_variables=_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
            serving_container_ports=_TEST_SERVING_CONTAINER_PORTS,
            serving_container_grpc_ports=_TEST_SERVING_CONTAINER_GRPC_PORTS,
        )

        env = [
@@ -1187,6 +1202,11 @@
            for port in _TEST_SERVING_CONTAINER_PORTS
        ]

        grpc_ports = [
            gca_model_compat.Port(container_port=port)
            for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
        ]

        container_spec = gca_model_compat.ModelContainerSpec(
            image_uri=_TEST_SERVING_CONTAINER_IMAGE,
            predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
@@ -1195,6 +1215,7 @@
            args=_TEST_SERVING_CONTAINER_ARGS,
            env=env,
            ports=ports,
            grpc_ports=grpc_ports,
        )

        assert local_model.serving_container_spec.image_uri == container_spec.image_uri
@@ -1210,6 +1231,9 @@ def test_init_with_separate_args(self):
        assert local_model.serving_container_spec.args == container_spec.args
        assert local_model.serving_container_spec.env == container_spec.env
        assert local_model.serving_container_spec.ports == container_spec.ports
        assert (
            local_model.serving_container_spec.grpc_ports == container_spec.grpc_ports
        )

    def test_init_with_separate_args_but_not_image_uri_throws_exception(self):
        expected_message = "Serving container image uri is required to initialize a LocalModel instance."
@@ -1222,6 +1246,7 @@ def test_init_with_separate_args_but_not_image_uri_throws_exception(self):
                serving_container_args=_TEST_SERVING_CONTAINER_ARGS,
                serving_container_environment_variables=_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
                serving_container_ports=_TEST_SERVING_CONTAINER_PORTS,
                serving_container_grpc_ports=_TEST_SERVING_CONTAINER_GRPC_PORTS,
            )

        assert str(exception.value) == expected_message