🦉 Updates from OwlBot post-processor
gcf-owl-bot[bot] committed Jan 12, 2024
1 parent 2ca1fd4 commit 24299c0
Showing 1,982 changed files with 829 additions and 838,156 deletions.
10 changes: 10 additions & 0 deletions google/cloud/aiplatform_v1beta1/gapic_metadata.json
@@ -1468,6 +1468,11 @@
"methods": [
"compute_tokens"
]
},
"CountTokens": {
"methods": [
"count_tokens"
]
}
}
},
@@ -1478,6 +1483,11 @@
"methods": [
"compute_tokens"
]
},
"CountTokens": {
"methods": [
"count_tokens"
]
}
}
}
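As a side note, gapic_metadata.json is the machine-readable index that maps each RPC (here the new CountTokens) to the client method implementing it, per transport. A minimal sketch of how a tool could consume it, assuming the standard GAPIC metadata schema; the file path and the lookup flow are illustrative, not part of this change:

```python
import json
from pathlib import Path

from google.cloud import aiplatform_v1beta1

# Illustrative path: point this at the installed gapic_metadata.json.
metadata = json.loads(
    Path("google/cloud/aiplatform_v1beta1/gapic_metadata.json").read_text()
)

# services -> LlmUtilityService -> clients -> grpc -> rpcs is the standard
# GAPIC metadata nesting; "CountTokens" now resolves to "count_tokens".
rpcs = metadata["services"]["LlmUtilityService"]["clients"]["grpc"]["rpcs"]
method_name = rpcs["CountTokens"]["methods"][0]

client = aiplatform_v1beta1.LlmUtilityServiceClient()
count_tokens = getattr(client, method_name)  # bound method, ready to call
```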
@@ -325,7 +325,7 @@ async def sample_create_feature_online_store():
            parent (:class:`str`):
                Required. The resource name of the Location to create
                FeatureOnlineStores. Format:
                ``projects/{project}/locations/{location}'``
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
@@ -581,7 +581,7 @@ def sample_create_feature_online_store():
            parent (str):
                Required. The resource name of the Location to create
                FeatureOnlineStores. Format:
                ``projects/{project}/locations/{location}'``
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
@@ -43,6 +43,7 @@
    OptionalRetry = Union[retries.AsyncRetry, object]  # type: ignore

from google.cloud.aiplatform_v1beta1.types import llm_utility_service
from google.cloud.aiplatform_v1beta1.types import prediction_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
@@ -217,6 +218,130 @@ def __init__(
            client_info=client_info,
        )

    async def count_tokens(
        self,
        request: Optional[Union[prediction_service.CountTokensRequest, dict]] = None,
        *,
        endpoint: Optional[str] = None,
        instances: Optional[MutableSequence[struct_pb2.Value]] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> prediction_service.CountTokensResponse:
        r"""Perform a token counting.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import aiplatform_v1beta1

            async def sample_count_tokens():
                # Create a client
                client = aiplatform_v1beta1.LlmUtilityServiceAsyncClient()

                # Initialize request argument(s)
                instances = aiplatform_v1beta1.Value()
                instances.null_value = "NULL_VALUE"

                contents = aiplatform_v1beta1.Content()
                contents.parts.text = "text_value"

                request = aiplatform_v1beta1.CountTokensRequest(
                    endpoint="endpoint_value",
                    model="model_value",
                    instances=instances,
                    contents=contents,
                )

                # Make the request
                response = await client.count_tokens(request=request)

                # Handle the response
                print(response)

        Args:
            request (Optional[Union[google.cloud.aiplatform_v1beta1.types.CountTokensRequest, dict]]):
                The request object. Request message for
                [PredictionService.CountTokens][google.cloud.aiplatform.v1beta1.PredictionService.CountTokens].
            endpoint (:class:`str`):
                Required. The name of the Endpoint requested to perform
                token counting. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            instances (:class:`MutableSequence[google.protobuf.struct_pb2.Value]`):
                Required. The instances that are the
                input to token counting call. Schema is
                identical to the prediction schema of
                the underlying model.

                This corresponds to the ``instances`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.types.CountTokensResponse:
                Response message for
                [PredictionService.CountTokens][google.cloud.aiplatform.v1beta1.PredictionService.CountTokens].
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, instances])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = prediction_service.CountTokensRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if endpoint is not None:
            request.endpoint = endpoint
        if instances:
            request.instances.extend(instances)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.count_tokens,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def compute_tokens(
        self,
        request: Optional[Union[llm_utility_service.ComputeTokensRequest, dict]] = None,
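The async client above also accepts the flattened ``endpoint`` and ``instances`` arguments in place of a request object. A minimal usage sketch, assuming default credentials are available; the endpoint resource name and the instance payload are placeholders, and the expected instance schema depends on the target model:

```python
import asyncio

from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2


async def main() -> None:
    client = aiplatform_v1beta1.LlmUtilityServiceAsyncClient()

    # Placeholder resource name; substitute a real project/location/endpoint.
    endpoint = "projects/my-project/locations/us-central1/endpoints/my-endpoint"

    # One Value per instance; the schema here is model-specific.
    instance = struct_pb2.Value()
    instance.struct_value.fields["prompt"].string_value = "Hello, world!"

    # Flattened arguments instead of a CountTokensRequest object.
    response = await client.count_tokens(endpoint=endpoint, instances=[instance])
    print(response.total_tokens)


asyncio.run(main())
```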
125 changes: 125 additions & 0 deletions google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py
@@ -47,6 +47,7 @@
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.aiplatform_v1beta1.types import llm_utility_service
from google.cloud.aiplatform_v1beta1.types import prediction_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
@@ -444,6 +445,130 @@ def __init__(
            api_audience=client_options.api_audience,
        )

    def count_tokens(
        self,
        request: Optional[Union[prediction_service.CountTokensRequest, dict]] = None,
        *,
        endpoint: Optional[str] = None,
        instances: Optional[MutableSequence[struct_pb2.Value]] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> prediction_service.CountTokensResponse:
        r"""Perform a token counting.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import aiplatform_v1beta1

            def sample_count_tokens():
                # Create a client
                client = aiplatform_v1beta1.LlmUtilityServiceClient()

                # Initialize request argument(s)
                instances = aiplatform_v1beta1.Value()
                instances.null_value = "NULL_VALUE"

                contents = aiplatform_v1beta1.Content()
                contents.parts.text = "text_value"

                request = aiplatform_v1beta1.CountTokensRequest(
                    endpoint="endpoint_value",
                    model="model_value",
                    instances=instances,
                    contents=contents,
                )

                # Make the request
                response = client.count_tokens(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1beta1.types.CountTokensRequest, dict]):
                The request object. Request message for
                [PredictionService.CountTokens][google.cloud.aiplatform.v1beta1.PredictionService.CountTokens].
            endpoint (str):
                Required. The name of the Endpoint requested to perform
                token counting. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            instances (MutableSequence[google.protobuf.struct_pb2.Value]):
                Required. The instances that are the
                input to token counting call. Schema is
                identical to the prediction schema of
                the underlying model.

                This corresponds to the ``instances`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.types.CountTokensResponse:
                Response message for
                [PredictionService.CountTokens][google.cloud.aiplatform.v1beta1.PredictionService.CountTokens].
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, instances])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # Minor optimization to avoid making a copy if the user passes
        # in a prediction_service.CountTokensRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, prediction_service.CountTokensRequest):
            request = prediction_service.CountTokensRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if endpoint is not None:
                request.endpoint = endpoint
            if instances is not None:
                request.instances.extend(instances)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.count_tokens]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def compute_tokens(
        self,
        request: Optional[Union[llm_utility_service.ComputeTokensRequest, dict]] = None,
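One detail worth noting in both clients: the ``to_grpc_metadata`` call folds the request's ``endpoint`` into the ``x-goog-request-params`` header, which the API frontend uses for regional routing. A small sketch of what that helper emits; the resource name is a placeholder:

```python
from google.api_core import gapic_v1

# to_grpc_metadata URL-encodes the given field/value pairs into the
# single x-goog-request-params metadata entry.
key, value = gapic_v1.routing_header.to_grpc_metadata(
    (("endpoint", "projects/p/locations/l/endpoints/e"),)
)
print(key)    # x-goog-request-params
print(value)  # endpoint=projects%2Fp%2Flocations%2Fl%2Fendpoints%2Fe
```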
@@ -27,6 +27,7 @@
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1beta1.types import llm_utility_service
from google.cloud.aiplatform_v1beta1.types import prediction_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
@@ -127,6 +128,11 @@ def __init__(
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.count_tokens: gapic_v1.method.wrap_method(
                self.count_tokens,
                default_timeout=None,
                client_info=client_info,
            ),
            self.compute_tokens: gapic_v1.method.wrap_method(
                self.compute_tokens,
                default_timeout=None,
@@ -143,6 +149,18 @@ def close(self):
        """
        raise NotImplementedError()

    @property
    def count_tokens(
        self,
    ) -> Callable[
        [prediction_service.CountTokensRequest],
        Union[
            prediction_service.CountTokensResponse,
            Awaitable[prediction_service.CountTokensResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def compute_tokens(
        self,
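The abstract property above is what the concrete transports fill in. The gRPC transport in this commit is not shown here, but generated transports typically implement it as a lazily created, cached unary-unary stub. A sketch of that pattern, based on how sibling GAPIC methods are generated rather than on the file itself:

```python
    @property
    def count_tokens(
        self,
    ) -> Callable[
        [prediction_service.CountTokensRequest],
        prediction_service.CountTokensResponse,
    ]:
        # Create the stub once and cache it; later property accesses reuse
        # the same channel binding.
        if "count_tokens" not in self._stubs:
            self._stubs["count_tokens"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.LlmUtilityService/CountTokens",
                request_serializer=prediction_service.CountTokensRequest.serialize,
                response_deserializer=prediction_service.CountTokensResponse.deserialize,
            )
        return self._stubs["count_tokens"]
```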