diff --git a/googleapis/googleapis b/googleapis/googleapis
index 1dcb40c3..f2d78630 160000
--- a/googleapis/googleapis
+++ b/googleapis/googleapis
@@ -1 +1 @@
-Subproject commit 1dcb40c35ab57aa74c14ca4072f27c2df9e5f66c
+Subproject commit f2d78630d2c1d5e20041dfff963e093de9298e4d
diff --git a/googleapis/src/bytes/google.api.rs b/googleapis/src/bytes/google.api.rs
index dfe3c5c7..5e8c610e 100644
--- a/googleapis/src/bytes/google.api.rs
+++ b/googleapis/src/bytes/google.api.rs
@@ -871,6 +871,19 @@ pub enum FieldBehavior {
/// a non-empty value will be returned. The user will not be aware of what
/// non-empty value to expect.
NonEmptyDefault = 7,
+ /// Denotes that the field in a resource (a message annotated with
+ /// google.api.resource) is used in the resource name to uniquely identify the
+ /// resource. For AIP-compliant APIs, this should only be applied to the
+ /// `name` field on the resource.
+ ///
+ /// This behavior should not be applied to references to other resources within
+ /// the message.
+ ///
+ /// The identifier field of resources often have different field behavior
+ /// depending on the request it is embedded in (e.g. for Create methods name
+ /// is optional and unused, while for Update methods it is required). Instead
+ /// of method-specific annotations, only `IDENTIFIER` is required.
+ Identifier = 8,
}
impl FieldBehavior {
/// String value of the enum field names used in the ProtoBuf definition.
@@ -887,6 +900,7 @@ impl FieldBehavior {
FieldBehavior::Immutable => "IMMUTABLE",
FieldBehavior::UnorderedList => "UNORDERED_LIST",
FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT",
+ FieldBehavior::Identifier => "IDENTIFIER",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
@@ -900,6 +914,7 @@ impl FieldBehavior {
"IMMUTABLE" => Some(Self::Immutable),
"UNORDERED_LIST" => Some(Self::UnorderedList),
"NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault),
+ "IDENTIFIER" => Some(Self::Identifier),
_ => None,
}
}
diff --git a/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs b/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs
index cb817789..49551c68 100644
--- a/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs
+++ b/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs
@@ -1038,6 +1038,17 @@ pub struct AppendRowsRequest {
::prost::alloc::string::String,
i32,
>,
+ /// Optional. Default missing value interpretation for all columns in the
+ /// table. When a value is specified on an `AppendRowsRequest`, it is applied
+ /// to all requests on the connection from that point forward, until a
+ /// subsequent `AppendRowsRequest` sets it to a different value.
+ /// `missing_value_interpretation` can override
+ /// `default_missing_value_interpretation`. For example, if you want to write
+ /// `NULL` instead of using default values for some columns, you can set
+ /// `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same
+ /// time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+ #[prost(enumeration = "append_rows_request::MissingValueInterpretation", tag = "8")]
+ pub default_missing_value_interpretation: i32,
/// Input rows. The `writer_schema` field must be specified at the initial
/// request and currently, it will be ignored if specified in following
/// requests. Following requests must have data in the same format as the
@@ -1335,7 +1346,8 @@ pub mod storage_error {
InvalidCmekProvided = 11,
/// There is an encryption error while using customer-managed encryption key.
CmekEncryptionError = 12,
- /// Key Management Service (KMS) service returned an error.
+ /// Key Management Service (KMS) service returned an error, which can be
+ /// retried.
KmsServiceError = 13,
/// Permission denied while using customer-managed encryption key.
KmsPermissionDenied = 14,
diff --git a/googleapis/src/bytes/google.iam.v1.rs b/googleapis/src/bytes/google.iam.v1.rs
index 6ee5f360..655d3d77 100644
--- a/googleapis/src/bytes/google.iam.v1.rs
+++ b/googleapis/src/bytes/google.iam.v1.rs
@@ -43,6 +43,7 @@ pub struct GetPolicyOptions {
///
/// **JSON example:**
///
+/// ```text
/// {
/// "bindings": [
/// {
@@ -70,9 +71,11 @@ pub struct GetPolicyOptions {
/// "etag": "BwWWja0YfJA=",
/// "version": 3
/// }
+/// ```
///
/// **YAML example:**
///
+/// ```text
/// bindings:
/// - members:
/// - user:mike@example.com
@@ -89,6 +92,7 @@ pub struct GetPolicyOptions {
/// expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
/// etag: BwWWja0YfJA=
/// version: 3
+/// ```
///
/// For a description of IAM and its features, see the
/// [IAM documentation]().
@@ -160,7 +164,7 @@ pub struct Binding {
/// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
#[prost(string, tag = "1")]
pub role: ::prost::alloc::string::String,
- /// Specifies the principals requesting access for a Cloud Platform resource.
+ /// Specifies the principals requesting access for a Google Cloud resource.
/// `members` can have the following values:
///
/// * `allUsers`: A special identifier that represents anyone who is
@@ -270,8 +274,8 @@ pub struct Binding {
/// }
///
/// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
-/// logging. It also exempts jose@example.com from DATA_READ logging, and
-/// aliya@example.com from DATA_WRITE logging.
+/// logging. It also exempts `jose@example.com` from DATA_READ logging, and
+/// `aliya@example.com` from DATA_WRITE logging.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AuditConfig {
@@ -392,7 +396,7 @@ pub struct BindingDelta {
/// Required
#[prost(string, tag = "2")]
pub role: ::prost::alloc::string::String,
- /// A single identity requesting access for a Cloud Platform resource.
+ /// A single identity requesting access for a Google Cloud resource.
/// Follows the same format of Binding.members.
/// Required
#[prost(string, tag = "3")]
diff --git a/googleapis/src/bytes/google.pubsub.v1.rs b/googleapis/src/bytes/google.pubsub.v1.rs
index 494ce8fc..b7bda092 100644
--- a/googleapis/src/bytes/google.pubsub.v1.rs
+++ b/googleapis/src/bytes/google.pubsub.v1.rs
@@ -568,11 +568,12 @@ pub mod schema_service_client {
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MessageStoragePolicy {
- /// A list of IDs of GCP regions where messages that are published to the topic
- /// may be persisted in storage. Messages published by publishers running in
- /// non-allowed GCP regions (or running outside of GCP altogether) will be
- /// routed for storage in one of the allowed regions. An empty list means that
- /// no regions are allowed, and is not a valid configuration.
+ /// A list of IDs of Google Cloud regions where messages that are published
+ /// to the topic may be persisted in storage. Messages published by publishers
+ /// running in non-allowed Google Cloud regions (or running outside of Google
+ /// Cloud altogether) are routed for storage in one of the allowed regions.
+ /// An empty list means that no regions are allowed, and is not a valid
+ /// configuration.
#[prost(string, repeated, tag = "1")]
pub allowed_persistence_regions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
@@ -1734,8 +1735,8 @@ pub struct CreateSnapshotRequest {
/// in the request, the server will assign a random name for this snapshot on
/// the same project as the subscription. Note that for REST API requests, you
/// must specify a name. See the [resource name
- /// rules](). Format
- /// is `projects/{project}/snapshots/{snap}`.
+ /// rules]().
+ /// Format is `projects/{project}/snapshots/{snap}`.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Required. The subscription whose backlog the snapshot retains.
@@ -1964,7 +1965,7 @@ pub mod publisher_client {
self
}
/// Creates the given topic with the given name. See the [resource name rules]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names).
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).
pub async fn create_topic(
&mut self,
request: impl tonic::IntoRequest,
@@ -2196,16 +2197,16 @@ pub mod subscriber_client {
self
}
/// Creates a subscription to a given topic. See the [resource name rules]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names).
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).
/// If the subscription already exists, returns `ALREADY_EXISTS`.
/// If the corresponding topic doesn't exist, returns `NOT_FOUND`.
///
/// If the name is not provided in the request, the server will assign a random
/// name for this subscription on the same project as the topic, conforming
/// to the [resource name format]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated
- /// name is populated in the returned Subscription object. Note that for REST
- /// API requests, you must specify a name in the request.
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The
+ /// generated name is populated in the returned Subscription object. Note that
+ /// for REST API requests, you must specify a name in the request.
pub async fn create_subscription(
&mut self,
request: impl tonic::IntoRequest,
@@ -2433,7 +2434,7 @@ pub mod subscriber_client {
/// the request, the server will assign a random
/// name for this snapshot on the same project as the subscription, conforming
/// to the [resource name format]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The
/// generated name is populated in the returned Snapshot object. Note that for
/// REST API requests, you must specify a name in the request.
pub async fn create_snapshot(
diff --git a/googleapis/src/bytes/google.spanner.admin.instance.v1.rs b/googleapis/src/bytes/google.spanner.admin.instance.v1.rs
index a4c3d54a..4c48c336 100644
--- a/googleapis/src/bytes/google.spanner.admin.instance.v1.rs
+++ b/googleapis/src/bytes/google.spanner.admin.instance.v1.rs
@@ -271,6 +271,90 @@ pub mod instance_config {
}
}
}
+/// Autoscaling config for an instance.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AutoscalingConfig {
+ /// Required. Autoscaling limits for an instance.
+ #[prost(message, optional, tag = "1")]
+ pub autoscaling_limits: ::core::option::Option<
+ autoscaling_config::AutoscalingLimits,
+ >,
+ /// Required. The autoscaling targets for an instance.
+ #[prost(message, optional, tag = "2")]
+ pub autoscaling_targets: ::core::option::Option<
+ autoscaling_config::AutoscalingTargets,
+ >,
+}
+/// Nested message and enum types in `AutoscalingConfig`.
+pub mod autoscaling_config {
+ /// The autoscaling limits for the instance. Users can define the minimum and
+ /// maximum compute capacity allocated to the instance, and the autoscaler will
+ /// only scale within that range. Users can either use nodes or processing
+ /// units to specify the limits, but should use the same unit to set both the
+ /// min_limit and max_limit.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct AutoscalingLimits {
+ /// The minimum compute capacity for the instance.
+ #[prost(oneof = "autoscaling_limits::MinLimit", tags = "1, 2")]
+ pub min_limit: ::core::option::Option,
+ /// The maximum compute capacity for the instance. The maximum compute
+ /// capacity should be less than or equal to 10X the minimum compute
+ /// capacity.
+ #[prost(oneof = "autoscaling_limits::MaxLimit", tags = "3, 4")]
+ pub max_limit: ::core::option::Option,
+ }
+ /// Nested message and enum types in `AutoscalingLimits`.
+ pub mod autoscaling_limits {
+ /// The minimum compute capacity for the instance.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum MinLimit {
+ /// Minimum number of nodes allocated to the instance. If set, this number
+ /// should be greater than or equal to 1.
+ #[prost(int32, tag = "1")]
+ MinNodes(i32),
+ /// Minimum number of processing units allocated to the instance. If set,
+ /// this number should be multiples of 1000.
+ #[prost(int32, tag = "2")]
+ MinProcessingUnits(i32),
+ }
+ /// The maximum compute capacity for the instance. The maximum compute
+ /// capacity should be less than or equal to 10X the minimum compute
+ /// capacity.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum MaxLimit {
+ /// Maximum number of nodes allocated to the instance. If set, this number
+ /// should be greater than or equal to min_nodes.
+ #[prost(int32, tag = "3")]
+ MaxNodes(i32),
+ /// Maximum number of processing units allocated to the instance. If set,
+ /// this number should be multiples of 1000 and be greater than or equal to
+ /// min_processing_units.
+ #[prost(int32, tag = "4")]
+ MaxProcessingUnits(i32),
+ }
+ }
+ /// The autoscaling targets for an instance.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct AutoscalingTargets {
+ /// Required. The target high priority cpu utilization percentage that the
+ /// autoscaler should be trying to achieve for the instance. This number is
+ /// on a scale from 0 (no utilization) to 100 (full utilization). The valid
+ /// range is [10, 90] inclusive.
+ #[prost(int32, tag = "1")]
+ pub high_priority_cpu_utilization_percent: i32,
+ /// Required. The target storage utilization percentage that the autoscaler
+ /// should be trying to achieve for the instance. This number is on a scale
+ /// from 0 (no utilization) to 100 (full utilization). The valid range is
+ /// [10, 100] inclusive.
+ #[prost(int32, tag = "2")]
+ pub storage_utilization_percent: i32,
+ }
+}
/// An isolated set of Cloud Spanner resources on which databases can be hosted.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -292,8 +376,12 @@ pub struct Instance {
#[prost(string, tag = "3")]
pub display_name: ::prost::alloc::string::String,
/// The number of nodes allocated to this instance. At most one of either
- /// node_count or processing_units should be present in the message. This
- /// may be zero in API responses for instances that are not yet in state
+ /// node_count or processing_units should be present in the message.
+ ///
+ /// Users can set the node_count field to specify the target number of nodes
+ /// allocated to the instance.
+ ///
+ /// This may be zero in API responses for instances that are not yet in state
/// `READY`.
///
/// See [the
@@ -302,14 +390,25 @@ pub struct Instance {
#[prost(int32, tag = "5")]
pub node_count: i32,
/// The number of processing units allocated to this instance. At most one of
- /// processing_units or node_count should be present in the message. This may
- /// be zero in API responses for instances that are not yet in state `READY`.
+ /// processing_units or node_count should be present in the message.
+ ///
+ /// Users can set the processing_units field to specify the target number of
+ /// processing units allocated to the instance.
+ ///
+ /// This may be zero in API responses for instances that are not yet in state
+ /// `READY`.
///
/// See [the
/// documentation]()
/// for more information about nodes and processing units.
#[prost(int32, tag = "9")]
pub processing_units: i32,
+ /// Optional. The autoscaling configuration. Autoscaling is enabled if this
+ /// field is set. When autoscaling is enabled, node_count and processing_units
+ /// are treated as OUTPUT_ONLY fields and reflect the current compute capacity
+ /// allocated to the instance.
+ #[prost(message, optional, tag = "17")]
+ pub autoscaling_config: ::core::option::Option,
/// Output only. The current instance state. For
/// \[CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance\],
/// the state must be either omitted or set to `CREATING`. For
diff --git a/googleapis/src/bytes/google.spanner.v1.rs b/googleapis/src/bytes/google.spanner.v1.rs
index 4fab7ca5..f934b88d 100644
--- a/googleapis/src/bytes/google.spanner.v1.rs
+++ b/googleapis/src/bytes/google.spanner.v1.rs
@@ -1167,6 +1167,10 @@ pub enum TypeAnnotationCode {
/// \[JSON][google.spanner.v1.TypeCode.JSON\] when a client interacts with PostgreSQL-enabled
/// Spanner databases.
PgJsonb = 3,
+ /// PostgreSQL compatible OID type. This annotation can be used by a client
+ /// interacting with PostgreSQL-enabled Spanner database to specify that a
+ /// value should be treated using the semantics of the OID type.
+ PgOid = 4,
}
impl TypeAnnotationCode {
/// String value of the enum field names used in the ProtoBuf definition.
@@ -1178,6 +1182,7 @@ impl TypeAnnotationCode {
TypeAnnotationCode::Unspecified => "TYPE_ANNOTATION_CODE_UNSPECIFIED",
TypeAnnotationCode::PgNumeric => "PG_NUMERIC",
TypeAnnotationCode::PgJsonb => "PG_JSONB",
+ TypeAnnotationCode::PgOid => "PG_OID",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
@@ -1186,6 +1191,7 @@ impl TypeAnnotationCode {
"TYPE_ANNOTATION_CODE_UNSPECIFIED" => Some(Self::Unspecified),
"PG_NUMERIC" => Some(Self::PgNumeric),
"PG_JSONB" => Some(Self::PgJsonb),
+ "PG_OID" => Some(Self::PgOid),
_ => None,
}
}
@@ -1402,7 +1408,8 @@ pub struct CreateSessionRequest {
#[prost(message, optional, tag = "2")]
pub session: ::core::option::Option,
}
-/// The request for \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
+/// The request for
+/// \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchCreateSessionsRequest {
@@ -1416,11 +1423,13 @@ pub struct BatchCreateSessionsRequest {
/// The API may return fewer than the requested number of sessions. If a
/// specific number of sessions are desired, the client can make additional
/// calls to BatchCreateSessions (adjusting
- /// \[session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count\] as necessary).
+ /// \[session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count\]
+ /// as necessary).
#[prost(int32, tag = "3")]
pub session_count: i32,
}
-/// The response for \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
+/// The response for
+/// \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchCreateSessionsResponse {
@@ -1480,7 +1489,8 @@ pub struct ListSessionsRequest {
#[prost(int32, tag = "2")]
pub page_size: i32,
/// If non-empty, `page_token` should contain a
- /// \[next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token\] from a previous
+ /// \[next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token\]
+ /// from a previous
/// \[ListSessionsResponse][google.spanner.v1.ListSessionsResponse\].
#[prost(string, tag = "3")]
pub page_token: ::prost::alloc::string::String,
@@ -1505,8 +1515,8 @@ pub struct ListSessionsResponse {
#[prost(message, repeated, tag = "1")]
pub sessions: ::prost::alloc::vec::Vec,
/// `next_page_token` can be sent in a subsequent
- /// \[ListSessions][google.spanner.v1.Spanner.ListSessions\] call to fetch more of the matching
- /// sessions.
+ /// \[ListSessions][google.spanner.v1.Spanner.ListSessions\] call to fetch more
+ /// of the matching sessions.
#[prost(string, tag = "2")]
pub next_page_token: ::prost::alloc::string::String,
}
@@ -1616,6 +1626,138 @@ pub mod request_options {
}
}
}
+/// The DirectedReadOptions can be used to indicate which replicas or regions
+/// should be used for non-transactional reads or queries.
+///
+/// DirectedReadOptions may only be specified for a read-only transaction,
+/// otherwise the API will return an `INVALID_ARGUMENT` error.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DirectedReadOptions {
+ /// Required. Replicas indicates the order in which replicas should be
+ /// considered. At most one of either include_replicas or exclude_replicas
+ /// should be present in the message.
+ #[prost(oneof = "directed_read_options::Replicas", tags = "1, 2")]
+ pub replicas: ::core::option::Option,
+}
+/// Nested message and enum types in `DirectedReadOptions`.
+pub mod directed_read_options {
+ /// The directed read replica selector.
+ /// Callers must provide one or more of the following fields for replica
+ /// selection:
+ ///
+ /// * `location` - The location must be one of the regions within the
+ /// multi-region configuration of your database.
+ /// * `type` - The type of the replica.
+ ///
+ /// Some examples of using replica_selectors are:
+ ///
+ /// * `location:us-east1` --> The "us-east1" replica(s) of any available type
+ /// will be used to process the request.
+ /// * `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in nearest
+ ///                   available location will be used to process the
+ /// request.
+ /// * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s)
+ /// in location "us-east1" will be used to process
+ /// the request.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct ReplicaSelection {
+ /// The location or region of the serving requests, e.g. "us-east1".
+ #[prost(string, tag = "1")]
+ pub location: ::prost::alloc::string::String,
+ /// The type of replica.
+ #[prost(enumeration = "replica_selection::Type", tag = "2")]
+ pub r#type: i32,
+ }
+ /// Nested message and enum types in `ReplicaSelection`.
+ pub mod replica_selection {
+ /// Indicates the type of replica.
+ #[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ PartialOrd,
+ Ord,
+ ::prost::Enumeration
+ )]
+ #[repr(i32)]
+ pub enum Type {
+ /// Not specified.
+ Unspecified = 0,
+ /// Read-write replicas support both reads and writes.
+ ReadWrite = 1,
+ /// Read-only replicas only support reads (not writes).
+ ReadOnly = 2,
+ }
+ impl Type {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ Type::Unspecified => "TYPE_UNSPECIFIED",
+ Type::ReadWrite => "READ_WRITE",
+ Type::ReadOnly => "READ_ONLY",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option {
+ match value {
+ "TYPE_UNSPECIFIED" => Some(Self::Unspecified),
+ "READ_WRITE" => Some(Self::ReadWrite),
+ "READ_ONLY" => Some(Self::ReadOnly),
+ _ => None,
+ }
+ }
+ }
+ }
+ /// An IncludeReplicas contains a repeated set of ReplicaSelection which
+ /// indicates the order in which replicas should be considered.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct IncludeReplicas {
+ /// The directed read replica selector.
+ #[prost(message, repeated, tag = "1")]
+ pub replica_selections: ::prost::alloc::vec::Vec,
+ /// If true, Spanner will not route requests to a replica outside the
+ /// include_replicas list when all of the specified replicas are unavailable
+ /// or unhealthy. Default value is `false`.
+ #[prost(bool, tag = "2")]
+ pub auto_failover_disabled: bool,
+ }
+ /// An ExcludeReplicas contains a repeated set of ReplicaSelection that should
+ /// be excluded from serving requests.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct ExcludeReplicas {
+ /// The directed read replica selector.
+ #[prost(message, repeated, tag = "1")]
+ pub replica_selections: ::prost::alloc::vec::Vec,
+ }
+ /// Required. Replicas indicates the order in which replicas should be
+ /// considered. At most one of either include_replicas or exclude_replicas
+ /// should be present in the message.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Replicas {
+ /// Include_replicas indicates the order of replicas (as they appear in
+ /// this list) to process the request. If auto_failover_disabled is set to
+ /// true and all replicas are exhausted without finding a healthy replica,
+ /// Spanner will wait for a replica in the list to become available, requests
+ /// may fail due to `DEADLINE_EXCEEDED` errors.
+ #[prost(message, tag = "1")]
+ IncludeReplicas(IncludeReplicas),
+ /// Exclude_replicas indicates what replicas should be excluded from serving
+ /// requests. Spanner will not route requests to the replicas in this list.
+ #[prost(message, tag = "2")]
+ ExcludeReplicas(ExcludeReplicas),
+ }
+}
/// The request for \[ExecuteSql][google.spanner.v1.Spanner.ExecuteSql\] and
/// \[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql\].
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -1656,7 +1798,8 @@ pub struct ExecuteSqlRequest {
pub params: ::core::option::Option<::prost_types::Struct>,
/// It is not always possible for Cloud Spanner to infer the right SQL type
/// from a JSON value. For example, values of type `BYTES` and values
- /// of type `STRING` both appear in \[params][google.spanner.v1.ExecuteSqlRequest.params\] as JSON strings.
+ /// of type `STRING` both appear in
+ /// \[params][google.spanner.v1.ExecuteSqlRequest.params\] as JSON strings.
///
/// In these cases, `param_types` can be used to specify the exact
/// SQL type for some or all of the SQL statement parameters. See the
@@ -1666,15 +1809,18 @@ pub struct ExecuteSqlRequest {
pub param_types: ::std::collections::HashMap<::prost::alloc::string::String, Type>,
/// If this request is resuming a previously interrupted SQL statement
/// execution, `resume_token` should be copied from the last
- /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the interruption. Doing this
- /// enables the new SQL statement execution to resume where the last one left
- /// off. The rest of the request parameters must exactly match the
- /// request that yielded this token.
+ /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the
+ /// interruption. Doing this enables the new SQL statement execution to resume
+ /// where the last one left off. The rest of the request parameters must
+ /// exactly match the request that yielded this token.
#[prost(bytes = "bytes", tag = "6")]
pub resume_token: ::prost::bytes::Bytes,
/// Used to control the amount of debugging information returned in
- /// \[ResultSetStats][google.spanner.v1.ResultSetStats\]. If \[partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token\] is set, \[query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode\] can only
- /// be set to \[QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL\].
+ /// \[ResultSetStats][google.spanner.v1.ResultSetStats\]. If
+ /// \[partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token\] is
+ /// set, \[query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode\] can only
+ /// be set to
+ /// \[QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL\].
#[prost(enumeration = "execute_sql_request::QueryMode", tag = "7")]
pub query_mode: i32,
/// If present, results will be restricted to the specified partition
@@ -1701,11 +1847,14 @@ pub struct ExecuteSqlRequest {
/// Common options for this request.
#[prost(message, optional, tag = "11")]
pub request_options: ::core::option::Option,
+ /// Directed read options for this request.
+ #[prost(message, optional, tag = "15")]
+ pub directed_read_options: ::core::option::Option,
/// If this is for a partitioned query and this field is set to `true`, the
- /// request will be executed via Spanner independent compute resources.
+ /// request is executed with Spanner Data Boost independent compute resources.
///
/// If the field is set to `true` but the request does not set
- /// `partition_token`, the API will return an `INVALID_ARGUMENT` error.
+ /// `partition_token`, the API returns an `INVALID_ARGUMENT` error.
#[prost(bool, tag = "16")]
pub data_boost_enabled: bool,
}
@@ -1826,17 +1975,17 @@ pub struct ExecuteBatchDmlRequest {
/// transaction.
#[prost(message, optional, tag = "2")]
pub transaction: ::core::option::Option,
- /// Required. The list of statements to execute in this batch. Statements are executed
- /// serially, such that the effects of statement `i` are visible to statement
- /// `i+1`. Each statement must be a DML statement. Execution stops at the
- /// first failed statement; the remaining statements are not executed.
+ /// Required. The list of statements to execute in this batch. Statements are
+ /// executed serially, such that the effects of statement `i` are visible to
+ /// statement `i+1`. Each statement must be a DML statement. Execution stops at
+ /// the first failed statement; the remaining statements are not executed.
///
/// Callers must provide at least one statement.
#[prost(message, repeated, tag = "3")]
pub statements: ::prost::alloc::vec::Vec,
- /// Required. A per-transaction sequence number used to identify this request. This field
- /// makes each request idempotent such that if the request is received multiple
- /// times, at most one will succeed.
+ /// Required. A per-transaction sequence number used to identify this request.
+ /// This field makes each request idempotent such that if the request is
+ /// received multiple times, at most one will succeed.
///
/// The sequence number must be monotonically increasing within the
/// transaction. If a request arrives for the first time with an out-of-order
@@ -1873,7 +2022,9 @@ pub mod execute_batch_dml_request {
pub params: ::core::option::Option<::prost_types::Struct>,
/// It is not always possible for Cloud Spanner to infer the right SQL type
/// from a JSON value. For example, values of type `BYTES` and values
- /// of type `STRING` both appear in \[params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params\] as JSON strings.
+ /// of type `STRING` both appear in
+ /// \[params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params\] as
+ /// JSON strings.
///
/// In these cases, `param_types` can be used to specify the exact
/// SQL type for some or all of the SQL statement parameters. See the
@@ -1886,40 +2037,49 @@ pub mod execute_batch_dml_request {
>,
}
}
-/// The response for \[ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml\]. Contains a list
-/// of \[ResultSet][google.spanner.v1.ResultSet\] messages, one for each DML statement that has successfully
-/// executed, in the same order as the statements in the request. If a statement
-/// fails, the status in the response body identifies the cause of the failure.
+/// The response for
+/// \[ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml\]. Contains a list
+/// of \[ResultSet][google.spanner.v1.ResultSet\] messages, one for each DML
+/// statement that has successfully executed, in the same order as the statements
+/// in the request. If a statement fails, the status in the response body
+/// identifies the cause of the failure.
///
/// To check for DML statements that failed, use the following approach:
///
-/// 1. Check the status in the response message. The \[google.rpc.Code][google.rpc.Code\] enum
+/// 1. Check the status in the response message. The
+/// \[google.rpc.Code][google.rpc.Code\] enum
/// value `OK` indicates that all statements were executed successfully.
/// 2. If the status was not `OK`, check the number of result sets in the
-/// response. If the response contains `N` \[ResultSet][google.spanner.v1.ResultSet\] messages, then
-/// statement `N+1` in the request failed.
+/// response. If the response contains `N`
+/// \[ResultSet][google.spanner.v1.ResultSet\] messages, then statement `N+1` in
+/// the request failed.
///
/// Example 1:
///
/// * Request: 5 DML statements, all executed successfully.
-/// * Response: 5 \[ResultSet][google.spanner.v1.ResultSet\] messages, with the status `OK`.
+/// * Response: 5 \[ResultSet][google.spanner.v1.ResultSet\] messages, with the
+/// status `OK`.
///
/// Example 2:
///
/// * Request: 5 DML statements. The third statement has a syntax error.
-/// * Response: 2 \[ResultSet][google.spanner.v1.ResultSet\] messages, and a syntax error (`INVALID_ARGUMENT`)
-/// status. The number of \[ResultSet][google.spanner.v1.ResultSet\] messages indicates that the third
-/// statement failed, and the fourth and fifth statements were not executed.
+/// * Response: 2 \[ResultSet][google.spanner.v1.ResultSet\] messages, and a syntax
+/// error (`INVALID_ARGUMENT`)
+/// status. The number of \[ResultSet][google.spanner.v1.ResultSet\] messages
+/// indicates that the third statement failed, and the fourth and fifth
+/// statements were not executed.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExecuteBatchDmlResponse {
- /// One \[ResultSet][google.spanner.v1.ResultSet\] for each statement in the request that ran successfully,
- /// in the same order as the statements in the request. Each \[ResultSet][google.spanner.v1.ResultSet\] does
- /// not contain any rows. The \[ResultSetStats][google.spanner.v1.ResultSetStats\] in each \[ResultSet][google.spanner.v1.ResultSet\] contain
- /// the number of rows modified by the statement.
+ /// One \[ResultSet][google.spanner.v1.ResultSet\] for each statement in the
+ /// request that ran successfully, in the same order as the statements in the
+ /// request. Each \[ResultSet][google.spanner.v1.ResultSet\] does not contain any
+ /// rows. The \[ResultSetStats][google.spanner.v1.ResultSetStats\] in each
+ /// \[ResultSet][google.spanner.v1.ResultSet\] contain the number of rows
+ /// modified by the statement.
///
- /// Only the first \[ResultSet][google.spanner.v1.ResultSet\] in the response contains valid
- /// \[ResultSetMetadata][google.spanner.v1.ResultSetMetadata\].
+ /// Only the first \[ResultSet][google.spanner.v1.ResultSet\] in the response
+ /// contains valid \[ResultSetMetadata][google.spanner.v1.ResultSetMetadata\].
#[prost(message, repeated, tag = "1")]
pub result_sets: ::prost::alloc::vec::Vec,
/// If all DML statements are executed successfully, the status is `OK`.
@@ -1962,15 +2122,16 @@ pub struct PartitionQueryRequest {
/// transactions are not.
#[prost(message, optional, tag = "2")]
pub transaction: ::core::option::Option,
- /// Required. The query request to generate partitions for. The request will fail if
- /// the query is not root partitionable. The query plan of a root
- /// partitionable query has a single distributed union operator. A distributed
- /// union operator conceptually divides one or more tables into multiple
- /// splits, remotely evaluates a subquery independently on each split, and
- /// then unions all results.
+ /// Required. The query request to generate partitions for. The request will
+ /// fail if the query is not root partitionable. For a query to be root
+ /// partitionable, it needs to satisfy a few conditions. For example, the first
+ /// operator in the query execution plan must be a distributed union operator.
+ /// For more information about other conditions, see [Read data in
+ /// parallel]().
///
- /// This must not contain DML commands, such as INSERT, UPDATE, or
- /// DELETE. Use \[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql\] with a
+ /// The query request must not contain DML commands, such as INSERT, UPDATE, or
+ /// DELETE. Use
+ /// \[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql\] with a
/// PartitionedDml transaction for large, partition-friendly DML operations.
#[prost(string, tag = "3")]
pub sql: ::prost::alloc::string::String,
@@ -1990,7 +2151,8 @@ pub struct PartitionQueryRequest {
pub params: ::core::option::Option<::prost_types::Struct>,
/// It is not always possible for Cloud Spanner to infer the right SQL type
/// from a JSON value. For example, values of type `BYTES` and values
- /// of type `STRING` both appear in \[params][google.spanner.v1.PartitionQueryRequest.params\] as JSON strings.
+ /// of type `STRING` both appear in
+ /// \[params][google.spanner.v1.PartitionQueryRequest.params\] as JSON strings.
///
/// In these cases, `param_types` can be used to specify the exact
/// SQL type for some or all of the SQL query parameters. See the
@@ -2016,18 +2178,24 @@ pub struct PartitionReadRequest {
/// Required. The name of the table in the database to be read.
#[prost(string, tag = "3")]
pub table: ::prost::alloc::string::String,
- /// If non-empty, the name of an index on \[table][google.spanner.v1.PartitionReadRequest.table\]. This index is
- /// used instead of the table primary key when interpreting \[key_set][google.spanner.v1.PartitionReadRequest.key_set\]
- /// and sorting result rows. See \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] for further information.
+ /// If non-empty, the name of an index on
+ /// \[table][google.spanner.v1.PartitionReadRequest.table\]. This index is used
+ /// instead of the table primary key when interpreting
+ /// \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] and sorting
+ /// result rows. See \[key_set][google.spanner.v1.PartitionReadRequest.key_set\]
+ /// for further information.
#[prost(string, tag = "4")]
pub index: ::prost::alloc::string::String,
- /// The columns of \[table][google.spanner.v1.PartitionReadRequest.table\] to be returned for each row matching
- /// this request.
+ /// The columns of \[table][google.spanner.v1.PartitionReadRequest.table\] to be
+ /// returned for each row matching this request.
#[prost(string, repeated, tag = "5")]
pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Required. `key_set` identifies the rows to be yielded. `key_set` names the
- /// primary keys of the rows in \[table][google.spanner.v1.PartitionReadRequest.table\] to be yielded, unless \[index][google.spanner.v1.PartitionReadRequest.index\]
- /// is present. If \[index][google.spanner.v1.PartitionReadRequest.index\] is present, then \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] instead names
+ /// primary keys of the rows in
+ /// \[table][google.spanner.v1.PartitionReadRequest.table\] to be yielded, unless
+ /// \[index][google.spanner.v1.PartitionReadRequest.index\] is present. If
+ /// \[index][google.spanner.v1.PartitionReadRequest.index\] is present, then
+ /// \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] instead names
/// index keys in \[index][google.spanner.v1.PartitionReadRequest.index\].
///
/// It is not an error for the `key_set` to name rows that do not
@@ -2076,24 +2244,31 @@ pub struct ReadRequest {
/// Required. The name of the table in the database to be read.
#[prost(string, tag = "3")]
pub table: ::prost::alloc::string::String,
- /// If non-empty, the name of an index on \[table][google.spanner.v1.ReadRequest.table\]. This index is
- /// used instead of the table primary key when interpreting \[key_set][google.spanner.v1.ReadRequest.key_set\]
- /// and sorting result rows. See \[key_set][google.spanner.v1.ReadRequest.key_set\] for further information.
+ /// If non-empty, the name of an index on
+ /// \[table][google.spanner.v1.ReadRequest.table\]. This index is used instead of
+ /// the table primary key when interpreting
+ /// \[key_set][google.spanner.v1.ReadRequest.key_set\] and sorting result rows.
+ /// See \[key_set][google.spanner.v1.ReadRequest.key_set\] for further
+ /// information.
#[prost(string, tag = "4")]
pub index: ::prost::alloc::string::String,
- /// Required. The columns of \[table][google.spanner.v1.ReadRequest.table\] to be returned for each row matching
- /// this request.
+ /// Required. The columns of \[table][google.spanner.v1.ReadRequest.table\] to be
+ /// returned for each row matching this request.
#[prost(string, repeated, tag = "5")]
pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Required. `key_set` identifies the rows to be yielded. `key_set` names the
- /// primary keys of the rows in \[table][google.spanner.v1.ReadRequest.table\] to be yielded, unless \[index][google.spanner.v1.ReadRequest.index\]
- /// is present. If \[index][google.spanner.v1.ReadRequest.index\] is present, then \[key_set][google.spanner.v1.ReadRequest.key_set\] instead names
- /// index keys in \[index][google.spanner.v1.ReadRequest.index\].
+ /// primary keys of the rows in \[table][google.spanner.v1.ReadRequest.table\] to
+ /// be yielded, unless \[index][google.spanner.v1.ReadRequest.index\] is present.
+ /// If \[index][google.spanner.v1.ReadRequest.index\] is present, then
+ /// \[key_set][google.spanner.v1.ReadRequest.key_set\] instead names index keys
+ /// in \[index][google.spanner.v1.ReadRequest.index\].
///
- /// If the \[partition_token][google.spanner.v1.ReadRequest.partition_token\] field is empty, rows are yielded
- /// in table primary key order (if \[index][google.spanner.v1.ReadRequest.index\] is empty) or index key order
- /// (if \[index][google.spanner.v1.ReadRequest.index\] is non-empty). If the \[partition_token][google.spanner.v1.ReadRequest.partition_token\] field is not
- /// empty, rows will be yielded in an unspecified order.
+ /// If the \[partition_token][google.spanner.v1.ReadRequest.partition_token\]
+ /// field is empty, rows are yielded in table primary key order (if
+ /// \[index][google.spanner.v1.ReadRequest.index\] is empty) or index key order
+ /// (if \[index][google.spanner.v1.ReadRequest.index\] is non-empty). If the
+ /// \[partition_token][google.spanner.v1.ReadRequest.partition_token\] field is
+ /// not empty, rows will be yielded in an unspecified order.
///
/// It is not an error for the `key_set` to name rows that do not
/// exist in the database. Read yields nothing for nonexistent rows.
@@ -2106,9 +2281,9 @@ pub struct ReadRequest {
pub limit: i64,
/// If this request is resuming a previously interrupted read,
/// `resume_token` should be copied from the last
- /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the interruption. Doing this
- /// enables the new read to resume where the last read left off. The
- /// rest of the request parameters must exactly match the request
+ /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the
+ /// interruption. Doing this enables the new read to resume where the last read
+ /// left off. The rest of the request parameters must exactly match the request
/// that yielded this token.
#[prost(bytes = "bytes", tag = "9")]
pub resume_token: ::prost::bytes::Bytes,
@@ -2121,15 +2296,19 @@ pub struct ReadRequest {
/// Common options for this request.
#[prost(message, optional, tag = "11")]
pub request_options: ::core::option::Option,
+ /// Directed read options for this request.
+ #[prost(message, optional, tag = "14")]
+ pub directed_read_options: ::core::option::Option,
/// If this is for a partitioned read and this field is set to `true`, the
- /// request will be executed via Spanner independent compute resources.
+ /// request is executed with Spanner Data Boost independent compute resources.
///
/// If the field is set to `true` but the request does not set
- /// `partition_token`, the API will return an `INVALID_ARGUMENT` error.
+ /// `partition_token`, the API returns an `INVALID_ARGUMENT` error.
#[prost(bool, tag = "15")]
pub data_boost_enabled: bool,
}
-/// The request for \[BeginTransaction][google.spanner.v1.Spanner.BeginTransaction\].
+/// The request for
+/// \[BeginTransaction][google.spanner.v1.Spanner.BeginTransaction\].
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BeginTransactionRequest {
@@ -2160,8 +2339,8 @@ pub struct CommitRequest {
#[prost(message, repeated, tag = "4")]
pub mutations: ::prost::alloc::vec::Vec,
/// If `true`, then statistics related to the transaction will be included in
- /// the \[CommitResponse][google.spanner.v1.CommitResponse.commit_stats\]. Default value is
- /// `false`.
+ /// the \[CommitResponse][google.spanner.v1.CommitResponse.commit_stats\].
+ /// Default value is `false`.
#[prost(bool, tag = "5")]
pub return_commit_stats: bool,
/// Common options for this request.
@@ -2204,6 +2383,49 @@ pub struct RollbackRequest {
#[prost(bytes = "bytes", tag = "2")]
pub transaction_id: ::prost::bytes::Bytes,
}
+/// The request for \[BatchWrite][google.spanner.v1.Spanner.BatchWrite\].
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchWriteRequest {
+ /// Required. The session in which the batch request is to be run.
+ #[prost(string, tag = "1")]
+ pub session: ::prost::alloc::string::String,
+ /// Common options for this request.
+ #[prost(message, optional, tag = "3")]
+ pub request_options: ::core::option::Option,
+ /// Required. The groups of mutations to be applied.
+ #[prost(message, repeated, tag = "4")]
+ pub mutation_groups: ::prost::alloc::vec::Vec,
+}
+/// Nested message and enum types in `BatchWriteRequest`.
+pub mod batch_write_request {
+ /// A group of mutations to be committed together. Related mutations should be
+ /// placed in a group. For example, two mutations inserting rows with the same
+ /// primary key prefix in both parent and child tables are related.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct MutationGroup {
+ /// Required. The mutations in this group.
+ #[prost(message, repeated, tag = "1")]
+ pub mutations: ::prost::alloc::vec::Vec,
+ }
+}
+/// The result of applying a batch of mutations.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchWriteResponse {
+ /// The mutation groups applied in this batch. The values index into the
+ /// `mutation_groups` field in the corresponding `BatchWriteRequest`.
+ #[prost(int32, repeated, tag = "1")]
+ pub indexes: ::prost::alloc::vec::Vec,
+ /// An `OK` status indicates success. Any other status indicates a failure.
+ #[prost(message, optional, tag = "2")]
+ pub status: ::core::option::Option,
+ /// The commit timestamp of the transaction that applied this batch.
+ /// Present if `status` is `OK`, absent otherwise.
+ #[prost(message, optional, tag = "3")]
+ pub commit_timestamp: ::core::option::Option<::prost_types::Timestamp>,
+}
/// Generated client implementations.
pub mod spanner_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
@@ -2448,10 +2670,12 @@ pub mod spanner_client {
///
/// Operations inside read-write transactions might return `ABORTED`. If
/// this occurs, the application should restart the transaction from
- /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
+ /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
+ /// details.
///
/// Larger result sets can be fetched in streaming fashion by calling
- /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead.
+ /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
+ /// instead.
pub async fn execute_sql(
&mut self,
request: impl tonic::IntoRequest,
@@ -2474,11 +2698,11 @@ pub mod spanner_client {
.insert(GrpcMethod::new("google.spanner.v1.Spanner", "ExecuteSql"));
self.inner.unary(req, path, codec).await
}
- /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result
- /// set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there
- /// is no limit on the size of the returned result set. However, no
- /// individual row in the result set can exceed 100 MiB, and no
- /// column value can exceed 10 MiB.
+ /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
+ /// result set as a stream. Unlike
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
+ /// the size of the returned result set. However, no individual row in the
+ /// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
pub async fn execute_streaming_sql(
&mut self,
request: impl tonic::IntoRequest,
@@ -2511,9 +2735,10 @@ pub mod spanner_client {
/// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
///
/// Statements are executed in sequential order. A request can succeed even if
- /// a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the
- /// response provides information about the statement that failed. Clients must
- /// inspect this field to determine whether an error occurred.
+ /// a statement fails. The
+ /// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
+ /// field in the response provides information about the statement that failed.
+ /// Clients must inspect this field to determine whether an error occurred.
///
/// Execution stops after the first failed statement; the remaining statements
/// are not executed.
@@ -2544,14 +2769,15 @@ pub mod spanner_client {
}
/// Reads rows from the database using key lookups and scans, as a
/// simple key/value style alternative to
- /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to
- /// return a result set larger than 10 MiB; if the read matches more
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
+ /// used to return a result set larger than 10 MiB; if the read matches more
/// data than that, the read fails with a `FAILED_PRECONDITION`
/// error.
///
/// Reads inside read-write transactions might return `ABORTED`. If
/// this occurs, the application should restart the transaction from
- /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
+ /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
+ /// details.
///
/// Larger result sets can be yielded in streaming fashion by calling
/// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
@@ -2577,9 +2803,9 @@ pub mod spanner_client {
.insert(GrpcMethod::new("google.spanner.v1.Spanner", "Read"));
self.inner.unary(req, path, codec).await
}
- /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a
- /// stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the
- /// size of the returned result set. However, no individual row in
+ /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
+ /// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
+ /// limit on the size of the returned result set. However, no individual row in
/// the result set can exceed 100 MiB, and no column value can exceed
/// 10 MiB.
pub async fn streaming_read(
@@ -2608,7 +2834,8 @@ pub mod spanner_client {
self.inner.server_streaming(req, path, codec).await
}
/// Begins a new transaction. This step can often be skipped:
- /// [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
+ /// [Read][google.spanner.v1.Spanner.Read],
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
/// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
/// side-effect.
pub async fn begin_transaction(
@@ -2673,8 +2900,9 @@ pub mod spanner_client {
}
/// Rolls back a transaction, releasing any locks it holds. It is a good
/// idea to call this for any transaction that includes one or more
- /// [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and
- /// ultimately decides not to commit.
+ /// [Read][google.spanner.v1.Spanner.Read] or
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
+ /// decides not to commit.
///
/// `Rollback` returns `OK` if it successfully aborts the transaction, the
/// transaction was already aborted, or the transaction is not
@@ -2703,10 +2931,11 @@ pub mod spanner_client {
}
/// Creates a set of partition tokens that can be used to execute a query
/// operation in parallel. Each of the returned partition tokens can be used
- /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset
- /// of the query result to read. The same session and read-only transaction
- /// must be used by the PartitionQueryRequest used to create the
- /// partition tokens and the ExecuteSqlRequests that use the partition tokens.
+ /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
+ /// specify a subset of the query result to read. The same session and
+ /// read-only transaction must be used by the PartitionQueryRequest used to
+ /// create the partition tokens and the ExecuteSqlRequests that use the
+ /// partition tokens.
///
/// Partition tokens become invalid when the session used to create them
/// is deleted, is idle for too long, begins a new transaction, or becomes too
@@ -2739,12 +2968,13 @@ pub mod spanner_client {
}
/// Creates a set of partition tokens that can be used to execute a read
/// operation in parallel. Each of the returned partition tokens can be used
- /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read
- /// result to read. The same session and read-only transaction must be used by
- /// the PartitionReadRequest used to create the partition tokens and the
- /// ReadRequests that use the partition tokens. There are no ordering
- /// guarantees on rows returned among the returned partition tokens, or even
- /// within each individual StreamingRead call issued with a partition_token.
+ /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
+ /// subset of the read result to read. The same session and read-only
+ /// transaction must be used by the PartitionReadRequest used to create the
+ /// partition tokens and the ReadRequests that use the partition tokens. There
+ /// are no ordering guarantees on rows returned among the returned partition
+ /// tokens, or even within each individual StreamingRead call issued with a
+ /// partition_token.
///
/// Partition tokens become invalid when the session used to create them
/// is deleted, is idle for too long, begins a new transaction, or becomes too
@@ -2775,5 +3005,45 @@ pub mod spanner_client {
.insert(GrpcMethod::new("google.spanner.v1.Spanner", "PartitionRead"));
self.inner.unary(req, path, codec).await
}
+ /// Batches the supplied mutation groups in a collection of efficient
+ /// transactions. All mutations in a group are committed atomically. However,
+ /// mutations across groups can be committed non-atomically in an unspecified
+ /// order and thus, they must be independent of each other. Partial failure is
+ /// possible, i.e., some groups may have been committed successfully, while
+ /// some may have failed. The results of individual batches are streamed into
+ /// the response as the batches are applied.
+ ///
+ /// BatchWrite requests are not replay protected, meaning that each mutation
+ /// group may be applied more than once. Replays of non-idempotent mutations
+ /// may have undesirable effects. For example, replays of an insert mutation
+ /// may produce an already exists error or if you use generated or commit
+ /// timestamp-based keys, it may result in additional rows being added to the
+ /// mutation's table. We recommend structuring your mutation groups to be
+ /// idempotent to avoid this issue.
+ pub async fn batch_write(
+ &mut self,
+ request: impl tonic::IntoRequest,
+ ) -> std::result::Result<
+ tonic::Response>,
+ tonic::Status,
+ > {
+ self.inner
+ .ready()
+ .await
+ .map_err(|e| {
+ tonic::Status::new(
+ tonic::Code::Unknown,
+ format!("Service was not ready: {}", e.into()),
+ )
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static(
+ "/google.spanner.v1.Spanner/BatchWrite",
+ );
+ let mut req = request.into_request();
+ req.extensions_mut()
+ .insert(GrpcMethod::new("google.spanner.v1.Spanner", "BatchWrite"));
+ self.inner.server_streaming(req, path, codec).await
+ }
}
}
diff --git a/googleapis/src/bytes/google.storage.v2.rs b/googleapis/src/bytes/google.storage.v2.rs
index f6bcd863..7e8cbab4 100644
--- a/googleapis/src/bytes/google.storage.v2.rs
+++ b/googleapis/src/bytes/google.storage.v2.rs
@@ -43,7 +43,7 @@ pub struct CreateBucketRequest {
pub parent: ::prost::alloc::string::String,
/// Properties of the new bucket being inserted.
/// The name of the bucket is specified in the `bucket_id` field. Populating
- /// `bucket.name` field will be ignored.
+ /// `bucket.name` field will result in an error.
/// The project of the bucket must be specified in the `bucket.project` field.
/// This field must be in `projects/{projectIdentifier}` format,
/// {projectIdentifier} can be the project ID or project number. The `parent`
@@ -326,6 +326,48 @@ pub struct DeleteObjectRequest {
#[prost(message, optional, tag = "10")]
pub common_object_request_params: ::core::option::Option,
}
+/// Message for restoring an object.
+/// `bucket`, `object`, and `generation` **must** be set.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RestoreObjectRequest {
+ /// Required. Name of the bucket in which the object resides.
+ #[prost(string, tag = "1")]
+ pub bucket: ::prost::alloc::string::String,
+ /// Required. The name of the object to restore.
+ #[prost(string, tag = "2")]
+ pub object: ::prost::alloc::string::String,
+ /// Required. The specific revision of the object to restore.
+ #[prost(int64, tag = "3")]
+ pub generation: i64,
+ /// Makes the operation conditional on whether the object's current generation
+ /// matches the given value. Setting to 0 makes the operation succeed only if
+ /// there are no live versions of the object.
+ #[prost(int64, optional, tag = "4")]
+ pub if_generation_match: ::core::option::Option,
+ /// Makes the operation conditional on whether the object's live generation
+ /// does not match the given value. If no live object exists, the precondition
+ /// fails. Setting to 0 makes the operation succeed only if there is a live
+ /// version of the object.
+ #[prost(int64, optional, tag = "5")]
+ pub if_generation_not_match: ::core::option::Option,
+ /// Makes the operation conditional on whether the object's current
+ /// metageneration matches the given value.
+ #[prost(int64, optional, tag = "6")]
+ pub if_metageneration_match: ::core::option::Option,
+ /// Makes the operation conditional on whether the object's current
+ /// metageneration does not match the given value.
+ #[prost(int64, optional, tag = "7")]
+ pub if_metageneration_not_match: ::core::option::Option,
+ /// If false or unset, the bucket's default object ACL will be used.
+ /// If true, copy the source object's access controls.
+ /// Return an error if bucket has UBLA enabled.
+ #[prost(bool, optional, tag = "9")]
+ pub copy_source_acl: ::core::option::Option,
+ /// A set of parameters common to Storage API requests concerning an object.
+ #[prost(message, optional, tag = "8")]
+ pub common_object_request_params: ::core::option::Option,
+}
/// Message for canceling an in-progress resumable upload.
/// `upload_id` **must** be set.
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -419,6 +461,9 @@ pub struct GetObjectRequest {
/// latest version, the default).
#[prost(int64, tag = "3")]
pub generation: i64,
+ /// If true, return the soft-deleted version of this object.
+ #[prost(bool, optional, tag = "11")]
+ pub soft_deleted: ::core::option::Option,
/// Makes the operation conditional on whether the object's current generation
/// matches the given value. Setting to 0 makes the operation succeed only if
/// there are no live versions of the object.
@@ -606,6 +651,113 @@ pub mod write_object_response {
Resource(super::Object),
}
}
+/// Request message for BidiWriteObject.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BidiWriteObjectRequest {
+ /// Required. The offset from the beginning of the object at which the data
+ /// should be written.
+ ///
+ /// In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ /// indicates the initial offset for the `Write()` call. The value **must** be
+ /// equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ /// return (0 if this is the first write to the object).
+ ///
+ /// On subsequent calls, this value **must** be no larger than the sum of the
+ /// first `write_offset` and the sizes of all `data` chunks sent previously on
+ /// this stream.
+ ///
+ /// An invalid value will cause an error.
+ #[prost(int64, tag = "3")]
+ pub write_offset: i64,
+ /// Checksums for the complete object. If the checksums computed by the service
+ /// don't match the specified checksums the call will fail. May only be
+ /// provided in the first or last request (either with first_message, or
+ /// finish_write set).
+ #[prost(message, optional, tag = "6")]
+ pub object_checksums: ::core::option::Option,
+ /// For each BidiWriteObjectRequest where state_lookup is `true` or the client
+ /// closes the stream, the service will send a BidiWriteObjectResponse
+ /// containing the current persisted size. The persisted size sent in responses
+ /// covers all the bytes the server has persisted thus far and can be used to
+ /// decide what data is safe for the client to drop. Note that the object's
+ /// current size reported by the BidiWriteObjectResponse may lag behind the
+ /// number of bytes written by the client.
+ #[prost(bool, tag = "7")]
+ pub state_lookup: bool,
+ /// Persists data written on the stream, up to and including the current
+ /// message, to permanent storage. This option should be used sparingly as it
+ /// may reduce performance. Ongoing writes will periodically be persisted on
+ /// the server even when `flush` is not set.
+ #[prost(bool, tag = "8")]
+ pub flush: bool,
+ /// If `true`, this indicates that the write is complete. Sending any
+ /// `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ /// will cause an error.
+ /// For a non-resumable write (where the upload_id was not set in the first
+ /// message), it is an error not to set this field in the final message of the
+ /// stream.
+ #[prost(bool, tag = "9")]
+ pub finish_write: bool,
+ /// A set of parameters common to Storage API requests concerning an object.
+ #[prost(message, optional, tag = "10")]
+ pub common_object_request_params: ::core::option::Option,
+ /// The first message of each stream should set one of the following.
+ #[prost(oneof = "bidi_write_object_request::FirstMessage", tags = "1, 2")]
+ pub first_message: ::core::option::Option,
+ /// A portion of the data for the object.
+ #[prost(oneof = "bidi_write_object_request::Data", tags = "4")]
+ pub data: ::core::option::Option,
+}
+/// Nested message and enum types in `BidiWriteObjectRequest`.
+pub mod bidi_write_object_request {
+ /// The first message of each stream should set one of the following.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum FirstMessage {
+ /// For resumable uploads. This should be the `upload_id` returned from a
+ /// call to `StartResumableWriteResponse`.
+ #[prost(string, tag = "1")]
+ UploadId(::prost::alloc::string::String),
+ /// For non-resumable uploads. Describes the overall upload, including the
+ /// destination bucket and object name, preconditions, etc.
+ #[prost(message, tag = "2")]
+ WriteObjectSpec(super::WriteObjectSpec),
+ }
+ /// A portion of the data for the object.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Data {
+ /// The data to insert. If a crc32c checksum is provided that doesn't match
+ /// the checksum computed by the service, the request will fail.
+ #[prost(message, tag = "4")]
+ ChecksummedData(super::ChecksummedData),
+ }
+}
+/// Response message for BidiWriteObject.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BidiWriteObjectResponse {
+ /// The response will set one of the following.
+ #[prost(oneof = "bidi_write_object_response::WriteStatus", tags = "1, 2")]
+ pub write_status: ::core::option::Option,
+}
+/// Nested message and enum types in `BidiWriteObjectResponse`.
+pub mod bidi_write_object_response {
+ /// The response will set one of the following.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum WriteStatus {
+ /// The total number of bytes that have been processed for the given object
+ /// from all `WriteObject` calls. Only set if the upload has not finalized.
+ #[prost(int64, tag = "1")]
+ PersistedSize(i64),
+ /// A resource containing the metadata for the uploaded object. Only set if
+ /// the upload has finalized.
+ #[prost(message, tag = "2")]
+ Resource(super::Object),
+ }
+}
/// Request message for ListObjects.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -663,6 +815,16 @@ pub struct ListObjectsRequest {
/// lexicographic_end (exclusive).
#[prost(string, tag = "11")]
pub lexicographic_end: ::prost::alloc::string::String,
+ /// Optional. If true, only list all soft-deleted versions of the object.
+ /// Soft delete policy is required to set this option.
+ #[prost(bool, tag = "12")]
+ pub soft_deleted: bool,
+ /// Optional. Filter results to objects and prefixes that match this glob
+ /// pattern. See [List Objects Using
+ /// Glob]()
+ /// for the full syntax.
+ #[prost(string, tag = "14")]
+ pub match_glob: ::prost::alloc::string::String,
}
/// Request object for `QueryWriteStatus`.
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -1234,8 +1396,6 @@ pub struct Bucket {
#[prost(string, tag = "3")]
pub project: ::prost::alloc::string::String,
/// Output only. The metadata generation of this bucket.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(int64, tag = "4")]
pub metageneration: i64,
/// Immutable. The location of the bucket. Object data for objects in the
@@ -1263,7 +1423,7 @@ pub struct Bucket {
/// replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region
/// buckets only. If rpo is not specified when the bucket is created, it
/// defaults to "DEFAULT". For more information, see
- ///
+ ///
#[prost(string, tag = "27")]
pub rpo: ::prost::alloc::string::String,
/// Access controls on the bucket.
@@ -1282,8 +1442,6 @@ pub struct Bucket {
#[prost(message, optional, tag = "10")]
pub lifecycle: ::core::option::Option,
/// Output only. The creation time of the bucket.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "11")]
pub create_time: ::core::option::Option<::prost_types::Timestamp>,
/// The bucket's \[ Resource Sharing]
@@ -1291,8 +1449,6 @@ pub struct Bucket {
#[prost(message, repeated, tag = "12")]
pub cors: ::prost::alloc::vec::Vec,
/// Output only. The modification time of the bucket.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "13")]
pub update_time: ::core::option::Option<::prost_types::Timestamp>,
/// The default value for event-based hold on newly created objects in this
@@ -1364,6 +1520,10 @@ pub struct Bucket {
/// Autoclass feature will be disabled and have no effect on the bucket.
#[prost(message, optional, tag = "28")]
pub autoclass: ::core::option::Option,
+ /// Optional. The bucket's soft delete policy. The soft delete policy prevents
+ /// soft-deleted objects from being permanently deleted.
+ #[prost(message, optional, tag = "31")]
+ pub soft_delete_policy: ::core::option::Option,
}
/// Nested message and enum types in `Bucket`.
pub mod bucket {
@@ -1588,6 +1748,19 @@ pub mod bucket {
#[prost(message, optional, tag = "4")]
pub retention_duration: ::core::option::Option<::prost_types::Duration>,
}
+ /// Soft delete policy properties of a bucket.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct SoftDeletePolicy {
+ /// The period of time that soft-deleted objects in the bucket must be
+ /// retained and cannot be permanently deleted. The duration must be greater
+ /// than or equal to 7 days and less than 1 year.
+ #[prost(message, optional, tag = "1")]
+ pub retention_duration: ::core::option::Option<::prost_types::Duration>,
+ /// Time from which the policy was effective. This is service-provided.
+ #[prost(message, optional, tag = "2")]
+ pub effective_time: ::core::option::Option<::prost_types::Timestamp>,
+ }
/// Properties of a bucket related to versioning.
/// For more on Cloud Storage versioning, see
///
@@ -1641,6 +1814,19 @@ pub mod bucket {
/// to the bucket creation time.
#[prost(message, optional, tag = "2")]
pub toggle_time: ::core::option::Option<::prost_types::Timestamp>,
+ /// An object in an Autoclass bucket will eventually cool down to the
+ /// terminal storage class if there is no access to the object.
+ /// The only valid values are NEARLINE and ARCHIVE.
+ #[prost(string, optional, tag = "3")]
+ pub terminal_storage_class: ::core::option::Option<
+ ::prost::alloc::string::String,
+ >,
+ /// Output only. Latest instant at which the autoclass terminal storage class
+ /// was updated.
+ #[prost(message, optional, tag = "4")]
+ pub terminal_storage_class_update_time: ::core::option::Option<
+ ::prost_types::Timestamp,
+ >,
}
}
/// An access-control entry.
@@ -1701,7 +1887,7 @@ pub struct BucketAccessControl {
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChecksummedData {
- /// The data.
+ /// Optional. The data.
#[prost(bytes = "bytes", tag = "1")]
pub content: ::prost::bytes::Bytes,
/// If set, the CRC32C digest of the content field.
@@ -1837,15 +2023,13 @@ pub struct Object {
#[prost(string, tag = "27")]
pub etag: ::prost::alloc::string::String,
/// Immutable. The content generation of this object. Used for object
- /// versioning. Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// versioning.
#[prost(int64, tag = "3")]
pub generation: i64,
/// Output only. The version of the metadata for this generation of this
/// object. Used for preconditions and for detecting changes in metadata. A
/// metageneration number is only meaningful in the context of a particular
- /// generation of a particular object. Attempting to set or update this field
- /// will result in a \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// generation of a particular object.
#[prost(int64, tag = "4")]
pub metageneration: i64,
/// Storage class of the object.
@@ -1853,8 +2037,6 @@ pub struct Object {
pub storage_class: ::prost::alloc::string::String,
/// Output only. Content-Length of the object data in bytes, matching
/// \[RFC 7230 §3.3.2].
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(int64, tag = "6")]
pub size: i64,
/// Content-Encoding of the object data, matching
@@ -1881,8 +2063,7 @@ pub struct Object {
#[prost(string, tag = "11")]
pub content_language: ::prost::alloc::string::String,
/// Output only. If this object is noncurrent, this is the time when the object
- /// became noncurrent. Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// became noncurrent.
#[prost(message, optional, tag = "12")]
pub delete_time: ::core::option::Option<::prost_types::Timestamp>,
/// Content-Type of the object data, matching
@@ -1892,14 +2073,10 @@ pub struct Object {
#[prost(string, tag = "13")]
pub content_type: ::prost::alloc::string::String,
/// Output only. The creation time of the object.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "14")]
pub create_time: ::core::option::Option<::prost_types::Timestamp>,
/// Output only. Number of underlying components that make up this object.
- /// Components are accumulated by compose operations. Attempting to set or
- /// update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// Components are accumulated by compose operations.
#[prost(int32, tag = "15")]
pub component_count: i32,
/// Output only. Hashes for the data part of this object. This field is used
@@ -1912,8 +2089,6 @@ pub struct Object {
/// such as modifying custom metadata, as well as changes made by Cloud Storage
/// on behalf of a requester, such as changing the storage class based on an
/// Object Lifecycle Configuration.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "17")]
pub update_time: ::core::option::Option<::prost_types::Timestamp>,
/// Cloud KMS Key used to encrypt this object, if the object is encrypted by
@@ -1922,8 +2097,6 @@ pub struct Object {
pub kms_key: ::prost::alloc::string::String,
/// Output only. The time at which the object's storage class was last changed.
/// When the object is initially created, it will be set to time_created.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "19")]
pub update_storage_class_time: ::core::option::Option<::prost_types::Timestamp>,
/// Whether an object is under temporary hold. While this flag is set to true,
@@ -1962,8 +2135,7 @@ pub struct Object {
#[prost(bool, optional, tag = "23")]
pub event_based_hold: ::core::option::Option,
/// Output only. The owner of the object. This will always be the uploader of
- /// the object. Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// the object.
#[prost(message, optional, tag = "24")]
pub owner: ::core::option::Option,
/// Metadata of Customer-Supplied Encryption Key, if the object is encrypted by
@@ -2320,8 +2492,8 @@ pub mod storage_client {
}
/// Gets the IAM policy for a specified bucket or object.
/// The `resource` field in the request should be
- /// projects/_/buckets/ for a bucket or
- /// projects/_/buckets//objects/ for an object.
+ /// `projects/_/buckets/{bucket}` for a bucket or
+ /// `projects/_/buckets/{bucket}/objects/{object}` for an object.
pub async fn get_iam_policy(
&mut self,
request: impl tonic::IntoRequest<
@@ -2351,8 +2523,8 @@ pub mod storage_client {
}
/// Updates an IAM policy for the specified bucket or object.
/// The `resource` field in the request should be
- /// projects/_/buckets/ for a bucket or
- /// projects/_/buckets//objects/ for an object.
+ /// `projects/_/buckets/{bucket}` for a bucket or
+ /// `projects/_/buckets/{bucket}/objects/{object}` for an object.
pub async fn set_iam_policy(
&mut self,
request: impl tonic::IntoRequest<
@@ -2383,8 +2555,8 @@ pub mod storage_client {
/// Tests a set of permissions on the given bucket or object to see which, if
/// any, are held by the caller.
/// The `resource` field in the request should be
- /// projects/_/buckets/ for a bucket or
- /// projects/_/buckets//objects/ for an object.
+ /// `projects/_/buckets/{bucket}` for a bucket or
+ /// `projects/_/buckets/{bucket}/objects/{object}` for an object.
pub async fn test_iam_permissions(
&mut self,
request: impl tonic::IntoRequest<
@@ -2610,6 +2782,29 @@ pub mod storage_client {
.insert(GrpcMethod::new("google.storage.v2.Storage", "DeleteObject"));
self.inner.unary(req, path, codec).await
}
+ /// Restores a soft-deleted object.
+ pub async fn restore_object(
+ &mut self,
+ request: impl tonic::IntoRequest,
+ ) -> std::result::Result, tonic::Status> {
+ self.inner
+ .ready()
+ .await
+ .map_err(|e| {
+ tonic::Status::new(
+ tonic::Code::Unknown,
+ format!("Service was not ready: {}", e.into()),
+ )
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static(
+ "/google.storage.v2.Storage/RestoreObject",
+ );
+ let mut req = request.into_request();
+ req.extensions_mut()
+ .insert(GrpcMethod::new("google.storage.v2.Storage", "RestoreObject"));
+ self.inner.unary(req, path, codec).await
+ }
/// Cancels an in-progress resumable upload.
///
/// Any attempts to write to the resumable upload after cancelling the upload
@@ -2774,6 +2969,9 @@ pub mod storage_client {
/// status, with a WriteObjectResponse containing the finalized object's
/// metadata.
///
+ /// Alternatively, the BidiWriteObject operation may be used to write an
+ /// object with controls over flushing and the ability to determine the
+ /// current persisted size.
pub async fn write_object(
&mut self,
request: impl tonic::IntoStreamingRequest<
@@ -2801,6 +2999,48 @@ pub mod storage_client {
.insert(GrpcMethod::new("google.storage.v2.Storage", "WriteObject"));
self.inner.client_streaming(req, path, codec).await
}
+ /// Stores a new object and metadata.
+ ///
+ /// This is similar to the WriteObject call with the added support for
+ /// manual flushing of persisted state, and the ability to determine current
+ /// persisted size without closing the stream.
+ ///
+ /// The client may specify one or both of the `state_lookup` and `flush` fields
+ /// in each BidiWriteObjectRequest. If `flush` is specified, the data written
+ /// so far will be persisted to storage. If `state_lookup` is specified, the
+ /// service will respond with a BidiWriteObjectResponse that contains the
+ /// persisted size. If both `flush` and `state_lookup` are specified, the flush
+ /// will always occur before a `state_lookup`, so that both may be set in the
+ /// same request and the returned state will be the state of the object
+ /// post-flush. When the stream is closed, a BidiWriteObjectResponse will
+ /// always be sent to the client, regardless of the value of `state_lookup`.
+ pub async fn bidi_write_object(
+ &mut self,
+ request: impl tonic::IntoStreamingRequest<
+ Message = super::BidiWriteObjectRequest,
+ >,
+ ) -> std::result::Result<
+ tonic::Response>,
+ tonic::Status,
+ > {
+ self.inner
+ .ready()
+ .await
+ .map_err(|e| {
+ tonic::Status::new(
+ tonic::Code::Unknown,
+ format!("Service was not ready: {}", e.into()),
+ )
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static(
+ "/google.storage.v2.Storage/BidiWriteObject",
+ );
+ let mut req = request.into_streaming_request();
+ req.extensions_mut()
+ .insert(GrpcMethod::new("google.storage.v2.Storage", "BidiWriteObject"));
+ self.inner.streaming(req, path, codec).await
+ }
/// Retrieves a list of objects matching the criteria.
pub async fn list_objects(
&mut self,
diff --git a/googleapis/src/google.api.rs b/googleapis/src/google.api.rs
index dfe3c5c7..5e8c610e 100644
--- a/googleapis/src/google.api.rs
+++ b/googleapis/src/google.api.rs
@@ -871,6 +871,19 @@ pub enum FieldBehavior {
/// a non-empty value will be returned. The user will not be aware of what
/// non-empty value to expect.
NonEmptyDefault = 7,
+ /// Denotes that the field in a resource (a message annotated with
+ /// google.api.resource) is used in the resource name to uniquely identify the
+ /// resource. For AIP-compliant APIs, this should only be applied to the
+ /// `name` field on the resource.
+ ///
+ /// This behavior should not be applied to references to other resources within
+ /// the message.
+ ///
+ /// The identifier field of resources often have different field behavior
+ /// depending on the request it is embedded in (e.g. for Create methods name
+ /// is optional and unused, while for Update methods it is required). Instead
+ /// of method-specific annotations, only `IDENTIFIER` is required.
+ Identifier = 8,
}
impl FieldBehavior {
/// String value of the enum field names used in the ProtoBuf definition.
@@ -887,6 +900,7 @@ impl FieldBehavior {
FieldBehavior::Immutable => "IMMUTABLE",
FieldBehavior::UnorderedList => "UNORDERED_LIST",
FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT",
+ FieldBehavior::Identifier => "IDENTIFIER",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
@@ -900,6 +914,7 @@ impl FieldBehavior {
"IMMUTABLE" => Some(Self::Immutable),
"UNORDERED_LIST" => Some(Self::UnorderedList),
"NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault),
+ "IDENTIFIER" => Some(Self::Identifier),
_ => None,
}
}
diff --git a/googleapis/src/google.cloud.bigquery.storage.v1.rs b/googleapis/src/google.cloud.bigquery.storage.v1.rs
index 69a0b01d..24a95afa 100644
--- a/googleapis/src/google.cloud.bigquery.storage.v1.rs
+++ b/googleapis/src/google.cloud.bigquery.storage.v1.rs
@@ -981,6 +981,17 @@ pub struct AppendRowsRequest {
tag = "7"
)]
pub missing_value_interpretations: ::std::collections::HashMap<::prost::alloc::string::String, i32>,
+ /// Optional. Default missing value interpretation for all columns in the
+ /// table. When a value is specified on an `AppendRowsRequest`, it is applied
+ /// to all requests on the connection from that point forward, until a
+ /// subsequent `AppendRowsRequest` sets it to a different value.
+ /// `missing_value_interpretation` can override
+ /// `default_missing_value_interpretation`. For example, if you want to write
+ /// `NULL` instead of using default values for some columns, you can set
+ /// `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same
+ /// time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+ #[prost(enumeration = "append_rows_request::MissingValueInterpretation", tag = "8")]
+ pub default_missing_value_interpretation: i32,
/// Input rows. The `writer_schema` field must be specified at the initial
/// request and currently, it will be ignored if specified in following
/// requests. Following requests must have data in the same format as the
@@ -1256,7 +1267,8 @@ pub mod storage_error {
InvalidCmekProvided = 11,
/// There is an encryption error while using customer-managed encryption key.
CmekEncryptionError = 12,
- /// Key Management Service (KMS) service returned an error.
+ /// Key Management Service (KMS) service returned an error, which can be
+ /// retried.
KmsServiceError = 13,
/// Permission denied while using customer-managed encryption key.
KmsPermissionDenied = 14,
diff --git a/googleapis/src/google.iam.v1.rs b/googleapis/src/google.iam.v1.rs
index f1b3b8d6..91d834fb 100644
--- a/googleapis/src/google.iam.v1.rs
+++ b/googleapis/src/google.iam.v1.rs
@@ -43,6 +43,7 @@ pub struct GetPolicyOptions {
///
/// **JSON example:**
///
+/// ```
/// {
/// "bindings": [
/// {
@@ -70,9 +71,11 @@ pub struct GetPolicyOptions {
/// "etag": "BwWWja0YfJA=",
/// "version": 3
/// }
+/// ```
///
/// **YAML example:**
///
+/// ```
/// bindings:
/// - members:
/// - user:mike@example.com
@@ -89,6 +92,7 @@ pub struct GetPolicyOptions {
/// expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
/// etag: BwWWja0YfJA=
/// version: 3
+/// ```
///
/// For a description of IAM and its features, see the
/// [IAM documentation]().
@@ -160,7 +164,7 @@ pub struct Binding {
/// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
#[prost(string, tag = "1")]
pub role: ::prost::alloc::string::String,
- /// Specifies the principals requesting access for a Cloud Platform resource.
+ /// Specifies the principals requesting access for a Google Cloud resource.
/// `members` can have the following values:
///
/// * `allUsers`: A special identifier that represents anyone who is
@@ -270,8 +274,8 @@ pub struct Binding {
/// }
///
/// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
-/// logging. It also exempts jose@example.com from DATA_READ logging, and
-/// aliya@example.com from DATA_WRITE logging.
+/// logging. It also exempts `jose@example.com` from DATA_READ logging, and
+/// `aliya@example.com` from DATA_WRITE logging.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AuditConfig {
@@ -382,7 +386,7 @@ pub struct BindingDelta {
/// Required
#[prost(string, tag = "2")]
pub role: ::prost::alloc::string::String,
- /// A single identity requesting access for a Cloud Platform resource.
+ /// A single identity requesting access for a Google Cloud resource.
/// Follows the same format of Binding.members.
/// Required
#[prost(string, tag = "3")]
diff --git a/googleapis/src/google.pubsub.v1.rs b/googleapis/src/google.pubsub.v1.rs
index b599e283..0ff60264 100644
--- a/googleapis/src/google.pubsub.v1.rs
+++ b/googleapis/src/google.pubsub.v1.rs
@@ -568,11 +568,12 @@ pub mod schema_service_client {
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MessageStoragePolicy {
- /// A list of IDs of GCP regions where messages that are published to the topic
- /// may be persisted in storage. Messages published by publishers running in
- /// non-allowed GCP regions (or running outside of GCP altogether) will be
- /// routed for storage in one of the allowed regions. An empty list means that
- /// no regions are allowed, and is not a valid configuration.
+ /// A list of IDs of Google Cloud regions where messages that are published
+ /// to the topic may be persisted in storage. Messages published by publishers
+ /// running in non-allowed Google Cloud regions (or running outside of Google
+ /// Cloud altogether) are routed for storage in one of the allowed regions.
+ /// An empty list means that no regions are allowed, and is not a valid
+ /// configuration.
#[prost(string, repeated, tag = "1")]
pub allowed_persistence_regions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
@@ -1734,8 +1735,8 @@ pub struct CreateSnapshotRequest {
/// in the request, the server will assign a random name for this snapshot on
/// the same project as the subscription. Note that for REST API requests, you
/// must specify a name. See the [resource name
- /// rules](). Format
- /// is `projects/{project}/snapshots/{snap}`.
+ /// rules]().
+ /// Format is `projects/{project}/snapshots/{snap}`.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Required. The subscription whose backlog the snapshot retains.
@@ -1964,7 +1965,7 @@ pub mod publisher_client {
self
}
/// Creates the given topic with the given name. See the [resource name rules]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names).
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).
pub async fn create_topic(
&mut self,
request: impl tonic::IntoRequest,
@@ -2196,16 +2197,16 @@ pub mod subscriber_client {
self
}
/// Creates a subscription to a given topic. See the [resource name rules]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names).
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).
/// If the subscription already exists, returns `ALREADY_EXISTS`.
/// If the corresponding topic doesn't exist, returns `NOT_FOUND`.
///
/// If the name is not provided in the request, the server will assign a random
/// name for this subscription on the same project as the topic, conforming
/// to the [resource name format]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated
- /// name is populated in the returned Subscription object. Note that for REST
- /// API requests, you must specify a name in the request.
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The
+ /// generated name is populated in the returned Subscription object. Note that
+ /// for REST API requests, you must specify a name in the request.
pub async fn create_subscription(
&mut self,
request: impl tonic::IntoRequest,
@@ -2433,7 +2434,7 @@ pub mod subscriber_client {
/// the request, the server will assign a random
/// name for this snapshot on the same project as the subscription, conforming
/// to the [resource name format]
- /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The
/// generated name is populated in the returned Snapshot object. Note that for
/// REST API requests, you must specify a name in the request.
pub async fn create_snapshot(
diff --git a/googleapis/src/google.spanner.admin.instance.v1.rs b/googleapis/src/google.spanner.admin.instance.v1.rs
index 9cb464bd..06712fa2 100644
--- a/googleapis/src/google.spanner.admin.instance.v1.rs
+++ b/googleapis/src/google.spanner.admin.instance.v1.rs
@@ -238,6 +238,86 @@ pub mod instance_config {
}
}
}
+/// Autoscaling config for an instance.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AutoscalingConfig {
+ /// Required. Autoscaling limits for an instance.
+ #[prost(message, optional, tag = "1")]
+ pub autoscaling_limits: ::core::option::Option,
+ /// Required. The autoscaling targets for an instance.
+ #[prost(message, optional, tag = "2")]
+ pub autoscaling_targets: ::core::option::Option,
+}
+/// Nested message and enum types in `AutoscalingConfig`.
+pub mod autoscaling_config {
+ /// The autoscaling limits for the instance. Users can define the minimum and
+ /// maximum compute capacity allocated to the instance, and the autoscaler will
+ /// only scale within that range. Users can either use nodes or processing
+ /// units to specify the limits, but should use the same unit to set both the
+ /// min_limit and max_limit.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct AutoscalingLimits {
+ /// The minimum compute capacity for the instance.
+ #[prost(oneof = "autoscaling_limits::MinLimit", tags = "1, 2")]
+ pub min_limit: ::core::option::Option,
+ /// The maximum compute capacity for the instance. The maximum compute
+ /// capacity should be less than or equal to 10X the minimum compute
+ /// capacity.
+ #[prost(oneof = "autoscaling_limits::MaxLimit", tags = "3, 4")]
+ pub max_limit: ::core::option::Option,
+ }
+ /// Nested message and enum types in `AutoscalingLimits`.
+ pub mod autoscaling_limits {
+ /// The minimum compute capacity for the instance.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum MinLimit {
+ /// Minimum number of nodes allocated to the instance. If set, this number
+ /// should be greater than or equal to 1.
+ #[prost(int32, tag = "1")]
+ MinNodes(i32),
+ /// Minimum number of processing units allocated to the instance. If set,
+ /// this number should be multiples of 1000.
+ #[prost(int32, tag = "2")]
+ MinProcessingUnits(i32),
+ }
+ /// The maximum compute capacity for the instance. The maximum compute
+ /// capacity should be less than or equal to 10X the minimum compute
+ /// capacity.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum MaxLimit {
+ /// Maximum number of nodes allocated to the instance. If set, this number
+ /// should be greater than or equal to min_nodes.
+ #[prost(int32, tag = "3")]
+ MaxNodes(i32),
+ /// Maximum number of processing units allocated to the instance. If set,
+ /// this number should be multiples of 1000 and be greater than or equal to
+ /// min_processing_units.
+ #[prost(int32, tag = "4")]
+ MaxProcessingUnits(i32),
+ }
+ }
+ /// The autoscaling targets for an instance.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct AutoscalingTargets {
+ /// Required. The target high priority cpu utilization percentage that the
+ /// autoscaler should be trying to achieve for the instance. This number is
+ /// on a scale from 0 (no utilization) to 100 (full utilization). The valid
+ /// range is [10, 90] inclusive.
+ #[prost(int32, tag = "1")]
+ pub high_priority_cpu_utilization_percent: i32,
+ /// Required. The target storage utilization percentage that the autoscaler
+ /// should be trying to achieve for the instance. This number is on a scale
+ /// from 0 (no utilization) to 100 (full utilization). The valid range is
+ /// [10, 100] inclusive.
+ #[prost(int32, tag = "2")]
+ pub storage_utilization_percent: i32,
+ }
+}
/// An isolated set of Cloud Spanner resources on which databases can be hosted.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -259,8 +339,12 @@ pub struct Instance {
#[prost(string, tag = "3")]
pub display_name: ::prost::alloc::string::String,
/// The number of nodes allocated to this instance. At most one of either
- /// node_count or processing_units should be present in the message. This
- /// may be zero in API responses for instances that are not yet in state
+ /// node_count or processing_units should be present in the message.
+ ///
+ /// Users can set the node_count field to specify the target number of nodes
+ /// allocated to the instance.
+ ///
+ /// This may be zero in API responses for instances that are not yet in state
/// `READY`.
///
/// See [the
@@ -269,14 +353,25 @@ pub struct Instance {
#[prost(int32, tag = "5")]
pub node_count: i32,
/// The number of processing units allocated to this instance. At most one of
- /// processing_units or node_count should be present in the message. This may
- /// be zero in API responses for instances that are not yet in state `READY`.
+ /// processing_units or node_count should be present in the message.
+ ///
+ /// Users can set the processing_units field to specify the target number of
+ /// processing units allocated to the instance.
+ ///
+ /// This may be zero in API responses for instances that are not yet in state
+ /// `READY`.
///
/// See [the
/// documentation]()
/// for more information about nodes and processing units.
#[prost(int32, tag = "9")]
pub processing_units: i32,
+ /// Optional. The autoscaling configuration. Autoscaling is enabled if this
+ /// field is set. When autoscaling is enabled, node_count and processing_units
+ /// are treated as OUTPUT_ONLY fields and reflect the current compute capacity
+ /// allocated to the instance.
+ #[prost(message, optional, tag = "17")]
+ pub autoscaling_config: ::core::option::Option,
/// Output only. The current instance state. For
/// \[CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance\],
/// the state must be either omitted or set to `CREATING`. For
diff --git a/googleapis/src/google.spanner.v1.rs b/googleapis/src/google.spanner.v1.rs
index 4a9aa57c..95d3dbc8 100644
--- a/googleapis/src/google.spanner.v1.rs
+++ b/googleapis/src/google.spanner.v1.rs
@@ -1147,6 +1147,10 @@ pub enum TypeAnnotationCode {
/// \[JSON][google.spanner.v1.TypeCode.JSON\] when a client interacts with PostgreSQL-enabled
/// Spanner databases.
PgJsonb = 3,
+ /// PostgreSQL compatible OID type. This annotation can be used by a client
+ /// interacting with PostgreSQL-enabled Spanner database to specify that a
+ /// value should be treated using the semantics of the OID type.
+ PgOid = 4,
}
impl TypeAnnotationCode {
/// String value of the enum field names used in the ProtoBuf definition.
@@ -1158,6 +1162,7 @@ impl TypeAnnotationCode {
TypeAnnotationCode::Unspecified => "TYPE_ANNOTATION_CODE_UNSPECIFIED",
TypeAnnotationCode::PgNumeric => "PG_NUMERIC",
TypeAnnotationCode::PgJsonb => "PG_JSONB",
+ TypeAnnotationCode::PgOid => "PG_OID",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
@@ -1166,6 +1171,7 @@ impl TypeAnnotationCode {
"TYPE_ANNOTATION_CODE_UNSPECIFIED" => Some(Self::Unspecified),
"PG_NUMERIC" => Some(Self::PgNumeric),
"PG_JSONB" => Some(Self::PgJsonb),
+ "PG_OID" => Some(Self::PgOid),
_ => None,
}
}
@@ -1382,7 +1388,8 @@ pub struct CreateSessionRequest {
#[prost(message, optional, tag = "2")]
pub session: ::core::option::Option,
}
-/// The request for \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
+/// The request for
+/// \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchCreateSessionsRequest {
@@ -1396,11 +1403,13 @@ pub struct BatchCreateSessionsRequest {
/// The API may return fewer than the requested number of sessions. If a
/// specific number of sessions are desired, the client can make additional
/// calls to BatchCreateSessions (adjusting
- /// \[session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count\] as necessary).
+ /// \[session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count\]
+ /// as necessary).
#[prost(int32, tag = "3")]
pub session_count: i32,
}
-/// The response for \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
+/// The response for
+/// \[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions\].
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchCreateSessionsResponse {
@@ -1457,7 +1466,8 @@ pub struct ListSessionsRequest {
#[prost(int32, tag = "2")]
pub page_size: i32,
/// If non-empty, `page_token` should contain a
- /// \[next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token\] from a previous
+ /// \[next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token\]
+ /// from a previous
/// \[ListSessionsResponse][google.spanner.v1.ListSessionsResponse\].
#[prost(string, tag = "3")]
pub page_token: ::prost::alloc::string::String,
@@ -1482,8 +1492,8 @@ pub struct ListSessionsResponse {
#[prost(message, repeated, tag = "1")]
pub sessions: ::prost::alloc::vec::Vec,
/// `next_page_token` can be sent in a subsequent
- /// \[ListSessions][google.spanner.v1.Spanner.ListSessions\] call to fetch more of the matching
- /// sessions.
+ /// \[ListSessions][google.spanner.v1.Spanner.ListSessions\] call to fetch more
+ /// of the matching sessions.
#[prost(string, tag = "2")]
pub next_page_token: ::prost::alloc::string::String,
}
@@ -1583,6 +1593,128 @@ pub mod request_options {
}
}
}
+/// The DirectedReadOptions can be used to indicate which replicas or regions
+/// should be used for non-transactional reads or queries.
+///
+/// DirectedReadOptions may only be specified for a read-only transaction,
+/// otherwise the API will return an `INVALID_ARGUMENT` error.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DirectedReadOptions {
+ /// Required. Replicas indicates the order in which replicas should be
+ /// considered. At most one of either include_replicas or exclude_replicas
+ /// should be present in the message.
+ #[prost(oneof = "directed_read_options::Replicas", tags = "1, 2")]
+ pub replicas: ::core::option::Option,
+}
+/// Nested message and enum types in `DirectedReadOptions`.
+pub mod directed_read_options {
+ /// The directed read replica selector.
+ /// Callers must provide one or more of the following fields for replica
+ /// selection:
+ ///
+ /// * `location` - The location must be one of the regions within the
+ /// multi-region configuration of your database.
+ /// * `type` - The type of the replica.
+ ///
+ /// Some examples of using replica_selectors are:
+ ///
+ /// * `location:us-east1` --> The "us-east1" replica(s) of any available type
+ /// will be used to process the request.
+ /// * `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in nearest
+ /// . available location will be used to process the
+ /// request.
+ /// * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s)
+ /// in location "us-east1" will be used to process
+ /// the request.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct ReplicaSelection {
+ /// The location or region of the serving requests, e.g. "us-east1".
+ #[prost(string, tag = "1")]
+ pub location: ::prost::alloc::string::String,
+ /// The type of replica.
+ #[prost(enumeration = "replica_selection::Type", tag = "2")]
+ pub r#type: i32,
+ }
+ /// Nested message and enum types in `ReplicaSelection`.
+ pub mod replica_selection {
+ /// Indicates the type of replica.
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+ #[repr(i32)]
+ pub enum Type {
+ /// Not specified.
+ Unspecified = 0,
+ /// Read-write replicas support both reads and writes.
+ ReadWrite = 1,
+ /// Read-only replicas only support reads (not writes).
+ ReadOnly = 2,
+ }
+ impl Type {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ Type::Unspecified => "TYPE_UNSPECIFIED",
+ Type::ReadWrite => "READ_WRITE",
+ Type::ReadOnly => "READ_ONLY",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option {
+ match value {
+ "TYPE_UNSPECIFIED" => Some(Self::Unspecified),
+ "READ_WRITE" => Some(Self::ReadWrite),
+ "READ_ONLY" => Some(Self::ReadOnly),
+ _ => None,
+ }
+ }
+ }
+ }
+ /// An IncludeReplicas contains a repeated set of ReplicaSelection which
+ /// indicates the order in which replicas should be considered.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct IncludeReplicas {
+ /// The directed read replica selector.
+ #[prost(message, repeated, tag = "1")]
+ pub replica_selections: ::prost::alloc::vec::Vec,
+ /// If true, Spanner will not route requests to a replica outside the
+ /// include_replicas list when all of the specified replicas are unavailable
+ /// or unhealthy. Default value is `false`.
+ #[prost(bool, tag = "2")]
+ pub auto_failover_disabled: bool,
+ }
+ /// An ExcludeReplicas contains a repeated set of ReplicaSelection that should
+ /// be excluded from serving requests.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct ExcludeReplicas {
+ /// The directed read replica selector.
+ #[prost(message, repeated, tag = "1")]
+ pub replica_selections: ::prost::alloc::vec::Vec,
+ }
+ /// Required. Replicas indicates the order in which replicas should be
+ /// considered. At most one of either include_replicas or exclude_replicas
+ /// should be present in the message.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Replicas {
+ /// Include_replicas indicates the order of replicas (as they appear in
+ /// this list) to process the request. If auto_failover_disabled is set to
+ /// true and all replicas are exhausted without finding a healthy replica,
+ /// Spanner will wait for a replica in the list to become available, requests
+ /// may fail due to `DEADLINE_EXCEEDED` errors.
+ #[prost(message, tag = "1")]
+ IncludeReplicas(IncludeReplicas),
+ /// Exclude_replicas indicates that should be excluded from serving
+ /// requests. Spanner will not route requests to the replicas in this list.
+ #[prost(message, tag = "2")]
+ ExcludeReplicas(ExcludeReplicas),
+ }
+}
/// The request for \[ExecuteSql][google.spanner.v1.Spanner.ExecuteSql\] and
/// \[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql\].
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -1623,7 +1755,8 @@ pub struct ExecuteSqlRequest {
pub params: ::core::option::Option<::prost_types::Struct>,
/// It is not always possible for Cloud Spanner to infer the right SQL type
/// from a JSON value. For example, values of type `BYTES` and values
- /// of type `STRING` both appear in \[params][google.spanner.v1.ExecuteSqlRequest.params\] as JSON strings.
+ /// of type `STRING` both appear in
+ /// \[params][google.spanner.v1.ExecuteSqlRequest.params\] as JSON strings.
///
/// In these cases, `param_types` can be used to specify the exact
/// SQL type for some or all of the SQL statement parameters. See the
@@ -1633,15 +1766,18 @@ pub struct ExecuteSqlRequest {
pub param_types: ::std::collections::HashMap<::prost::alloc::string::String, Type>,
/// If this request is resuming a previously interrupted SQL statement
/// execution, `resume_token` should be copied from the last
- /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the interruption. Doing this
- /// enables the new SQL statement execution to resume where the last one left
- /// off. The rest of the request parameters must exactly match the
- /// request that yielded this token.
+ /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the
+ /// interruption. Doing this enables the new SQL statement execution to resume
+ /// where the last one left off. The rest of the request parameters must
+ /// exactly match the request that yielded this token.
#[prost(bytes = "vec", tag = "6")]
pub resume_token: ::prost::alloc::vec::Vec,
/// Used to control the amount of debugging information returned in
- /// \[ResultSetStats][google.spanner.v1.ResultSetStats\]. If \[partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token\] is set, \[query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode\] can only
- /// be set to \[QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL\].
+ /// \[ResultSetStats][google.spanner.v1.ResultSetStats\]. If
+ /// \[partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token\] is
+ /// set, \[query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode\] can only
+ /// be set to
+ /// \[QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL\].
#[prost(enumeration = "execute_sql_request::QueryMode", tag = "7")]
pub query_mode: i32,
/// If present, results will be restricted to the specified partition
@@ -1668,11 +1804,14 @@ pub struct ExecuteSqlRequest {
/// Common options for this request.
#[prost(message, optional, tag = "11")]
pub request_options: ::core::option::Option,
+ /// Directed read options for this request.
+ #[prost(message, optional, tag = "15")]
+ pub directed_read_options: ::core::option::Option,
/// If this is for a partitioned query and this field is set to `true`, the
- /// request will be executed via Spanner independent compute resources.
+ /// request is executed with Spanner Data Boost independent compute resources.
///
/// If the field is set to `true` but the request does not set
- /// `partition_token`, the API will return an `INVALID_ARGUMENT` error.
+ /// `partition_token`, the API returns an `INVALID_ARGUMENT` error.
#[prost(bool, tag = "16")]
pub data_boost_enabled: bool,
}
@@ -1783,17 +1922,17 @@ pub struct ExecuteBatchDmlRequest {
/// transaction.
#[prost(message, optional, tag = "2")]
pub transaction: ::core::option::Option,
- /// Required. The list of statements to execute in this batch. Statements are executed
- /// serially, such that the effects of statement `i` are visible to statement
- /// `i+1`. Each statement must be a DML statement. Execution stops at the
- /// first failed statement; the remaining statements are not executed.
+ /// Required. The list of statements to execute in this batch. Statements are
+ /// executed serially, such that the effects of statement `i` are visible to
+ /// statement `i+1`. Each statement must be a DML statement. Execution stops at
+ /// the first failed statement; the remaining statements are not executed.
///
/// Callers must provide at least one statement.
#[prost(message, repeated, tag = "3")]
pub statements: ::prost::alloc::vec::Vec,
- /// Required. A per-transaction sequence number used to identify this request. This field
- /// makes each request idempotent such that if the request is received multiple
- /// times, at most one will succeed.
+ /// Required. A per-transaction sequence number used to identify this request.
+ /// This field makes each request idempotent such that if the request is
+ /// received multiple times, at most one will succeed.
///
/// The sequence number must be monotonically increasing within the
/// transaction. If a request arrives for the first time with an out-of-order
@@ -1830,7 +1969,9 @@ pub mod execute_batch_dml_request {
pub params: ::core::option::Option<::prost_types::Struct>,
/// It is not always possible for Cloud Spanner to infer the right SQL type
/// from a JSON value. For example, values of type `BYTES` and values
- /// of type `STRING` both appear in \[params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params\] as JSON strings.
+ /// of type `STRING` both appear in
+ /// \[params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params\] as
+ /// JSON strings.
///
/// In these cases, `param_types` can be used to specify the exact
/// SQL type for some or all of the SQL statement parameters. See the
@@ -1840,40 +1981,49 @@ pub mod execute_batch_dml_request {
pub param_types: ::std::collections::HashMap<::prost::alloc::string::String, super::Type>,
}
}
-/// The response for \[ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml\]. Contains a list
-/// of \[ResultSet][google.spanner.v1.ResultSet\] messages, one for each DML statement that has successfully
-/// executed, in the same order as the statements in the request. If a statement
-/// fails, the status in the response body identifies the cause of the failure.
+/// The response for
+/// \[ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml\]. Contains a list
+/// of \[ResultSet][google.spanner.v1.ResultSet\] messages, one for each DML
+/// statement that has successfully executed, in the same order as the statements
+/// in the request. If a statement fails, the status in the response body
+/// identifies the cause of the failure.
///
/// To check for DML statements that failed, use the following approach:
///
-/// 1. Check the status in the response message. The \[google.rpc.Code][google.rpc.Code\] enum
+/// 1. Check the status in the response message. The
+/// \[google.rpc.Code][google.rpc.Code\] enum
/// value `OK` indicates that all statements were executed successfully.
/// 2. If the status was not `OK`, check the number of result sets in the
-/// response. If the response contains `N` \[ResultSet][google.spanner.v1.ResultSet\] messages, then
-/// statement `N+1` in the request failed.
+/// response. If the response contains `N`
+/// \[ResultSet][google.spanner.v1.ResultSet\] messages, then statement `N+1` in
+/// the request failed.
///
/// Example 1:
///
/// * Request: 5 DML statements, all executed successfully.
-/// * Response: 5 \[ResultSet][google.spanner.v1.ResultSet\] messages, with the status `OK`.
+/// * Response: 5 \[ResultSet][google.spanner.v1.ResultSet\] messages, with the
+/// status `OK`.
///
/// Example 2:
///
/// * Request: 5 DML statements. The third statement has a syntax error.
-/// * Response: 2 \[ResultSet][google.spanner.v1.ResultSet\] messages, and a syntax error (`INVALID_ARGUMENT`)
-/// status. The number of \[ResultSet][google.spanner.v1.ResultSet\] messages indicates that the third
-/// statement failed, and the fourth and fifth statements were not executed.
+/// * Response: 2 \[ResultSet][google.spanner.v1.ResultSet\] messages, and a syntax
+/// error (`INVALID_ARGUMENT`)
+/// status. The number of \[ResultSet][google.spanner.v1.ResultSet\] messages
+/// indicates that the third statement failed, and the fourth and fifth
+/// statements were not executed.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExecuteBatchDmlResponse {
- /// One \[ResultSet][google.spanner.v1.ResultSet\] for each statement in the request that ran successfully,
- /// in the same order as the statements in the request. Each \[ResultSet][google.spanner.v1.ResultSet\] does
- /// not contain any rows. The \[ResultSetStats][google.spanner.v1.ResultSetStats\] in each \[ResultSet][google.spanner.v1.ResultSet\] contain
- /// the number of rows modified by the statement.
+ /// One \[ResultSet][google.spanner.v1.ResultSet\] for each statement in the
+ /// request that ran successfully, in the same order as the statements in the
+ /// request. Each \[ResultSet][google.spanner.v1.ResultSet\] does not contain any
+ /// rows. The \[ResultSetStats][google.spanner.v1.ResultSetStats\] in each
+ /// \[ResultSet][google.spanner.v1.ResultSet\] contain the number of rows
+ /// modified by the statement.
///
- /// Only the first \[ResultSet][google.spanner.v1.ResultSet\] in the response contains valid
- /// \[ResultSetMetadata][google.spanner.v1.ResultSetMetadata\].
+ /// Only the first \[ResultSet][google.spanner.v1.ResultSet\] in the response
+ /// contains valid \[ResultSetMetadata][google.spanner.v1.ResultSetMetadata\].
#[prost(message, repeated, tag = "1")]
pub result_sets: ::prost::alloc::vec::Vec,
/// If all DML statements are executed successfully, the status is `OK`.
@@ -1916,15 +2066,16 @@ pub struct PartitionQueryRequest {
/// transactions are not.
#[prost(message, optional, tag = "2")]
pub transaction: ::core::option::Option,
- /// Required. The query request to generate partitions for. The request will fail if
- /// the query is not root partitionable. The query plan of a root
- /// partitionable query has a single distributed union operator. A distributed
- /// union operator conceptually divides one or more tables into multiple
- /// splits, remotely evaluates a subquery independently on each split, and
- /// then unions all results.
+ /// Required. The query request to generate partitions for. The request will
+ /// fail if the query is not root partitionable. For a query to be root
+ /// partitionable, it needs to satisfy a few conditions. For example, the first
+ /// operator in the query execution plan must be a distributed union operator.
+ /// For more information about other conditions, see [Read data in
+ /// parallel]().
///
- /// This must not contain DML commands, such as INSERT, UPDATE, or
- /// DELETE. Use \[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql\] with a
+ /// The query request must not contain DML commands, such as INSERT, UPDATE, or
+ /// DELETE. Use
+ /// \[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql\] with a
/// PartitionedDml transaction for large, partition-friendly DML operations.
#[prost(string, tag = "3")]
pub sql: ::prost::alloc::string::String,
@@ -1944,7 +2095,8 @@ pub struct PartitionQueryRequest {
pub params: ::core::option::Option<::prost_types::Struct>,
/// It is not always possible for Cloud Spanner to infer the right SQL type
/// from a JSON value. For example, values of type `BYTES` and values
- /// of type `STRING` both appear in \[params][google.spanner.v1.PartitionQueryRequest.params\] as JSON strings.
+ /// of type `STRING` both appear in
+ /// \[params][google.spanner.v1.PartitionQueryRequest.params\] as JSON strings.
///
/// In these cases, `param_types` can be used to specify the exact
/// SQL type for some or all of the SQL query parameters. See the
@@ -1970,18 +2122,24 @@ pub struct PartitionReadRequest {
/// Required. The name of the table in the database to be read.
#[prost(string, tag = "3")]
pub table: ::prost::alloc::string::String,
- /// If non-empty, the name of an index on \[table][google.spanner.v1.PartitionReadRequest.table\]. This index is
- /// used instead of the table primary key when interpreting \[key_set][google.spanner.v1.PartitionReadRequest.key_set\]
- /// and sorting result rows. See \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] for further information.
+ /// If non-empty, the name of an index on
+ /// \[table][google.spanner.v1.PartitionReadRequest.table\]. This index is used
+ /// instead of the table primary key when interpreting
+ /// \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] and sorting
+ /// result rows. See \[key_set][google.spanner.v1.PartitionReadRequest.key_set\]
+ /// for further information.
#[prost(string, tag = "4")]
pub index: ::prost::alloc::string::String,
- /// The columns of \[table][google.spanner.v1.PartitionReadRequest.table\] to be returned for each row matching
- /// this request.
+ /// The columns of \[table][google.spanner.v1.PartitionReadRequest.table\] to be
+ /// returned for each row matching this request.
#[prost(string, repeated, tag = "5")]
pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Required. `key_set` identifies the rows to be yielded. `key_set` names the
- /// primary keys of the rows in \[table][google.spanner.v1.PartitionReadRequest.table\] to be yielded, unless \[index][google.spanner.v1.PartitionReadRequest.index\]
- /// is present. If \[index][google.spanner.v1.PartitionReadRequest.index\] is present, then \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] instead names
+ /// primary keys of the rows in
+ /// \[table][google.spanner.v1.PartitionReadRequest.table\] to be yielded, unless
+ /// \[index][google.spanner.v1.PartitionReadRequest.index\] is present. If
+ /// \[index][google.spanner.v1.PartitionReadRequest.index\] is present, then
+ /// \[key_set][google.spanner.v1.PartitionReadRequest.key_set\] instead names
/// index keys in \[index][google.spanner.v1.PartitionReadRequest.index\].
///
/// It is not an error for the `key_set` to name rows that do not
@@ -2030,24 +2188,31 @@ pub struct ReadRequest {
/// Required. The name of the table in the database to be read.
#[prost(string, tag = "3")]
pub table: ::prost::alloc::string::String,
- /// If non-empty, the name of an index on \[table][google.spanner.v1.ReadRequest.table\]. This index is
- /// used instead of the table primary key when interpreting \[key_set][google.spanner.v1.ReadRequest.key_set\]
- /// and sorting result rows. See \[key_set][google.spanner.v1.ReadRequest.key_set\] for further information.
+ /// If non-empty, the name of an index on
+ /// \[table][google.spanner.v1.ReadRequest.table\]. This index is used instead of
+ /// the table primary key when interpreting
+ /// \[key_set][google.spanner.v1.ReadRequest.key_set\] and sorting result rows.
+ /// See \[key_set][google.spanner.v1.ReadRequest.key_set\] for further
+ /// information.
#[prost(string, tag = "4")]
pub index: ::prost::alloc::string::String,
- /// Required. The columns of \[table][google.spanner.v1.ReadRequest.table\] to be returned for each row matching
- /// this request.
+ /// Required. The columns of \[table][google.spanner.v1.ReadRequest.table\] to be
+ /// returned for each row matching this request.
#[prost(string, repeated, tag = "5")]
pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Required. `key_set` identifies the rows to be yielded. `key_set` names the
- /// primary keys of the rows in \[table][google.spanner.v1.ReadRequest.table\] to be yielded, unless \[index][google.spanner.v1.ReadRequest.index\]
- /// is present. If \[index][google.spanner.v1.ReadRequest.index\] is present, then \[key_set][google.spanner.v1.ReadRequest.key_set\] instead names
- /// index keys in \[index][google.spanner.v1.ReadRequest.index\].
+ /// primary keys of the rows in \[table][google.spanner.v1.ReadRequest.table\] to
+ /// be yielded, unless \[index][google.spanner.v1.ReadRequest.index\] is present.
+ /// If \[index][google.spanner.v1.ReadRequest.index\] is present, then
+ /// \[key_set][google.spanner.v1.ReadRequest.key_set\] instead names index keys
+ /// in \[index][google.spanner.v1.ReadRequest.index\].
///
- /// If the \[partition_token][google.spanner.v1.ReadRequest.partition_token\] field is empty, rows are yielded
- /// in table primary key order (if \[index][google.spanner.v1.ReadRequest.index\] is empty) or index key order
- /// (if \[index][google.spanner.v1.ReadRequest.index\] is non-empty). If the \[partition_token][google.spanner.v1.ReadRequest.partition_token\] field is not
- /// empty, rows will be yielded in an unspecified order.
+ /// If the \[partition_token][google.spanner.v1.ReadRequest.partition_token\]
+ /// field is empty, rows are yielded in table primary key order (if
+ /// \[index][google.spanner.v1.ReadRequest.index\] is empty) or index key order
+ /// (if \[index][google.spanner.v1.ReadRequest.index\] is non-empty). If the
+ /// \[partition_token][google.spanner.v1.ReadRequest.partition_token\] field is
+ /// not empty, rows will be yielded in an unspecified order.
///
/// It is not an error for the `key_set` to name rows that do not
/// exist in the database. Read yields nothing for nonexistent rows.
@@ -2060,9 +2225,9 @@ pub struct ReadRequest {
pub limit: i64,
/// If this request is resuming a previously interrupted read,
/// `resume_token` should be copied from the last
- /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the interruption. Doing this
- /// enables the new read to resume where the last read left off. The
- /// rest of the request parameters must exactly match the request
+ /// \[PartialResultSet][google.spanner.v1.PartialResultSet\] yielded before the
+ /// interruption. Doing this enables the new read to resume where the last read
+ /// left off. The rest of the request parameters must exactly match the request
/// that yielded this token.
#[prost(bytes = "vec", tag = "9")]
pub resume_token: ::prost::alloc::vec::Vec,
@@ -2075,15 +2240,19 @@ pub struct ReadRequest {
/// Common options for this request.
#[prost(message, optional, tag = "11")]
pub request_options: ::core::option::Option,
+ /// Directed read options for this request.
+ #[prost(message, optional, tag = "14")]
+ pub directed_read_options: ::core::option::Option,
/// If this is for a partitioned read and this field is set to `true`, the
- /// request will be executed via Spanner independent compute resources.
+ /// request is executed with Spanner Data Boost independent compute resources.
///
/// If the field is set to `true` but the request does not set
- /// `partition_token`, the API will return an `INVALID_ARGUMENT` error.
+ /// `partition_token`, the API returns an `INVALID_ARGUMENT` error.
#[prost(bool, tag = "15")]
pub data_boost_enabled: bool,
}
-/// The request for \[BeginTransaction][google.spanner.v1.Spanner.BeginTransaction\].
+/// The request for
+/// \[BeginTransaction][google.spanner.v1.Spanner.BeginTransaction\].
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BeginTransactionRequest {
@@ -2114,8 +2283,8 @@ pub struct CommitRequest {
#[prost(message, repeated, tag = "4")]
pub mutations: ::prost::alloc::vec::Vec,
/// If `true`, then statistics related to the transaction will be included in
- /// the \[CommitResponse][google.spanner.v1.CommitResponse.commit_stats\]. Default value is
- /// `false`.
+ /// the \[CommitResponse][google.spanner.v1.CommitResponse.commit_stats\].
+ /// Default value is `false`.
#[prost(bool, tag = "5")]
pub return_commit_stats: bool,
/// Common options for this request.
@@ -2158,6 +2327,49 @@ pub struct RollbackRequest {
#[prost(bytes = "vec", tag = "2")]
pub transaction_id: ::prost::alloc::vec::Vec,
}
+/// The request for \[BatchWrite][google.spanner.v1.Spanner.BatchWrite\].
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchWriteRequest {
+ /// Required. The session in which the batch request is to be run.
+ #[prost(string, tag = "1")]
+ pub session: ::prost::alloc::string::String,
+ /// Common options for this request.
+ #[prost(message, optional, tag = "3")]
+ pub request_options: ::core::option::Option,
+ /// Required. The groups of mutations to be applied.
+ #[prost(message, repeated, tag = "4")]
+ pub mutation_groups: ::prost::alloc::vec::Vec,
+}
+/// Nested message and enum types in `BatchWriteRequest`.
+pub mod batch_write_request {
+ /// A group of mutations to be committed together. Related mutations should be
+ /// placed in a group. For example, two mutations inserting rows with the same
+ /// primary key prefix in both parent and child tables are related.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct MutationGroup {
+ /// Required. The mutations in this group.
+ #[prost(message, repeated, tag = "1")]
+ pub mutations: ::prost::alloc::vec::Vec,
+ }
+}
+/// The result of applying a batch of mutations.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchWriteResponse {
+ /// The mutation groups applied in this batch. The values index into the
+ /// `mutation_groups` field in the corresponding `BatchWriteRequest`.
+ #[prost(int32, repeated, tag = "1")]
+ pub indexes: ::prost::alloc::vec::Vec,
+ /// An `OK` status indicates success. Any other status indicates a failure.
+ #[prost(message, optional, tag = "2")]
+ pub status: ::core::option::Option,
+ /// The commit timestamp of the transaction that applied this batch.
+ /// Present if `status` is `OK`, absent otherwise.
+ #[prost(message, optional, tag = "3")]
+ pub commit_timestamp: ::core::option::Option<::prost_types::Timestamp>,
+}
/// Generated client implementations.
pub mod spanner_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
@@ -2347,10 +2559,12 @@ pub mod spanner_client {
///
/// Operations inside read-write transactions might return `ABORTED`. If
/// this occurs, the application should restart the transaction from
- /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
+ /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
+ /// details.
///
/// Larger result sets can be fetched in streaming fashion by calling
- /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead.
+ /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
+ /// instead.
pub async fn execute_sql(
&mut self,
request: impl tonic::IntoRequest,
@@ -2365,11 +2579,11 @@ pub mod spanner_client {
.insert(GrpcMethod::new("google.spanner.v1.Spanner", "ExecuteSql"));
self.inner.unary(req, path, codec).await
}
- /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result
- /// set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there
- /// is no limit on the size of the returned result set. However, no
- /// individual row in the result set can exceed 100 MiB, and no
- /// column value can exceed 10 MiB.
+ /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
+ /// result set as a stream. Unlike
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
+ /// the size of the returned result set. However, no individual row in the
+ /// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
pub async fn execute_streaming_sql(
&mut self,
request: impl tonic::IntoRequest,
@@ -2390,9 +2604,10 @@ pub mod spanner_client {
/// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
///
/// Statements are executed in sequential order. A request can succeed even if
- /// a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the
- /// response provides information about the statement that failed. Clients must
- /// inspect this field to determine whether an error occurred.
+ /// a statement fails. The
+ /// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
+ /// field in the response provides information about the statement that failed.
+ /// Clients must inspect this field to determine whether an error occurred.
///
/// Execution stops after the first failed statement; the remaining statements
/// are not executed.
@@ -2412,14 +2627,15 @@ pub mod spanner_client {
}
/// Reads rows from the database using key lookups and scans, as a
/// simple key/value style alternative to
- /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to
- /// return a result set larger than 10 MiB; if the read matches more
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
+ /// used to return a result set larger than 10 MiB; if the read matches more
/// data than that, the read fails with a `FAILED_PRECONDITION`
/// error.
///
/// Reads inside read-write transactions might return `ABORTED`. If
/// this occurs, the application should restart the transaction from
- /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
+ /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
+ /// details.
///
/// Larger result sets can be yielded in streaming fashion by calling
/// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
@@ -2437,9 +2653,9 @@ pub mod spanner_client {
.insert(GrpcMethod::new("google.spanner.v1.Spanner", "Read"));
self.inner.unary(req, path, codec).await
}
- /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a
- /// stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the
- /// size of the returned result set. However, no individual row in
+ /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
+ /// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
+ /// limit on the size of the returned result set. However, no individual row in
/// the result set can exceed 100 MiB, and no column value can exceed
/// 10 MiB.
pub async fn streaming_read(
@@ -2458,7 +2674,8 @@ pub mod spanner_client {
self.inner.server_streaming(req, path, codec).await
}
/// Begins a new transaction. This step can often be skipped:
- /// [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
+ /// [Read][google.spanner.v1.Spanner.Read],
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
/// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
/// side-effect.
pub async fn begin_transaction(
@@ -2505,8 +2722,9 @@ pub mod spanner_client {
}
/// Rolls back a transaction, releasing any locks it holds. It is a good
/// idea to call this for any transaction that includes one or more
- /// [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and
- /// ultimately decides not to commit.
+ /// [Read][google.spanner.v1.Spanner.Read] or
+ /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
+ /// decides not to commit.
///
/// `Rollback` returns `OK` if it successfully aborts the transaction, the
/// transaction was already aborted, or the transaction is not
@@ -2527,10 +2745,11 @@ pub mod spanner_client {
}
/// Creates a set of partition tokens that can be used to execute a query
/// operation in parallel. Each of the returned partition tokens can be used
- /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset
- /// of the query result to read. The same session and read-only transaction
- /// must be used by the PartitionQueryRequest used to create the
- /// partition tokens and the ExecuteSqlRequests that use the partition tokens.
+ /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
+ /// specify a subset of the query result to read. The same session and
+ /// read-only transaction must be used by the PartitionQueryRequest used to
+ /// create the partition tokens and the ExecuteSqlRequests that use the
+ /// partition tokens.
///
/// Partition tokens become invalid when the session used to create them
/// is deleted, is idle for too long, begins a new transaction, or becomes too
@@ -2552,12 +2771,13 @@ pub mod spanner_client {
}
/// Creates a set of partition tokens that can be used to execute a read
/// operation in parallel. Each of the returned partition tokens can be used
- /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read
- /// result to read. The same session and read-only transaction must be used by
- /// the PartitionReadRequest used to create the partition tokens and the
- /// ReadRequests that use the partition tokens. There are no ordering
- /// guarantees on rows returned among the returned partition tokens, or even
- /// within each individual StreamingRead call issued with a partition_token.
+ /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
+ /// subset of the read result to read. The same session and read-only
+ /// transaction must be used by the PartitionReadRequest used to create the
+ /// partition tokens and the ReadRequests that use the partition tokens. There
+ /// are no ordering guarantees on rows returned among the returned partition
+ /// tokens, or even within each individual StreamingRead call issued with a
+ /// partition_token.
///
/// Partition tokens become invalid when the session used to create them
/// is deleted, is idle for too long, begins a new transaction, or becomes too
@@ -2577,5 +2797,35 @@ pub mod spanner_client {
.insert(GrpcMethod::new("google.spanner.v1.Spanner", "PartitionRead"));
self.inner.unary(req, path, codec).await
}
+ /// Batches the supplied mutation groups in a collection of efficient
+ /// transactions. All mutations in a group are committed atomically. However,
+ /// mutations across groups can be committed non-atomically in an unspecified
+ /// order and thus, they must be independent of each other. Partial failure is
+ /// possible, i.e., some groups may have been committed successfully, while
+ /// some may have failed. The results of individual batches are streamed into
+ /// the response as the batches are applied.
+ ///
+ /// BatchWrite requests are not replay protected, meaning that each mutation
+ /// group may be applied more than once. Replays of non-idempotent mutations
+ /// may have undesirable effects. For example, replays of an insert mutation
+ /// may produce an already exists error or if you use generated or commit
+ /// timestamp-based keys, it may result in additional rows being added to the
+ /// mutation's table. We recommend structuring your mutation groups to be
+ /// idempotent to avoid this issue.
+ pub async fn batch_write(
+ &mut self,
+ request: impl tonic::IntoRequest,
+ ) -> std::result::Result>, tonic::Status>
+ {
+ self.inner.ready().await.map_err(|e| {
+ tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static("/google.spanner.v1.Spanner/BatchWrite");
+ let mut req = request.into_request();
+ req.extensions_mut()
+ .insert(GrpcMethod::new("google.spanner.v1.Spanner", "BatchWrite"));
+ self.inner.server_streaming(req, path, codec).await
+ }
}
}
diff --git a/googleapis/src/google.storage.v2.rs b/googleapis/src/google.storage.v2.rs
index a1df4ea3..251776fb 100644
--- a/googleapis/src/google.storage.v2.rs
+++ b/googleapis/src/google.storage.v2.rs
@@ -43,7 +43,7 @@ pub struct CreateBucketRequest {
pub parent: ::prost::alloc::string::String,
/// Properties of the new bucket being inserted.
/// The name of the bucket is specified in the `bucket_id` field. Populating
- /// `bucket.name` field will be ignored.
+ /// `bucket.name` field will result in an error.
/// The project of the bucket must be specified in the `bucket.project` field.
/// This field must be in `projects/{projectIdentifier}` format,
/// {projectIdentifier} can be the project ID or project number. The `parent`
@@ -324,6 +324,48 @@ pub struct DeleteObjectRequest {
#[prost(message, optional, tag = "10")]
pub common_object_request_params: ::core::option::Option,
}
+/// Message for restoring an object.
+/// `bucket`, `object`, and `generation` **must** be set.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RestoreObjectRequest {
+ /// Required. Name of the bucket in which the object resides.
+ #[prost(string, tag = "1")]
+ pub bucket: ::prost::alloc::string::String,
+ /// Required. The name of the object to restore.
+ #[prost(string, tag = "2")]
+ pub object: ::prost::alloc::string::String,
+ /// Required. The specific revision of the object to restore.
+ #[prost(int64, tag = "3")]
+ pub generation: i64,
+ /// Makes the operation conditional on whether the object's current generation
+ /// matches the given value. Setting to 0 makes the operation succeed only if
+ /// there are no live versions of the object.
+ #[prost(int64, optional, tag = "4")]
+ pub if_generation_match: ::core::option::Option,
+ /// Makes the operation conditional on whether the object's live generation
+ /// does not match the given value. If no live object exists, the precondition
+ /// fails. Setting to 0 makes the operation succeed only if there is a live
+ /// version of the object.
+ #[prost(int64, optional, tag = "5")]
+ pub if_generation_not_match: ::core::option::Option,
+ /// Makes the operation conditional on whether the object's current
+ /// metageneration matches the given value.
+ #[prost(int64, optional, tag = "6")]
+ pub if_metageneration_match: ::core::option::Option,
+ /// Makes the operation conditional on whether the object's current
+ /// metageneration does not match the given value.
+ #[prost(int64, optional, tag = "7")]
+ pub if_metageneration_not_match: ::core::option::Option,
+ /// If false or unset, the bucket's default object ACL will be used.
+ /// If true, copy the source object's access controls.
+ /// Return an error if bucket has UBLA enabled.
+ #[prost(bool, optional, tag = "9")]
+ pub copy_source_acl: ::core::option::Option,
+ /// A set of parameters common to Storage API requests concerning an object.
+ #[prost(message, optional, tag = "8")]
+ pub common_object_request_params: ::core::option::Option,
+}
/// Message for canceling an in-progress resumable upload.
/// `upload_id` **must** be set.
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -417,6 +459,9 @@ pub struct GetObjectRequest {
/// latest version, the default).
#[prost(int64, tag = "3")]
pub generation: i64,
+ /// If true, return the soft-deleted version of this object.
+ #[prost(bool, optional, tag = "11")]
+ pub soft_deleted: ::core::option::Option,
/// Makes the operation conditional on whether the object's current generation
/// matches the given value. Setting to 0 makes the operation succeed only if
/// there are no live versions of the object.
@@ -604,6 +649,113 @@ pub mod write_object_response {
Resource(super::Object),
}
}
+/// Request message for BidiWriteObject.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BidiWriteObjectRequest {
+ /// Required. The offset from the beginning of the object at which the data
+ /// should be written.
+ ///
+ /// In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ /// indicates the initial offset for the `Write()` call. The value **must** be
+ /// equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ /// return (0 if this is the first write to the object).
+ ///
+ /// On subsequent calls, this value **must** be no larger than the sum of the
+ /// first `write_offset` and the sizes of all `data` chunks sent previously on
+ /// this stream.
+ ///
+ /// An invalid value will cause an error.
+ #[prost(int64, tag = "3")]
+ pub write_offset: i64,
+ /// Checksums for the complete object. If the checksums computed by the service
+ /// don't match the specified checksums the call will fail. May only be
+ /// provided in the first or last request (either with first_message, or
+ /// finish_write set).
+ #[prost(message, optional, tag = "6")]
+ pub object_checksums: ::core::option::Option,
+ /// For each BidiWriteObjectRequest where state_lookup is `true` or the client
+ /// closes the stream, the service will send a BidiWriteObjectResponse
+ /// containing the current persisted size. The persisted size sent in responses
+ /// covers all the bytes the server has persisted thus far and can be used to
+ /// decide what data is safe for the client to drop. Note that the object's
+ /// current size reported by the BidiWriteObjectResponse may lag behind the
+ /// number of bytes written by the client.
+ #[prost(bool, tag = "7")]
+ pub state_lookup: bool,
+ /// Persists data written on the stream, up to and including the current
+ /// message, to permanent storage. This option should be used sparingly as it
+ /// may reduce performance. Ongoing writes will periodically be persisted on
+ /// the server even when `flush` is not set.
+ #[prost(bool, tag = "8")]
+ pub flush: bool,
+ /// If `true`, this indicates that the write is complete. Sending any
+ /// `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ /// will cause an error.
+ /// For a non-resumable write (where the upload_id was not set in the first
+ /// message), it is an error not to set this field in the final message of the
+ /// stream.
+ #[prost(bool, tag = "9")]
+ pub finish_write: bool,
+ /// A set of parameters common to Storage API requests concerning an object.
+ #[prost(message, optional, tag = "10")]
+ pub common_object_request_params: ::core::option::Option,
+ /// The first message of each stream should set one of the following.
+ #[prost(oneof = "bidi_write_object_request::FirstMessage", tags = "1, 2")]
+ pub first_message: ::core::option::Option,
+ /// A portion of the data for the object.
+ #[prost(oneof = "bidi_write_object_request::Data", tags = "4")]
+ pub data: ::core::option::Option,
+}
+/// Nested message and enum types in `BidiWriteObjectRequest`.
+pub mod bidi_write_object_request {
+ /// The first message of each stream should set one of the following.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum FirstMessage {
+ /// For resumable uploads. This should be the `upload_id` returned from a
+ /// call to `StartResumableWriteResponse`.
+ #[prost(string, tag = "1")]
+ UploadId(::prost::alloc::string::String),
+ /// For non-resumable uploads. Describes the overall upload, including the
+ /// destination bucket and object name, preconditions, etc.
+ #[prost(message, tag = "2")]
+ WriteObjectSpec(super::WriteObjectSpec),
+ }
+ /// A portion of the data for the object.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Data {
+ /// The data to insert. If a crc32c checksum is provided that doesn't match
+ /// the checksum computed by the service, the request will fail.
+ #[prost(message, tag = "4")]
+ ChecksummedData(super::ChecksummedData),
+ }
+}
+/// Response message for BidiWriteObject.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BidiWriteObjectResponse {
+ /// The response will set one of the following.
+ #[prost(oneof = "bidi_write_object_response::WriteStatus", tags = "1, 2")]
+ pub write_status: ::core::option::Option,
+}
+/// Nested message and enum types in `BidiWriteObjectResponse`.
+pub mod bidi_write_object_response {
+ /// The response will set one of the following.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum WriteStatus {
+ /// The total number of bytes that have been processed for the given object
+ /// from all `WriteObject` calls. Only set if the upload has not finalized.
+ #[prost(int64, tag = "1")]
+ PersistedSize(i64),
+ /// A resource containing the metadata for the uploaded object. Only set if
+ /// the upload has finalized.
+ #[prost(message, tag = "2")]
+ Resource(super::Object),
+ }
+}
/// Request message for ListObjects.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -661,6 +813,16 @@ pub struct ListObjectsRequest {
/// lexicographic_end (exclusive).
#[prost(string, tag = "11")]
pub lexicographic_end: ::prost::alloc::string::String,
+ /// Optional. If true, only list all soft-deleted versions of the object.
+ /// Soft delete policy is required to set this option.
+ #[prost(bool, tag = "12")]
+ pub soft_deleted: bool,
+ /// Optional. Filter results to objects and prefixes that match this glob
+ /// pattern. See [List Objects Using
+ /// Glob]()
+ /// for the full syntax.
+ #[prost(string, tag = "14")]
+ pub match_glob: ::prost::alloc::string::String,
}
/// Request object for `QueryWriteStatus`.
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -1190,8 +1352,6 @@ pub struct Bucket {
#[prost(string, tag = "3")]
pub project: ::prost::alloc::string::String,
/// Output only. The metadata generation of this bucket.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(int64, tag = "4")]
pub metageneration: i64,
/// Immutable. The location of the bucket. Object data for objects in the
@@ -1219,7 +1379,7 @@ pub struct Bucket {
/// replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region
/// buckets only. If rpo is not specified when the bucket is created, it
/// defaults to "DEFAULT". For more information, see
- ///
+ ///
#[prost(string, tag = "27")]
pub rpo: ::prost::alloc::string::String,
/// Access controls on the bucket.
@@ -1238,8 +1398,6 @@ pub struct Bucket {
#[prost(message, optional, tag = "10")]
pub lifecycle: ::core::option::Option,
/// Output only. The creation time of the bucket.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "11")]
pub create_time: ::core::option::Option<::prost_types::Timestamp>,
/// The bucket's \[ Resource Sharing]
@@ -1247,8 +1405,6 @@ pub struct Bucket {
#[prost(message, repeated, tag = "12")]
pub cors: ::prost::alloc::vec::Vec,
/// Output only. The modification time of the bucket.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "13")]
pub update_time: ::core::option::Option<::prost_types::Timestamp>,
/// The default value for event-based hold on newly created objects in this
@@ -1317,6 +1473,10 @@ pub struct Bucket {
/// Autoclass feature will be disabled and have no effect on the bucket.
#[prost(message, optional, tag = "28")]
pub autoclass: ::core::option::Option,
+ /// Optional. The bucket's soft delete policy. The soft delete policy prevents
+ /// soft-deleted objects from being permanently deleted.
+ #[prost(message, optional, tag = "31")]
+ pub soft_delete_policy: ::core::option::Option,
}
/// Nested message and enum types in `Bucket`.
pub mod bucket {
@@ -1527,6 +1687,19 @@ pub mod bucket {
#[prost(message, optional, tag = "4")]
pub retention_duration: ::core::option::Option<::prost_types::Duration>,
}
+ /// Soft delete policy properties of a bucket.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct SoftDeletePolicy {
+ /// The period of time that soft-deleted objects in the bucket must be
+ /// retained and cannot be permanently deleted. The duration must be greater
+ /// than or equal to 7 days and less than 1 year.
+ #[prost(message, optional, tag = "1")]
+ pub retention_duration: ::core::option::Option<::prost_types::Duration>,
+ /// Time from which the policy was effective. This is service-provided.
+ #[prost(message, optional, tag = "2")]
+ pub effective_time: ::core::option::Option<::prost_types::Timestamp>,
+ }
/// Properties of a bucket related to versioning.
/// For more on Cloud Storage versioning, see
///
@@ -1580,6 +1753,15 @@ pub mod bucket {
/// to the bucket creation time.
#[prost(message, optional, tag = "2")]
pub toggle_time: ::core::option::Option<::prost_types::Timestamp>,
+ /// An object in an Autoclass bucket will eventually cool down to the
+ /// terminal storage class if there is no access to the object.
+ /// The only valid values are NEARLINE and ARCHIVE.
+ #[prost(string, optional, tag = "3")]
+ pub terminal_storage_class: ::core::option::Option<::prost::alloc::string::String>,
+ /// Output only. Latest instant at which the autoclass terminal storage class
+ /// was updated.
+ #[prost(message, optional, tag = "4")]
+ pub terminal_storage_class_update_time: ::core::option::Option<::prost_types::Timestamp>,
}
}
/// An access-control entry.
@@ -1640,7 +1822,7 @@ pub struct BucketAccessControl {
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChecksummedData {
- /// The data.
+ /// Optional. The data.
#[prost(bytes = "vec", tag = "1")]
pub content: ::prost::alloc::vec::Vec,
/// If set, the CRC32C digest of the content field.
@@ -1773,15 +1955,13 @@ pub struct Object {
#[prost(string, tag = "27")]
pub etag: ::prost::alloc::string::String,
/// Immutable. The content generation of this object. Used for object
- /// versioning. Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// versioning.
#[prost(int64, tag = "3")]
pub generation: i64,
/// Output only. The version of the metadata for this generation of this
/// object. Used for preconditions and for detecting changes in metadata. A
/// metageneration number is only meaningful in the context of a particular
- /// generation of a particular object. Attempting to set or update this field
- /// will result in a \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// generation of a particular object.
#[prost(int64, tag = "4")]
pub metageneration: i64,
/// Storage class of the object.
@@ -1789,8 +1969,6 @@ pub struct Object {
pub storage_class: ::prost::alloc::string::String,
/// Output only. Content-Length of the object data in bytes, matching
/// \[ 7230 ยง3.3.2].
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(int64, tag = "6")]
pub size: i64,
/// Content-Encoding of the object data, matching
@@ -1817,8 +1995,7 @@ pub struct Object {
#[prost(string, tag = "11")]
pub content_language: ::prost::alloc::string::String,
/// Output only. If this object is noncurrent, this is the time when the object
- /// became noncurrent. Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// became noncurrent.
#[prost(message, optional, tag = "12")]
pub delete_time: ::core::option::Option<::prost_types::Timestamp>,
/// Content-Type of the object data, matching
@@ -1828,14 +2005,10 @@ pub struct Object {
#[prost(string, tag = "13")]
pub content_type: ::prost::alloc::string::String,
/// Output only. The creation time of the object.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "14")]
pub create_time: ::core::option::Option<::prost_types::Timestamp>,
/// Output only. Number of underlying components that make up this object.
- /// Components are accumulated by compose operations. Attempting to set or
- /// update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// Components are accumulated by compose operations.
#[prost(int32, tag = "15")]
pub component_count: i32,
/// Output only. Hashes for the data part of this object. This field is used
@@ -1848,8 +2021,6 @@ pub struct Object {
/// such as modifying custom metadata, as well as changes made by Cloud Storage
/// on behalf of a requester, such as changing the storage class based on an
/// Object Lifecycle Configuration.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "17")]
pub update_time: ::core::option::Option<::prost_types::Timestamp>,
/// Cloud KMS Key used to encrypt this object, if the object is encrypted by
@@ -1858,8 +2029,6 @@ pub struct Object {
pub kms_key: ::prost::alloc::string::String,
/// Output only. The time at which the object's storage class was last changed.
/// When the object is initially created, it will be set to time_created.
- /// Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
#[prost(message, optional, tag = "19")]
pub update_storage_class_time: ::core::option::Option<::prost_types::Timestamp>,
/// Whether an object is under temporary hold. While this flag is set to true,
@@ -1895,8 +2064,7 @@ pub struct Object {
#[prost(bool, optional, tag = "23")]
pub event_based_hold: ::core::option::Option,
/// Output only. The owner of the object. This will always be the uploader of
- /// the object. Attempting to set or update this field will result in a
- /// \[FieldViolation][google.rpc.BadRequest.FieldViolation\].
+ /// the object.
#[prost(message, optional, tag = "24")]
pub owner: ::core::option::Option,
/// Metadata of Customer-Supplied Encryption Key, if the object is encrypted by
@@ -2198,8 +2366,8 @@ pub mod storage_client {
}
/// Gets the IAM policy for a specified bucket or object.
/// The `resource` field in the request should be
- /// projects/_/buckets/ for a bucket or
- /// projects/_/buckets//objects/ for an object.
+ /// `projects/_/buckets/{bucket}` for a bucket or
+ /// `projects/_/buckets/{bucket}/objects/{object}` for an object.
pub async fn get_iam_policy(
&mut self,
request: impl tonic::IntoRequest,
@@ -2216,8 +2384,8 @@ pub mod storage_client {
}
/// Updates an IAM policy for the specified bucket or object.
/// The `resource` field in the request should be
- /// projects/_/buckets/ for a bucket or
- /// projects/_/buckets//objects/ for an object.
+ /// `projects/_/buckets/{bucket}` for a bucket or
+ /// `projects/_/buckets/{bucket}/objects/{object}` for an object.
pub async fn set_iam_policy(
&mut self,
request: impl tonic::IntoRequest,
@@ -2235,8 +2403,8 @@ pub mod storage_client {
/// Tests a set of permissions on the given bucket or object to see which, if
/// any, are held by the caller.
/// The `resource` field in the request should be
- /// projects/_/buckets/ for a bucket or
- /// projects/_/buckets//objects/ for an object.
+ /// `projects/_/buckets/{bucket}` for a bucket or
+ /// `projects/_/buckets/{bucket}/objects/{object}` for an object.
pub async fn test_iam_permissions(
&mut self,
request: impl tonic::IntoRequest,
@@ -2366,6 +2534,21 @@ pub mod storage_client {
.insert(GrpcMethod::new("google.storage.v2.Storage", "DeleteObject"));
self.inner.unary(req, path, codec).await
}
+ /// Restores a soft-deleted object.
+ pub async fn restore_object(
+ &mut self,
+ request: impl tonic::IntoRequest,
+ ) -> std::result::Result, tonic::Status> {
+ self.inner.ready().await.map_err(|e| {
+ tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/RestoreObject");
+ let mut req = request.into_request();
+ req.extensions_mut()
+ .insert(GrpcMethod::new("google.storage.v2.Storage", "RestoreObject"));
+ self.inner.unary(req, path, codec).await
+ }
/// Cancels an in-progress resumable upload.
///
/// Any attempts to write to the resumable upload after cancelling the upload
@@ -2491,6 +2674,9 @@ pub mod storage_client {
/// status, with a WriteObjectResponse containing the finalized object's
/// metadata.
///
+ /// Alternatively, the BidiWriteObject operation may be used to write an
+ /// object with controls over flushing and the ability to fetch the ability to
+ /// determine the current persisted size.
pub async fn write_object(
&mut self,
request: impl tonic::IntoStreamingRequest,
@@ -2505,6 +2691,36 @@ pub mod storage_client {
.insert(GrpcMethod::new("google.storage.v2.Storage", "WriteObject"));
self.inner.client_streaming(req, path, codec).await
}
+ /// Stores a new object and metadata.
+ ///
+ /// This is similar to the WriteObject call with the added support for
+ /// manual flushing of persisted state, and the ability to determine current
+ /// persisted size without closing the stream.
+ ///
+ /// The client may specify one or both of the `state_lookup` and `flush` fields
+ /// in each BidiWriteObjectRequest. If `flush` is specified, the data written
+ /// so far will be persisted to storage. If `state_lookup` is specified, the
+ /// service will respond with a BidiWriteObjectResponse that contains the
+ /// persisted size. If both `flush` and `state_lookup` are specified, the flush
+ /// will always occur before a `state_lookup`, so that both may be set in the
+ /// same request and the returned state will be the state of the object
+ /// post-flush. When the stream is closed, a BidiWriteObjectResponse will
+ /// always be sent to the client, regardless of the value of `state_lookup`.
+ pub async fn bidi_write_object(
+ &mut self,
+ request: impl tonic::IntoStreamingRequest,
+ ) -> std::result::Result>, tonic::Status>
+ {
+ self.inner.ready().await.map_err(|e| {
+ tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/BidiWriteObject");
+ let mut req = request.into_streaming_request();
+ req.extensions_mut()
+ .insert(GrpcMethod::new("google.storage.v2.Storage", "BidiWriteObject"));
+ self.inner.streaming(req, path, codec).await
+ }
/// Retrieves a list of objects matching the criteria.
pub async fn list_objects(
&mut self,