diff --git a/BigQueryStorage/metadata/V1/Annotations.php b/BigQueryStorage/metadata/V1/Annotations.php
new file mode 100644
index 000000000000..e94a47965444
--- /dev/null
+++ b/BigQueryStorage/metadata/V1/Annotations.php
@@ -0,0 +1,27 @@
+$pool->internalAddGeneratedFile(
+ '
+�
+2google/cloud/bigquery/storage/v1/annotations.proto google.cloud.bigquery.storage.v1 google/protobuf/descriptor.protoB�
+$com.google.cloud.bigquery.storage.v1BAnnotationsProtoPZ>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb� Google.Cloud.BigQuery.Storage.V1� Google\\Cloud\\BigQuery\\Storage\\V1bproto3'
+ , true);
+
+ static::$is_initialized = true;
+ }
+}
+
diff --git a/BigQueryStorage/metadata/V1/Arrow.php b/BigQueryStorage/metadata/V1/Arrow.php
index b6fbb5ddf3ff..c40bcd2e2975 100644
Binary files a/BigQueryStorage/metadata/V1/Arrow.php and b/BigQueryStorage/metadata/V1/Arrow.php differ
diff --git a/BigQueryStorage/metadata/V1/Avro.php b/BigQueryStorage/metadata/V1/Avro.php
index 26b99a1da126..1c77c1dfbc3d 100644
--- a/BigQueryStorage/metadata/V1/Avro.php
+++ b/BigQueryStorage/metadata/V1/Avro.php
@@ -16,15 +16,17 @@ public static function initOnce() {
}
$pool->internalAddGeneratedFile(
'
-�
+�
+google/cloud/bigquery/storage/v1/avro.proto google.cloud.bigquery.storage.v1"
AvroSchema
-schema ( "=
+schema ( "A
AvroRows
-serialized_binary_rows (
- row_count (B�
-$com.google.cloud.bigquery.storage.v1B AvroProtoPZGgoogle.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage� Google.Cloud.BigQuery.Storage.V1� Google\\Cloud\\BigQuery\\Storage\\V1bproto3'
+serialized_binary_rows (
+ row_count (B"A
+AvroSerializationOptions%
+enable_display_name_attribute (B�
+$com.google.cloud.bigquery.storage.v1B AvroProtoPZ>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb� Google.Cloud.BigQuery.Storage.V1� Google\\Cloud\\BigQuery\\Storage\\V1bproto3'
, true);
static::$is_initialized = true;
diff --git a/BigQueryStorage/metadata/V1/Protobuf.php b/BigQueryStorage/metadata/V1/Protobuf.php
new file mode 100644
index 000000000000..b136e094cddd
--- /dev/null
+++ b/BigQueryStorage/metadata/V1/Protobuf.php
@@ -0,0 +1,31 @@
+$pool->internalAddGeneratedFile(
+ '
+�
+/google/cloud/bigquery/storage/v1/protobuf.proto google.cloud.bigquery.storage.v1 google/protobuf/descriptor.proto"I
+ProtoSchema:
+proto_descriptor (2 .google.protobuf.DescriptorProto"$
+ ProtoRows
+serialized_rows (B�
+$com.google.cloud.bigquery.storage.v1B
+ProtoBufProtoPZ>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb� Google.Cloud.BigQuery.Storage.V1� Google\\Cloud\\BigQuery\\Storage\\V1bproto3'
+ , true);
+
+ static::$is_initialized = true;
+ }
+}
+
diff --git a/BigQueryStorage/metadata/V1/Storage.php b/BigQueryStorage/metadata/V1/Storage.php
index bbc921e73ca8..3f516a0732ee 100644
Binary files a/BigQueryStorage/metadata/V1/Storage.php and b/BigQueryStorage/metadata/V1/Storage.php differ
diff --git a/BigQueryStorage/metadata/V1/Stream.php b/BigQueryStorage/metadata/V1/Stream.php
index 649d3837cdcd..b1c4601a3cc0 100644
Binary files a/BigQueryStorage/metadata/V1/Stream.php and b/BigQueryStorage/metadata/V1/Stream.php differ
diff --git a/BigQueryStorage/metadata/V1/Table.php b/BigQueryStorage/metadata/V1/Table.php
new file mode 100644
index 000000000000..16d9a1f8eef3
Binary files /dev/null and b/BigQueryStorage/metadata/V1/Table.php differ
diff --git a/BigQueryStorage/samples/V1/BigQueryReadClient/create_read_session.php b/BigQueryStorage/samples/V1/BigQueryReadClient/create_read_session.php
new file mode 100644
index 000000000000..c3c5f545ce20
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryReadClient/create_read_session.php
@@ -0,0 +1,88 @@
+ $response = $bigQueryReadClient->createReadSession($formattedParent, $readSession);
+ printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString());
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedParent = BigQueryReadClient::projectName('[PROJECT]');
+
+ create_read_session_sample($formattedParent);
+}
+// [END bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_sync]
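Note: the generated sample docblocks above reference the apiEndpoint client configuration option. A minimal sketch of passing it when constructing the client; the hostname shown is a placeholder, not taken from this diff:

    <?php
    use Google\Cloud\BigQuery\Storage\V1\BigQueryReadClient;

    // Placeholder endpoint; substitute the endpoint appropriate for your project.
    $bigQueryReadClient = new BigQueryReadClient([
        'apiEndpoint' => 'example-endpoint.googleapis.com',
    ]);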
diff --git a/BigQueryStorage/samples/V1/BigQueryReadClient/read_rows.php b/BigQueryStorage/samples/V1/BigQueryReadClient/read_rows.php
new file mode 100644
index 000000000000..ca823b98185c
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryReadClient/read_rows.php
@@ -0,0 +1,82 @@
+ $stream = $bigQueryReadClient->readRows($formattedReadStream);
+
+ /** @var ReadRowsResponse $element */
+ foreach ($stream->readAll() as $element) {
+ printf('Element data: %s' . PHP_EOL, $element->serializeToJsonString());
+ }
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedReadStream = BigQueryReadClient::readStreamName(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[SESSION]',
+ '[STREAM]'
+ );
+
+ read_rows_sample($formattedReadStream);
+}
+// [END bigquerystorage_v1_generated_BigQueryRead_ReadRows_sync]
diff --git a/BigQueryStorage/samples/V1/BigQueryReadClient/split_read_stream.php b/BigQueryStorage/samples/V1/BigQueryReadClient/split_read_stream.php
new file mode 100644
index 000000000000..045a607238be
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryReadClient/split_read_stream.php
@@ -0,0 +1,82 @@
+ $response = $bigQueryReadClient->splitReadStream($formattedName);
+ printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString());
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedName = BigQueryReadClient::readStreamName(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[SESSION]',
+ '[STREAM]'
+ );
+
+ split_read_stream_sample($formattedName);
+}
+// [END bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_sync]
diff --git a/BigQueryStorage/samples/V1/BigQueryWriteClient/append_rows.php b/BigQueryStorage/samples/V1/BigQueryWriteClient/append_rows.php
new file mode 100644
index 000000000000..4012e51f70f9
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryWriteClient/append_rows.php
@@ -0,0 +1,123 @@
+ $request = (new AppendRowsRequest())->setWriteStream($formattedWriteStream);
+
+ // Call the API and handle any network failures.
+ try {
+ /** @var BidiStream $stream */
+ $stream = $bigQueryWriteClient->appendRows();
+ $stream->writeAll([$request,]);
+
+ /** @var AppendRowsResponse $element */
+ foreach ($stream->closeWriteAndReadAll() as $element) {
+ printf('Element data: %s' . PHP_EOL, $element->serializeToJsonString());
+ }
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedWriteStream = BigQueryWriteClient::writeStreamName(
+ '[PROJECT]',
+ '[DATASET]',
+ '[TABLE]',
+ '[STREAM]'
+ );
+
+ append_rows_sample($formattedWriteStream);
+}
+// [END bigquerystorage_v1_generated_BigQueryWrite_AppendRows_sync]
diff --git a/BigQueryStorage/samples/V1/BigQueryWriteClient/batch_commit_write_streams.php b/BigQueryStorage/samples/V1/BigQueryWriteClient/batch_commit_write_streams.php
new file mode 100644
index 000000000000..42b5881f40b1
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryWriteClient/batch_commit_write_streams.php
@@ -0,0 +1,79 @@
+ $response = $bigQueryWriteClient->batchCommitWriteStreams($formattedParent, $writeStreams);
+ printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString());
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedParent = BigQueryWriteClient::tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+ $writeStreamsElement = '[WRITE_STREAMS]';
+
+ batch_commit_write_streams_sample($formattedParent, $writeStreamsElement);
+}
+// [END bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_sync]
diff --git a/BigQueryStorage/samples/V1/BigQueryWriteClient/create_write_stream.php b/BigQueryStorage/samples/V1/BigQueryWriteClient/create_write_stream.php
new file mode 100644
index 000000000000..e3593ddc45bb
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryWriteClient/create_write_stream.php
@@ -0,0 +1,75 @@
+ $response = $bigQueryWriteClient->createWriteStream($formattedParent, $writeStream);
+ printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString());
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedParent = BigQueryWriteClient::tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+
+ create_write_stream_sample($formattedParent);
+}
+// [END bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_sync]
diff --git a/BigQueryStorage/samples/V1/BigQueryWriteClient/finalize_write_stream.php b/BigQueryStorage/samples/V1/BigQueryWriteClient/finalize_write_stream.php
new file mode 100644
index 000000000000..8dd91d7f88ee
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryWriteClient/finalize_write_stream.php
@@ -0,0 +1,73 @@
+ $response = $bigQueryWriteClient->finalizeWriteStream($formattedName);
+ printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString());
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedName = BigQueryWriteClient::writeStreamName(
+ '[PROJECT]',
+ '[DATASET]',
+ '[TABLE]',
+ '[STREAM]'
+ );
+
+ finalize_write_stream_sample($formattedName);
+}
+// [END bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_sync]
diff --git a/BigQueryStorage/samples/V1/BigQueryWriteClient/flush_rows.php b/BigQueryStorage/samples/V1/BigQueryWriteClient/flush_rows.php
new file mode 100644
index 000000000000..8b6636a6d5c0
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryWriteClient/flush_rows.php
@@ -0,0 +1,78 @@
+ $response = $bigQueryWriteClient->flushRows($formattedWriteStream);
+ printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString());
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedWriteStream = BigQueryWriteClient::writeStreamName(
+ '[PROJECT]',
+ '[DATASET]',
+ '[TABLE]',
+ '[STREAM]'
+ );
+
+ flush_rows_sample($formattedWriteStream);
+}
+// [END bigquerystorage_v1_generated_BigQueryWrite_FlushRows_sync]
diff --git a/BigQueryStorage/samples/V1/BigQueryWriteClient/get_write_stream.php b/BigQueryStorage/samples/V1/BigQueryWriteClient/get_write_stream.php
new file mode 100644
index 000000000000..b422edbcb55e
--- /dev/null
+++ b/BigQueryStorage/samples/V1/BigQueryWriteClient/get_write_stream.php
@@ -0,0 +1,72 @@
+ $response = $bigQueryWriteClient->getWriteStream($formattedName);
+ printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString());
+ } catch (ApiException $ex) {
+ printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
+ }
+}
+
+/**
+ * Helper to execute the sample.
+ *
+ * This sample has been automatically generated and should be regarded as a code
+ * template only. It will require modifications to work:
+ * - It may require correct/in-range values for request initialization.
+ * - It may require specifying regional endpoints when creating the service client,
+ * please see the apiEndpoint client configuration option for more details.
+ */
+function callSample(): void
+{
+ $formattedName = BigQueryWriteClient::writeStreamName(
+ '[PROJECT]',
+ '[DATASET]',
+ '[TABLE]',
+ '[STREAM]'
+ );
+
+ get_write_stream_sample($formattedName);
+}
+// [END bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_sync]
diff --git a/BigQueryStorage/src/V1/AppendRowsRequest.php b/BigQueryStorage/src/V1/AppendRowsRequest.php
new file mode 100644
index 000000000000..5ad019e69f6e
--- /dev/null
+++ b/BigQueryStorage/src/V1/AppendRowsRequest.php
@@ -0,0 +1,355 @@
+ * Generated from protobuf message google.cloud.bigquery.storage.v1.AppendRowsRequest
+ */
+class AppendRowsRequest extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Required. The write_stream identifies the target of the append operation,
+ * and only needs to be specified as part of the first request on the gRPC
+ * connection. If provided for subsequent requests, it must match the value of
+ * the first request.
+ * For explicitly created write streams, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+ * For the special default stream, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+ *
+ * Generated from protobuf field string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ */
+ private $write_stream = '';
+ /**
+ * If present, the write is only performed if the next append offset is same
+ * as the provided value. If not present, the write is performed at the
+ * current end of stream. Specifying a value for this field is not allowed
+ * when calling AppendRows for the '_default' stream.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ */
+ private $offset = null;
+ /**
+ * Id set by client to annotate its identity. Only initial request setting is
+ * respected.
+ *
+ * Generated from protobuf field string trace_id = 6;
+ */
+ private $trace_id = '';
+ /**
+ * A map to indicate how to interpret missing value for some fields. Missing
+ * values are fields present in user schema but missing in rows. The key is
+ * the field name. The value is the interpretation of missing values for the
+ * field.
+ * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+ * missing values in field foo are interpreted as NULL, all missing values in
+ * field bar are interpreted as the default value of field bar in table
+ * schema.
+ * If a field is not in this map and has missing values, the missing values
+ * in this field are interpreted as NULL.
+ * This field only applies to the current request, it won't affect other
+ * requests on the connection.
+ * Currently, field name can only be top-level column name, can't be a struct
+ * field path like 'foo.bar'.
+ *
+ * Generated from protobuf field map missing_value_interpretations = 7;
+ */
+ private $missing_value_interpretations;
+ protected $rows;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $write_stream
+ * Required. The write_stream identifies the target of the append operation,
+ * and only needs to be specified as part of the first request on the gRPC
+ * connection. If provided for subsequent requests, it must match the value of
+ * the first request.
+ * For explicitly created write streams, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+ * For the special default stream, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+ * @type \Google\Protobuf\Int64Value $offset
+ * If present, the write is only performed if the next append offset is same
+ * as the provided value. If not present, the write is performed at the
+ * current end of stream. Specifying a value for this field is not allowed
+ * when calling AppendRows for the '_default' stream.
+ * @type \Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\ProtoData $proto_rows
+ * Rows in proto format.
+ * @type string $trace_id
+ * Id set by client to annotate its identity. Only initial request setting is
+ * respected.
+ * @type array|\Google\Protobuf\Internal\MapField $missing_value_interpretations
+ * A map to indicate how to interpret missing value for some fields. Missing
+ * values are fields present in user schema but missing in rows. The key is
+ * the field name. The value is the interpretation of missing values for the
+ * field.
+ * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+ * missing values in field foo are interpreted as NULL, all missing values in
+ * field bar are interpreted as the default value of field bar in table
+ * schema.
+ * If a field is not in this map and has missing values, the missing values
+ * in this field are interpreted as NULL.
+ * This field only applies to the current request, it won't affect other
+ * requests on the connection.
+ * Currently, field name can only be top-level column name, can't be a struct
+ * field path like 'foo.bar'.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Required. The write_stream identifies the target of the append operation,
+ * and only needs to be specified as part of the first request on the gRPC
+ * connection. If provided for subsequent requests, it must match the value of
+ * the first request.
+ * For explicitly created write streams, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+ * For the special default stream, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+ *
+ * Generated from protobuf field string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @return string
+ */
+ public function getWriteStream()
+ {
+ return $this->write_stream;
+ }
+
+ /**
+ * Required. The write_stream identifies the target of the append operation,
+ * and only needs to be specified as part of the first request on the gRPC
+ * connection. If provided for subsequent requests, it must match the value of
+ * the first request.
+ * For explicitly created write streams, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+ * For the special default stream, the format is:
+ * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+ *
+ * Generated from protobuf field string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @param string $var
+ * @return $this
+ */
+ public function setWriteStream($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->write_stream = $var;
+
+ return $this;
+ }
+
+ /**
+ * If present, the write is only performed if the next append offset is same
+ * as the provided value. If not present, the write is performed at the
+ * current end of stream. Specifying a value for this field is not allowed
+ * when calling AppendRows for the '_default' stream.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @return \Google\Protobuf\Int64Value|null
+ */
+ public function getOffset()
+ {
+ return $this->offset;
+ }
+
+ public function hasOffset()
+ {
+ return isset($this->offset);
+ }
+
+ public function clearOffset()
+ {
+ unset($this->offset);
+ }
+
+ /**
+ * Returns the unboxed value from getOffset()
+
+ * If present, the write is only performed if the next append offset is same
+ * as the provided value. If not present, the write is performed at the
+ * current end of stream. Specifying a value for this field is not allowed
+ * when calling AppendRows for the '_default' stream.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @return int|string|null
+ */
+ public function getOffsetValue()
+ {
+ return $this->readWrapperValue("offset");
+ }
+
+ /**
+ * If present, the write is only performed if the next append offset is same
+ * as the provided value. If not present, the write is performed at the
+ * current end of stream. Specifying a value for this field is not allowed
+ * when calling AppendRows for the '_default' stream.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @param \Google\Protobuf\Int64Value $var
+ * @return $this
+ */
+ public function setOffset($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Protobuf\Int64Value::class);
+ $this->offset = $var;
+
+ return $this;
+ }
+
+ /**
+ * Sets the field by wrapping a primitive type in a Google\Protobuf\Int64Value object.
+
+ * If present, the write is only performed if the next append offset is same
+ * as the provided value. If not present, the write is performed at the
+ * current end of stream. Specifying a value for this field is not allowed
+ * when calling AppendRows for the '_default' stream.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @param int|string|null $var
+ * @return $this
+ */
+ public function setOffsetValue($var)
+ {
+ $this->writeWrapperValue("offset", $var);
+ return $this;}
+
+ /**
+ * Rows in proto format.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4;
+ * @return \Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\ProtoData|null
+ */
+ public function getProtoRows()
+ {
+ return $this->readOneof(4);
+ }
+
+ public function hasProtoRows()
+ {
+ return $this->hasOneof(4);
+ }
+
+ /**
+ * Rows in proto format.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4;
+ * @param \Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\ProtoData $var
+ * @return $this
+ */
+ public function setProtoRows($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\ProtoData::class);
+ $this->writeOneof(4, $var);
+
+ return $this;
+ }
+
+ /**
+ * Id set by client to annotate its identity. Only initial request setting is
+ * respected.
+ *
+ * Generated from protobuf field string trace_id = 6;
+ * @return string
+ */
+ public function getTraceId()
+ {
+ return $this->trace_id;
+ }
+
+ /**
+ * Id set by client to annotate its identity. Only initial request setting is
+ * respected.
+ *
+ * Generated from protobuf field string trace_id = 6;
+ * @param string $var
+ * @return $this
+ */
+ public function setTraceId($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->trace_id = $var;
+
+ return $this;
+ }
+
+ /**
+ * A map to indicate how to interpret missing value for some fields. Missing
+ * values are fields present in user schema but missing in rows. The key is
+ * the field name. The value is the interpretation of missing values for the
+ * field.
+ * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+ * missing values in field foo are interpreted as NULL, all missing values in
+ * field bar are interpreted as the default value of field bar in table
+ * schema.
+ * If a field is not in this map and has missing values, the missing values
+ * in this field are interpreted as NULL.
+ * This field only applies to the current request, it won't affect other
+ * requests on the connection.
+ * Currently, field name can only be top-level column name, can't be a struct
+ * field path like 'foo.bar'.
+ *
+ * Generated from protobuf field map missing_value_interpretations = 7;
+ * @return \Google\Protobuf\Internal\MapField
+ */
+ public function getMissingValueInterpretations()
+ {
+ return $this->missing_value_interpretations;
+ }
+
+ /**
+ * A map to indicate how to interpret missing value for some fields. Missing
+ * values are fields present in user schema but missing in rows. The key is
+ * the field name. The value is the interpretation of missing values for the
+ * field.
+ * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+ * missing values in field foo are interpreted as NULL, all missing values in
+ * field bar are interpreted as the default value of field bar in table
+ * schema.
+ * If a field is not in this map and has missing values, the missing values
+ * in this field are interpreted as NULL.
+ * This field only applies to the current request, it won't affect other
+ * requests on the connection.
+ * Currently, field name can only be top-level column name, can't be a struct
+ * field path like 'foo.bar'.
+ *
+ * Generated from protobuf field map missing_value_interpretations = 7;
+ * @param array|\Google\Protobuf\Internal\MapField $var
+ * @return $this
+ */
+ public function setMissingValueInterpretations($var)
+ {
+ $arr = GPBUtil::checkMapField($var, \Google\Protobuf\Internal\GPBType::STRING, \Google\Protobuf\Internal\GPBType::ENUM, \Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\MissingValueInterpretation::class);
+ $this->missing_value_interpretations = $arr;
+
+ return $this;
+ }
+
+ /**
+ * @return string
+ */
+ public function getRows()
+ {
+ return $this->whichOneof("rows");
+ }
+
+}
+
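The accessors above compose in the usual fluent style. A minimal sketch of building the first request on an AppendRows connection, using only setters shown in this file; the stream and column names are placeholders:

    <?php
    use Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest;
    use Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\MissingValueInterpretation;

    $request = new AppendRowsRequest();
    // Only the first request on the gRPC connection must carry the stream name.
    $request->setWriteStream('projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream');
    // Unboxed wrapper setter: wraps the int in a Google\Protobuf\Int64Value.
    // (Specifying an offset is not allowed on the _default stream.)
    $request->setOffsetValue(42);
    // Missing values in top-level column 'foo' fall back to the table default;
    // columns not listed here are interpreted as NULL.
    $request->setMissingValueInterpretations([
        'foo' => MissingValueInterpretation::DEFAULT_VALUE,
    ]);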
diff --git a/BigQueryStorage/src/V1/AppendRowsRequest/MissingValueInterpretation.php b/BigQueryStorage/src/V1/AppendRowsRequest/MissingValueInterpretation.php
new file mode 100644
index 000000000000..e1a01db3c2bb
--- /dev/null
+++ b/BigQueryStorage/src/V1/AppendRowsRequest/MissingValueInterpretation.php
@@ -0,0 +1,69 @@
+ * Protobuf type google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation
+ */
+class MissingValueInterpretation
+{
+ /**
+ * Invalid missing value interpretation. Requests with this value will be
+ * rejected.
+ *
+ * Generated from protobuf enum MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0;
+ */
+ const MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0;
+ /**
+ * Missing value is interpreted as NULL.
+ *
+ * Generated from protobuf enum NULL_VALUE = 1;
+ */
+ const NULL_VALUE = 1;
+ /**
+ * Missing value is interpreted as column default value if declared in the
+ * table schema, NULL otherwise.
+ *
+ * Generated from protobuf enum DEFAULT_VALUE = 2;
+ */
+ const DEFAULT_VALUE = 2;
+
+ private static $valueToName = [
+ self::MISSING_VALUE_INTERPRETATION_UNSPECIFIED => 'MISSING_VALUE_INTERPRETATION_UNSPECIFIED',
+ self::NULL_VALUE => 'NULL_VALUE',
+ self::DEFAULT_VALUE => 'DEFAULT_VALUE',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(MissingValueInterpretation::class, \Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest_MissingValueInterpretation::class);
+
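The static name()/value() helpers round-trip between enum constants and their string names. A quick sketch:

    <?php
    use Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\MissingValueInterpretation;

    // Constant to name: prints "DEFAULT_VALUE".
    echo MissingValueInterpretation::name(MissingValueInterpretation::DEFAULT_VALUE), PHP_EOL;
    // Name to constant: prints 1; lookup is case-insensitive via strtoupper().
    echo MissingValueInterpretation::value('null_value'), PHP_EOL;
    // Unknown values or names throw UnexpectedValueException.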
diff --git a/BigQueryStorage/src/V1/AppendRowsRequest/ProtoData.php b/BigQueryStorage/src/V1/AppendRowsRequest/ProtoData.php
new file mode 100644
index 000000000000..b5b0da2f9bb8
--- /dev/null
+++ b/BigQueryStorage/src/V1/AppendRowsRequest/ProtoData.php
@@ -0,0 +1,145 @@
+ * Generated from protobuf message google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData
+ */
+class ProtoData extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Proto schema used to serialize the data. This value only needs to be
+ * provided as part of the first request on a gRPC network connection,
+ * and will be ignored for subsequent requests on the connection.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1;
+ */
+ private $writer_schema = null;
+ /**
+ * Serialized row data in protobuf message format.
+ * Currently, the backend expects the serialized rows to adhere to
+ * proto2 semantics when appending rows, particularly with respect to
+ * how default values are encoded.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.ProtoRows rows = 2;
+ */
+ private $rows = null;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type \Google\Cloud\BigQuery\Storage\V1\ProtoSchema $writer_schema
+ * Proto schema used to serialize the data. This value only needs to be
+ * provided as part of the first request on a gRPC network connection,
+ * and will be ignored for subsequent requests on the connection.
+ * @type \Google\Cloud\BigQuery\Storage\V1\ProtoRows $rows
+ * Serialized row data in protobuf message format.
+ * Currently, the backend expects the serialized rows to adhere to
+ * proto2 semantics when appending rows, particularly with respect to
+ * how default values are encoded.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Proto schema used to serialize the data. This value only needs to be
+ * provided as part of the first request on a gRPC network connection,
+ * and will be ignored for subsequent requests on the connection.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1;
+ * @return \Google\Cloud\BigQuery\Storage\V1\ProtoSchema|null
+ */
+ public function getWriterSchema()
+ {
+ return $this->writer_schema;
+ }
+
+ public function hasWriterSchema()
+ {
+ return isset($this->writer_schema);
+ }
+
+ public function clearWriterSchema()
+ {
+ unset($this->writer_schema);
+ }
+
+ /**
+ * Proto schema used to serialize the data. This value only needs to be
+ * provided as part of the first request on a gRPC network connection,
+ * and will be ignored for subsequent requests on the connection.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1;
+ * @param \Google\Cloud\BigQuery\Storage\V1\ProtoSchema $var
+ * @return $this
+ */
+ public function setWriterSchema($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\ProtoSchema::class);
+ $this->writer_schema = $var;
+
+ return $this;
+ }
+
+ /**
+ * Serialized row data in protobuf message format.
+ * Currently, the backend expects the serialized rows to adhere to
+ * proto2 semantics when appending rows, particularly with respect to
+ * how default values are encoded.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.ProtoRows rows = 2;
+ * @return \Google\Cloud\BigQuery\Storage\V1\ProtoRows|null
+ */
+ public function getRows()
+ {
+ return $this->rows;
+ }
+
+ public function hasRows()
+ {
+ return isset($this->rows);
+ }
+
+ public function clearRows()
+ {
+ unset($this->rows);
+ }
+
+ /**
+ * Serialized row data in protobuf message format.
+ * Currently, the backend expects the serialized rows to adhere to
+ * proto2 semantics when appending rows, particularly with respect to
+ * how default values are encoded.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.ProtoRows rows = 2;
+ * @param \Google\Cloud\BigQuery\Storage\V1\ProtoRows $var
+ * @return $this
+ */
+ public function setRows($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\ProtoRows::class);
+ $this->rows = $var;
+
+ return $this;
+ }
+
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(ProtoData::class, \Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest_ProtoData::class);
+
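A minimal sketch of assembling ProtoData, assuming $descriptorProto is a Google\Protobuf\Internal\DescriptorProto describing the row message and $encodedRows is an array of proto2-serialized row payloads (both are assumptions, not shown in this diff):

    <?php
    use Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\ProtoData;
    use Google\Cloud\BigQuery\Storage\V1\ProtoRows;
    use Google\Cloud\BigQuery\Storage\V1\ProtoSchema;

    // $descriptorProto and $encodedRows are assumed to exist (see lead-in).
    $schema = (new ProtoSchema())->setProtoDescriptor($descriptorProto);
    $rows = (new ProtoRows())->setSerializedRows($encodedRows);

    $protoData = (new ProtoData())
        ->setWriterSchema($schema) // only needed on the connection's first request
        ->setRows($rows);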
diff --git a/BigQueryStorage/src/V1/AppendRowsRequest_MissingValueInterpretation.php b/BigQueryStorage/src/V1/AppendRowsRequest_MissingValueInterpretation.php
new file mode 100644
index 000000000000..0b7fd4a3d1a4
--- /dev/null
+++ b/BigQueryStorage/src/V1/AppendRowsRequest_MissingValueInterpretation.php
@@ -0,0 +1,16 @@
diff --git a/BigQueryStorage/src/V1/AppendRowsResponse.php b/BigQueryStorage/src/V1/AppendRowsResponse.php
new file mode 100644
--- /dev/null
+++ b/BigQueryStorage/src/V1/AppendRowsResponse.php
+ * Generated from protobuf message google.cloud.bigquery.storage.v1.AppendRowsResponse
+ */
+class AppendRowsResponse extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * If backend detects a schema update, pass it to user so that user can
+ * use it to input new type of message. It will be empty when no schema
+ * updates have occurred.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3;
+ */
+ private $updated_schema = null;
+ /**
+ * If a request failed due to corrupted rows, no rows in the batch will be
+ * appended. The API will return row level error info, so that the caller can
+ * remove the bad rows and retry the request.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4;
+ */
+ private $row_errors;
+ /**
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ *
+ * Generated from protobuf field string write_stream = 5;
+ */
+ private $write_stream = '';
+ protected $response;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type \Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse\AppendResult $append_result
+ * Result if the append is successful.
+ * @type \Google\Rpc\Status $error
+ * Error returned when problems were encountered. If present,
+ * it indicates rows were not accepted into the system.
+ * Users can retry or continue with other append requests within the
+ * same connection.
+ * Additional information about error signalling:
+ * ALREADY_EXISTS: Happens when an append specified an offset, and the
+ * backend already has received data at this offset. Typically encountered
+ * in retry scenarios, and can be ignored.
+ * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+ * the current end of the stream.
+ * INVALID_ARGUMENT: Indicates a malformed request or data.
+ * ABORTED: Request processing is aborted because of prior failures. The
+ * request can be retried if previous failure is addressed.
+ * INTERNAL: Indicates server side error(s) that can be retried.
+ * @type \Google\Cloud\BigQuery\Storage\V1\TableSchema $updated_schema
+ * If backend detects a schema update, pass it to user so that user can
+ * use it to input new type of message. It will be empty when no schema
+ * updates have occurred.
+ * @type array<\Google\Cloud\BigQuery\Storage\V1\RowError>|\Google\Protobuf\Internal\RepeatedField $row_errors
+ * If a request failed due to corrupted rows, no rows in the batch will be
+ * appended. The API will return row level error info, so that the caller can
+ * remove the bad rows and retry the request.
+ * @type string $write_stream
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Result if the append is successful.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1;
+ * @return \Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse\AppendResult|null
+ */
+ public function getAppendResult()
+ {
+ return $this->readOneof(1);
+ }
+
+ public function hasAppendResult()
+ {
+ return $this->hasOneof(1);
+ }
+
+ /**
+ * Result if the append is successful.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1;
+ * @param \Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse\AppendResult $var
+ * @return $this
+ */
+ public function setAppendResult($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse\AppendResult::class);
+ $this->writeOneof(1, $var);
+
+ return $this;
+ }
+
+ /**
+ * Error returned when problems were encountered. If present,
+ * it indicates rows were not accepted into the system.
+ * Users can retry or continue with other append requests within the
+ * same connection.
+ * Additional information about error signalling:
+ * ALREADY_EXISTS: Happens when an append specified an offset, and the
+ * backend already has received data at this offset. Typically encountered
+ * in retry scenarios, and can be ignored.
+ * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+ * the current end of the stream.
+ * INVALID_ARGUMENT: Indicates a malformed request or data.
+ * ABORTED: Request processing is aborted because of prior failures. The
+ * request can be retried if previous failure is addressed.
+ * INTERNAL: Indicates server side error(s) that can be retried.
+ *
+ * Generated from protobuf field .google.rpc.Status error = 2;
+ * @return \Google\Rpc\Status|null
+ */
+ public function getError()
+ {
+ return $this->readOneof(2);
+ }
+
+ public function hasError()
+ {
+ return $this->hasOneof(2);
+ }
+
+ /**
+ * Error returned when problems were encountered. If present,
+ * it indicates rows were not accepted into the system.
+ * Users can retry or continue with other append requests within the
+ * same connection.
+ * Additional information about error signalling:
+ * ALREADY_EXISTS: Happens when an append specified an offset, and the
+ * backend already has received data at this offset. Typically encountered
+ * in retry scenarios, and can be ignored.
+ * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+ * the current end of the stream.
+ * INVALID_ARGUMENT: Indicates a malformed request or data.
+ * ABORTED: Request processing is aborted because of prior failures. The
+ * request can be retried if previous failure is addressed.
+ * INTERNAL: Indicates server side error(s) that can be retried.
+ *
+ * Generated from protobuf field .google.rpc.Status error = 2;
+ * @param \Google\Rpc\Status $var
+ * @return $this
+ */
+ public function setError($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Rpc\Status::class);
+ $this->writeOneof(2, $var);
+
+ return $this;
+ }
+
+ /**
+ * If backend detects a schema update, pass it to user so that user can
+ * use it to input new type of message. It will be empty when no schema
+ * updates have occurred.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3;
+ * @return \Google\Cloud\BigQuery\Storage\V1\TableSchema|null
+ */
+ public function getUpdatedSchema()
+ {
+ return $this->updated_schema;
+ }
+
+ public function hasUpdatedSchema()
+ {
+ return isset($this->updated_schema);
+ }
+
+ public function clearUpdatedSchema()
+ {
+ unset($this->updated_schema);
+ }
+
+ /**
+ * If backend detects a schema update, pass it to user so that user can
+ * use it to input new type of message. It will be empty when no schema
+ * updates have occurred.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3;
+ * @param \Google\Cloud\BigQuery\Storage\V1\TableSchema $var
+ * @return $this
+ */
+ public function setUpdatedSchema($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\TableSchema::class);
+ $this->updated_schema = $var;
+
+ return $this;
+ }
+
+ /**
+ * If a request failed due to corrupted rows, no rows in the batch will be
+ * appended. The API will return row level error info, so that the caller can
+ * remove the bad rows and retry the request.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4;
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getRowErrors()
+ {
+ return $this->row_errors;
+ }
+
+ /**
+ * If a request failed due to corrupted rows, no rows in the batch will be
+ * appended. The API will return row level error info, so that the caller can
+ * remove the bad rows and retry the request.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4;
+ * @param array<\Google\Cloud\BigQuery\Storage\V1\RowError>|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setRowErrors($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\BigQuery\Storage\V1\RowError::class);
+ $this->row_errors = $arr;
+
+ return $this;
+ }
+
+ /**
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ *
+ * Generated from protobuf field string write_stream = 5;
+ * @return string
+ */
+ public function getWriteStream()
+ {
+ return $this->write_stream;
+ }
+
+ /**
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ *
+ * Generated from protobuf field string write_stream = 5;
+ * @param string $var
+ * @return $this
+ */
+ public function setWriteStream($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->write_stream = $var;
+
+ return $this;
+ }
+
+ /**
+ * @return string
+ */
+ public function getResponse()
+ {
+ return $this->whichOneof("response");
+ }
+
+}
+
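The response body is a oneof, and getResponse() names the populated case, so callers can branch on it. A sketch assuming $response is an AppendRowsResponse read off the bidi stream; RowError::getIndex() is assumed from the RowError message, which is not shown in this excerpt:

    <?php
    use Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse;

    /** @var AppendRowsResponse $response */
    if ($response->getResponse() === 'append_result') {
        // Offset is an Int64Value wrapper; the *Value accessor unboxes it
        // (null when appending to a default stream).
        printf('Appended at offset %s%s', $response->getAppendResult()->getOffsetValue(), PHP_EOL);
    } elseif ($response->getResponse() === 'error') {
        // google.rpc.Status; retriable codes are described in the docblock above.
        $status = $response->getError();
        printf('Append failed: [%d] %s%s', $status->getCode(), $status->getMessage(), PHP_EOL);
    }
    foreach ($response->getRowErrors() as $rowError) {
        printf('Bad row at index %d%s', $rowError->getIndex(), PHP_EOL);
    }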
diff --git a/BigQueryStorage/src/V1/AppendRowsResponse/AppendResult.php b/BigQueryStorage/src/V1/AppendRowsResponse/AppendResult.php
new file mode 100644
index 000000000000..c2364981f183
--- /dev/null
+++ b/BigQueryStorage/src/V1/AppendRowsResponse/AppendResult.php
@@ -0,0 +1,113 @@
+ * Generated from protobuf message google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult
+ */
+class AppendResult extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * The row offset at which the last append occurred. The offset will not be
+ * set if appending using default streams.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 1;
+ */
+ private $offset = null;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type \Google\Protobuf\Int64Value $offset
+ * The row offset at which the last append occurred. The offset will not be
+ * set if appending using default streams.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * The row offset at which the last append occurred. The offset will not be
+ * set if appending using default streams.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 1;
+ * @return \Google\Protobuf\Int64Value|null
+ */
+ public function getOffset()
+ {
+ return $this->offset;
+ }
+
+ public function hasOffset()
+ {
+ return isset($this->offset);
+ }
+
+ public function clearOffset()
+ {
+ unset($this->offset);
+ }
+
+ /**
+ * Returns the unboxed value from getOffset()
+
+ * The row offset at which the last append occurred. The offset will not be
+ * set if appending using default streams.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 1;
+ * @return int|string|null
+ */
+ public function getOffsetValue()
+ {
+ return $this->readWrapperValue("offset");
+ }
+
+ /**
+ * The row offset at which the last append occurred. The offset will not be
+ * set if appending using default streams.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 1;
+ * @param \Google\Protobuf\Int64Value $var
+ * @return $this
+ */
+ public function setOffset($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Protobuf\Int64Value::class);
+ $this->offset = $var;
+
+ return $this;
+ }
+
+ /**
+ * Sets the field by wrapping a primitive type in a Google\Protobuf\Int64Value object.
+
+ * The row offset at which the last append occurred. The offset will not be
+ * set if appending using default streams.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 1;
+ * @param int|string|null $var
+ * @return $this
+ */
+ public function setOffsetValue($var)
+ {
+ $this->writeWrapperValue("offset", $var);
+ return $this;}
+
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(AppendResult::class, \Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse_AppendResult::class);
+
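The paired accessors differ only in boxing: setOffset() takes the wrapper message, setOffsetValue() takes the primitive and boxes it. A short sketch of the equivalence:

    <?php
    use Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse\AppendResult;
    use Google\Protobuf\Int64Value;

    $a = (new AppendResult())->setOffset(new Int64Value(['value' => 7]));
    $b = (new AppendResult())->setOffsetValue(7); // same field, primitive in, wrapper stored

    var_dump($a->getOffsetValue()); // int(7) (string on 32-bit platforms)
    var_dump($b->hasOffset());      // bool(true)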
diff --git a/BigQueryStorage/src/V1/AppendRowsResponse_AppendResult.php b/BigQueryStorage/src/V1/AppendRowsResponse_AppendResult.php
new file mode 100644
index 000000000000..c58260ef72b5
--- /dev/null
+++ b/BigQueryStorage/src/V1/AppendRowsResponse_AppendResult.php
@@ -0,0 +1,16 @@
diff --git a/BigQueryStorage/src/V1/ArrowRecordBatch.php b/BigQueryStorage/src/V1/ArrowRecordBatch.php
--- a/BigQueryStorage/src/V1/ArrowRecordBatch.php
+++ b/BigQueryStorage/src/V1/ArrowRecordBatch.php
@@ -22,11 +22,13 @@ class ArrowRecordBatch extends \Google\Protobuf\Internal\Message
 */
 private $serialized_record_batch = '';
 /**
- * The count of rows in `serialized_record_batch`.
+ * [Deprecated] The count of rows in `serialized_record_batch`.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
 *
- * Generated from protobuf field int64 row_count = 2;
+ * Generated from protobuf field int64 row_count = 2 [deprecated = true];
+ * @deprecated
*/
- private $row_count = 0;
+ protected $row_count = 0;
/**
* Constructor.
@@ -37,7 +39,8 @@ class ArrowRecordBatch extends \Google\Protobuf\Internal\Message
* @type string $serialized_record_batch
* IPC-serialized Arrow RecordBatch.
* @type int|string $row_count
- * The count of rows in `serialized_record_batch`.
+ * [Deprecated] The count of rows in `serialized_record_batch`.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
* }
*/
public function __construct($data = NULL) {
@@ -72,25 +75,31 @@ public function setSerializedRecordBatch($var)
}
/**
- * The count of rows in `serialized_record_batch`.
+ * [Deprecated] The count of rows in `serialized_record_batch`.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
*
- * Generated from protobuf field int64 row_count = 2;
+ * Generated from protobuf field int64 row_count = 2 [deprecated = true];
* @return int|string
+ * @deprecated
*/
public function getRowCount()
{
+ @trigger_error('row_count is deprecated.', E_USER_DEPRECATED);
return $this->row_count;
}
/**
- * The count of rows in `serialized_record_batch`.
+ * [Deprecated] The count of rows in `serialized_record_batch`.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
*
- * Generated from protobuf field int64 row_count = 2;
+ * Generated from protobuf field int64 row_count = 2 [deprecated = true];
* @param int|string $var
* @return $this
+ * @deprecated
*/
public function setRowCount($var)
{
+ @trigger_error('row_count is deprecated.', E_USER_DEPRECATED);
GPBUtil::checkInt64($var);
$this->row_count = $var;
diff --git a/BigQueryStorage/src/V1/AvroRows.php b/BigQueryStorage/src/V1/AvroRows.php
index a65a762a3c03..d787a417f8e7 100644
--- a/BigQueryStorage/src/V1/AvroRows.php
+++ b/BigQueryStorage/src/V1/AvroRows.php
@@ -22,11 +22,13 @@ class AvroRows extends \Google\Protobuf\Internal\Message
*/
private $serialized_binary_rows = '';
/**
- * The count of rows in the returning block.
+ * [Deprecated] The count of rows in the returning block.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
*
- * Generated from protobuf field int64 row_count = 2;
+ * Generated from protobuf field int64 row_count = 2 [deprecated = true];
+ * @deprecated
*/
- private $row_count = 0;
+ protected $row_count = 0;
/**
* Constructor.
@@ -37,7 +39,8 @@ class AvroRows extends \Google\Protobuf\Internal\Message
* @type string $serialized_binary_rows
* Binary serialized rows in a block.
* @type int|string $row_count
- * The count of rows in the returning block.
+ * [Deprecated] The count of rows in the returning block.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
* }
*/
public function __construct($data = NULL) {
@@ -72,25 +75,31 @@ public function setSerializedBinaryRows($var)
}
/**
- * The count of rows in the returning block.
+ * [Deprecated] The count of rows in the returning block.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
*
- * Generated from protobuf field int64 row_count = 2;
+ * Generated from protobuf field int64 row_count = 2 [deprecated = true];
* @return int|string
+ * @deprecated
*/
public function getRowCount()
{
+ @trigger_error('row_count is deprecated.', E_USER_DEPRECATED);
return $this->row_count;
}
/**
- * The count of rows in the returning block.
+ * [Deprecated] The count of rows in the returning block.
+ * Please use the format-independent ReadRowsResponse.row_count instead.
*
- * Generated from protobuf field int64 row_count = 2;
+ * Generated from protobuf field int64 row_count = 2 [deprecated = true];
* @param int|string $var
* @return $this
+ * @deprecated
*/
public function setRowCount($var)
{
+ @trigger_error('row_count is deprecated.', E_USER_DEPRECATED);
GPBUtil::checkInt64($var);
$this->row_count = $var;
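Since AvroRows.row_count and ArrowRecordBatch.row_count now raise E_USER_DEPRECATED on access, callers should take the count from the enclosing response instead. A sketch assuming $stream came from BigQueryReadClient::readRows() and that ReadRowsResponse exposes the format-independent getRowCount() named by the deprecation notice:

    <?php
    /** @var \Google\ApiCore\ServerStream $stream */
    foreach ($stream->readAll() as $response) {
        /** @var \Google\Cloud\BigQuery\Storage\V1\ReadRowsResponse $response */
        // Format-independent count; avoids the deprecated per-format fields.
        $rowCount = $response->getRowCount();
        if ($response->hasAvroRows()) {
            $payload = $response->getAvroRows()->getSerializedBinaryRows();
        }
    }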
diff --git a/BigQueryStorage/src/V1/AvroSerializationOptions.php b/BigQueryStorage/src/V1/AvroSerializationOptions.php
new file mode 100644
index 000000000000..6aea463a3353
--- /dev/null
+++ b/BigQueryStorage/src/V1/AvroSerializationOptions.php
@@ -0,0 +1,95 @@
+ * Generated from protobuf message google.cloud.bigquery.storage.v1.AvroSerializationOptions
+ */
+class AvroSerializationOptions extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Enable displayName attribute in Avro schema.
+ * The Avro specification requires field names to be alphanumeric. By
+ * default, in cases when column names do not conform to these requirements
+ * (e.g. non-ascii unicode codepoints) and Avro is requested as an output
+ * format, the CreateReadSession call will fail.
+ * Setting this field to true, populates avro field names with a placeholder
+ * value and populates a "displayName" attribute for every avro field with the
+ * original column name.
+ *
+ * Generated from protobuf field bool enable_display_name_attribute = 1;
+ */
+ private $enable_display_name_attribute = false;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type bool $enable_display_name_attribute
+ * Enable displayName attribute in Avro schema.
+ * The Avro specification requires field names to be alphanumeric. By
+ * default, in cases when column names do not conform to these requirements
+ * (e.g. non-ascii unicode codepoints) and Avro is requested as an output
+ * format, the CreateReadSession call will fail.
+ * Setting this field to true, populates avro field names with a placeholder
+ * value and populates a "displayName" attribute for every avro field with the
+ * original column name.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Avro::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Enable displayName attribute in Avro schema.
+ * The Avro specification requires field names to be alphanumeric. By
+ * default, in cases when column names do not conform to these requirements
+ * (e.g. non-ascii unicode codepoints) and Avro is requested as an output
+ * format, the CreateReadSession call will fail.
+ * Setting this field to true, populates avro field names with a placeholder
+ * value and populates a "displayName" attribute for every avro field with the
+ * original column name.
+ *
+ * Generated from protobuf field bool enable_display_name_attribute = 1;
+ * @return bool
+ */
+ public function getEnableDisplayNameAttribute()
+ {
+ return $this->enable_display_name_attribute;
+ }
+
+ /**
+ * Enable displayName attribute in Avro schema.
+ * The Avro specification requires field names to be alphanumeric. By
+ * default, in cases when column names do not conform to these requirements
+ * (e.g. non-ascii unicode codepoints) and Avro is requested as an output
+ * format, the CreateReadSession call will fail.
+ * Setting this field to true, populates avro field names with a placeholder
+ * value and populates a "displayName" attribute for every avro field with the
+ * original column name.
+ *
+ * Generated from protobuf field bool enable_display_name_attribute = 1;
+ * @param bool $var
+ * @return $this
+ */
+ public function setEnableDisplayNameAttribute($var)
+ {
+ GPBUtil::checkBool($var);
+ $this->enable_display_name_attribute = $var;
+
+ return $this;
+ }
+
+}
+
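A minimal sketch of turning the option on. Attaching it to a read session goes through the session's read options; the setAvroSerializationOptions() call on TableReadOptions is assumed from the same surface and is not shown in this excerpt:

    <?php
    use Google\Cloud\BigQuery\Storage\V1\AvroSerializationOptions;
    use Google\Cloud\BigQuery\Storage\V1\ReadSession\TableReadOptions;

    $avroOptions = (new AvroSerializationOptions())
        ->setEnableDisplayNameAttribute(true); // keep original column names as "displayName"

    // Assumed wiring; see lead-in.
    $readOptions = (new TableReadOptions())
        ->setAvroSerializationOptions($avroOptions);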
diff --git a/BigQueryStorage/src/V1/BatchCommitWriteStreamsRequest.php b/BigQueryStorage/src/V1/BatchCommitWriteStreamsRequest.php
new file mode 100644
index 000000000000..74aae380d19f
--- /dev/null
+++ b/BigQueryStorage/src/V1/BatchCommitWriteStreamsRequest.php
@@ -0,0 +1,105 @@
+ * Generated from protobuf message google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest
+ */
+class BatchCommitWriteStreamsRequest extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Required. Parent table that all the streams should belong to, in the form
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ *
+ * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ */
+ private $parent = '';
+ /**
+ * Required. The group of streams that will be committed atomically.
+ *
+ * Generated from protobuf field repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED];
+ */
+ private $write_streams;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $parent
+ * Required. Parent table that all the streams should belong to, in the form
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ * @type array|\Google\Protobuf\Internal\RepeatedField $write_streams
+ * Required. The group of streams that will be committed atomically.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Required. Parent table that all the streams should belong to, in the form
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ *
+ * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @return string
+ */
+ public function getParent()
+ {
+ return $this->parent;
+ }
+
+ /**
+ * Required. Parent table that all the streams should belong to, in the form
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ *
+ * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @param string $var
+ * @return $this
+ */
+ public function setParent($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->parent = $var;
+
+ return $this;
+ }
+
+ /**
+ * Required. The group of streams that will be committed atomically.
+ *
+ * Generated from protobuf field repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED];
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getWriteStreams()
+ {
+ return $this->write_streams;
+ }
+
+ /**
+ * Required. The group of streams that will be committed atomically.
+ *
+ * Generated from protobuf field repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED];
+ * @param array|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setWriteStreams($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::STRING);
+ $this->write_streams = $arr;
+
+ return $this;
+ }
+
+}
+
diff --git a/BigQueryStorage/src/V1/BatchCommitWriteStreamsResponse.php b/BigQueryStorage/src/V1/BatchCommitWriteStreamsResponse.php
new file mode 100644
index 000000000000..171dd695436b
--- /dev/null
+++ b/BigQueryStorage/src/V1/BatchCommitWriteStreamsResponse.php
@@ -0,0 +1,135 @@
+google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse
+ */
+class BatchCommitWriteStreamsResponse extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * The time at which streams were committed, with microsecond granularity.
+ * This field will only exist when there are no stream errors.
+ * **Note:** if this field is not set, it means the commit was not successful.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp commit_time = 1;
+ */
+ private $commit_time = null;
+ /**
+ * Stream-level error if the commit failed. Only streams with errors will be
+ * in the list.
+ * If empty, there is no error and all streams are committed successfully.
+ * If non-empty, certain streams have errors and ZERO streams are committed
+ * due to the atomicity guarantee.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2;
+ */
+ private $stream_errors;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type \Google\Protobuf\Timestamp $commit_time
+ * The time at which streams were committed, with microsecond granularity.
+ * This field will only exist when there are no stream errors.
+ * **Note:** if this field is not set, it means the commit was not successful.
+ * @type array<\Google\Cloud\BigQuery\Storage\V1\StorageError>|\Google\Protobuf\Internal\RepeatedField $stream_errors
+ * Stream-level error if the commit failed. Only streams with errors will be
+ * in the list.
+ * If empty, there is no error and all streams are committed successfully.
+ * If non-empty, certain streams have errors and ZERO streams are committed
+ * due to the atomicity guarantee.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * The time at which streams were committed, with microsecond granularity.
+ * This field will only exist when there are no stream errors.
+ * **Note:** if this field is not set, it means the commit was not successful.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp commit_time = 1;
+ * @return \Google\Protobuf\Timestamp|null
+ */
+ public function getCommitTime()
+ {
+ return $this->commit_time;
+ }
+
+ public function hasCommitTime()
+ {
+ return isset($this->commit_time);
+ }
+
+ public function clearCommitTime()
+ {
+ unset($this->commit_time);
+ }
+
+ /**
+ * The time at which streams were committed, with microsecond granularity.
+ * This field will only exist when there are no stream errors.
+ * **Note:** if this field is not set, it means the commit was not successful.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp commit_time = 1;
+ * @param \Google\Protobuf\Timestamp $var
+ * @return $this
+ */
+ public function setCommitTime($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Protobuf\Timestamp::class);
+ $this->commit_time = $var;
+
+ return $this;
+ }
+
+ /**
+ * Stream-level error if the commit failed. Only streams with errors will be
+ * in the list.
+ * If empty, there is no error and all streams are committed successfully.
+ * If non-empty, certain streams have errors and ZERO streams are committed
+ * due to the atomicity guarantee.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2;
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getStreamErrors()
+ {
+ return $this->stream_errors;
+ }
+
+ /**
+ * Stream-level error if the commit failed. Only streams with errors will be
+ * in the list.
+ * If empty, there is no error and all streams are committed successfully.
+ * If non-empty, certain streams have errors and ZERO streams are committed
+ * due to the atomicity guarantee.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2;
+ * @param array<\Google\Cloud\BigQuery\Storage\V1\StorageError>|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setStreamErrors($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\BigQuery\Storage\V1\StorageError::class);
+ $this->stream_errors = $arr;
+
+ return $this;
+ }
+
+}
+
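Since a successful commit sets `commit_time` and a failed commit reports only the offending streams in `stream_errors`, response handling naturally branches on `hasCommitTime()`. A sketch of that branching follows; it assumes `StorageError`'s standard `getEntity()`/`getErrorMessage()` accessors, and the `reportCommit` helper name is hypothetical.

```php
<?php
// Illustrative handling of a BatchCommitWriteStreamsResponse; a sketch,
// not the library's documented pattern.

use Google\Cloud\BigQuery\Storage\V1\BatchCommitWriteStreamsResponse;
use Google\Cloud\BigQuery\Storage\V1\StorageError;

function reportCommit(BatchCommitWriteStreamsResponse $response): void
{
    if ($response->hasCommitTime()) {
        // Commit succeeded: all streams were committed atomically.
        printf("committed at %d.%09d\n",
            $response->getCommitTime()->getSeconds(),
            $response->getCommitTime()->getNanos());
        return;
    }

    // No commit time means the commit was not successful; only streams
    // with errors appear in the list.
    /** @var StorageError $error */
    foreach ($response->getStreamErrors() as $error) {
        printf("stream %s failed: %s\n",
            $error->getEntity(), $error->getErrorMessage());
    }
}
```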
diff --git a/BigQueryStorage/src/V1/BigQueryReadClient.php b/BigQueryStorage/src/V1/BigQueryReadClient.php
index 5dd950c58636..63cfc8a05f9c 100644
--- a/BigQueryStorage/src/V1/BigQueryReadClient.php
+++ b/BigQueryStorage/src/V1/BigQueryReadClient.php
@@ -18,7 +18,7 @@
/*
* GENERATED CODE WARNING
* Generated by gapic-generator-php from the file
- * https://github.com/google/googleapis/blob/master/google/cloud/bigquery/storage/v1/storage.proto
+ * https://github.com/googleapis/googleapis/blob/master/google/cloud/bigquery/storage/v1/storage.proto
* Updates to the above are reflected here through a refresh process.
*/
diff --git a/BigQueryStorage/src/V1/BigQueryReadGrpcClient.php b/BigQueryStorage/src/V1/BigQueryReadGrpcClient.php
index 2f16047d6a64..52611ecda3f9 100644
--- a/BigQueryStorage/src/V1/BigQueryReadGrpcClient.php
+++ b/BigQueryStorage/src/V1/BigQueryReadGrpcClient.php
@@ -52,7 +52,7 @@ public function __construct($hostname, $opts, $channel = null) {
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
*
- * Read sessions automatically expire 24 hours after they are created and do
+ * Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
* @param \Google\Cloud\BigQuery\Storage\V1\CreateReadSessionRequest $argument input argument
* @param array $metadata metadata
diff --git a/BigQueryStorage/src/V1/BigQueryWriteClient.php b/BigQueryStorage/src/V1/BigQueryWriteClient.php
new file mode 100644
index 000000000000..af678782464a
--- /dev/null
+++ b/BigQueryStorage/src/V1/BigQueryWriteClient.php
@@ -0,0 +1,34 @@
+_simpleRequest('/google.cloud.bigquery.storage.v1.BigQueryWrite/CreateWriteStream',
+ $argument,
+ ['\Google\Cloud\BigQuery\Storage\V1\WriteStream', 'decode'],
+ $metadata, $options);
+ }
+
+ /**
+ * Appends data to the given stream.
+ *
+ * If `offset` is specified, the `offset` is checked against the end of
+ * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+ * attempt is made to append to an offset beyond the current end of the stream
+ * or `ALREADY_EXISTS` if user provides an `offset` that has already been
+ * written to. User can retry with adjusted offset within the same RPC
+ * connection. If `offset` is not specified, append happens at the end of the
+ * stream.
+ *
+ * The response contains an optional offset at which the append
+ * happened. No offset information will be returned for appends to a
+ * default stream.
+ *
+ * Responses are received in the same order in which requests are sent.
+ * There will be one response for each successfully inserted request. Responses
+ * may optionally embed error information if the originating AppendRequest was
+ * not successfully processed.
+ *
+ * The specifics of when successfully appended data is made visible to the
+ * table are governed by the type of stream:
+ *
+ * * For COMMITTED streams (which includes the default stream), data is
+ * visible immediately upon successful append.
+ *
+ * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+ * rpc which advances a cursor to a newer offset in the stream.
+ *
+ * * For PENDING streams, data is not made visible until the stream itself is
+ * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+ * committed via the `BatchCommitWriteStreams` rpc.
+ * @param array $metadata metadata
+ * @param array $options call options
+ * @return \Grpc\BidiStreamingCall
+ */
+ public function AppendRows($metadata = [], $options = []) {
+ return $this->_bidiRequest('/google.cloud.bigquery.storage.v1.BigQueryWrite/AppendRows',
+ ['\Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse','decode'],
+ $metadata, $options);
+ }
+
+ /**
+ * Gets information about a write stream.
+ * @param \Google\Cloud\BigQuery\Storage\V1\GetWriteStreamRequest $argument input argument
+ * @param array $metadata metadata
+ * @param array $options call options
+ * @return \Grpc\UnaryCall
+ */
+ public function GetWriteStream(\Google\Cloud\BigQuery\Storage\V1\GetWriteStreamRequest $argument,
+ $metadata = [], $options = []) {
+ return $this->_simpleRequest('/google.cloud.bigquery.storage.v1.BigQueryWrite/GetWriteStream',
+ $argument,
+ ['\Google\Cloud\BigQuery\Storage\V1\WriteStream', 'decode'],
+ $metadata, $options);
+ }
+
+ /**
+ * Finalize a write stream so that no new data can be appended to the
+ * stream. Finalize is not supported on the '_default' stream.
+ * @param \Google\Cloud\BigQuery\Storage\V1\FinalizeWriteStreamRequest $argument input argument
+ * @param array $metadata metadata
+ * @param array $options call options
+ * @return \Grpc\UnaryCall
+ */
+ public function FinalizeWriteStream(\Google\Cloud\BigQuery\Storage\V1\FinalizeWriteStreamRequest $argument,
+ $metadata = [], $options = []) {
+ return $this->_simpleRequest('/google.cloud.bigquery.storage.v1.BigQueryWrite/FinalizeWriteStream',
+ $argument,
+ ['\Google\Cloud\BigQuery\Storage\V1\FinalizeWriteStreamResponse', 'decode'],
+ $metadata, $options);
+ }
+
+ /**
+ * Atomically commits a group of `PENDING` streams that belong to the same
+ * `parent` table.
+ *
+ * Streams must be finalized before commit and cannot be committed multiple
+ * times. Once a stream is committed, data in the stream becomes available
+ * for read operations.
+ * @param \Google\Cloud\BigQuery\Storage\V1\BatchCommitWriteStreamsRequest $argument input argument
+ * @param array $metadata metadata
+ * @param array $options call options
+ * @return \Grpc\UnaryCall
+ */
+ public function BatchCommitWriteStreams(\Google\Cloud\BigQuery\Storage\V1\BatchCommitWriteStreamsRequest $argument,
+ $metadata = [], $options = []) {
+ return $this->_simpleRequest('/google.cloud.bigquery.storage.v1.BigQueryWrite/BatchCommitWriteStreams',
+ $argument,
+ ['\Google\Cloud\BigQuery\Storage\V1\BatchCommitWriteStreamsResponse', 'decode'],
+ $metadata, $options);
+ }
+
+ /**
+ * Flushes rows to a BUFFERED stream.
+ *
+ * If users are appending rows to a BUFFERED stream, a flush operation is
+ * required in order for the rows to become available for reading. A
+ * flush operation advances the flushed offset in a BUFFERED stream, from any
+ * previously flushed offset up to the offset specified in the request.
+ *
+ * Flush is not supported on the _default stream, since it is not BUFFERED.
+ * @param \Google\Cloud\BigQuery\Storage\V1\FlushRowsRequest $argument input argument
+ * @param array $metadata metadata
+ * @param array $options call options
+ * @return \Grpc\UnaryCall
+ */
+ public function FlushRows(\Google\Cloud\BigQuery\Storage\V1\FlushRowsRequest $argument,
+ $metadata = [], $options = []) {
+ return $this->_simpleRequest('/google.cloud.bigquery.storage.v1.BigQueryWrite/FlushRows',
+ $argument,
+ ['\Google\Cloud\BigQuery\Storage\V1\FlushRowsResponse', 'decode'],
+ $metadata, $options);
+ }
+
+}
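The `AppendRows` offset check described above (`OUT_OF_RANGE` past the stream end, `ALREADY_EXISTS` on rewrites) is driven by the optional `offset` on each request. A sketch of building such a request; the stream name and serialized row bytes are placeholders, and a real writer derives the payload from a compiled proto message plus a `ProtoSchema`.

```php
<?php
// Sketch of an AppendRowsRequest with an explicit offset.

use Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest;
use Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest\ProtoData;
use Google\Cloud\BigQuery\Storage\V1\ProtoRows;
use Google\Protobuf\Int64Value;

$streamName = 'projects/p/datasets/d/tables/t/streams/s'; // hypothetical
$serializedRowBytes = ''; // placeholder: serialized proto row bytes

$rows = (new ProtoRows())
    ->setSerializedRows([$serializedRowBytes]);

$request = (new AppendRowsRequest())
    ->setWriteStream($streamName)
    // With an offset set, the server rejects appends beyond the current
    // stream end (OUT_OF_RANGE) and onto already-written offsets
    // (ALREADY_EXISTS); the caller may retry with an adjusted offset on
    // the same RPC connection.
    ->setOffset((new Int64Value())->setValue(42))
    ->setProtoRows((new ProtoData())->setRows($rows));
```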
diff --git a/BigQueryStorage/src/V1/CreateReadSessionRequest.php b/BigQueryStorage/src/V1/CreateReadSessionRequest.php
index 4a027c0110a8..7c28abc44a65 100644
--- a/BigQueryStorage/src/V1/CreateReadSessionRequest.php
+++ b/BigQueryStorage/src/V1/CreateReadSessionRequest.php
@@ -32,14 +32,28 @@ class CreateReadSessionRequest extends \Google\Protobuf\Internal\Message
* Max initial number of streams. If unset or zero, the server will
* provide a value of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
- * depending on the amount parallelism that is reasonable for the table. Error
- * will be returned if the max count is greater than the current system
- * max limit of 1,000.
- * Streams must be read starting from offset 0.
+ * depending on the amount of parallelism that is reasonable for the table.
+ * There is a default system max limit of 1,000.
+ * This must be greater than or equal to preferred_min_stream_count.
+ * Typically, clients should either leave this unset to let the system
+ * determine an upper bound OR set this to the maximum number of "units of
+ * work" it can gracefully handle.
*
* Generated from protobuf field int32 max_stream_count = 3;
*/
private $max_stream_count = 0;
+ /**
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide fewer.
+ *
+ * Generated from protobuf field int32 preferred_min_stream_count = 4;
+ */
+ private $preferred_min_stream_count = 0;
/**
* Constructor.
@@ -56,10 +70,20 @@ class CreateReadSessionRequest extends \Google\Protobuf\Internal\Message
* Max initial number of streams. If unset or zero, the server will
* provide a value of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
- * depending on the amount parallelism that is reasonable for the table. Error
- * will be returned if the max count is greater than the current system
- * max limit of 1,000.
- * Streams must be read starting from offset 0.
+ * depending on the amount of parallelism that is reasonable for the table.
+ * There is a default system max limit of 1,000.
+ * This must be greater than or equal to preferred_min_stream_count.
+ * Typically, clients should either leave this unset to let the system
+ * determine an upper bound OR set this to the maximum number of "units of
+ * work" it can gracefully handle.
+ * @type int $preferred_min_stream_count
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide fewer.
* }
*/
public function __construct($data = NULL) {
@@ -103,7 +127,7 @@ public function setParent($var)
*/
public function getReadSession()
{
- return isset($this->read_session) ? $this->read_session : null;
+ return $this->read_session;
}
public function hasReadSession()
@@ -135,10 +159,12 @@ public function setReadSession($var)
* Max initial number of streams. If unset or zero, the server will
* provide a value of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
- * depending on the amount parallelism that is reasonable for the table. Error
- * will be returned if the max count is greater than the current system
- * max limit of 1,000.
- * Streams must be read starting from offset 0.
+ * depending on the amount of parallelism that is reasonable for the table.
+ * There is a default system max limit of 1,000.
+ * This must be greater than or equal to preferred_min_stream_count.
+ * Typically, clients should either leave this unset to let the system
+ * determine an upper bound OR set this to the maximum number of "units of
+ * work" it can gracefully handle.
*
* Generated from protobuf field int32 max_stream_count = 3;
* @return int
@@ -152,10 +178,12 @@ public function getMaxStreamCount()
* Max initial number of streams. If unset or zero, the server will
* provide a value of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
- * depending on the amount parallelism that is reasonable for the table. Error
- * will be returned if the max count is greater than the current system
- * max limit of 1,000.
- * Streams must be read starting from offset 0.
+ * depending on the amount of parallelism that is reasonable for the table.
+ * There is a default system max limit of 1,000.
+ * This must be greater than or equal to preferred_min_stream_count.
+ * Typically, clients should either leave this unset to let the system
+ * determine an upper bound OR set this to the maximum number of "units of
+ * work" it can gracefully handle.
*
* Generated from protobuf field int32 max_stream_count = 3;
* @param int $var
@@ -169,5 +197,43 @@ public function setMaxStreamCount($var)
return $this;
}
+ /**
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide fewer.
+ *
+ * Generated from protobuf field int32 preferred_min_stream_count = 4;
+ * @return int
+ */
+ public function getPreferredMinStreamCount()
+ {
+ return $this->preferred_min_stream_count;
+ }
+
+ /**
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide fewer.
+ *
+ * Generated from protobuf field int32 preferred_min_stream_count = 4;
+ * @param int $var
+ * @return $this
+ */
+ public function setPreferredMinStreamCount($var)
+ {
+ GPBUtil::checkInt32($var);
+ $this->preferred_min_stream_count = $var;
+
+ return $this;
+ }
+
}
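Taken together, the two stream-count fields form a hint range for the server: a best-effort lower bound plus a hard-ish upper bound. A sketch with illustrative values; the parent project, worker count, and multipliers are hypothetical.

```php
<?php
// Sketch: pairing the new preferred_min_stream_count with max_stream_count
// on a CreateReadSessionRequest.

use Google\Cloud\BigQuery\Storage\V1\CreateReadSessionRequest;
use Google\Cloud\BigQuery\Storage\V1\ReadSession;

$workers = 8; // hypothetical client parallelism, e.g. Spark executors

$request = (new CreateReadSessionRequest())
    ->setParent('projects/my-project') // hypothetical
    ->setReadSession(new ReadSession())
    // Lower-bound hint: a low multiple of the client's worker count.
    ->setPreferredMinStreamCount(2 * $workers)
    // Upper bound: must be >= preferred_min_stream_count; leaving it at 0
    // lets the server choose (subject to the default system max of 1,000).
    ->setMaxStreamCount(16 * $workers);
```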
diff --git a/BigQueryStorage/src/V1/CreateWriteStreamRequest.php b/BigQueryStorage/src/V1/CreateWriteStreamRequest.php
new file mode 100644
index 000000000000..9b47baec0e48
--- /dev/null
+++ b/BigQueryStorage/src/V1/CreateWriteStreamRequest.php
@@ -0,0 +1,115 @@
+google.cloud.bigquery.storage.v1.CreateWriteStreamRequest
+ */
+class CreateWriteStreamRequest extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Required. Reference to the table to which the stream belongs, in the format
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ *
+ * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ */
+ private $parent = '';
+ /**
+ * Required. Stream to be created.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED];
+ */
+ private $write_stream = null;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $parent
+ * Required. Reference to the table to which the stream belongs, in the format
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ * @type \Google\Cloud\BigQuery\Storage\V1\WriteStream $write_stream
+ * Required. Stream to be created.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Required. Reference to the table to which the stream belongs, in the format
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ *
+ * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @return string
+ */
+ public function getParent()
+ {
+ return $this->parent;
+ }
+
+ /**
+ * Required. Reference to the table to which the stream belongs, in the format
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ *
+ * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @param string $var
+ * @return $this
+ */
+ public function setParent($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->parent = $var;
+
+ return $this;
+ }
+
+ /**
+ * Required. Stream to be created.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED];
+ * @return \Google\Cloud\BigQuery\Storage\V1\WriteStream|null
+ */
+ public function getWriteStream()
+ {
+ return $this->write_stream;
+ }
+
+ public function hasWriteStream()
+ {
+ return isset($this->write_stream);
+ }
+
+ public function clearWriteStream()
+ {
+ unset($this->write_stream);
+ }
+
+ /**
+ * Required. Stream to be created.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED];
+ * @param \Google\Cloud\BigQuery\Storage\V1\WriteStream $var
+ * @return $this
+ */
+ public function setWriteStream($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\WriteStream::class);
+ $this->write_stream = $var;
+
+ return $this;
+ }
+
+}
+
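Creating an application-managed stream, as opposed to writing to the implicit `_default` stream, is a two-argument call on the write client. A sketch using a PENDING stream; the project, dataset, and table IDs are hypothetical, and the nested enum is assumed to resolve to `WriteStream\Type` per PHP protobuf nesting conventions.

```php
<?php
// Sketch: creating a PENDING write stream.

use Google\Cloud\BigQuery\Storage\V1\BigQueryWriteClient;
use Google\Cloud\BigQuery\Storage\V1\WriteStream;
use Google\Cloud\BigQuery\Storage\V1\WriteStream\Type;

$client = new BigQueryWriteClient();
$parent = BigQueryWriteClient::tableName('my-project', 'my_dataset', 'my_table'); // hypothetical IDs

// PENDING: appended rows stay invisible until the stream is finalized
// and committed via BatchCommitWriteStreams.
$stream = (new WriteStream())->setType(Type::PENDING);

$created = $client->createWriteStream($parent, $stream);
printf("created %s\n", $created->getName());
```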
diff --git a/BigQueryStorage/src/V1/DataFormat.php b/BigQueryStorage/src/V1/DataFormat.php
index cab71f55ef6c..c5e81f0da7a1 100644
--- a/BigQueryStorage/src/V1/DataFormat.php
+++ b/BigQueryStorage/src/V1/DataFormat.php
@@ -14,6 +14,8 @@
class DataFormat
{
/**
+ * Data format is unspecified.
+ *
* Generated from protobuf enum DATA_FORMAT_UNSPECIFIED = 0;
*/
const DATA_FORMAT_UNSPECIFIED = 0;
diff --git a/BigQueryStorage/src/V1/FinalizeWriteStreamRequest.php b/BigQueryStorage/src/V1/FinalizeWriteStreamRequest.php
new file mode 100644
index 000000000000..7f0e56f3da72
--- /dev/null
+++ b/BigQueryStorage/src/V1/FinalizeWriteStreamRequest.php
@@ -0,0 +1,71 @@
+google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest
+ */
+class FinalizeWriteStreamRequest extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Required. Name of the stream to finalize, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ */
+ private $name = '';
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $name
+ * Required. Name of the stream to finalize, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Required. Name of the stream to finalize, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @return string
+ */
+ public function getName()
+ {
+ return $this->name;
+ }
+
+ /**
+ * Required. Name of the stream to finalize, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @param string $var
+ * @return $this
+ */
+ public function setName($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->name = $var;
+
+ return $this;
+ }
+
+}
+
diff --git a/BigQueryStorage/src/V1/FinalizeWriteStreamResponse.php b/BigQueryStorage/src/V1/FinalizeWriteStreamResponse.php
new file mode 100644
index 000000000000..bd83a3c87291
--- /dev/null
+++ b/BigQueryStorage/src/V1/FinalizeWriteStreamResponse.php
@@ -0,0 +1,67 @@
+google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse
+ */
+class FinalizeWriteStreamResponse extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Number of rows in the finalized stream.
+ *
+ * Generated from protobuf field int64 row_count = 1;
+ */
+ private $row_count = 0;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type int|string $row_count
+ * Number of rows in the finalized stream.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Number of rows in the finalized stream.
+ *
+ * Generated from protobuf field int64 row_count = 1;
+ * @return int|string
+ */
+ public function getRowCount()
+ {
+ return $this->row_count;
+ }
+
+ /**
+ * Number of rows in the finalized stream.
+ *
+ * Generated from protobuf field int64 row_count = 1;
+ * @param int|string $var
+ * @return $this
+ */
+ public function setRowCount($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->row_count = $var;
+
+ return $this;
+ }
+
+}
+
diff --git a/BigQueryStorage/src/V1/FlushRowsRequest.php b/BigQueryStorage/src/V1/FlushRowsRequest.php
new file mode 100644
index 000000000000..0fc34a84461b
--- /dev/null
+++ b/BigQueryStorage/src/V1/FlushRowsRequest.php
@@ -0,0 +1,144 @@
+google.cloud.bigquery.storage.v1.FlushRowsRequest
+ */
+class FlushRowsRequest extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Required. The stream that is the target of the flush operation.
+ *
+ * Generated from protobuf field string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ */
+ private $write_stream = '';
+ /**
+ * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ */
+ private $offset = null;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $write_stream
+ * Required. The stream that is the target of the flush operation.
+ * @type \Google\Protobuf\Int64Value $offset
+ * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Required. The stream that is the target of the flush operation.
+ *
+ * Generated from protobuf field string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @return string
+ */
+ public function getWriteStream()
+ {
+ return $this->write_stream;
+ }
+
+ /**
+ * Required. The stream that is the target of the flush operation.
+ *
+ * Generated from protobuf field string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @param string $var
+ * @return $this
+ */
+ public function setWriteStream($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->write_stream = $var;
+
+ return $this;
+ }
+
+ /**
+ * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @return \Google\Protobuf\Int64Value|null
+ */
+ public function getOffset()
+ {
+ return $this->offset;
+ }
+
+ public function hasOffset()
+ {
+ return isset($this->offset);
+ }
+
+ public function clearOffset()
+ {
+ unset($this->offset);
+ }
+
+ /**
+ * Returns the unboxed value from getOffset().
+ *
+ * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @return int|string|null
+ */
+ public function getOffsetValue()
+ {
+ return $this->readWrapperValue("offset");
+ }
+
+ /**
+ * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @param \Google\Protobuf\Int64Value $var
+ * @return $this
+ */
+ public function setOffset($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Protobuf\Int64Value::class);
+ $this->offset = $var;
+
+ return $this;
+ }
+
+ /**
+ * Sets the field by wrapping a primitive type in a Google\Protobuf\Int64Value object.
+ *
+ * Ending offset of the flush operation. Rows before this offset (including
+ * this offset) will be flushed.
+ *
+ * Generated from protobuf field .google.protobuf.Int64Value offset = 2;
+ * @param int|string|null $var
+ * @return $this
+ */
+ public function setOffsetValue($var)
+ {
+ $this->writeWrapperValue("offset", $var);
+ return $this;
+ }
+
+}
+
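The `offset` field is a `google.protobuf.Int64Value` wrapper, so the generated class also carries `setOffsetValue()`/`getOffsetValue()` conveniences that box and unbox the primitive. A short sketch (the stream name is hypothetical):

```php
<?php
// Sketch of the Int64Value wrapper convenience on FlushRowsRequest:
// setOffsetValue() boxes a primitive, sparing callers the explicit
// Google\Protobuf\Int64Value construction.

use Google\Cloud\BigQuery\Storage\V1\FlushRowsRequest;

$request = (new FlushRowsRequest())
    ->setWriteStream('projects/p/datasets/d/tables/t/streams/s') // hypothetical
    // Flush everything up to and including offset 99.
    ->setOffsetValue(99);

// Reading it back unboxes the wrapper (null when the field is unset).
var_dump($request->getOffsetValue()); // 99
```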
diff --git a/BigQueryStorage/src/V1/FlushRowsResponse.php b/BigQueryStorage/src/V1/FlushRowsResponse.php
new file mode 100644
index 000000000000..b8582b21bfea
--- /dev/null
+++ b/BigQueryStorage/src/V1/FlushRowsResponse.php
@@ -0,0 +1,67 @@
+google.cloud.bigquery.storage.v1.FlushRowsResponse
+ */
+class FlushRowsResponse extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * The rows before this offset (including this offset) are flushed.
+ *
+ * Generated from protobuf field int64 offset = 1;
+ */
+ private $offset = 0;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type int|string $offset
+ * The rows before this offset (including this offset) are flushed.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * The rows before this offset (including this offset) are flushed.
+ *
+ * Generated from protobuf field int64 offset = 1;
+ * @return int|string
+ */
+ public function getOffset()
+ {
+ return $this->offset;
+ }
+
+ /**
+ * The rows before this offset (including this offset) are flushed.
+ *
+ * Generated from protobuf field int64 offset = 1;
+ * @param int|string $var
+ * @return $this
+ */
+ public function setOffset($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->offset = $var;
+
+ return $this;
+ }
+
+}
+
diff --git a/BigQueryStorage/src/V1/Gapic/BigQueryReadGapicClient.php b/BigQueryStorage/src/V1/Gapic/BigQueryReadGapicClient.php
index 274c9cd9248e..133e1d8bc473 100644
--- a/BigQueryStorage/src/V1/Gapic/BigQueryReadGapicClient.php
+++ b/BigQueryStorage/src/V1/Gapic/BigQueryReadGapicClient.php
@@ -18,7 +18,7 @@
/*
* GENERATED CODE WARNING
* Generated by gapic-generator-php from the file
- * https://github.com/google/googleapis/blob/master/google/cloud/bigquery/storage/v1/storage.proto
+ * https://github.com/googleapis/googleapis/blob/master/google/cloud/bigquery/storage/v1/storage.proto
* Updates to the above are reflected here through a refresh process.
*/
@@ -27,7 +27,6 @@
use Google\ApiCore\ApiException;
use Google\ApiCore\Call;
use Google\ApiCore\CredentialsWrapper;
-
use Google\ApiCore\GapicClientTrait;
use Google\ApiCore\PathTemplate;
use Google\ApiCore\RequestParamsHeaderDescriptor;
@@ -70,32 +69,21 @@ class BigQueryReadGapicClient
{
use GapicClientTrait;
- /**
- * The name of the service.
- */
+ /** The name of the service. */
const SERVICE_NAME = 'google.cloud.bigquery.storage.v1.BigQueryRead';
- /**
- * The default address of the service.
- */
+ /** The default address of the service. */
const SERVICE_ADDRESS = 'bigquerystorage.googleapis.com';
- /**
- * The default port of the service.
- */
+ /** The default port of the service. */
const DEFAULT_SERVICE_PORT = 443;
- /**
- * The name of the code generator, to be included in the agent header.
- */
+ /** The name of the code generator, to be included in the agent header. */
const CODEGEN_NAME = 'gapic';
- /**
- * The default scopes required by the service.
- */
+ /** The default scopes required by the service. */
public static $serviceScopes = [
'https://www.googleapis.com/auth/bigquery',
- 'https://www.googleapis.com/auth/bigquery.readonly',
'https://www.googleapis.com/auth/cloud-platform',
];
@@ -322,9 +310,6 @@ public static function parseName($formattedName, $template = null)
* @param array $options {
* Optional. Options for configuring the service API wrapper.
*
- * @type string $serviceAddress
- * **Deprecated**. This option will be removed in a future major release. Please
- * utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address of the API remote host. May optionally include the port, formatted
* as ":". Default 'bigquerystorage.googleapis.com:443'.
@@ -354,7 +339,7 @@ public static function parseName($formattedName, $template = null)
* *Advanced usage*: Additionally, it is possible to pass in an already
* instantiated {@see \Google\ApiCore\Transport\TransportInterface} object. Note
* that when this object is provided, any settings in $transportConfig, and any
- * $serviceAddress setting, will be ignored.
+ * $apiEndpoint setting, will be ignored.
* @type array $transportConfig
* Configuration options that will be used to construct the transport. Options for
* each supported transport type should be passed in a key for that transport. For
@@ -397,7 +382,7 @@ public function __construct(array $options = [])
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
*
- * Read sessions automatically expire 24 hours after they are created and do
+ * Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
*
* Sample code:
@@ -422,16 +407,26 @@ public function __construct(array $options = [])
* Max initial number of streams. If unset or zero, the server will
* provide a value of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
- * depending on the amount parallelism that is reasonable for the table. Error
- * will be returned if the max count is greater than the current system
- * max limit of 1,000.
- *
- * Streams must be read starting from offset 0.
+ * depending on the amount of parallelism that is reasonable for the table.
+ * There is a default system max limit of 1,000.
+ *
+ * This must be greater than or equal to preferred_min_stream_count.
+ * Typically, clients should either leave this unset to let the system
+ * determine an upper bound OR set this to the maximum number of "units of
+ * work" it can gracefully handle.
+ * @type int $preferredMinStreamCount
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ *
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide fewer.
* @type RetrySettings|array $retrySettings
- * Retry settings to use for this call. Can be a
- * {@see Google\ApiCore\RetrySettings} object, or an associative array of retry
- * settings parameters. See the documentation on
- * {@see Google\ApiCore\RetrySettings} for example usage.
+ * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an
+ * associative array of retry settings parameters. See the documentation on
+ * {@see RetrySettings} for example usage.
* }
*
* @return \Google\Cloud\BigQuery\Storage\V1\ReadSession
@@ -452,6 +447,12 @@ public function createReadSession(
$request->setMaxStreamCount($optionalArgs['maxStreamCount']);
}
+ if (isset($optionalArgs['preferredMinStreamCount'])) {
+ $request->setPreferredMinStreamCount(
+ $optionalArgs['preferredMinStreamCount']
+ );
+ }
+
$requestParams = new RequestParamsHeaderDescriptor(
$requestParamHeaders
);
@@ -569,10 +570,9 @@ public function readRows($readStream, array $optionalArgs = [])
* server-side unit for assigning data is collections of rows, this fraction
* will always map to a data storage boundary on the server side.
* @type RetrySettings|array $retrySettings
- * Retry settings to use for this call. Can be a
- * {@see Google\ApiCore\RetrySettings} object, or an associative array of retry
- * settings parameters. See the documentation on
- * {@see Google\ApiCore\RetrySettings} for example usage.
+ * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an
+ * associative array of retry settings parameters. See the documentation on
+ * {@see RetrySettings} for example usage.
* }
*
* @return \Google\Cloud\BigQuery\Storage\V1\SplitReadStreamResponse
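On the GAPIC surface, the new lower-bound hint rides along as the `preferredMinStreamCount` optional argument. A sketch following the docblock's own try/finally convention; the project ID and counts are hypothetical, and session configuration is elided for brevity.

```php
<?php
// Sketch: passing preferredMinStreamCount through the GAPIC client.

use Google\Cloud\BigQuery\Storage\V1\BigQueryReadClient;
use Google\Cloud\BigQuery\Storage\V1\ReadSession;

$client = new BigQueryReadClient();
try {
    $session = $client->createReadSession(
        BigQueryReadClient::projectName('my-project'), // hypothetical
        new ReadSession(),
        [
            'maxStreamCount' => 100,
            'preferredMinStreamCount' => 10, // best-effort lower bound
        ]
    );
    // expire_time is server-populated; sessions now expire after 6 hours.
    printf("session %s expires at %d\n",
        $session->getName(), $session->getExpireTime()->getSeconds());
} finally {
    $client->close();
}
```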
diff --git a/BigQueryStorage/src/V1/Gapic/BigQueryWriteGapicClient.php b/BigQueryStorage/src/V1/Gapic/BigQueryWriteGapicClient.php
new file mode 100644
index 000000000000..138733ce424d
--- /dev/null
+++ b/BigQueryStorage/src/V1/Gapic/BigQueryWriteGapicClient.php
@@ -0,0 +1,726 @@
+setWriteStream($writeStream);
+ * // Write all requests to the server, then read all responses until the
+ * // stream is complete
+ * $requests = [
+ * $request,
+ * ];
+ * $stream = $bigQueryWriteClient->appendRows();
+ * $stream->writeAll($requests);
+ * foreach ($stream->closeWriteAndReadAll() as $element) {
+ * // doSomethingWith($element);
+ * }
+ * // Alternatively:
+ * // Write requests individually, making read() calls if
+ * // required. Call closeWrite() once writes are complete, and read the
+ * // remaining responses from the server.
+ * $requests = [
+ * $request,
+ * ];
+ * $stream = $bigQueryWriteClient->appendRows();
+ * foreach ($requests as $request) {
+ * $stream->write($request);
+ * // if required, read a single response from the stream
+ * $element = $stream->read();
+ * // doSomethingWith($element)
+ * }
+ * $stream->closeWrite();
+ * $element = $stream->read();
+ * while (!is_null($element)) {
+ * // doSomethingWith($element)
+ * $element = $stream->read();
+ * }
+ * } finally {
+ * $bigQueryWriteClient->close();
+ * }
+ * ```
+ *
+ * Many parameters require resource names to be formatted in a particular way. To
+ * assist with these names, this class includes a format method for each type of
+ * name, and additionally a parseName method to extract the individual identifiers
+ * contained within formatted names that are returned by the API.
+ */
+class BigQueryWriteGapicClient
+{
+ use GapicClientTrait;
+
+ /** The name of the service. */
+ const SERVICE_NAME = 'google.cloud.bigquery.storage.v1.BigQueryWrite';
+
+ /** The default address of the service. */
+ const SERVICE_ADDRESS = 'bigquerystorage.googleapis.com';
+
+ /** The default port of the service. */
+ const DEFAULT_SERVICE_PORT = 443;
+
+ /** The name of the code generator, to be included in the agent header. */
+ const CODEGEN_NAME = 'gapic';
+
+ /** The default scopes required by the service. */
+ public static $serviceScopes = [
+ 'https://www.googleapis.com/auth/bigquery',
+ 'https://www.googleapis.com/auth/bigquery.insertdata',
+ 'https://www.googleapis.com/auth/cloud-platform',
+ ];
+
+ private static $tableNameTemplate;
+
+ private static $writeStreamNameTemplate;
+
+ private static $pathTemplateMap;
+
+ private static function getClientDefaults()
+ {
+ return [
+ 'serviceName' => self::SERVICE_NAME,
+ 'apiEndpoint' =>
+ self::SERVICE_ADDRESS . ':' . self::DEFAULT_SERVICE_PORT,
+ 'clientConfig' =>
+ __DIR__ . '/../resources/big_query_write_client_config.json',
+ 'descriptorsConfigPath' =>
+ __DIR__ . '/../resources/big_query_write_descriptor_config.php',
+ 'gcpApiConfigPath' =>
+ __DIR__ . '/../resources/big_query_write_grpc_config.json',
+ 'credentialsConfig' => [
+ 'defaultScopes' => self::$serviceScopes,
+ ],
+ 'transportConfig' => [
+ 'rest' => [
+ 'restClientConfigPath' =>
+ __DIR__ .
+ '/../resources/big_query_write_rest_client_config.php',
+ ],
+ ],
+ ];
+ }
+
+ private static function getTableNameTemplate()
+ {
+ if (self::$tableNameTemplate == null) {
+ self::$tableNameTemplate = new PathTemplate(
+ 'projects/{project}/datasets/{dataset}/tables/{table}'
+ );
+ }
+
+ return self::$tableNameTemplate;
+ }
+
+ private static function getWriteStreamNameTemplate()
+ {
+ if (self::$writeStreamNameTemplate == null) {
+ self::$writeStreamNameTemplate = new PathTemplate(
+ 'projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}'
+ );
+ }
+
+ return self::$writeStreamNameTemplate;
+ }
+
+ private static function getPathTemplateMap()
+ {
+ if (self::$pathTemplateMap == null) {
+ self::$pathTemplateMap = [
+ 'table' => self::getTableNameTemplate(),
+ 'writeStream' => self::getWriteStreamNameTemplate(),
+ ];
+ }
+
+ return self::$pathTemplateMap;
+ }
+
+ /**
+ * Formats a string containing the fully-qualified path to represent a table
+ * resource.
+ *
+ * @param string $project
+ * @param string $dataset
+ * @param string $table
+ *
+ * @return string The formatted table resource.
+ */
+ public static function tableName($project, $dataset, $table)
+ {
+ return self::getTableNameTemplate()->render([
+ 'project' => $project,
+ 'dataset' => $dataset,
+ 'table' => $table,
+ ]);
+ }
+
+ /**
+ * Formats a string containing the fully-qualified path to represent a write_stream
+ * resource.
+ *
+ * @param string $project
+ * @param string $dataset
+ * @param string $table
+ * @param string $stream
+ *
+ * @return string The formatted write_stream resource.
+ */
+ public static function writeStreamName($project, $dataset, $table, $stream)
+ {
+ return self::getWriteStreamNameTemplate()->render([
+ 'project' => $project,
+ 'dataset' => $dataset,
+ 'table' => $table,
+ 'stream' => $stream,
+ ]);
+ }
+
+ /**
+ * Parses a formatted name string and returns an associative array of the components in the name.
+ * The following name formats are supported:
+ * Template: Pattern
+ * - table: projects/{project}/datasets/{dataset}/tables/{table}
+ * - writeStream: projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}
+ *
+ * The optional $template argument can be supplied to specify a particular pattern,
+ * and must match one of the templates listed above. If no $template argument is
+ * provided, or if the $template argument does not match one of the templates
+ * listed, then parseName will check each of the supported templates, and return
+ * the first match.
+ *
+ * @param string $formattedName The formatted name string
+ * @param string $template Optional name of template to match
+ *
+ * @return array An associative array from name component IDs to component values.
+ *
+ * @throws ValidationException If $formattedName could not be matched.
+ */
+ public static function parseName($formattedName, $template = null)
+ {
+ $templateMap = self::getPathTemplateMap();
+ if ($template) {
+ if (!isset($templateMap[$template])) {
+ throw new ValidationException(
+ "Template name $template does not exist"
+ );
+ }
+
+ return $templateMap[$template]->match($formattedName);
+ }
+
+ foreach ($templateMap as $templateName => $pathTemplate) {
+ try {
+ return $pathTemplate->match($formattedName);
+ } catch (ValidationException $ex) {
+ // Swallow the exception to continue trying other path templates
+ }
+ }
+
+ throw new ValidationException(
+ "Input did not match any known format. Input: $formattedName"
+ );
+ }
+
+ /**
+ * Constructor.
+ *
+ * @param array $options {
+ * Optional. Options for configuring the service API wrapper.
+ *
+ * @type string $apiEndpoint
+ * The address of the API remote host. May optionally include the port, formatted
+ * as ":". Default 'bigquerystorage.googleapis.com:443'.
+ * @type string|array|FetchAuthTokenInterface|CredentialsWrapper $credentials
+ * The credentials to be used by the client to authorize API calls. This option
+ * accepts either a path to a credentials file, or a decoded credentials file as a
+ * PHP array.
+ * *Advanced usage*: In addition, this option can also accept a pre-constructed
+ * {@see \Google\Auth\FetchAuthTokenInterface} object or
+ * {@see \Google\ApiCore\CredentialsWrapper} object. Note that when one of these
+ * objects are provided, any settings in $credentialsConfig will be ignored.
+ * @type array $credentialsConfig
+ * Options used to configure credentials, including auth token caching, for the
+ * client. For a full list of supporting configuration options, see
+ * {@see \Google\ApiCore\CredentialsWrapper::build()} .
+ * @type bool $disableRetries
+ * Determines whether or not retries defined by the client configuration should be
+ * disabled. Defaults to `false`.
+ * @type string|array $clientConfig
+ * Client method configuration, including retry settings. This option can be either
+ * a path to a JSON file, or a PHP array containing the decoded JSON data. By
+ * default this settings points to the default client config file, which is
+ * provided in the resources folder.
+ * @type string|TransportInterface $transport
+ * The transport used for executing network requests. May be either the string
+ * `rest` or `grpc`. Defaults to `grpc` if gRPC support is detected on the system.
+ * *Advanced usage*: Additionally, it is possible to pass in an already
+ * instantiated {@see \Google\ApiCore\Transport\TransportInterface} object. Note
+ * that when this object is provided, any settings in $transportConfig, and any
+ * $apiEndpoint setting, will be ignored.
+ * @type array $transportConfig
+ * Configuration options that will be used to construct the transport. Options for
+ * each supported transport type should be passed in a key for that transport. For
+ * example:
+ * $transportConfig = [
+ * 'grpc' => [...],
+ * 'rest' => [...],
+ * ];
+ * See the {@see \Google\ApiCore\Transport\GrpcTransport::build()} and
+ * {@see \Google\ApiCore\Transport\RestTransport::build()} methods for the
+ * supported options.
+ * @type callable $clientCertSource
+ * A callable which returns the client cert as a string. This can be used to
+ * provide a certificate and private key to the transport layer for mTLS.
+ * }
+ *
+ * @throws ValidationException
+ */
+ public function __construct(array $options = [])
+ {
+ $clientOptions = $this->buildClientOptions($options);
+ $this->setClientOptions($clientOptions);
+ }
+
+ /**
+ * Appends data to the given stream.
+ *
+ * If `offset` is specified, the `offset` is checked against the end of
+ * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+ * attempt is made to append to an offset beyond the current end of the stream
+ * or `ALREADY_EXISTS` if user provides an `offset` that has already been
+ * written to. User can retry with adjusted offset within the same RPC
+ * connection. If `offset` is not specified, append happens at the end of the
+ * stream.
+ *
+ * The response contains an optional offset at which the append
+ * happened. No offset information will be returned for appends to a
+ * default stream.
+ *
+ * Responses are received in the same order in which requests are sent.
+ * There will be one response for each successfully inserted request. Responses
+ * may optionally embed error information if the originating AppendRequest was
+ * not successfully processed.
+ *
+ * The specifics of when successfully appended data is made visible to the
+ * table are governed by the type of stream:
+ *
+ * * For COMMITTED streams (which includes the default stream), data is
+ * visible immediately upon successful append.
+ *
+ * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+ * rpc which advances a cursor to a newer offset in the stream.
+ *
+ * * For PENDING streams, data is not made visible until the stream itself is
+ * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+ * committed via the `BatchCommitWriteStreams` rpc.
+ *
+ * Sample code:
+ * ```
+ * $bigQueryWriteClient = new BigQueryWriteClient();
+ * try {
+ * $writeStream = 'write_stream';
+ * $request = new AppendRowsRequest();
+ * $request->setWriteStream($writeStream);
+ * // Write all requests to the server, then read all responses until the
+ * // stream is complete
+ * $requests = [
+ * $request,
+ * ];
+ * $stream = $bigQueryWriteClient->appendRows();
+ * $stream->writeAll($requests);
+ * foreach ($stream->closeWriteAndReadAll() as $element) {
+ * // doSomethingWith($element);
+ * }
+ * // Alternatively:
+ * // Write requests individually, making read() calls if
+ * // required. Call closeWrite() once writes are complete, and read the
+ * // remaining responses from the server.
+ * $requests = [
+ * $request,
+ * ];
+ * $stream = $bigQueryWriteClient->appendRows();
+ * foreach ($requests as $request) {
+ * $stream->write($request);
+ * // if required, read a single response from the stream
+ * $element = $stream->read();
+ * // doSomethingWith($element)
+ * }
+ * $stream->closeWrite();
+ * $element = $stream->read();
+ * while (!is_null($element)) {
+ * // doSomethingWith($element)
+ * $element = $stream->read();
+ * }
+ * } finally {
+ * $bigQueryWriteClient->close();
+ * }
+ * ```
+ *
+ * @param array $optionalArgs {
+ * Optional.
+ *
+ * @type int $timeoutMillis
+ * Timeout to use for this call.
+ * }
+ *
+ * @return \Google\ApiCore\BidiStream
+ *
+ * @throws ApiException if the remote call fails
+ */
+ public function appendRows(array $optionalArgs = [])
+ {
+ return $this->startCall(
+ 'AppendRows',
+ AppendRowsResponse::class,
+ $optionalArgs,
+ null,
+ Call::BIDI_STREAMING_CALL
+ );
+ }
+
+ /**
+ * Atomically commits a group of `PENDING` streams that belong to the same
+ * `parent` table.
+ *
+ * Streams must be finalized before commit and cannot be committed multiple
+ * times. Once a stream is committed, data in the stream becomes available
+ * for read operations.
+ *
+ * Sample code:
+ * ```
+ * $bigQueryWriteClient = new BigQueryWriteClient();
+ * try {
+ * $formattedParent = $bigQueryWriteClient->tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+ * $writeStreams = [];
+ * $response = $bigQueryWriteClient->batchCommitWriteStreams($formattedParent, $writeStreams);
+ * } finally {
+ * $bigQueryWriteClient->close();
+ * }
+ * ```
+ *
+ * @param string $parent Required. Parent table that all the streams should belong to, in the form
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ * @param string[] $writeStreams Required. The group of streams that will be committed atomically.
+ * @param array $optionalArgs {
+ * Optional.
+ *
+ * @type RetrySettings|array $retrySettings
+ * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an
+ * associative array of retry settings parameters. See the documentation on
+ * {@see RetrySettings} for example usage.
+ * }
+ *
+ * @return \Google\Cloud\BigQuery\Storage\V1\BatchCommitWriteStreamsResponse
+ *
+ * @throws ApiException if the remote call fails
+ */
+ public function batchCommitWriteStreams(
+ $parent,
+ $writeStreams,
+ array $optionalArgs = []
+ ) {
+ $request = new BatchCommitWriteStreamsRequest();
+ $requestParamHeaders = [];
+ $request->setParent($parent);
+ $request->setWriteStreams($writeStreams);
+ $requestParamHeaders['parent'] = $parent;
+ $requestParams = new RequestParamsHeaderDescriptor(
+ $requestParamHeaders
+ );
+ $optionalArgs['headers'] = isset($optionalArgs['headers'])
+ ? array_merge($requestParams->getHeader(), $optionalArgs['headers'])
+ : $requestParams->getHeader();
+ return $this->startCall(
+ 'BatchCommitWriteStreams',
+ BatchCommitWriteStreamsResponse::class,
+ $optionalArgs,
+ $request
+ )->wait();
+ }
+
+ /**
+ * Creates a write stream to the given table.
+ * Additionally, every table has a special stream named '_default'
+ * to which data can be written. This stream doesn't need to be created using
+ * CreateWriteStream. It is a stream that can be used simultaneously by any
+ * number of clients. Data written to this stream is considered committed as
+ * soon as an acknowledgement is received.
+ *
+ * Sample code:
+ * ```
+ * $bigQueryWriteClient = new BigQueryWriteClient();
+ * try {
+ * $formattedParent = $bigQueryWriteClient->tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+ * $writeStream = new WriteStream();
+ * $response = $bigQueryWriteClient->createWriteStream($formattedParent, $writeStream);
+ * } finally {
+ * $bigQueryWriteClient->close();
+ * }
+ * ```
+ *
+ * @param string $parent Required. Reference to the table to which the stream belongs, in the format
+ * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ * @param WriteStream $writeStream Required. Stream to be created.
+ * @param array $optionalArgs {
+ * Optional.
+ *
+ * @type RetrySettings|array $retrySettings
+ * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an
+ * associative array of retry settings parameters. See the documentation on
+ * {@see RetrySettings} for example usage.
+ * }
+ *
+ * @return \Google\Cloud\BigQuery\Storage\V1\WriteStream
+ *
+ * @throws ApiException if the remote call fails
+ */
+ public function createWriteStream(
+ $parent,
+ $writeStream,
+ array $optionalArgs = []
+ ) {
+ $request = new CreateWriteStreamRequest();
+ $requestParamHeaders = [];
+ $request->setParent($parent);
+ $request->setWriteStream($writeStream);
+ $requestParamHeaders['parent'] = $parent;
+ $requestParams = new RequestParamsHeaderDescriptor(
+ $requestParamHeaders
+ );
+ $optionalArgs['headers'] = isset($optionalArgs['headers'])
+ ? array_merge($requestParams->getHeader(), $optionalArgs['headers'])
+ : $requestParams->getHeader();
+ return $this->startCall(
+ 'CreateWriteStream',
+ WriteStream::class,
+ $optionalArgs,
+ $request
+ )->wait();
+ }
+
+ /**
+ * Finalize a write stream so that no new data can be appended to the
+ * stream. Finalize is not supported on the '_default' stream.
+ *
+ * Sample code:
+ * ```
+ * $bigQueryWriteClient = new BigQueryWriteClient();
+ * try {
+ * $formattedName = $bigQueryWriteClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ * $response = $bigQueryWriteClient->finalizeWriteStream($formattedName);
+ * } finally {
+ * $bigQueryWriteClient->close();
+ * }
+ * ```
+ *
+ * @param string $name Required. Name of the stream to finalize, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ * @param array $optionalArgs {
+ * Optional.
+ *
+ * @type RetrySettings|array $retrySettings
+ * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an
+ * associative array of retry settings parameters. See the documentation on
+ * {@see RetrySettings} for example usage.
+ * }
+ *
+ * @return \Google\Cloud\BigQuery\Storage\V1\FinalizeWriteStreamResponse
+ *
+ * @throws ApiException if the remote call fails
+ */
+ public function finalizeWriteStream($name, array $optionalArgs = [])
+ {
+ $request = new FinalizeWriteStreamRequest();
+ $requestParamHeaders = [];
+ $request->setName($name);
+ $requestParamHeaders['name'] = $name;
+ $requestParams = new RequestParamsHeaderDescriptor(
+ $requestParamHeaders
+ );
+ $optionalArgs['headers'] = isset($optionalArgs['headers'])
+ ? array_merge($requestParams->getHeader(), $optionalArgs['headers'])
+ : $requestParams->getHeader();
+ return $this->startCall(
+ 'FinalizeWriteStream',
+ FinalizeWriteStreamResponse::class,
+ $optionalArgs,
+ $request
+ )->wait();
+ }
+
+ /**
+ * Flushes rows to a BUFFERED stream.
+ *
+ * If users are appending rows to a BUFFERED stream, a flush operation is
+ * required in order for the rows to become available for reading. A
+ * flush operation advances the flushed offset in a BUFFERED stream, from any
+ * previously flushed offset up to the offset specified in the request.
+ *
+ * Flush is not supported on the _default stream, since it is not BUFFERED.
+ *
+ * Sample code:
+ * ```
+ * $bigQueryWriteClient = new BigQueryWriteClient();
+ * try {
+ * $formattedWriteStream = $bigQueryWriteClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ * $response = $bigQueryWriteClient->flushRows($formattedWriteStream);
+ * } finally {
+ * $bigQueryWriteClient->close();
+ * }
+ * ```
+ *
+ * @param string $writeStream Required. The stream that is the target of the flush operation.
+ * @param array $optionalArgs {
+ * Optional.
+ *
+ * @type Int64Value $offset
+ * Ending offset of the flush operation. Rows up to and including this
+ * offset will be flushed.
+ * @type RetrySettings|array $retrySettings
+ * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an
+ * associative array of retry settings parameters. See the documentation on
+ * {@see RetrySettings} for example usage.
+ * }
+ *
+ * @return \Google\Cloud\BigQuery\Storage\V1\FlushRowsResponse
+ *
+ * @throws ApiException if the remote call fails
+ */
+ public function flushRows($writeStream, array $optionalArgs = [])
+ {
+ $request = new FlushRowsRequest();
+ $requestParamHeaders = [];
+ $request->setWriteStream($writeStream);
+ $requestParamHeaders['write_stream'] = $writeStream;
+ if (isset($optionalArgs['offset'])) {
+ $request->setOffset($optionalArgs['offset']);
+ }
+
+ $requestParams = new RequestParamsHeaderDescriptor(
+ $requestParamHeaders
+ );
+ $optionalArgs['headers'] = isset($optionalArgs['headers'])
+ ? array_merge($requestParams->getHeader(), $optionalArgs['headers'])
+ : $requestParams->getHeader();
+ return $this->startCall(
+ 'FlushRows',
+ FlushRowsResponse::class,
+ $optionalArgs,
+ $request
+ )->wait();
+ }
+
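Since a flush only takes effect up to the supplied offset, here is a sketch of passing an explicit Int64Value via the optional args; the offset value is illustrative:

```
use Google\Cloud\BigQuery\Storage\V1\BigQueryWriteClient;
use Google\Protobuf\Int64Value;

$bigQueryWriteClient = new BigQueryWriteClient();
try {
    $formattedWriteStream = $bigQueryWriteClient->writeStreamName(
        '[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]'
    );

    // Flush rows up to and including offset 41; later rows stay buffered.
    $response = $bigQueryWriteClient->flushRows($formattedWriteStream, [
        'offset' => (new Int64Value())->setValue(41),
    ]);
    printf('Flushed through offset: %d' . PHP_EOL, $response->getOffset());
} finally {
    $bigQueryWriteClient->close();
}
```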
+ /**
+ * Gets information about a write stream.
+ *
+ * Sample code:
+ * ```
+ * $bigQueryWriteClient = new BigQueryWriteClient();
+ * try {
+ * $formattedName = $bigQueryWriteClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ * $response = $bigQueryWriteClient->getWriteStream($formattedName);
+ * } finally {
+ * $bigQueryWriteClient->close();
+ * }
+ * ```
+ *
+ * @param string $name Required. Name of the stream to get, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ * @param array $optionalArgs {
+ * Optional.
+ *
+ * @type int $view
+ * Indicates whether to get a full or partial view of the WriteStream. If
+ * not set, a basic view is returned.
+ * For allowed values, use constants defined on {@see \Google\Cloud\BigQuery\Storage\V1\WriteStreamView}
+ * @type RetrySettings|array $retrySettings
+ * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an
+ * associative array of retry settings parameters. See the documentation on
+ * {@see RetrySettings} for example usage.
+ * }
+ *
+ * @return \Google\Cloud\BigQuery\Storage\V1\WriteStream
+ *
+ * @throws ApiException if the remote call fails
+ */
+ public function getWriteStream($name, array $optionalArgs = [])
+ {
+ $request = new GetWriteStreamRequest();
+ $requestParamHeaders = [];
+ $request->setName($name);
+ $requestParamHeaders['name'] = $name;
+ if (isset($optionalArgs['view'])) {
+ $request->setView($optionalArgs['view']);
+ }
+
+ $requestParams = new RequestParamsHeaderDescriptor(
+ $requestParamHeaders
+ );
+ $optionalArgs['headers'] = isset($optionalArgs['headers'])
+ ? array_merge($requestParams->getHeader(), $optionalArgs['headers'])
+ : $requestParams->getHeader();
+ return $this->startCall(
+ 'GetWriteStream',
+ WriteStream::class,
+ $optionalArgs,
+ $request
+ )->wait();
+ }
+}
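A sketch of requesting the full view through the optional view argument, using the WriteStreamView constants referenced in the docblock above (the FULL constant is assumed from that enum):

```
use Google\Cloud\BigQuery\Storage\V1\BigQueryWriteClient;
use Google\Cloud\BigQuery\Storage\V1\WriteStreamView;

$bigQueryWriteClient = new BigQueryWriteClient();
try {
    $formattedName = $bigQueryWriteClient->writeStreamName(
        '[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]'
    );

    // FULL is assumed to return the complete stream resource rather than
    // the basic view.
    $response = $bigQueryWriteClient->getWriteStream($formattedName, [
        'view' => WriteStreamView::FULL,
    ]);
} finally {
    $bigQueryWriteClient->close();
}
```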
diff --git a/BigQueryStorage/src/V1/GetWriteStreamRequest.php b/BigQueryStorage/src/V1/GetWriteStreamRequest.php
new file mode 100644
index 000000000000..e2939f22a6a6
--- /dev/null
+++ b/BigQueryStorage/src/V1/GetWriteStreamRequest.php
@@ -0,0 +1,109 @@
+google.cloud.bigquery.storage.v1.GetWriteStreamRequest
+ */
+class GetWriteStreamRequest extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Required. Name of the stream to get, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ */
+ private $name = '';
+ /**
+ * Indicates whether to get a full or partial view of the WriteStream. If
+ * not set, a basic view is returned.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStreamView view = 3;
+ */
+ private $view = 0;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $name
+ * Required. Name of the stream to get, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ * @type int $view
+ * Indicates whether to get a full or partial view of the WriteStream. If
+ * not set, a basic view is returned.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Required. Name of the stream to get, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @return string
+ */
+ public function getName()
+ {
+ return $this->name;
+ }
+
+ /**
+ * Required. Name of the stream to get, in the form of
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {
+ * @param string $var
+ * @return $this
+ */
+ public function setName($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->name = $var;
+
+ return $this;
+ }
+
+ /**
+ * Indicates whether to get a full or partial view of the WriteStream. If
+ * not set, a basic view is returned.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStreamView view = 3;
+ * @return int
+ */
+ public function getView()
+ {
+ return $this->view;
+ }
+
+ /**
+ * Indicates whether to get a full or partial view of the WriteStream. If
+ * not set, a basic view is returned.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStreamView view = 3;
+ * @param int $var
+ * @return $this
+ */
+ public function setView($var)
+ {
+ GPBUtil::checkEnum($var, \Google\Cloud\BigQuery\Storage\V1\WriteStreamView::class);
+ $this->view = $var;
+
+ return $this;
+ }
+
+}
+
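As the constructor docblock above shows, these generated messages accept an associative $data array keyed by field name; a sketch with a hypothetical stream name:

```
use Google\Cloud\BigQuery\Storage\V1\GetWriteStreamRequest;
use Google\Cloud\BigQuery\Storage\V1\WriteStreamView;

// Equivalent to calling setName() and setView() on an empty message.
$request = new GetWriteStreamRequest([
    'name' => 'projects/my-project/datasets/my_dataset/tables/my_table/streams/my_stream',
    'view' => WriteStreamView::FULL,
]);
```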
diff --git a/BigQueryStorage/src/V1/ProtoRows.php b/BigQueryStorage/src/V1/ProtoRows.php
new file mode 100644
index 000000000000..da06f296e683
--- /dev/null
+++ b/BigQueryStorage/src/V1/ProtoRows.php
@@ -0,0 +1,73 @@
+google.cloud.bigquery.storage.v1.ProtoRows
+ */
+class ProtoRows extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * A sequence of rows serialized as a Protocol Buffer.
+ * See https://developers.google.com/protocol-buffers/docs/overview for more
+ * information on deserializing this field.
+ *
+ * Generated from protobuf field repeated bytes serialized_rows = 1;
+ */
+ private $serialized_rows;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type array|\Google\Protobuf\Internal\RepeatedField $serialized_rows
+ * A sequence of rows serialized as a Protocol Buffer.
+ * See https://developers.google.com/protocol-buffers/docs/overview for more
+ * information on deserializing this field.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Protobuf::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * A sequence of rows serialized as a Protocol Buffer.
+ * See https://developers.google.com/protocol-buffers/docs/overview for more
+ * information on deserializing this field.
+ *
+ * Generated from protobuf field repeated bytes serialized_rows = 1;
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getSerializedRows()
+ {
+ return $this->serialized_rows;
+ }
+
+ /**
+ * A sequence of rows serialized as a Protocol Buffer.
+ * See https://developers.google.com/protocol-buffers/docs/overview for more
+ * information on deserializing this field.
+ *
+ * Generated from protobuf field repeated bytes serialized_rows = 1;
+ * @param array|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setSerializedRows($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::BYTES);
+ $this->serialized_rows = $arr;
+
+ return $this;
+ }
+
+}
+
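A sketch of assembling ProtoRows from already-serialized messages; $rows stands in for any iterable of generated protobuf messages whose type matches the stream's ProtoSchema (a hypothetical placeholder):

```
use Google\Cloud\BigQuery\Storage\V1\ProtoRows;

$serialized = [];
foreach ($rows as $row) {
    // Each entry is the wire-format encoding of one row message.
    $serialized[] = $row->serializeToString();
}

$protoRows = (new ProtoRows())->setSerializedRows($serialized);
```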
diff --git a/BigQueryStorage/src/V1/ProtoSchema.php b/BigQueryStorage/src/V1/ProtoSchema.php
new file mode 100644
index 000000000000..5f6cbd5f9700
--- /dev/null
+++ b/BigQueryStorage/src/V1/ProtoSchema.php
@@ -0,0 +1,105 @@
+google.cloud.bigquery.storage.v1.ProtoSchema
+ */
+class ProtoSchema extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Descriptor for input message. The provided descriptor must be
+ * self-contained, such that data rows sent can be fully decoded using only
+ * the single descriptor. For data rows that are compositions of multiple
+ * independent messages, this means the descriptor may need to be transformed
+ * to only use nested types:
+ * https://developers.google.com/protocol-buffers/docs/proto#nested
+ * For additional information on how proto types and values map onto BigQuery
+ * types, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+ *
+ * Generated from protobuf field .google.protobuf.DescriptorProto proto_descriptor = 1;
+ */
+ private $proto_descriptor = null;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type \Google\Protobuf\DescriptorProto $proto_descriptor
+ * Descriptor for input message. The provided descriptor must be
+ * self-contained, such that data rows sent can be fully decoded using only
+ * the single descriptor. For data rows that are compositions of multiple
+ * independent messages, this means the descriptor may need to be transformed
+ * to only use nested types:
+ * https://developers.google.com/protocol-buffers/docs/proto#nested
+ * For additional information on how proto types and values map onto BigQuery
+ * types, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Protobuf::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Descriptor for input message. The provided descriptor must be
+ * self-contained, such that data rows sent can be fully decoded using only
+ * the single descriptor. For data rows that are compositions of multiple
+ * independent messages, this means the descriptor may need to be transformed
+ * to only use nested types:
+ * https://developers.google.com/protocol-buffers/docs/proto#nested
+ * For additional information on how proto types and values map onto BigQuery
+ * types, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+ *
+ * Generated from protobuf field .google.protobuf.DescriptorProto proto_descriptor = 1;
+ * @return \Google\Protobuf\DescriptorProto|null
+ */
+ public function getProtoDescriptor()
+ {
+ return $this->proto_descriptor;
+ }
+
+ public function hasProtoDescriptor()
+ {
+ return isset($this->proto_descriptor);
+ }
+
+ public function clearProtoDescriptor()
+ {
+ unset($this->proto_descriptor);
+ }
+
+ /**
+ * Descriptor for input message. The provided descriptor must be
+ * self-contained, such that data rows sent can be fully decoded using only
+ * the single descriptor. For data rows that are compositions of multiple
+ * independent messages, this means the descriptor may need to be transformed
+ * to only use nested types:
+ * https://developers.google.com/protocol-buffers/docs/proto#nested
+ * For additional information on how proto types and values map onto BigQuery
+ * types, see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+ *
+ * Generated from protobuf field .google.protobuf.DescriptorProto proto_descriptor = 1;
+ * @param \Google\Protobuf\DescriptorProto $var
+ * @return $this
+ */
+ public function setProtoDescriptor($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Protobuf\DescriptorProto::class);
+ $this->proto_descriptor = $var;
+
+ return $this;
+ }
+
+}
+
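A sketch of attaching a descriptor to a ProtoSchema; how $descriptorProto is produced is out of scope here, the key constraint from the docblock being that it must be self-contained:

```
use Google\Cloud\BigQuery\Storage\V1\ProtoSchema;

// $descriptorProto is assumed to be a \Google\Protobuf\DescriptorProto that
// fully describes the row message, with any dependencies inlined as nested
// types so rows can be decoded from this single descriptor.
$schema = (new ProtoSchema())->setProtoDescriptor($descriptorProto);
```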
diff --git a/BigQueryStorage/src/V1/ReadRowsResponse.php b/BigQueryStorage/src/V1/ReadRowsResponse.php
index 8b40c5f2b854..baf2170113ab 100644
--- a/BigQueryStorage/src/V1/ReadRowsResponse.php
+++ b/BigQueryStorage/src/V1/ReadRowsResponse.php
@@ -162,7 +162,7 @@ public function setRowCount($var)
*/
public function getStats()
{
- return isset($this->stats) ? $this->stats : null;
+ return $this->stats;
}
public function hasStats()
@@ -199,7 +199,7 @@ public function setStats($var)
*/
public function getThrottleState()
{
- return isset($this->throttle_state) ? $this->throttle_state : null;
+ return $this->throttle_state;
}
public function hasThrottleState()
diff --git a/BigQueryStorage/src/V1/ReadSession.php b/BigQueryStorage/src/V1/ReadSession.php
index cffe46bc2208..bcfd5d9d1cf6 100644
--- a/BigQueryStorage/src/V1/ReadSession.php
+++ b/BigQueryStorage/src/V1/ReadSession.php
@@ -23,15 +23,17 @@ class ReadSession extends \Google\Protobuf\Internal\Message
*/
private $name = '';
/**
- * Output only. Time at which the session becomes invalid. After this time, subsequent
- * requests to read this Session will return errors. The expire_time is
- * automatically assigned and currently cannot be specified or updated.
+ * Output only. Time at which the session becomes invalid. After this time,
+ * subsequent requests to read this Session will return errors. The
+ * expire_time is automatically assigned and currently cannot be specified or
+ * updated.
*
* Generated from protobuf field .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
*/
private $expire_time = null;
/**
- * Immutable. Data format of the output data.
+ * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+ * supported.
*
* Generated from protobuf field .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE];
*/
@@ -44,7 +46,8 @@ class ReadSession extends \Google\Protobuf\Internal\Message
*/
private $table = '';
/**
- * Optional. Any modifiers which are applied when reading from the specified table.
+ * Optional. Any modifiers which are applied when reading from the specified
+ * table.
*
* Generated from protobuf field .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL];
*/
@@ -65,6 +68,32 @@ class ReadSession extends \Google\Protobuf\Internal\Message
* Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY];
*/
private $streams;
+ /**
+ * Output only. An estimate of the number of bytes this session will scan when
+ * all streams are completely consumed. This estimate is based on
+ * metadata from the table which might be incomplete or stale.
+ *
+ * Generated from protobuf field int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ */
+ private $estimated_total_bytes_scanned = 0;
+ /**
+ * Output only. An estimate of the number of rows present in this session's
+ * streams. This estimate is based on metadata from the table which might be
+ * incomplete or stale.
+ *
+ * Generated from protobuf field int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ */
+ private $estimated_row_count = 0;
+ /**
+ * Optional. ID set by the client to annotate the session identity. It does
+ * not need to be strictly unique; rather, the same ID should be used to group
+ * logically connected sessions (e.g. using the same ID for all sessions
+ * needed to complete a Spark SQL query is reasonable).
+ * Maximum length is 256 bytes.
+ *
+ * Generated from protobuf field string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $trace_id = '';
protected $schema;
/**
@@ -77,11 +106,13 @@ class ReadSession extends \Google\Protobuf\Internal\Message
* Output only. Unique identifier for the session, in the form
* `projects/{project_id}/locations/{location}/sessions/{session_id}`.
* @type \Google\Protobuf\Timestamp $expire_time
- * Output only. Time at which the session becomes invalid. After this time, subsequent
- * requests to read this Session will return errors. The expire_time is
- * automatically assigned and currently cannot be specified or updated.
+ * Output only. Time at which the session becomes invalid. After this time,
+ * subsequent requests to read this Session will return errors. The
+ * expire_time is automatically assigned and currently cannot be specified or
+ * updated.
* @type int $data_format
- * Immutable. Data format of the output data.
+ * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+ * supported.
* @type \Google\Cloud\BigQuery\Storage\V1\AvroSchema $avro_schema
* Output only. Avro schema.
* @type \Google\Cloud\BigQuery\Storage\V1\ArrowSchema $arrow_schema
@@ -90,15 +121,30 @@ class ReadSession extends \Google\Protobuf\Internal\Message
* Immutable. Table that this ReadSession is reading from, in the form
* `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
* @type \Google\Cloud\BigQuery\Storage\V1\ReadSession\TableModifiers $table_modifiers
- * Optional. Any modifiers which are applied when reading from the specified table.
+ * Optional. Any modifiers which are applied when reading from the specified
+ * table.
* @type \Google\Cloud\BigQuery\Storage\V1\ReadSession\TableReadOptions $read_options
* Optional. Read options for this session (e.g. column selection, filters).
- * @type \Google\Cloud\BigQuery\Storage\V1\ReadStream[]|\Google\Protobuf\Internal\RepeatedField $streams
+ * @type array<\Google\Cloud\BigQuery\Storage\V1\ReadStream>|\Google\Protobuf\Internal\RepeatedField $streams
* Output only. A list of streams created with the session.
* At least one stream is created with the session. In the future, larger
* request_stream_count values *may* result in this list being unpopulated,
* in that case, the user will need to use a List method to get the streams
* instead, which is not yet available.
+ * @type int|string $estimated_total_bytes_scanned
+ * Output only. An estimate of the number of bytes this session will scan when
+ * all streams are completely consumed. This estimate is based on
+ * metadata from the table which might be incomplete or stale.
+ * @type int|string $estimated_row_count
+ * Output only. An estimate of the number of rows present in this session's
+ * streams. This estimate is based on metadata from the table which might be
+ * incomplete or stale.
+ * @type string $trace_id
+ * Optional. ID set by the client to annotate the session identity. It does
+ * not need to be strictly unique; rather, the same ID should be used to group
+ * logically connected sessions (e.g. using the same ID for all sessions
+ * needed to complete a Spark SQL query is reasonable).
+ * Maximum length is 256 bytes.
* }
*/
public function __construct($data = NULL) {
@@ -135,16 +181,17 @@ public function setName($var)
}
/**
- * Output only. Time at which the session becomes invalid. After this time, subsequent
- * requests to read this Session will return errors. The expire_time is
- * automatically assigned and currently cannot be specified or updated.
+ * Output only. Time at which the session becomes invalid. After this time,
+ * subsequent requests to read this Session will return errors. The
+ * expire_time is automatically assigned and currently cannot be specified or
+ * updated.
*
* Generated from protobuf field .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
* @return \Google\Protobuf\Timestamp|null
*/
public function getExpireTime()
{
- return isset($this->expire_time) ? $this->expire_time : null;
+ return $this->expire_time;
}
public function hasExpireTime()
@@ -158,9 +205,10 @@ public function clearExpireTime()
}
/**
- * Output only. Time at which the session becomes invalid. After this time, subsequent
- * requests to read this Session will return errors. The expire_time is
- * automatically assigned and currently cannot be specified or updated.
+ * Output only. Time at which the session becomes invalid. After this time,
+ * subsequent requests to read this Session will return errors. The
+ * expire_time is automatically assigned and currently cannot be specified or
+ * updated.
*
* Generated from protobuf field .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
* @param \Google\Protobuf\Timestamp $var
@@ -175,7 +223,8 @@ public function setExpireTime($var)
}
/**
- * Immutable. Data format of the output data.
+ * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+ * supported.
*
* Generated from protobuf field .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE];
* @return int
@@ -186,7 +235,8 @@ public function getDataFormat()
}
/**
- * Immutable. Data format of the output data.
+ * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+ * supported.
*
* Generated from protobuf field .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE];
* @param int $var
@@ -291,14 +341,15 @@ public function setTable($var)
}
/**
- * Optional. Any modifiers which are applied when reading from the specified table.
+ * Optional. Any modifiers which are applied when reading from the specified
+ * table.
*
* Generated from protobuf field .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL];
* @return \Google\Cloud\BigQuery\Storage\V1\ReadSession\TableModifiers|null
*/
public function getTableModifiers()
{
- return isset($this->table_modifiers) ? $this->table_modifiers : null;
+ return $this->table_modifiers;
}
public function hasTableModifiers()
@@ -312,7 +363,8 @@ public function clearTableModifiers()
}
/**
- * Optional. Any modifiers which are applied when reading from the specified table.
+ * Optional. Any modifiers which are applied when reading from the specified
+ * table.
*
* Generated from protobuf field .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL];
* @param \Google\Cloud\BigQuery\Storage\V1\ReadSession\TableModifiers $var
@@ -334,7 +386,7 @@ public function setTableModifiers($var)
*/
public function getReadOptions()
{
- return isset($this->read_options) ? $this->read_options : null;
+ return $this->read_options;
}
public function hasReadOptions()
@@ -385,7 +437,7 @@ public function getStreams()
* instead, which is not yet available.
*
* Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY];
- * @param \Google\Cloud\BigQuery\Storage\V1\ReadStream[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @param array<\Google\Cloud\BigQuery\Storage\V1\ReadStream>|\Google\Protobuf\Internal\RepeatedField $var
* @return $this
*/
public function setStreams($var)
@@ -396,6 +448,100 @@ public function setStreams($var)
return $this;
}
+ /**
+ * Output only. An estimate of the number of bytes this session will scan when
+ * all streams are completely consumed. This estimate is based on
+ * metadata from the table which might be incomplete or stale.
+ *
+ * Generated from protobuf field int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @return int|string
+ */
+ public function getEstimatedTotalBytesScanned()
+ {
+ return $this->estimated_total_bytes_scanned;
+ }
+
+ /**
+ * Output only. An estimate of the number of bytes this session will scan when
+ * all streams are completely consumed. This estimate is based on
+ * metadata from the table which might be incomplete or stale.
+ *
+ * Generated from protobuf field int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @param int|string $var
+ * @return $this
+ */
+ public function setEstimatedTotalBytesScanned($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->estimated_total_bytes_scanned = $var;
+
+ return $this;
+ }
+
+ /**
+ * Output only. An estimate of the number of rows present in this session's
+ * streams. This estimate is based on metadata from the table which might be
+ * incomplete or stale.
+ *
+ * Generated from protobuf field int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @return int|string
+ */
+ public function getEstimatedRowCount()
+ {
+ return $this->estimated_row_count;
+ }
+
+ /**
+ * Output only. An estimate of the number of rows present in this session's
+ * streams. This estimate is based on metadata from the table which might be
+ * incomplete or stale.
+ *
+ * Generated from protobuf field int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @param int|string $var
+ * @return $this
+ */
+ public function setEstimatedRowCount($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->estimated_row_count = $var;
+
+ return $this;
+ }
+
+ /**
+ * Optional. ID set by the client to annotate the session identity. It does
+ * not need to be strictly unique; rather, the same ID should be used to group
+ * logically connected sessions (e.g. using the same ID for all sessions
+ * needed to complete a Spark SQL query is reasonable).
+ * Maximum length is 256 bytes.
+ *
+ * Generated from protobuf field string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL];
+ * @return string
+ */
+ public function getTraceId()
+ {
+ return $this->trace_id;
+ }
+
+ /**
+ * Optional. ID set by the client to annotate the session identity. It does
+ * not need to be strictly unique; rather, the same ID should be used to group
+ * logically connected sessions (e.g. using the same ID for all sessions
+ * needed to complete a Spark SQL query is reasonable).
+ * Maximum length is 256 bytes.
+ *
+ * Generated from protobuf field string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL];
+ * @param string $var
+ * @return $this
+ */
+ public function setTraceId($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->trace_id = $var;
+
+ return $this;
+ }
+
/**
* @return string
*/
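The new estimated_total_bytes_scanned, estimated_row_count, and trace_id fields surface as the usual generated accessors; a sketch of reading them, where $session is assumed to be a ReadSession returned by BigQueryReadClient::createReadSession() (note the int64 getters may return strings on 32-bit platforms):

```
printf(
    'Session %s: ~%s bytes, ~%s rows (trace: %s)' . PHP_EOL,
    $session->getName(),
    $session->getEstimatedTotalBytesScanned(),
    $session->getEstimatedRowCount(),
    $session->getTraceId()
);
```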
diff --git a/BigQueryStorage/src/V1/ReadSession/TableModifiers.php b/BigQueryStorage/src/V1/ReadSession/TableModifiers.php
index e5f51cdcf6f5..bacf4986f3d9 100644
--- a/BigQueryStorage/src/V1/ReadSession/TableModifiers.php
+++ b/BigQueryStorage/src/V1/ReadSession/TableModifiers.php
@@ -45,7 +45,7 @@ public function __construct($data = NULL) {
*/
public function getSnapshotTime()
{
- return isset($this->snapshot_time) ? $this->snapshot_time : null;
+ return $this->snapshot_time;
}
public function hasSnapshotTime()
diff --git a/BigQueryStorage/src/V1/ReadSession/TableReadOptions.php b/BigQueryStorage/src/V1/ReadSession/TableReadOptions.php
index b54465ae18d0..f20b47955679 100644
--- a/BigQueryStorage/src/V1/ReadSession/TableReadOptions.php
+++ b/BigQueryStorage/src/V1/ReadSession/TableReadOptions.php
@@ -16,10 +16,45 @@
class TableReadOptions extends \Google\Protobuf\Internal\Message
{
/**
- * Names of the fields in the table that should be read. If empty, all
- * fields will be read. If the specified field is a nested field, all
- * the sub-fields in the field will be selected. The output field order is
- * unrelated to the order of fields in selected_fields.
+ * Optional. The names of the fields in the table to be returned. If no
+ * field names are specified, then all fields in the table are returned.
+ * Nested fields -- the child elements of a STRUCT field -- can be selected
+ * individually using their fully-qualified names, and will be returned as
+ * record fields containing only the selected nested fields. If a STRUCT
+ * field is specified in the selected fields list, all of the child elements
+ * will be returned.
+ * As an example, consider a table with the following schema:
+ * {
+ * "name": "struct_field",
+ * "type": "RECORD",
+ * "mode": "NULLABLE",
+ * "fields": [
+ * {
+ * "name": "string_field1",
+ * "type": "STRING",
+ * . "mode": "NULLABLE"
+ * },
+ * {
+ * "name": "string_field2",
+ * "type": "STRING",
+ * "mode": "NULLABLE"
+ * }
+ * ]
+ * }
+ * Specifying "struct_field" in the selected fields list will result in a
+ * read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * string_field2
+ * }
+ * Specifying "struct_field.string_field1" in the selected fields list will
+ * result in a read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * }
+ * The order of the fields in the read session schema is derived from the
+ * table schema and does not correspond to the order in which the fields are
+ * specified in this list.
*
* Generated from protobuf field repeated string selected_fields = 1;
*/
@@ -45,11 +80,46 @@ class TableReadOptions extends \Google\Protobuf\Internal\Message
* @param array $data {
* Optional. Data for populating the Message object.
*
- * @type string[]|\Google\Protobuf\Internal\RepeatedField $selected_fields
- * Names of the fields in the table that should be read. If empty, all
- * fields will be read. If the specified field is a nested field, all
- * the sub-fields in the field will be selected. The output field order is
- * unrelated to the order of fields in selected_fields.
+ * @type array|\Google\Protobuf\Internal\RepeatedField $selected_fields
+ * Optional. The names of the fields in the table to be returned. If no
+ * field names are specified, then all fields in the table are returned.
+ * Nested fields -- the child elements of a STRUCT field -- can be selected
+ * individually using their fully-qualified names, and will be returned as
+ * record fields containing only the selected nested fields. If a STRUCT
+ * field is specified in the selected fields list, all of the child elements
+ * will be returned.
+ * As an example, consider a table with the following schema:
+ * {
+ * "name": "struct_field",
+ * "type": "RECORD",
+ * "mode": "NULLABLE",
+ * "fields": [
+ * {
+ * "name": "string_field1",
+ * "type": "STRING",
+ * . "mode": "NULLABLE"
+ * },
+ * {
+ * "name": "string_field2",
+ * "type": "STRING",
+ * "mode": "NULLABLE"
+ * }
+ * ]
+ * }
+ * Specifying "struct_field" in the selected fields list will result in a
+ * read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * string_field2
+ * }
+ * Specifying "struct_field.string_field1" in the selected fields list will
+ * result in a read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * }
+ * The order of the fields in the read session schema is derived from the
+ * table schema and does not correspond to the order in which the fields are
+ * specified in this list.
* @type string $row_restriction
* SQL text filtering statement, similar to a WHERE clause in a query.
* Aggregates are not supported.
@@ -61,6 +131,8 @@ class TableReadOptions extends \Google\Protobuf\Internal\Message
+ * Restricted to a maximum length of 1 MB.
* @type \Google\Cloud\BigQuery\Storage\V1\ArrowSerializationOptions $arrow_serialization_options
* Optional. Options specific to the Apache Arrow output format.
+ * @type \Google\Cloud\BigQuery\Storage\V1\AvroSerializationOptions $avro_serialization_options
+ * Optional. Options specific to the Apache Avro output format.
* }
*/
public function __construct($data = NULL) {
@@ -69,10 +141,45 @@ public function __construct($data = NULL) {
}
/**
- * Names of the fields in the table that should be read. If empty, all
- * fields will be read. If the specified field is a nested field, all
- * the sub-fields in the field will be selected. The output field order is
- * unrelated to the order of fields in selected_fields.
+ * Optional. The names of the fields in the table to be returned. If no
+ * field names are specified, then all fields in the table are returned.
+ * Nested fields -- the child elements of a STRUCT field -- can be selected
+ * individually using their fully-qualified names, and will be returned as
+ * record fields containing only the selected nested fields. If a STRUCT
+ * field is specified in the selected fields list, all of the child elements
+ * will be returned.
+ * As an example, consider a table with the following schema:
+ * {
+ * "name": "struct_field",
+ * "type": "RECORD",
+ * "mode": "NULLABLE",
+ * "fields": [
+ * {
+ * "name": "string_field1",
+ * "type": "STRING",
+ * . "mode": "NULLABLE"
+ * },
+ * {
+ * "name": "string_field2",
+ * "type": "STRING",
+ * "mode": "NULLABLE"
+ * }
+ * ]
+ * }
+ * Specifying "struct_field" in the selected fields list will result in a
+ * read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * string_field2
+ * }
+ * Specifying "struct_field.string_field1" in the selected fields list will
+ * result in a read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * }
+ * The order of the fields in the read session schema is derived from the
+ * table schema and does not correspond to the order in which the fields are
+ * specified in this list.
*
* Generated from protobuf field repeated string selected_fields = 1;
* @return \Google\Protobuf\Internal\RepeatedField
@@ -83,13 +190,48 @@ public function getSelectedFields()
}
/**
- * Names of the fields in the table that should be read. If empty, all
- * fields will be read. If the specified field is a nested field, all
- * the sub-fields in the field will be selected. The output field order is
- * unrelated to the order of fields in selected_fields.
+ * Optional. The names of the fields in the table to be returned. If no
+ * field names are specified, then all fields in the table are returned.
+ * Nested fields -- the child elements of a STRUCT field -- can be selected
+ * individually using their fully-qualified names, and will be returned as
+ * record fields containing only the selected nested fields. If a STRUCT
+ * field is specified in the selected fields list, all of the child elements
+ * will be returned.
+ * As an example, consider a table with the following schema:
+ * {
+ * "name": "struct_field",
+ * "type": "RECORD",
+ * "mode": "NULLABLE",
+ * "fields": [
+ * {
+ * "name": "string_field1",
+ * "type": "STRING",
+ * . "mode": "NULLABLE"
+ * },
+ * {
+ * "name": "string_field2",
+ * "type": "STRING",
+ * "mode": "NULLABLE"
+ * }
+ * ]
+ * }
+ * Specifying "struct_field" in the selected fields list will result in a
+ * read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * string_field2
+ * }
+ * Specifying "struct_field.string_field1" in the selected fields list will
+ * result in a read session schema with the following logical structure:
+ * struct_field {
+ * string_field1
+ * }
+ * The order of the fields in the read session schema is derived from the
+ * table schema and does not correspond to the order in which the fields are
+ * specified in this list.
*
* Generated from protobuf field repeated string selected_fields = 1;
- * @param string[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @param array|\Google\Protobuf\Internal\RepeatedField $var
* @return $this
*/
public function setSelectedFields($var)
@@ -171,6 +313,37 @@ public function setArrowSerializationOptions($var)
return $this;
}
+ /**
+ * Optional. Options specific to the Apache Avro output format.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL];
+ * @return \Google\Cloud\BigQuery\Storage\V1\AvroSerializationOptions|null
+ */
+ public function getAvroSerializationOptions()
+ {
+ return $this->readOneof(4);
+ }
+
+ public function hasAvroSerializationOptions()
+ {
+ return $this->hasOneof(4);
+ }
+
+ /**
+ * Optional. Options specific to the Apache Avro output format.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL];
+ * @param \Google\Cloud\BigQuery\Storage\V1\AvroSerializationOptions $var
+ * @return $this
+ */
+ public function setAvroSerializationOptions($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\AvroSerializationOptions::class);
+ $this->writeOneof(4, $var);
+
+ return $this;
+ }
+
/**
* @return string
*/
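A sketch tying the expanded selected_fields documentation together with the new Avro serialization oneof; the field names and filter are illustrative, and the AvroSerializationOptions setter name is inferred from the enable_display_name_attribute field in this diff's metadata:

```
use Google\Cloud\BigQuery\Storage\V1\AvroSerializationOptions;
use Google\Cloud\BigQuery\Storage\V1\ReadSession\TableReadOptions;

$readOptions = new TableReadOptions([
    // Select one nested field individually and one whole STRUCT field.
    'selected_fields' => ['struct_field.string_field1', 'other_struct_field'],
    // WHERE-clause style filter; aggregates are not supported.
    'row_restriction' => 'string_field1 IS NOT NULL',
    // Setting this member of the oneof selects Avro-specific behavior.
    'avro_serialization_options' => (new AvroSerializationOptions())
        ->setEnableDisplayNameAttribute(true),
]);
```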
diff --git a/BigQueryStorage/src/V1/RowError.php b/BigQueryStorage/src/V1/RowError.php
new file mode 100644
index 000000000000..98c603c656cb
--- /dev/null
+++ b/BigQueryStorage/src/V1/RowError.php
@@ -0,0 +1,135 @@
+google.cloud.bigquery.storage.v1.RowError
+ */
+class RowError extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Index of the malformed row in the request.
+ *
+ * Generated from protobuf field int64 index = 1;
+ */
+ private $index = 0;
+ /**
+ * Structured error reason for a row error.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2;
+ */
+ private $code = 0;
+ /**
+ * Description of the issue encountered when processing the row.
+ *
+ * Generated from protobuf field string message = 3;
+ */
+ private $message = '';
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type int|string $index
+ * Index of the malformed row in the request.
+ * @type int $code
+ * Structured error reason for a row error.
+ * @type string $message
+ * Description of the issue encountered when processing the row.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Index of the malformed row in the request.
+ *
+ * Generated from protobuf field int64 index = 1;
+ * @return int|string
+ */
+ public function getIndex()
+ {
+ return $this->index;
+ }
+
+ /**
+ * Index of the malformed row in the request.
+ *
+ * Generated from protobuf field int64 index = 1;
+ * @param int|string $var
+ * @return $this
+ */
+ public function setIndex($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->index = $var;
+
+ return $this;
+ }
+
+ /**
+ * Structured error reason for a row error.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2;
+ * @return int
+ */
+ public function getCode()
+ {
+ return $this->code;
+ }
+
+ /**
+ * Structured error reason for a row error.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2;
+ * @param int $var
+ * @return $this
+ */
+ public function setCode($var)
+ {
+ GPBUtil::checkEnum($var, \Google\Cloud\BigQuery\Storage\V1\RowError\RowErrorCode::class);
+ $this->code = $var;
+
+ return $this;
+ }
+
+ /**
+ * Description of the issue encountered when processing the row.
+ *
+ * Generated from protobuf field string message = 3;
+ * @return string
+ */
+ public function getMessage()
+ {
+ return $this->message;
+ }
+
+ /**
+ * Description of the issue encountered when processing the row.
+ *
+ * Generated from protobuf field string message = 3;
+ * @param string $var
+ * @return $this
+ */
+ public function setMessage($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->message = $var;
+
+ return $this;
+ }
+
+}
+
diff --git a/BigQueryStorage/src/V1/RowError/RowErrorCode.php b/BigQueryStorage/src/V1/RowError/RowErrorCode.php
new file mode 100644
index 000000000000..753c651381c7
--- /dev/null
+++ b/BigQueryStorage/src/V1/RowError/RowErrorCode.php
@@ -0,0 +1,57 @@
+google.cloud.bigquery.storage.v1.RowError.RowErrorCode
+ */
+class RowErrorCode
+{
+ /**
+ * Default error.
+ *
+ * Generated from protobuf enum ROW_ERROR_CODE_UNSPECIFIED = 0;
+ */
+ const ROW_ERROR_CODE_UNSPECIFIED = 0;
+ /**
+ * One or more fields in the row have errors.
+ *
+ * Generated from protobuf enum FIELDS_ERROR = 1;
+ */
+ const FIELDS_ERROR = 1;
+
+ private static $valueToName = [
+ self::ROW_ERROR_CODE_UNSPECIFIED => 'ROW_ERROR_CODE_UNSPECIFIED',
+ self::FIELDS_ERROR => 'FIELDS_ERROR',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(RowErrorCode::class, \Google\Cloud\BigQuery\Storage\V1\RowError_RowErrorCode::class);
+
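Rounding out the new RowError surface, a sketch of logging row-level failures; $response is assumed to be an AppendRowsResponse, whose row_errors field is expected to carry these messages:

```
use Google\Cloud\BigQuery\Storage\V1\RowError\RowErrorCode;

foreach ($response->getRowErrors() as $rowError) {
    printf(
        'Row %d failed [%s]: %s' . PHP_EOL,
        $rowError->getIndex(),
        RowErrorCode::name($rowError->getCode()),
        $rowError->getMessage()
    );
}
```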
diff --git a/BigQueryStorage/src/V1/RowError_RowErrorCode.php b/BigQueryStorage/src/V1/RowError_RowErrorCode.php
new file mode 100644
index 000000000000..c9c9fa422f09
--- /dev/null
+++ b/BigQueryStorage/src/V1/RowError_RowErrorCode.php
@@ -0,0 +1,16 @@
- return isset($this->primary_stream) ? $this->primary_stream : null;
+ return $this->primary_stream;
}
public function hasPrimaryStream()
@@ -100,7 +100,7 @@ public function setPrimaryStream($var)
*/
public function getRemainderStream()
{
- return isset($this->remainder_stream) ? $this->remainder_stream : null;
+ return $this->remainder_stream;
}
public function hasRemainderStream()
diff --git a/BigQueryStorage/src/V1/StorageError.php b/BigQueryStorage/src/V1/StorageError.php
new file mode 100644
index 000000000000..1fa036df1818
--- /dev/null
+++ b/BigQueryStorage/src/V1/StorageError.php
@@ -0,0 +1,138 @@
+google.cloud.bigquery.storage.v1.StorageError
+ */
+class StorageError extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * BigQuery Storage specific error code.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1;
+ */
+ private $code = 0;
+ /**
+ * Name of the failed entity.
+ *
+ * Generated from protobuf field string entity = 2;
+ */
+ private $entity = '';
+ /**
+ * Message that describes the error.
+ *
+ * Generated from protobuf field string error_message = 3;
+ */
+ private $error_message = '';
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type int $code
+ * BigQuery Storage specific error code.
+ * @type string $entity
+ * Name of the failed entity.
+ * @type string $error_message
+ * Message that describes the error.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Storage::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * BigQuery Storage specific error code.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1;
+ * @return int
+ */
+ public function getCode()
+ {
+ return $this->code;
+ }
+
+ /**
+ * BigQuery Storage specific error code.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1;
+ * @param int $var
+ * @return $this
+ */
+ public function setCode($var)
+ {
+ GPBUtil::checkEnum($var, \Google\Cloud\BigQuery\Storage\V1\StorageError\StorageErrorCode::class);
+ $this->code = $var;
+
+ return $this;
+ }
+
+ /**
+ * Name of the failed entity.
+ *
+ * Generated from protobuf field string entity = 2;
+ * @return string
+ */
+ public function getEntity()
+ {
+ return $this->entity;
+ }
+
+ /**
+ * Name of the failed entity.
+ *
+ * Generated from protobuf field string entity = 2;
+ * @param string $var
+ * @return $this
+ */
+ public function setEntity($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->entity = $var;
+
+ return $this;
+ }
+
+ /**
+ * Message that describes the error.
+ *
+ * Generated from protobuf field string error_message = 3;
+ * @return string
+ */
+ public function getErrorMessage()
+ {
+ return $this->error_message;
+ }
+
+ /**
+ * Message that describes the error.
+ *
+ * Generated from protobuf field string error_message = 3;
+ * @param string $var
+ * @return $this
+ */
+ public function setErrorMessage($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->error_message = $var;
+
+ return $this;
+ }
+
+}
+
diff --git a/BigQueryStorage/src/V1/StorageError/StorageErrorCode.php b/BigQueryStorage/src/V1/StorageError/StorageErrorCode.php
new file mode 100644
index 000000000000..b72fc307302f
--- /dev/null
+++ b/BigQueryStorage/src/V1/StorageError/StorageErrorCode.php
@@ -0,0 +1,117 @@
+google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode
+ */
+class StorageErrorCode
+{
+ /**
+ * Default error.
+ *
+ * Generated from protobuf enum STORAGE_ERROR_CODE_UNSPECIFIED = 0;
+ */
+ const STORAGE_ERROR_CODE_UNSPECIFIED = 0;
+ /**
+ * Table is not found in the system.
+ *
+ * Generated from protobuf enum TABLE_NOT_FOUND = 1;
+ */
+ const TABLE_NOT_FOUND = 1;
+ /**
+ * Stream is already committed.
+ *
+ * Generated from protobuf enum STREAM_ALREADY_COMMITTED = 2;
+ */
+ const STREAM_ALREADY_COMMITTED = 2;
+ /**
+ * Stream is not found.
+ *
+ * Generated from protobuf enum STREAM_NOT_FOUND = 3;
+ */
+ const STREAM_NOT_FOUND = 3;
+ /**
+ * Invalid Stream type.
+ * For example, you try to commit a stream that is not pending.
+ *
+ * Generated from protobuf enum INVALID_STREAM_TYPE = 4;
+ */
+ const INVALID_STREAM_TYPE = 4;
+ /**
+ * Invalid Stream state.
+ * For example, you try to commit a stream that is not finalized or has
+ * been garbage collected.
+ *
+ * Generated from protobuf enum INVALID_STREAM_STATE = 5;
+ */
+ const INVALID_STREAM_STATE = 5;
+ /**
+ * Stream is finalized.
+ *
+ * Generated from protobuf enum STREAM_FINALIZED = 6;
+ */
+ const STREAM_FINALIZED = 6;
+ /**
+ * There is a schema mismatch: the user schema has an extra field that is
+ * not present in the BigQuery table schema.
+ *
+ * Generated from protobuf enum SCHEMA_MISMATCH_EXTRA_FIELDS = 7;
+ */
+ const SCHEMA_MISMATCH_EXTRA_FIELDS = 7;
+ /**
+ * Offset already exists.
+ *
+ * Generated from protobuf enum OFFSET_ALREADY_EXISTS = 8;
+ */
+ const OFFSET_ALREADY_EXISTS = 8;
+ /**
+ * Offset out of range.
+ *
+ * Generated from protobuf enum OFFSET_OUT_OF_RANGE = 9;
+ */
+ const OFFSET_OUT_OF_RANGE = 9;
+
+ private static $valueToName = [
+ self::STORAGE_ERROR_CODE_UNSPECIFIED => 'STORAGE_ERROR_CODE_UNSPECIFIED',
+ self::TABLE_NOT_FOUND => 'TABLE_NOT_FOUND',
+ self::STREAM_ALREADY_COMMITTED => 'STREAM_ALREADY_COMMITTED',
+ self::STREAM_NOT_FOUND => 'STREAM_NOT_FOUND',
+ self::INVALID_STREAM_TYPE => 'INVALID_STREAM_TYPE',
+ self::INVALID_STREAM_STATE => 'INVALID_STREAM_STATE',
+ self::STREAM_FINALIZED => 'STREAM_FINALIZED',
+ self::SCHEMA_MISMATCH_EXTRA_FIELDS => 'SCHEMA_MISMATCH_EXTRA_FIELDS',
+ self::OFFSET_ALREADY_EXISTS => 'OFFSET_ALREADY_EXISTS',
+ self::OFFSET_OUT_OF_RANGE => 'OFFSET_OUT_OF_RANGE',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(StorageErrorCode::class, \Google\Cloud\BigQuery\Storage\V1\StorageError_StorageErrorCode::class);
+
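And a matching sketch for StorageError; $errors is assumed to be an iterable of StorageError messages, such as the stream_errors of a BatchCommitWriteStreamsResponse:

```
use Google\Cloud\BigQuery\Storage\V1\StorageError\StorageErrorCode;

foreach ($errors as $error) {
    printf(
        '%s on %s: %s' . PHP_EOL,
        StorageErrorCode::name($error->getCode()),
        $error->getEntity(),
        $error->getErrorMessage()
    );
}
```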
diff --git a/BigQueryStorage/src/V1/StorageError_StorageErrorCode.php b/BigQueryStorage/src/V1/StorageError_StorageErrorCode.php
new file mode 100644
index 000000000000..8e6d4afc9b45
--- /dev/null
+++ b/BigQueryStorage/src/V1/StorageError_StorageErrorCode.php
@@ -0,0 +1,16 @@
+google.cloud.bigquery.storage.v1.StreamStats
*/
@@ -45,7 +45,7 @@ public function __construct($data = NULL) {
*/
public function getProgress()
{
- return isset($this->progress) ? $this->progress : null;
+ return $this->progress;
}
public function hasProgress()
diff --git a/BigQueryStorage/src/V1/TableFieldSchema.php b/BigQueryStorage/src/V1/TableFieldSchema.php
new file mode 100644
index 000000000000..461e65dd049a
--- /dev/null
+++ b/BigQueryStorage/src/V1/TableFieldSchema.php
@@ -0,0 +1,471 @@
+google.cloud.bigquery.storage.v1.TableFieldSchema
+ */
+class TableFieldSchema extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Required. The field name. The name must contain only letters (a-z, A-Z),
+ * numbers (0-9), or underscores (_), and must start with a letter or
+ * underscore. The maximum length is 128 characters.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED];
+ */
+ private $name = '';
+ /**
+ * Required. The field data type.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+ */
+ private $type = 0;
+ /**
+ * Optional. The field mode. The default value is NULLABLE.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $mode = 0;
+ /**
+ * Optional. Describes the nested schema fields if the type property is set to
+ * STRUCT.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $fields;
+ /**
+ * Optional. The field description. The maximum length is 1,024 characters.
+ *
+ * Generated from protobuf field string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $description = '';
+ /**
+ * Optional. Maximum length of values of this field for STRING or BYTES types.
+ * If max_length is not specified, no maximum length constraint is imposed
+ * on this field.
+ * If type = "STRING", then max_length represents the maximum UTF-8
+ * length of strings in this field.
+ * If type = "BYTES", then max_length represents the maximum number of
+ * bytes in this field.
+ * It is invalid to set this field if type is not "STRING" or "BYTES".
+ *
+ * Generated from protobuf field int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $max_length = 0;
+ /**
+ * Optional. Precision (maximum number of total digits in base 10) and scale
+ * (maximum number of digits in the fractional part in base 10) constraints
+ * for values of this field for NUMERIC or BIGNUMERIC.
+ * It is invalid to set precision or scale if type is not "NUMERIC" or
+ * "BIGNUMERIC".
+ * If precision and scale are not specified, no value range constraint is
+ * imposed on this field insofar as values are permitted by the type.
+ * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+ * * Precision (P) and scale (S) are specified:
+ * [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+ * * Precision (P) is specified but not scale (and thus scale is
+ * interpreted to be equal to zero):
+ * [-10^P + 1, 10^P - 1].
+ * Acceptable values for precision and scale if both are specified:
+ * * If type = "NUMERIC":
+ * 1 <= precision - scale <= 29 and 0 <= scale <= 9.
+ * * If type = "BIGNUMERIC":
+ * 1 <= precision - scale <= 38 and 0 <= scale <= 38.
+ * Acceptable values for precision if only precision is specified but not
+ * scale (and thus scale is interpreted to be equal to zero):
+ * * If type = "NUMERIC": 1 <= precision <= 29.
+ * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+ * If scale is specified but not precision, then it is invalid.
+ *
+ * Generated from protobuf field int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $precision = 0;
+ /**
+ * Optional. See documentation for precision.
+ *
+ * Generated from protobuf field int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $scale = 0;
+ /**
+ * Optional. A SQL expression to specify the [default value]
+ * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+ *
+ * Generated from protobuf field string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL];
+ */
+ private $default_value_expression = '';
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $name
+ * Required. The field name. The name must contain only letters (a-z, A-Z),
+ * numbers (0-9), or underscores (_), and must start with a letter or
+ * underscore. The maximum length is 128 characters.
+ * @type int $type
+ * Required. The field data type.
+ * @type int $mode
+ * Optional. The field mode. The default value is NULLABLE.
+ * @type array<\Google\Cloud\BigQuery\Storage\V1\TableFieldSchema>|\Google\Protobuf\Internal\RepeatedField $fields
+ * Optional. Describes the nested schema fields if the type property is set to
+ * STRUCT.
+ * @type string $description
+ * Optional. The field description. The maximum length is 1,024 characters.
+ * @type int|string $max_length
+ * Optional. Maximum length of values of this field for STRING or BYTES types.
+ * If max_length is not specified, no maximum length constraint is imposed
+ * on this field.
+ * If type = "STRING", then max_length represents the maximum UTF-8
+ * length of strings in this field.
+ * If type = "BYTES", then max_length represents the maximum number of
+ * bytes in this field.
+ * It is invalid to set this field if type is not "STRING" or "BYTES".
+ * @type int|string $precision
+ * Optional. Precision (maximum number of total digits in base 10) and scale
+ * (maximum number of digits in the fractional part in base 10) constraints
+ * for values of this field for NUMERIC or BIGNUMERIC.
+ * It is invalid to set precision or scale if type is not "NUMERIC" or
+ * "BIGNUMERIC".
+ * If precision and scale are not specified, no value range constraint is
+ * imposed on this field insofar as values are permitted by the type.
+ * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+ * * Precision (P) and scale (S) are specified:
+ * [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+ * * Precision (P) is specified but not scale (and thus scale is
+ * interpreted to be equal to zero):
+ * [-10^P + 1, 10^P - 1].
+ * Acceptable values for precision and scale if both are specified:
+ * * If type = "NUMERIC":
+ * 1 <= precision - scale <= 29 and 0 <= scale <= 9.
+ * * If type = "BIGNUMERIC":
+ * 1 <= precision - scale <= 38 and 0 <= scale <= 38.
+ * Acceptable values for precision if only precision is specified but not
+ * scale (and thus scale is interpreted to be equal to zero):
+ * * If type = "NUMERIC": 1 <= precision <= 29.
+ * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+ * If scale is specified but not precision, then it is invalid.
+ * @type int|string $scale
+ * Optional. See documentation for precision.
+ * @type string $default_value_expression
+ * Optional. A SQL expression to specify the [default value]
+ * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Table::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Required. The field name. The name must contain only letters (a-z, A-Z),
+ * numbers (0-9), or underscores (_), and must start with a letter or
+ * underscore. The maximum length is 128 characters.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED];
+ * @return string
+ */
+ public function getName()
+ {
+ return $this->name;
+ }
+
+ /**
+ * Required. The field name. The name must contain only letters (a-z, A-Z),
+ * numbers (0-9), or underscores (_), and must start with a letter or
+ * underscore. The maximum length is 128 characters.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED];
+ * @param string $var
+ * @return $this
+ */
+ public function setName($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->name = $var;
+
+ return $this;
+ }
+
+ /**
+ * Required. The field data type.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+ * @return int
+ */
+ public function getType()
+ {
+ return $this->type;
+ }
+
+ /**
+ * Required. The field data type.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED];
+ * @param int $var
+ * @return $this
+ */
+ public function setType($var)
+ {
+ GPBUtil::checkEnum($var, \Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Type::class);
+ $this->type = $var;
+
+ return $this;
+ }
+
+ /**
+ * Optional. The field mode. The default value is NULLABLE.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+ * @return int
+ */
+ public function getMode()
+ {
+ return $this->mode;
+ }
+
+ /**
+ * Optional. The field mode. The default value is NULLABLE.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+ * @param int $var
+ * @return $this
+ */
+ public function setMode($var)
+ {
+ GPBUtil::checkEnum($var, \Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Mode::class);
+ $this->mode = $var;
+
+ return $this;
+ }
+
+ /**
+ * Optional. Describes the nested schema fields if the type property is set to
+ * STRUCT.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getFields()
+ {
+ return $this->fields;
+ }
+
+ /**
+ * Optional. Describes the nested schema fields if the type property is set to
+ * STRUCT.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+ * @param array<\Google\Cloud\BigQuery\Storage\V1\TableFieldSchema>|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setFields($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\BigQuery\Storage\V1\TableFieldSchema::class);
+ $this->fields = $arr;
+
+ return $this;
+ }
+
+ /**
+ * Optional. The field description. The maximum length is 1,024 characters.
+ *
+ * Generated from protobuf field string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+ * @return string
+ */
+ public function getDescription()
+ {
+ return $this->description;
+ }
+
+ /**
+ * Optional. The field description. The maximum length is 1,024 characters.
+ *
+ * Generated from protobuf field string description = 6 [(.google.api.field_behavior) = OPTIONAL];
+ * @param string $var
+ * @return $this
+ */
+ public function setDescription($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->description = $var;
+
+ return $this;
+ }
+
+ /**
+ * Optional. Maximum length of values of this field for STRING or BYTES.
+ * If max_length is not specified, no maximum length constraint is imposed
+ * on this field.
+ * If type = "STRING", then max_length represents the maximum UTF-8
+ * length of strings in this field.
+ * If type = "BYTES", then max_length represents the maximum number of
+ * bytes in this field.
+ * It is invalid to set this field if type is not "STRING" or "BYTES".
+ *
+ * Generated from protobuf field int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL];
+ * @return int|string
+ */
+ public function getMaxLength()
+ {
+ return $this->max_length;
+ }
+
+ /**
+ * Optional. Maximum length of values of this field for STRING or BYTES.
+ * If max_length is not specified, no maximum length constraint is imposed
+ * on this field.
+ * If type = "STRING", then max_length represents the maximum UTF-8
+ * length of strings in this field.
+ * If type = "BYTES", then max_length represents the maximum number of
+ * bytes in this field.
+ * It is invalid to set this field if type is not "STRING" or "BYTES".
+ *
+ * Generated from protobuf field int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL];
+ * @param int|string $var
+ * @return $this
+ */
+ public function setMaxLength($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->max_length = $var;
+
+ return $this;
+ }
+
+ /**
+ * Optional. Precision (maximum number of total digits in base 10) and scale
+ * (maximum number of digits in the fractional part in base 10) constraints
+ * for values of this field for NUMERIC or BIGNUMERIC.
+ * It is invalid to set precision or scale if type is not "NUMERIC" or
+ * "BIGNUMERIC".
+ * If precision and scale are not specified, no value range constraint is
+ * imposed on this field insofar as values are permitted by the type.
+ * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+ * * Precision (P) and scale (S) are specified:
+ * [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+ * * Precision (P) is specified but not scale (and thus scale is
+ * interpreted to be equal to zero):
+ * [-10^P + 1, 10^P - 1].
+ * Acceptable values for precision and scale if both are specified:
+ * * If type = "NUMERIC":
+ * 1 <= precision - scale <= 29 and 0 <= scale <= 9.
+ * * If type = "BIGNUMERIC":
+ * 1 <= precision - scale <= 38 and 0 <= scale <= 38.
+ * Acceptable values for precision if only precision is specified but not
+ * scale (and thus scale is interpreted to be equal to zero):
+ * * If type = "NUMERIC": 1 <= precision <= 29.
+ * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+ * If scale is specified but not precision, then it is invalid.
+ *
+ * Generated from protobuf field int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL];
+ * @return int|string
+ */
+ public function getPrecision()
+ {
+ return $this->precision;
+ }
+
+ /**
+ * Optional. Precision (maximum number of total digits in base 10) and scale
+ * (maximum number of digits in the fractional part in base 10) constraints
+ * for values of this field for NUMERIC or BIGNUMERIC.
+ * It is invalid to set precision or scale if type is not "NUMERIC" or
+ * "BIGNUMERIC".
+ * If precision and scale are not specified, no value range constraint is
+ * imposed on this field insofar as values are permitted by the type.
+ * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+ * * Precision (P) and scale (S) are specified:
+ * [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+ * * Precision (P) is specified but not scale (and thus scale is
+ * interpreted to be equal to zero):
+ * [-10^P + 1, 10^P - 1].
+ * Acceptable values for precision and scale if both are specified:
+ * * If type = "NUMERIC":
+ * 1 <= precision - scale <= 29 and 0 <= scale <= 9.
+ * * If type = "BIGNUMERIC":
+ * 1 <= precision - scale <= 38 and 0 <= scale <= 38.
+ * Acceptable values for precision if only precision is specified but not
+ * scale (and thus scale is interpreted to be equal to zero):
+ * * If type = "NUMERIC": 1 <= precision <= 29.
+ * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+ * If scale is specified but not precision, then it is invalid.
+ *
+ * Generated from protobuf field int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL];
+ * @param int|string $var
+ * @return $this
+ */
+ public function setPrecision($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->precision = $var;
+
+ return $this;
+ }
+
+ /**
+ * Optional. See documentation for precision.
+ *
+ * Generated from protobuf field int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL];
+ * @return int|string
+ */
+ public function getScale()
+ {
+ return $this->scale;
+ }
+
+ /**
+ * Optional. See documentation for precision.
+ *
+ * Generated from protobuf field int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL];
+ * @param int|string $var
+ * @return $this
+ */
+ public function setScale($var)
+ {
+ GPBUtil::checkInt64($var);
+ $this->scale = $var;
+
+ return $this;
+ }
+
+ /**
+ * Optional. A SQL expression to specify the [default value]
+ * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+ *
+ * Generated from protobuf field string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL];
+ * @return string
+ */
+ public function getDefaultValueExpression()
+ {
+ return $this->default_value_expression;
+ }
+
+ /**
+ * Optional. A SQL expression to specify the [default value]
+ * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+ *
+ * Generated from protobuf field string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL];
+ * @param string $var
+ * @return $this
+ */
+ public function setDefaultValueExpression($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->default_value_expression = $var;
+
+ return $this;
+ }
+
+}
+
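As a worked example of the precision/scale range rule documented above (a minimal sketch, assuming the package is installed via Composer and autoloaded; the column name `price` is hypothetical):

<?php
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema;
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Mode;
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Type;

// NUMERIC with precision P=10 and scale S=2 permits values in
// [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)] = [-99999999.99, 99999999.99],
// and satisfies 1 <= P-S <= 29 and 0 <= S <= 9.
$price = (new TableFieldSchema())
    ->setName('price')
    ->setType(Type::NUMERIC)
    ->setMode(Mode::NULLABLE)
    ->setPrecision(10)
    ->setScale(2);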
diff --git a/BigQueryStorage/src/V1/TableFieldSchema/Mode.php b/BigQueryStorage/src/V1/TableFieldSchema/Mode.php
new file mode 100644
index 000000000000..8fc99eec5196
--- /dev/null
+++ b/BigQueryStorage/src/V1/TableFieldSchema/Mode.php
@@ -0,0 +1,63 @@
+/**
+ * Protobuf type <code>google.cloud.bigquery.storage.v1.TableFieldSchema.Mode</code>
+ */
+class Mode
+{
+ /**
+ * Illegal value
+ *
+ * Generated from protobuf enum MODE_UNSPECIFIED = 0;
+ */
+ const MODE_UNSPECIFIED = 0;
+ /**
+ * Generated from protobuf enum NULLABLE = 1;
+ */
+ const NULLABLE = 1;
+ /**
+ * Generated from protobuf enum REQUIRED = 2;
+ */
+ const REQUIRED = 2;
+ /**
+ * Generated from protobuf enum REPEATED = 3;
+ */
+ const REPEATED = 3;
+
+ private static $valueToName = [
+ self::MODE_UNSPECIFIED => 'MODE_UNSPECIFIED',
+ self::NULLABLE => 'NULLABLE',
+ self::REQUIRED => 'REQUIRED',
+ self::REPEATED => 'REPEATED',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(Mode::class, \Google\Cloud\BigQuery\Storage\V1\TableFieldSchema_Mode::class);
+
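A quick sketch of the name()/value() helpers shown above (assumes the enum class is autoloaded); note that value() upper-cases its argument before the constant lookup:

<?php
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Mode;

var_dump(Mode::name(Mode::REQUIRED)); // string(8) "REQUIRED"
var_dump(Mode::value('repeated'));    // int(3) -- strtoupper() maps this to REPEATED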
diff --git a/BigQueryStorage/src/V1/TableFieldSchema/Type.php b/BigQueryStorage/src/V1/TableFieldSchema/Type.php
new file mode 100644
index 000000000000..8a45d4c2b98f
--- /dev/null
+++ b/BigQueryStorage/src/V1/TableFieldSchema/Type.php
@@ -0,0 +1,153 @@
+/**
+ * Protobuf type <code>google.cloud.bigquery.storage.v1.TableFieldSchema.Type</code>
+ */
+class Type
+{
+ /**
+ * Illegal value
+ *
+ * Generated from protobuf enum TYPE_UNSPECIFIED = 0;
+ */
+ const TYPE_UNSPECIFIED = 0;
+ /**
+ * 64K, UTF8
+ *
+ * Generated from protobuf enum STRING = 1;
+ */
+ const STRING = 1;
+ /**
+ * 64-bit signed
+ *
+ * Generated from protobuf enum INT64 = 2;
+ */
+ const INT64 = 2;
+ /**
+ * 64-bit IEEE floating point
+ *
+ * Generated from protobuf enum DOUBLE = 3;
+ */
+ const DOUBLE = 3;
+ /**
+ * Aggregate type
+ *
+ * Generated from protobuf enum STRUCT = 4;
+ */
+ const STRUCT = 4;
+ /**
+ * 64K, Binary
+ *
+ * Generated from protobuf enum BYTES = 5;
+ */
+ const BYTES = 5;
+ /**
+ * 2-valued
+ *
+ * Generated from protobuf enum BOOL = 6;
+ */
+ const BOOL = 6;
+ /**
+ * 64-bit signed usec since UTC epoch
+ *
+ * Generated from protobuf enum TIMESTAMP = 7;
+ */
+ const TIMESTAMP = 7;
+ /**
+ * Civil date - Year, Month, Day
+ *
+ * Generated from protobuf enum DATE = 8;
+ */
+ const DATE = 8;
+ /**
+ * Civil time - Hour, Minute, Second, Microseconds
+ *
+ * Generated from protobuf enum TIME = 9;
+ */
+ const TIME = 9;
+ /**
+ * Combination of civil date and civil time
+ *
+ * Generated from protobuf enum DATETIME = 10;
+ */
+ const DATETIME = 10;
+ /**
+ * Geography object
+ *
+ * Generated from protobuf enum GEOGRAPHY = 11;
+ */
+ const GEOGRAPHY = 11;
+ /**
+ * Numeric value
+ *
+ * Generated from protobuf enum NUMERIC = 12;
+ */
+ const NUMERIC = 12;
+ /**
+ * BigNumeric value
+ *
+ * Generated from protobuf enum BIGNUMERIC = 13;
+ */
+ const BIGNUMERIC = 13;
+ /**
+ * Interval
+ *
+ * Generated from protobuf enum INTERVAL = 14;
+ */
+ const INTERVAL = 14;
+ /**
+ * JSON, String
+ *
+ * Generated from protobuf enum JSON = 15;
+ */
+ const JSON = 15;
+
+ private static $valueToName = [
+ self::TYPE_UNSPECIFIED => 'TYPE_UNSPECIFIED',
+ self::STRING => 'STRING',
+ self::INT64 => 'INT64',
+ self::DOUBLE => 'DOUBLE',
+ self::STRUCT => 'STRUCT',
+ self::BYTES => 'BYTES',
+ self::BOOL => 'BOOL',
+ self::TIMESTAMP => 'TIMESTAMP',
+ self::DATE => 'DATE',
+ self::TIME => 'TIME',
+ self::DATETIME => 'DATETIME',
+ self::GEOGRAPHY => 'GEOGRAPHY',
+ self::NUMERIC => 'NUMERIC',
+ self::BIGNUMERIC => 'BIGNUMERIC',
+ self::INTERVAL => 'INTERVAL',
+ self::JSON => 'JSON',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(Type::class, \Google\Cloud\BigQuery\Storage\V1\TableFieldSchema_Type::class);
+
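Both helpers throw the SPL UnexpectedValueException for unknown inputs, so lookups against values that may come from a newer server revision are worth guarding; a minimal sketch (the helper function is hypothetical):

<?php
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Type;

function typeNameOrUnknown(int $value): string
{
    try {
        return Type::name($value);
    } catch (UnexpectedValueException $e) {
        return "UNKNOWN($value)"; // e.g. a value added in a newer proto revision
    }
}

echo typeNameOrUnknown(Type::JSON), "\n"; // JSON
echo typeNameOrUnknown(99), "\n";         // UNKNOWN(99)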
diff --git a/BigQueryStorage/src/V1/TableFieldSchema_Mode.php b/BigQueryStorage/src/V1/TableFieldSchema_Mode.php
new file mode 100644
index 000000000000..df97c0408cb6
--- /dev/null
+++ b/BigQueryStorage/src/V1/TableFieldSchema_Mode.php
@@ -0,0 +1,16 @@
+/**
+ * Generated from protobuf message <code>google.cloud.bigquery.storage.v1.TableSchema</code>
+ */
+class TableSchema extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Describes the fields in a table.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1;
+ */
+ private $fields;
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type array<\Google\Cloud\BigQuery\Storage\V1\TableFieldSchema>|\Google\Protobuf\Internal\RepeatedField $fields
+ * Describes the fields in a table.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Table::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Describes the fields in a table.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1;
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getFields()
+ {
+ return $this->fields;
+ }
+
+ /**
+ * Describes the fields in a table.
+ *
+ * Generated from protobuf field repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1;
+ * @param array<\Google\Cloud\BigQuery\Storage\V1\TableFieldSchema>|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setFields($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\BigQuery\Storage\V1\TableFieldSchema::class);
+ $this->fields = $arr;
+
+ return $this;
+ }
+
+}
+
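Putting TableSchema and TableFieldSchema together: nested records hang off the repeated fields property of a STRUCT field. A minimal sketch (the column names are hypothetical):

<?php
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema;
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Type;
use Google\Cloud\BigQuery\Storage\V1\TableSchema;

// A STRUCT column carries its children in the repeated `fields` property.
$address = (new TableFieldSchema())
    ->setName('address')
    ->setType(Type::STRUCT)
    ->setFields([
        (new TableFieldSchema())->setName('city')->setType(Type::STRING),
        (new TableFieldSchema())->setName('zip')->setType(Type::STRING),
    ]);

$schema = (new TableSchema())->setFields([$address]);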
diff --git a/BigQueryStorage/src/V1/WriteStream.php b/BigQueryStorage/src/V1/WriteStream.php
new file mode 100644
index 000000000000..c937f1e23d39
--- /dev/null
+++ b/BigQueryStorage/src/V1/WriteStream.php
@@ -0,0 +1,341 @@
+/**
+ * Information about a single stream that gets data inside the storage system.
+ *
+ * Generated from protobuf message <code>google.cloud.bigquery.storage.v1.WriteStream</code>
+ */
+class WriteStream extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Output only. Name of the stream, in the form
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ */
+ private $name = '';
+ /**
+ * Immutable. Type of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE];
+ */
+ private $type = 0;
+ /**
+ * Output only. Create time of the stream. For the _default stream, this is
+ * the creation_time of the table.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ */
+ private $create_time = null;
+ /**
+ * Output only. Commit time of the stream.
+ * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+ * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+ * means it is not committed.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ */
+ private $commit_time = null;
+ /**
+ * Output only. The schema of the destination table. It is only returned in
+ * the `CreateWriteStream` response. The caller should generate data
+ * compatible with this schema to send in the initial `AppendRowsRequest`.
+ * The table schema could go out of date during the lifetime of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ */
+ private $table_schema = null;
+ /**
+ * Immutable. Mode of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE];
+ */
+ private $write_mode = 0;
+ /**
+ * Immutable. The geographic location where the stream's dataset resides. See
+ * https://cloud.google.com/bigquery/docs/locations for supported
+ * locations.
+ *
+ * Generated from protobuf field string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ */
+ private $location = '';
+
+ /**
+ * Constructor.
+ *
+ * @param array $data {
+ * Optional. Data for populating the Message object.
+ *
+ * @type string $name
+ * Output only. Name of the stream, in the form
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ * @type int $type
+ * Immutable. Type of the stream.
+ * @type \Google\Protobuf\Timestamp $create_time
+ * Output only. Create time of the stream. For the _default stream, this is
+ * the creation_time of the table.
+ * @type \Google\Protobuf\Timestamp $commit_time
+ * Output only. Commit time of the stream.
+ * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+ * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+ * means it is not committed.
+ * @type \Google\Cloud\BigQuery\Storage\V1\TableSchema $table_schema
+ * Output only. The schema of the destination table. It is only returned in
+ * the `CreateWriteStream` response. The caller should generate data
+ * compatible with this schema to send in the initial `AppendRowsRequest`.
+ * The table schema could go out of date during the lifetime of the stream.
+ * @type int $write_mode
+ * Immutable. Mode of the stream.
+ * @type string $location
+ * Immutable. The geographic location where the stream's dataset resides. See
+ * https://cloud.google.com/bigquery/docs/locations for supported
+ * locations.
+ * }
+ */
+ public function __construct($data = NULL) {
+ \GPBMetadata\Google\Cloud\Bigquery\Storage\V1\Stream::initOnce();
+ parent::__construct($data);
+ }
+
+ /**
+ * Output only. Name of the stream, in the form
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @return string
+ */
+ public function getName()
+ {
+ return $this->name;
+ }
+
+ /**
+ * Output only. Name of the stream, in the form
+ * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ *
+ * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @param string $var
+ * @return $this
+ */
+ public function setName($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->name = $var;
+
+ return $this;
+ }
+
+ /**
+ * Immutable. Type of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE];
+ * @return int
+ */
+ public function getType()
+ {
+ return $this->type;
+ }
+
+ /**
+ * Immutable. Type of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE];
+ * @param int $var
+ * @return $this
+ */
+ public function setType($var)
+ {
+ GPBUtil::checkEnum($var, \Google\Cloud\BigQuery\Storage\V1\WriteStream\Type::class);
+ $this->type = $var;
+
+ return $this;
+ }
+
+ /**
+ * Output only. Create time of the stream. For the _default stream, this is
+ * the creation_time of the table.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @return \Google\Protobuf\Timestamp|null
+ */
+ public function getCreateTime()
+ {
+ return $this->create_time;
+ }
+
+ public function hasCreateTime()
+ {
+ return isset($this->create_time);
+ }
+
+ public function clearCreateTime()
+ {
+ unset($this->create_time);
+ }
+
+ /**
+ * Output only. Create time of the stream. For the _default stream, this is
+ * the creation_time of the table.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @param \Google\Protobuf\Timestamp $var
+ * @return $this
+ */
+ public function setCreateTime($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Protobuf\Timestamp::class);
+ $this->create_time = $var;
+
+ return $this;
+ }
+
+ /**
+ * Output only. Commit time of the stream.
+ * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+ * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+ * means it is not committed.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @return \Google\Protobuf\Timestamp|null
+ */
+ public function getCommitTime()
+ {
+ return $this->commit_time;
+ }
+
+ public function hasCommitTime()
+ {
+ return isset($this->commit_time);
+ }
+
+ public function clearCommitTime()
+ {
+ unset($this->commit_time);
+ }
+
+ /**
+ * Output only. Commit time of the stream.
+ * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+ * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+ * means it is not committed.
+ *
+ * Generated from protobuf field .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @param \Google\Protobuf\Timestamp $var
+ * @return $this
+ */
+ public function setCommitTime($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Protobuf\Timestamp::class);
+ $this->commit_time = $var;
+
+ return $this;
+ }
+
+ /**
+ * Output only. The schema of the destination table. It is only returned in
+ * the `CreateWriteStream` response. The caller should generate data
+ * compatible with this schema to send in the initial `AppendRowsRequest`.
+ * The table schema could go out of date during the lifetime of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @return \Google\Cloud\BigQuery\Storage\V1\TableSchema|null
+ */
+ public function getTableSchema()
+ {
+ return $this->table_schema;
+ }
+
+ public function hasTableSchema()
+ {
+ return isset($this->table_schema);
+ }
+
+ public function clearTableSchema()
+ {
+ unset($this->table_schema);
+ }
+
+ /**
+ * Output only. The schema of the destination table. It is only returned in
+ * the `CreateWriteStream` response. The caller should generate data
+ * compatible with this schema to send in the initial `AppendRowsRequest`.
+ * The table schema could go out of date during the lifetime of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * @param \Google\Cloud\BigQuery\Storage\V1\TableSchema $var
+ * @return $this
+ */
+ public function setTableSchema($var)
+ {
+ GPBUtil::checkMessage($var, \Google\Cloud\BigQuery\Storage\V1\TableSchema::class);
+ $this->table_schema = $var;
+
+ return $this;
+ }
+
+ /**
+ * Immutable. Mode of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE];
+ * @return int
+ */
+ public function getWriteMode()
+ {
+ return $this->write_mode;
+ }
+
+ /**
+ * Immutable. Mode of the stream.
+ *
+ * Generated from protobuf field .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE];
+ * @param int $var
+ * @return $this
+ */
+ public function setWriteMode($var)
+ {
+ GPBUtil::checkEnum($var, \Google\Cloud\BigQuery\Storage\V1\WriteStream\WriteMode::class);
+ $this->write_mode = $var;
+
+ return $this;
+ }
+
+ /**
+ * Immutable. The geographic location where the stream's dataset resides. See
+ * https://cloud.google.com/bigquery/docs/locations for supported
+ * locations.
+ *
+ * Generated from protobuf field string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * @return string
+ */
+ public function getLocation()
+ {
+ return $this->location;
+ }
+
+ /**
+ * Immutable. The geographic location where the stream's dataset resides. See
+ * https://cloud.google.com/bigquery/docs/locations for supported
+ * locations.
+ *
+ * Generated from protobuf field string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * @param string $var
+ * @return $this
+ */
+ public function setLocation($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->location = $var;
+
+ return $this;
+ }
+
+}
+
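Because table_schema is a nullable message that is only populated on CreateWriteStream responses, guard it with the has*() accessor before dereferencing; a sketch assuming $writeStream came back from a prior createWriteStream call:

<?php
use Google\Cloud\BigQuery\Storage\V1\TableFieldSchema\Type;

if ($writeStream->hasTableSchema()) {
    foreach ($writeStream->getTableSchema()->getFields() as $field) {
        printf("%s: %s\n", $field->getName(), Type::name($field->getType()));
    }
}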
diff --git a/BigQueryStorage/src/V1/WriteStream/Type.php b/BigQueryStorage/src/V1/WriteStream/Type.php
new file mode 100644
index 000000000000..f8c772aef35d
--- /dev/null
+++ b/BigQueryStorage/src/V1/WriteStream/Type.php
@@ -0,0 +1,72 @@
+/**
+ * Type enum of the stream.
+ *
+ * Protobuf type <code>google.cloud.bigquery.storage.v1.WriteStream.Type</code>
+ */
+class Type
+{
+ /**
+ * Unknown type.
+ *
+ * Generated from protobuf enum TYPE_UNSPECIFIED = 0;
+ */
+ const TYPE_UNSPECIFIED = 0;
+ /**
+ * Data will commit automatically and appear as soon as the write is
+ * acknowledged.
+ *
+ * Generated from protobuf enum COMMITTED = 1;
+ */
+ const COMMITTED = 1;
+ /**
+ * Data is invisible until the stream is committed.
+ *
+ * Generated from protobuf enum PENDING = 2;
+ */
+ const PENDING = 2;
+ /**
+ * Data is only visible up to the offset to which it was flushed.
+ *
+ * Generated from protobuf enum BUFFERED = 3;
+ */
+ const BUFFERED = 3;
+
+ private static $valueToName = [
+ self::TYPE_UNSPECIFIED => 'TYPE_UNSPECIFIED',
+ self::COMMITTED => 'COMMITTED',
+ self::PENDING => 'PENDING',
+ self::BUFFERED => 'BUFFERED',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(Type::class, \Google\Cloud\BigQuery\Storage\V1\WriteStream_Type::class);
+
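The stream type determines visibility semantics: COMMITTED rows appear as soon as the write is acknowledged, while PENDING rows surface only after FinalizeWriteStream plus BatchCommitWriteStreams. A minimal sketch of requesting a PENDING stream:

<?php
use Google\Cloud\BigQuery\Storage\V1\WriteStream;
use Google\Cloud\BigQuery\Storage\V1\WriteStream\Type;

// Rows appended to this stream stay invisible until it is finalized and committed.
$pendingStream = (new WriteStream())->setType(Type::PENDING);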
diff --git a/BigQueryStorage/src/V1/WriteStream/WriteMode.php b/BigQueryStorage/src/V1/WriteStream/WriteMode.php
new file mode 100644
index 000000000000..1f21103bc93d
--- /dev/null
+++ b/BigQueryStorage/src/V1/WriteStream/WriteMode.php
@@ -0,0 +1,58 @@
+/**
+ * Mode enum of the stream.
+ *
+ * Protobuf type <code>google.cloud.bigquery.storage.v1.WriteStream.WriteMode</code>
+ */
+class WriteMode
+{
+ /**
+ * Unknown type.
+ *
+ * Generated from protobuf enum WRITE_MODE_UNSPECIFIED = 0;
+ */
+ const WRITE_MODE_UNSPECIFIED = 0;
+ /**
+ * Insert new records into the table.
+ * This is the default value when no write mode is specified.
+ *
+ * Generated from protobuf enum INSERT = 1;
+ */
+ const INSERT = 1;
+
+ private static $valueToName = [
+ self::WRITE_MODE_UNSPECIFIED => 'WRITE_MODE_UNSPECIFIED',
+ self::INSERT => 'INSERT',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
+// Adding a class alias for backwards compatibility with the previous class name.
+class_alias(WriteMode::class, \Google\Cloud\BigQuery\Storage\V1\WriteStream_WriteMode::class);
+
diff --git a/BigQueryStorage/src/V1/WriteStreamView.php b/BigQueryStorage/src/V1/WriteStreamView.php
new file mode 100644
index 000000000000..5d008746e9fe
--- /dev/null
+++ b/BigQueryStorage/src/V1/WriteStreamView.php
@@ -0,0 +1,66 @@
+/**
+ * WriteStreamView is a view enum that controls what details about a write
+ * stream should be returned.
+ *
+ * Protobuf type <code>google.cloud.bigquery.storage.v1.WriteStreamView</code>
+ */
+class WriteStreamView
+{
+ /**
+ * The default / unset value.
+ *
+ * Generated from protobuf enum WRITE_STREAM_VIEW_UNSPECIFIED = 0;
+ */
+ const WRITE_STREAM_VIEW_UNSPECIFIED = 0;
+ /**
+ * The BASIC projection returns basic metadata about a write stream. The
+ * basic view does not include schema information. This is the default view
+ * returned by GetWriteStream.
+ *
+ * Generated from protobuf enum BASIC = 1;
+ */
+ const BASIC = 1;
+ /**
+ * The FULL projection returns all available write stream metadata, including
+ * the schema. CreateWriteStream returns the full projection of write stream
+ * metadata.
+ *
+ * Generated from protobuf enum FULL = 2;
+ */
+ const FULL = 2;
+
+ private static $valueToName = [
+ self::WRITE_STREAM_VIEW_UNSPECIFIED => 'WRITE_STREAM_VIEW_UNSPECIFIED',
+ self::BASIC => 'BASIC',
+ self::FULL => 'FULL',
+ ];
+
+ public static function name($value)
+ {
+ if (!isset(self::$valueToName[$value])) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no name defined for value %s', __CLASS__, $value));
+ }
+ return self::$valueToName[$value];
+ }
+
+
+ public static function value($name)
+ {
+ $const = __CLASS__ . '::' . strtoupper($name);
+ if (!defined($const)) {
+ throw new UnexpectedValueException(sprintf(
+ 'Enum %s has no value defined for name %s', __CLASS__, $name));
+ }
+ return constant($const);
+ }
+}
+
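Where a client surfaces the request's view field as an optional argument (an assumption about the GAPIC surface, not shown in this diff), FULL retrieves the schema alongside the basic metadata on GetWriteStream; a hedged sketch:

<?php
use Google\Cloud\BigQuery\Storage\V1\WriteStreamView;

// Hypothetical usage: assumes $writeClient is a configured BigQueryWriteClient
// and that getWriteStream() accepts a 'view' entry in its optional arguments.
$stream = $writeClient->getWriteStream($formattedName, [
    'view' => WriteStreamView::FULL,
]);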
diff --git a/BigQueryStorage/src/V1/WriteStream_Type.php b/BigQueryStorage/src/V1/WriteStream_Type.php
new file mode 100644
index 000000000000..9e8669f6ee55
--- /dev/null
+++ b/BigQueryStorage/src/V1/WriteStream_Type.php
@@ -0,0 +1,16 @@
+ 'ReadRows' => [
+ 'method' => 'get',
+ 'uriTemplate' => '/v1/{read_stream=projects/*/locations/*/sessions/*/streams/*}',
+ 'placeholders' => [
+ 'read_stream' => [
+ 'getters' => [
+ 'getReadStream',
+ ],
+ ],
+ ],
+ ],
'SplitReadStream' => [
'method' => 'get',
'uriTemplate' => '/v1/{name=projects/*/locations/*/sessions/*/streams/*}',
diff --git a/BigQueryStorage/src/V1/resources/big_query_write_client_config.json b/BigQueryStorage/src/V1/resources/big_query_write_client_config.json
new file mode 100644
index 000000000000..7e453adb3251
--- /dev/null
+++ b/BigQueryStorage/src/V1/resources/big_query_write_client_config.json
@@ -0,0 +1,89 @@
+{
+ "interfaces": {
+ "google.cloud.bigquery.storage.v1.BigQueryWrite": {
+ "retry_codes": {
+ "no_retry_codes": [],
+ "retry_policy_4_codes": [
+ "UNAVAILABLE"
+ ],
+ "retry_policy_5_codes": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ],
+ "retry_policy_6_codes": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE",
+ "RESOURCE_EXHAUSTED"
+ ]
+ },
+ "retry_params": {
+ "no_retry_params": {
+ "initial_retry_delay_millis": 0,
+ "retry_delay_multiplier": 0.0,
+ "max_retry_delay_millis": 0,
+ "initial_rpc_timeout_millis": 0,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 0,
+ "total_timeout_millis": 0
+ },
+ "retry_policy_4_params": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 86400000,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 86400000,
+ "total_timeout_millis": 86400000
+ },
+ "retry_policy_5_params": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 600000,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 600000,
+ "total_timeout_millis": 600000
+ },
+ "retry_policy_6_params": {
+ "initial_retry_delay_millis": 10000,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 120000,
+ "initial_rpc_timeout_millis": 1200000,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 1200000,
+ "total_timeout_millis": 1200000
+ }
+ },
+ "methods": {
+ "AppendRows": {
+ "timeout_millis": 86400000
+ },
+ "BatchCommitWriteStreams": {
+ "timeout_millis": 600000,
+ "retry_codes_name": "retry_policy_5_codes",
+ "retry_params_name": "retry_policy_5_params"
+ },
+ "CreateWriteStream": {
+ "timeout_millis": 1200000,
+ "retry_codes_name": "retry_policy_6_codes",
+ "retry_params_name": "retry_policy_6_params"
+ },
+ "FinalizeWriteStream": {
+ "timeout_millis": 600000,
+ "retry_codes_name": "retry_policy_5_codes",
+ "retry_params_name": "retry_policy_5_params"
+ },
+ "FlushRows": {
+ "timeout_millis": 600000,
+ "retry_codes_name": "retry_policy_5_codes",
+ "retry_params_name": "retry_policy_5_params"
+ },
+ "GetWriteStream": {
+ "timeout_millis": 600000,
+ "retry_codes_name": "retry_policy_5_codes",
+ "retry_params_name": "retry_policy_5_params"
+ }
+ }
+ }
+ }
+}
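The retry params above encode truncated exponential backoff: for example, retry_policy_6 starts at 10 s, multiplies the delay by 1.3 per attempt, and caps at 120 s. A sketch of the resulting delay sequence:

<?php
// Mirrors retry_policy_6_params: initial 10000 ms, multiplier 1.3, cap 120000 ms.
$delayMs = 10000;
for ($attempt = 1; $attempt <= 6; $attempt++) {
    printf("attempt %d: back off %.0f ms\n", $attempt, $delayMs);
    $delayMs = min($delayMs * 1.3, 120000);
}
// Prints 10000, 13000, 16900, 21970, 28561, 37129.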
diff --git a/BigQueryStorage/src/V1/resources/big_query_write_descriptor_config.php b/BigQueryStorage/src/V1/resources/big_query_write_descriptor_config.php
new file mode 100644
index 000000000000..fb69d340527b
--- /dev/null
+++ b/BigQueryStorage/src/V1/resources/big_query_write_descriptor_config.php
@@ -0,0 +1,13 @@
+<?php
+
+return [
+    'interfaces' => [
+ 'google.cloud.bigquery.storage.v1.BigQueryWrite' => [
+ 'AppendRows' => [
+ 'grpcStreaming' => [
+ 'grpcStreamingType' => 'BidiStreaming',
+ ],
+ ],
+ ],
+ ],
+];
diff --git a/BigQueryStorage/src/V1/resources/big_query_write_rest_client_config.php b/BigQueryStorage/src/V1/resources/big_query_write_rest_client_config.php
new file mode 100644
index 000000000000..6dddf701ad6e
--- /dev/null
+++ b/BigQueryStorage/src/V1/resources/big_query_write_rest_client_config.php
@@ -0,0 +1,70 @@
+<?php
+
+return [
+    'interfaces' => [
+ 'google.cloud.bigquery.storage.v1.BigQueryWrite' => [
+ 'BatchCommitWriteStreams' => [
+ 'method' => 'get',
+ 'uriTemplate' => '/v1/{parent=projects/*/datasets/*/tables/*}',
+ 'placeholders' => [
+ 'parent' => [
+ 'getters' => [
+ 'getParent',
+ ],
+ ],
+ ],
+ 'queryParams' => [
+ 'write_streams',
+ ],
+ ],
+ 'CreateWriteStream' => [
+ 'method' => 'post',
+ 'uriTemplate' => '/v1/{parent=projects/*/datasets/*/tables/*}',
+ 'body' => 'write_stream',
+ 'placeholders' => [
+ 'parent' => [
+ 'getters' => [
+ 'getParent',
+ ],
+ ],
+ ],
+ ],
+ 'FinalizeWriteStream' => [
+ 'method' => 'post',
+ 'uriTemplate' => '/v1/{name=projects/*/datasets/*/tables/*/streams/*}',
+ 'body' => '*',
+ 'placeholders' => [
+ 'name' => [
+ 'getters' => [
+ 'getName',
+ ],
+ ],
+ ],
+ ],
+ 'FlushRows' => [
+ 'method' => 'post',
+ 'uriTemplate' => '/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}',
+ 'body' => '*',
+ 'placeholders' => [
+ 'write_stream' => [
+ 'getters' => [
+ 'getWriteStream',
+ ],
+ ],
+ ],
+ ],
+ 'GetWriteStream' => [
+ 'method' => 'post',
+ 'uriTemplate' => '/v1/{name=projects/*/datasets/*/tables/*/streams/*}',
+ 'body' => '*',
+ 'placeholders' => [
+ 'name' => [
+ 'getters' => [
+ 'getName',
+ ],
+ ],
+ ],
+ ],
+ ],
+ ],
+];
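Each entry in this config maps an RPC onto a REST route: the getter named under placeholders supplies the value spliced into uriTemplate. A sketch of the expansion for FlushRows (the resource IDs are hypothetical):

<?php
// The 'write_stream' placeholder is filled from getWriteStream() on the request.
$writeStream = 'projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream';
$uri = str_replace(
    '{write_stream=projects/*/datasets/*/tables/*/streams/*}',
    $writeStream,
    '/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}'
);
// -> /v1/projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream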
diff --git a/BigQueryStorage/tests/Unit/V1/BigQueryReadClientTest.php b/BigQueryStorage/tests/Unit/V1/BigQueryReadClientTest.php
index a6699c204188..95be32bbca94 100644
--- a/BigQueryStorage/tests/Unit/V1/BigQueryReadClientTest.php
+++ b/BigQueryStorage/tests/Unit/V1/BigQueryReadClientTest.php
@@ -23,11 +23,9 @@
namespace Google\Cloud\BigQuery\Storage\Tests\Unit\V1;
use Google\ApiCore\ApiException;
-
use Google\ApiCore\CredentialsWrapper;
use Google\ApiCore\ServerStream;
use Google\ApiCore\Testing\GeneratedTest;
-
use Google\ApiCore\Testing\MockTransport;
use Google\Cloud\BigQuery\Storage\V1\BigQueryReadClient;
use Google\Cloud\BigQuery\Storage\V1\ReadRowsResponse;
@@ -43,25 +41,19 @@
*/
class BigQueryReadClientTest extends GeneratedTest
{
- /**
- * @return TransportInterface
- */
+ /** @return TransportInterface */
private function createTransport($deserialize = null)
{
return new MockTransport($deserialize);
}
- /**
- * @return CredentialsWrapper
- */
+ /** @return CredentialsWrapper */
private function createCredentials()
{
return $this->getMockBuilder(CredentialsWrapper::class)->disableOriginalConstructor()->getMock();
}
- /**
- * @return BigQueryReadClient
- */
+ /** @return BigQueryReadClient */
private function createClient(array $options = [])
{
$options += [
@@ -70,27 +62,31 @@ private function createClient(array $options = [])
return new BigQueryReadClient($options);
}
- /**
- * @test
- */
+ /** @test */
public function createReadSessionTest()
{
$transport = $this->createTransport();
- $client = $this->createClient([
+ $gapicClient = $this->createClient([
'transport' => $transport,
]);
$this->assertTrue($transport->isExhausted());
// Mock response
$name = 'name3373707';
$table = 'table110115790';
+ $estimatedTotalBytesScanned = 452788190;
+ $estimatedRowCount = 1745583577;
+ $traceId = 'traceId1270300245';
$expectedResponse = new ReadSession();
$expectedResponse->setName($name);
$expectedResponse->setTable($table);
+ $expectedResponse->setEstimatedTotalBytesScanned($estimatedTotalBytesScanned);
+ $expectedResponse->setEstimatedRowCount($estimatedRowCount);
+ $expectedResponse->setTraceId($traceId);
$transport->addResponse($expectedResponse);
// Mock request
- $formattedParent = $client->projectName('[PROJECT]');
+ $formattedParent = $gapicClient->projectName('[PROJECT]');
$readSession = new ReadSession();
- $response = $client->createReadSession($formattedParent, $readSession);
+ $response = $gapicClient->createReadSession($formattedParent, $readSession);
$this->assertEquals($expectedResponse, $response);
$actualRequests = $transport->popReceivedCalls();
$this->assertSame(1, count($actualRequests));
@@ -104,13 +100,11 @@ public function createReadSessionTest()
$this->assertTrue($transport->isExhausted());
}
- /**
- * @test
- */
+ /** @test */
public function createReadSessionExceptionTest()
{
$transport = $this->createTransport();
- $client = $this->createClient([
+ $gapicClient = $this->createClient([
'transport' => $transport,
]);
$this->assertTrue($transport->isExhausted());
@@ -125,11 +119,11 @@ public function createReadSessionExceptionTest()
], JSON_PRETTY_PRINT);
$transport->addResponse(null, $status);
// Mock request
- $formattedParent = $client->projectName('[PROJECT]');
+ $formattedParent = $gapicClient->projectName('[PROJECT]');
$readSession = new ReadSession();
try {
- $client->createReadSession($formattedParent, $readSession);
- // If the $client method call did not throw, fail the test
+ $gapicClient->createReadSession($formattedParent, $readSession);
+ // If the $gapicClient method call did not throw, fail the test
$this->fail('Expected an ApiException, but no exception was thrown.');
} catch (ApiException $ex) {
$this->assertEquals($status->code, $ex->getCode());
@@ -140,13 +134,11 @@ public function createReadSessionExceptionTest()
$this->assertTrue($transport->isExhausted());
}
- /**
- * @test
- */
+ /** @test */
public function readRowsTest()
{
$transport = $this->createTransport();
- $client = $this->createClient([
+ $gapicClient = $this->createClient([
'transport' => $transport,
]);
$this->assertTrue($transport->isExhausted());
@@ -164,8 +156,8 @@ public function readRowsTest()
$expectedResponse3->setRowCount($rowCount3);
$transport->addResponse($expectedResponse3);
// Mock request
- $formattedReadStream = $client->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
- $serverStream = $client->readRows($formattedReadStream);
+ $formattedReadStream = $gapicClient->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
+ $serverStream = $gapicClient->readRows($formattedReadStream);
$this->assertInstanceOf(ServerStream::class, $serverStream);
$responses = iterator_to_array($serverStream->readAll());
$expectedResponses = [];
@@ -183,13 +175,11 @@ public function readRowsTest()
$this->assertTrue($transport->isExhausted());
}
- /**
- * @test
- */
+ /** @test */
public function readRowsExceptionTest()
{
$transport = $this->createTransport();
- $client = $this->createClient([
+ $gapicClient = $this->createClient([
'transport' => $transport,
]);
$status = new stdClass();
@@ -204,8 +194,8 @@ public function readRowsExceptionTest()
$transport->setStreamingStatus($status);
$this->assertTrue($transport->isExhausted());
// Mock request
- $formattedReadStream = $client->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
- $serverStream = $client->readRows($formattedReadStream);
+ $formattedReadStream = $gapicClient->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
+ $serverStream = $gapicClient->readRows($formattedReadStream);
$results = $serverStream->readAll();
try {
iterator_to_array($results);
@@ -220,13 +210,11 @@ public function readRowsExceptionTest()
$this->assertTrue($transport->isExhausted());
}
- /**
- * @test
- */
+ /** @test */
public function splitReadStreamTest()
{
$transport = $this->createTransport();
- $client = $this->createClient([
+ $gapicClient = $this->createClient([
'transport' => $transport,
]);
$this->assertTrue($transport->isExhausted());
@@ -234,8 +222,8 @@ public function splitReadStreamTest()
$expectedResponse = new SplitReadStreamResponse();
$transport->addResponse($expectedResponse);
// Mock request
- $formattedName = $client->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
- $response = $client->splitReadStream($formattedName);
+ $formattedName = $gapicClient->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
+ $response = $gapicClient->splitReadStream($formattedName);
$this->assertEquals($expectedResponse, $response);
$actualRequests = $transport->popReceivedCalls();
$this->assertSame(1, count($actualRequests));
@@ -247,13 +235,11 @@ public function splitReadStreamTest()
$this->assertTrue($transport->isExhausted());
}
- /**
- * @test
- */
+ /** @test */
public function splitReadStreamExceptionTest()
{
$transport = $this->createTransport();
- $client = $this->createClient([
+ $gapicClient = $this->createClient([
'transport' => $transport,
]);
$this->assertTrue($transport->isExhausted());
@@ -268,10 +254,10 @@ public function splitReadStreamExceptionTest()
], JSON_PRETTY_PRINT);
$transport->addResponse(null, $status);
// Mock request
- $formattedName = $client->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
+ $formattedName = $gapicClient->readStreamName('[PROJECT]', '[LOCATION]', '[SESSION]', '[STREAM]');
try {
- $client->splitReadStream($formattedName);
- // If the $client method call did not throw, fail the test
+ $gapicClient->splitReadStream($formattedName);
+ // If the $gapicClient method call did not throw, fail the test
$this->fail('Expected an ApiException, but no exception was thrown.');
} catch (ApiException $ex) {
$this->assertEquals($status->code, $ex->getCode());
diff --git a/BigQueryStorage/tests/Unit/V1/BigQueryWriteClientTest.php b/BigQueryStorage/tests/Unit/V1/BigQueryWriteClientTest.php
new file mode 100644
index 000000000000..97c0d9f6b907
--- /dev/null
+++ b/BigQueryStorage/tests/Unit/V1/BigQueryWriteClientTest.php
@@ -0,0 +1,477 @@
+    /** @return TransportInterface */
+    private function createTransport($deserialize = null)
+    {
+        return new MockTransport($deserialize);
+    }
+
+    /** @return CredentialsWrapper */
+    private function createCredentials()
+    {
+        return $this->getMockBuilder(CredentialsWrapper::class)->disableOriginalConstructor()->getMock();
+ }
+
+ /** @return BigQueryWriteClient */
+ private function createClient(array $options = [])
+ {
+ $options += [
+ 'credentials' => $this->createCredentials(),
+ ];
+ return new BigQueryWriteClient($options);
+ }
+
+ /** @test */
+ public function appendRowsTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ // Mock response
+ $writeStream = 'writeStream-1431753760';
+ $expectedResponse = new AppendRowsResponse();
+ $expectedResponse->setWriteStream($writeStream);
+ $transport->addResponse($expectedResponse);
+ $writeStream2 = 'writeStream2-1525825645';
+ $expectedResponse2 = new AppendRowsResponse();
+ $expectedResponse2->setWriteStream($writeStream2);
+ $transport->addResponse($expectedResponse2);
+ $writeStream3 = 'writeStream3-1525825644';
+ $expectedResponse3 = new AppendRowsResponse();
+ $expectedResponse3->setWriteStream($writeStream3);
+ $transport->addResponse($expectedResponse3);
+ // Mock request
+ $formattedWriteStream4 = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ $request = new AppendRowsRequest();
+ $request->setWriteStream($formattedWriteStream4);
+ $formattedWriteStream5 = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ $request2 = new AppendRowsRequest();
+ $request2->setWriteStream($formattedWriteStream5);
+ $formattedWriteStream6 = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ $request3 = new AppendRowsRequest();
+ $request3->setWriteStream($formattedWriteStream6);
+ $bidi = $gapicClient->appendRows();
+ $this->assertInstanceOf(BidiStream::class, $bidi);
+ $bidi->write($request);
+ $responses = [];
+ $responses[] = $bidi->read();
+ $bidi->writeAll([
+ $request2,
+ $request3,
+ ]);
+ foreach ($bidi->closeWriteAndReadAll() as $response) {
+ $responses[] = $response;
+ }
+
+ $expectedResponses = [];
+ $expectedResponses[] = $expectedResponse;
+ $expectedResponses[] = $expectedResponse2;
+ $expectedResponses[] = $expectedResponse3;
+ $this->assertEquals($expectedResponses, $responses);
+ $createStreamRequests = $transport->popReceivedCalls();
+ $this->assertSame(1, count($createStreamRequests));
+ $streamFuncCall = $createStreamRequests[0]->getFuncCall();
+ $streamRequestObject = $createStreamRequests[0]->getRequestObject();
+ $this->assertSame('/google.cloud.bigquery.storage.v1.BigQueryWrite/AppendRows', $streamFuncCall);
+ $this->assertNull($streamRequestObject);
+ $callObjects = $transport->popCallObjects();
+ $this->assertSame(1, count($callObjects));
+ $bidiCall = $callObjects[0];
+ $writeRequests = $bidiCall->popReceivedCalls();
+ $expectedRequests = [];
+ $expectedRequests[] = $request;
+ $expectedRequests[] = $request2;
+ $expectedRequests[] = $request3;
+ $this->assertEquals($expectedRequests, $writeRequests);
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function appendRowsExceptionTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $status = new stdClass();
+ $status->code = Code::DATA_LOSS;
+ $status->details = 'internal error';
+ $expectedExceptionMessage = json_encode([
+ 'message' => 'internal error',
+ 'code' => Code::DATA_LOSS,
+ 'status' => 'DATA_LOSS',
+ 'details' => [],
+ ], JSON_PRETTY_PRINT);
+ $transport->setStreamingStatus($status);
+ $this->assertTrue($transport->isExhausted());
+ $bidi = $gapicClient->appendRows();
+ $results = $bidi->closeWriteAndReadAll();
+ try {
+ iterator_to_array($results);
+ // If the close stream method call did not throw, fail the test
+ $this->fail('Expected an ApiException, but no exception was thrown.');
+ } catch (ApiException $ex) {
+ $this->assertEquals($status->code, $ex->getCode());
+ $this->assertEquals($expectedExceptionMessage, $ex->getMessage());
+ }
+ // Call popReceivedCalls to ensure the stub is exhausted
+ $transport->popReceivedCalls();
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function batchCommitWriteStreamsTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ // Mock response
+ $expectedResponse = new BatchCommitWriteStreamsResponse();
+ $transport->addResponse($expectedResponse);
+ // Mock request
+ $formattedParent = $gapicClient->tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+ $writeStreams = [];
+ $response = $gapicClient->batchCommitWriteStreams($formattedParent, $writeStreams);
+ $this->assertEquals($expectedResponse, $response);
+ $actualRequests = $transport->popReceivedCalls();
+ $this->assertSame(1, count($actualRequests));
+ $actualFuncCall = $actualRequests[0]->getFuncCall();
+ $actualRequestObject = $actualRequests[0]->getRequestObject();
+ $this->assertSame('/google.cloud.bigquery.storage.v1.BigQueryWrite/BatchCommitWriteStreams', $actualFuncCall);
+ $actualValue = $actualRequestObject->getParent();
+ $this->assertProtobufEquals($formattedParent, $actualValue);
+ $actualValue = $actualRequestObject->getWriteStreams();
+ $this->assertProtobufEquals($writeStreams, $actualValue);
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function batchCommitWriteStreamsExceptionTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ $status = new stdClass();
+ $status->code = Code::DATA_LOSS;
+ $status->details = 'internal error';
+ $expectedExceptionMessage = json_encode([
+ 'message' => 'internal error',
+ 'code' => Code::DATA_LOSS,
+ 'status' => 'DATA_LOSS',
+ 'details' => [],
+ ], JSON_PRETTY_PRINT);
+ $transport->addResponse(null, $status);
+ // Mock request
+ $formattedParent = $gapicClient->tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+ $writeStreams = [];
+ try {
+ $gapicClient->batchCommitWriteStreams($formattedParent, $writeStreams);
+ // If the $gapicClient method call did not throw, fail the test
+ $this->fail('Expected an ApiException, but no exception was thrown.');
+ } catch (ApiException $ex) {
+ $this->assertEquals($status->code, $ex->getCode());
+ $this->assertEquals($expectedExceptionMessage, $ex->getMessage());
+ }
+ // Call popReceivedCalls to ensure the stub is exhausted
+ $transport->popReceivedCalls();
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function createWriteStreamTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ // Mock response
+ $name = 'name3373707';
+ $location = 'location1901043637';
+ $expectedResponse = new WriteStream();
+ $expectedResponse->setName($name);
+ $expectedResponse->setLocation($location);
+ $transport->addResponse($expectedResponse);
+ // Mock request
+ $formattedParent = $gapicClient->tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+ $writeStream = new WriteStream();
+ $response = $gapicClient->createWriteStream($formattedParent, $writeStream);
+ $this->assertEquals($expectedResponse, $response);
+ $actualRequests = $transport->popReceivedCalls();
+ $this->assertSame(1, count($actualRequests));
+ $actualFuncCall = $actualRequests[0]->getFuncCall();
+ $actualRequestObject = $actualRequests[0]->getRequestObject();
+ $this->assertSame('/google.cloud.bigquery.storage.v1.BigQueryWrite/CreateWriteStream', $actualFuncCall);
+ $actualValue = $actualRequestObject->getParent();
+ $this->assertProtobufEquals($formattedParent, $actualValue);
+ $actualValue = $actualRequestObject->getWriteStream();
+ $this->assertProtobufEquals($writeStream, $actualValue);
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function createWriteStreamExceptionTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ $status = new stdClass();
+ $status->code = Code::DATA_LOSS;
+ $status->details = 'internal error';
+ $expectedExceptionMessage = json_encode([
+ 'message' => 'internal error',
+ 'code' => Code::DATA_LOSS,
+ 'status' => 'DATA_LOSS',
+ 'details' => [],
+ ], JSON_PRETTY_PRINT);
+ $transport->addResponse(null, $status);
+ // Mock request
+ $formattedParent = $gapicClient->tableName('[PROJECT]', '[DATASET]', '[TABLE]');
+ $writeStream = new WriteStream();
+ try {
+ $gapicClient->createWriteStream($formattedParent, $writeStream);
+ // If the $gapicClient method call did not throw, fail the test
+ $this->fail('Expected an ApiException, but no exception was thrown.');
+ } catch (ApiException $ex) {
+ $this->assertEquals($status->code, $ex->getCode());
+ $this->assertEquals($expectedExceptionMessage, $ex->getMessage());
+ }
+ // Call popReceivedCalls to ensure the stub is exhausted
+ $transport->popReceivedCalls();
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function finalizeWriteStreamTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ // Mock response
+ $rowCount = 1340416618;
+ $expectedResponse = new FinalizeWriteStreamResponse();
+ $expectedResponse->setRowCount($rowCount);
+ $transport->addResponse($expectedResponse);
+ // Mock request
+ $formattedName = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ $response = $gapicClient->finalizeWriteStream($formattedName);
+ $this->assertEquals($expectedResponse, $response);
+ $actualRequests = $transport->popReceivedCalls();
+ $this->assertSame(1, count($actualRequests));
+ $actualFuncCall = $actualRequests[0]->getFuncCall();
+ $actualRequestObject = $actualRequests[0]->getRequestObject();
+ $this->assertSame('/google.cloud.bigquery.storage.v1.BigQueryWrite/FinalizeWriteStream', $actualFuncCall);
+ $actualValue = $actualRequestObject->getName();
+ $this->assertProtobufEquals($formattedName, $actualValue);
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function finalizeWriteStreamExceptionTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ $status = new stdClass();
+ $status->code = Code::DATA_LOSS;
+ $status->details = 'internal error';
+ $expectedExceptionMessage = json_encode([
+ 'message' => 'internal error',
+ 'code' => Code::DATA_LOSS,
+ 'status' => 'DATA_LOSS',
+ 'details' => [],
+ ], JSON_PRETTY_PRINT);
+ $transport->addResponse(null, $status);
+ // Mock request
+ $formattedName = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ try {
+ $gapicClient->finalizeWriteStream($formattedName);
+ // If the $gapicClient method call did not throw, fail the test
+ $this->fail('Expected an ApiException, but no exception was thrown.');
+ } catch (ApiException $ex) {
+ $this->assertEquals($status->code, $ex->getCode());
+ $this->assertEquals($expectedExceptionMessage, $ex->getMessage());
+ }
+ // Call popReceivedCalls to ensure the stub is exhausted
+ $transport->popReceivedCalls();
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function flushRowsTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ // Mock response
+ $offset2 = 755984506;
+ $expectedResponse = new FlushRowsResponse();
+ $expectedResponse->setOffset($offset2);
+ $transport->addResponse($expectedResponse);
+ // Mock request
+ $formattedWriteStream = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ $response = $gapicClient->flushRows($formattedWriteStream);
+ $this->assertEquals($expectedResponse, $response);
+ $actualRequests = $transport->popReceivedCalls();
+ $this->assertSame(1, count($actualRequests));
+ $actualFuncCall = $actualRequests[0]->getFuncCall();
+ $actualRequestObject = $actualRequests[0]->getRequestObject();
+ $this->assertSame('/google.cloud.bigquery.storage.v1.BigQueryWrite/FlushRows', $actualFuncCall);
+ $actualValue = $actualRequestObject->getWriteStream();
+ $this->assertProtobufEquals($formattedWriteStream, $actualValue);
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function flushRowsExceptionTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ $status = new stdClass();
+ $status->code = Code::DATA_LOSS;
+ $status->details = 'internal error';
+ $expectedExceptionMessage = json_encode([
+ 'message' => 'internal error',
+ 'code' => Code::DATA_LOSS,
+ 'status' => 'DATA_LOSS',
+ 'details' => [],
+ ], JSON_PRETTY_PRINT);
+ $transport->addResponse(null, $status);
+ // Mock request
+ $formattedWriteStream = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ try {
+ $gapicClient->flushRows($formattedWriteStream);
+ // If the $gapicClient method call did not throw, fail the test
+ $this->fail('Expected an ApiException, but no exception was thrown.');
+ } catch (ApiException $ex) {
+ $this->assertEquals($status->code, $ex->getCode());
+ $this->assertEquals($expectedExceptionMessage, $ex->getMessage());
+ }
+ // Call popReceivedCalls to ensure the stub is exhausted
+ $transport->popReceivedCalls();
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function getWriteStreamTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ // Mock response
+ $name2 = 'name2-1052831874';
+ $location = 'location1901043637';
+ $expectedResponse = new WriteStream();
+ $expectedResponse->setName($name2);
+ $expectedResponse->setLocation($location);
+ $transport->addResponse($expectedResponse);
+ // Mock request
+ $formattedName = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ $response = $gapicClient->getWriteStream($formattedName);
+ $this->assertEquals($expectedResponse, $response);
+ $actualRequests = $transport->popReceivedCalls();
+ $this->assertSame(1, count($actualRequests));
+ $actualFuncCall = $actualRequests[0]->getFuncCall();
+ $actualRequestObject = $actualRequests[0]->getRequestObject();
+ $this->assertSame('/google.cloud.bigquery.storage.v1.BigQueryWrite/GetWriteStream', $actualFuncCall);
+ $actualValue = $actualRequestObject->getName();
+ $this->assertProtobufEquals($formattedName, $actualValue);
+ $this->assertTrue($transport->isExhausted());
+ }
+
+ /** @test */
+ public function getWriteStreamExceptionTest()
+ {
+ $transport = $this->createTransport();
+ $gapicClient = $this->createClient([
+ 'transport' => $transport,
+ ]);
+ $this->assertTrue($transport->isExhausted());
+ $status = new stdClass();
+ $status->code = Code::DATA_LOSS;
+ $status->details = 'internal error';
+ $expectedExceptionMessage = json_encode([
+ 'message' => 'internal error',
+ 'code' => Code::DATA_LOSS,
+ 'status' => 'DATA_LOSS',
+ 'details' => [],
+ ], JSON_PRETTY_PRINT);
+ $transport->addResponse(null, $status);
+ // Mock request
+ $formattedName = $gapicClient->writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');
+ try {
+ $gapicClient->getWriteStream($formattedName);
+ // If the $gapicClient method call did not throw, fail the test
+ $this->fail('Expected an ApiException, but no exception was thrown.');
+ } catch (ApiException $ex) {
+ $this->assertEquals($status->code, $ex->getCode());
+ $this->assertEquals($expectedExceptionMessage, $ex->getMessage());
+ }
+ // Call popReceivedCalls to ensure the stub is exhausted
+ $transport->popReceivedCalls();
+ $this->assertTrue($transport->isExhausted());
+ }
+}
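Outside the mocked transport, the same bidi surface drives real appends. A minimal sketch, assuming application default credentials, a configured BigQueryWriteClient, and an AppendRowsRequest ($request) built elsewhere with schema and serialized rows:

<?php
use Google\Cloud\BigQuery\Storage\V1\BigQueryWriteClient;

$client = new BigQueryWriteClient();
$bidi = $client->appendRows();
$bidi->write($request); // hypothetical AppendRowsRequest prepared by the caller
foreach ($bidi->closeWriteAndReadAll() as $response) {
    // Each AppendRowsResponse reports the stream the rows were applied to.
    printf("appended, stream: %s\n", $response->getWriteStream());
}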