diff --git a/linkinator.config.json b/linkinator.config.json index 0121dfa6..befd23c8 100644 --- a/linkinator.config.json +++ b/linkinator.config.json @@ -3,8 +3,14 @@ "skip": [ "https://codecov.io/gh/googleapis/", "www.googleapis.com", - "img.shields.io" + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" ], "silent": true, - "concurrency": 5 + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 } diff --git a/protos/google/cloud/bigquery/storage/v1/arrow.proto b/protos/google/cloud/bigquery/storage/v1/arrow.proto index 514b77e6..6d3f6080 100644 --- a/protos/google/cloud/bigquery/storage/v1/arrow.proto +++ b/protos/google/cloud/bigquery/storage/v1/arrow.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -39,8 +39,9 @@ message ArrowRecordBatch { // IPC-serialized Arrow RecordBatch. bytes serialized_record_batch = 1; - // The count of rows in `serialized_record_batch`. - int64 row_count = 2; + // [Deprecated] The count of rows in `serialized_record_batch`. + // Please use the format-independent ReadRowsResponse.row_count instead. + int64 row_count = 2 [deprecated = true]; } // Contains options specific to Arrow Serialization. diff --git a/protos/google/cloud/bigquery/storage/v1/avro.proto b/protos/google/cloud/bigquery/storage/v1/avro.proto index dee4a6ed..15de2db5 100644 --- a/protos/google/cloud/bigquery/storage/v1/avro.proto +++ b/protos/google/cloud/bigquery/storage/v1/avro.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,6 +35,7 @@ message AvroRows { // Binary serialized rows in a block. bytes serialized_binary_rows = 1; - // The count of rows in the returning block. - int64 row_count = 2; + // [Deprecated] The count of rows in the returning block. + // Please use the format-independent ReadRowsResponse.row_count instead. + int64 row_count = 2 [deprecated = true]; } diff --git a/protos/google/cloud/bigquery/storage/v1/protobuf.proto b/protos/google/cloud/bigquery/storage/v1/protobuf.proto index f987467d..b3754acf 100644 --- a/protos/google/cloud/bigquery/storage/v1/protobuf.proto +++ b/protos/google/cloud/bigquery/storage/v1/protobuf.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/protos/google/cloud/bigquery/storage/v1/storage.proto b/protos/google/cloud/bigquery/storage/v1/storage.proto index ab5a46cf..67c6c8a0 100644 --- a/protos/google/cloud/bigquery/storage/v1/storage.proto +++ b/protos/google/cloud/bigquery/storage/v1/storage.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
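Two consumer-visible changes land above: the linkinator config now retries flaky links during docs link checking (up to 5 retries with 3000 ms jitter), and `ArrowRecordBatch.row_count` / `AvroRows.row_count` are deprecated in favor of the format-independent `ReadRowsResponse.row_count`. As a rough illustration of the read-side migration, here is a minimal sketch against the Node.js client; the project and table names are placeholders, error handling is trimmed, and the exact export shape of `@google-cloud/bigquery-storage` is assumed rather than taken from this diff:

```js
// Sketch: prefer the format-independent ReadRowsResponse.row_count over the
// per-format counts deprecated in the hunks above.
// Assumes @google-cloud/bigquery-storage is installed.
const {BigQueryReadClient} = require('@google-cloud/bigquery-storage');

async function countRows() {
  const client = new BigQueryReadClient();

  // Open a read session over a table (placeholder project/table names).
  const [session] = await client.createReadSession({
    parent: 'projects/my-project',
    readSession: {
      table:
        'projects/bigquery-public-data/datasets/usa_names/tables/usa_1910_current',
      dataFormat: 'AVRO',
    },
    maxStreamCount: 1,
  });

  // Stream rows from the first read stream in the session.
  const stream = client.readRows({
    readStream: session.streams[0].name,
    offset: 0,
  });

  stream.on('data', response => {
    // Preferred: format-independent count on the response itself.
    console.log('rows in this response:', response.rowCount);
    // Deprecated equivalent being phased out by this diff:
    //   response.avroRows.rowCount  (or response.arrowRecordBatch.rowCount)
  });

  await new Promise((resolve, reject) =>
    stream.on('end', resolve).on('error', reject)
  );
}

countRows();
```

The deprecated fields keep working for now (`[deprecated = true]` only annotates the schema), so readers can switch to `response.rowCount` at their own pace.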
@@ -47,7 +47,6 @@ service BigQueryRead { option (google.api.default_host) = "bigquerystorage.googleapis.com"; option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/bigquery," - "https://www.googleapis.com/auth/bigquery.readonly," "https://www.googleapis.com/auth/cloud-platform"; // Creates a new read session. A read session divides the contents of a @@ -168,6 +167,13 @@ service BigQueryWrite { // * For PENDING streams, data is not made visible until the stream itself is // finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly // committed via the `BatchCommitWriteStreams` rpc. + // + // Note: For users coding against the gRPC api directly, it may be + // necessary to supply the x-goog-request-params system parameter + // with `write_stream=`. + // + // More information about system parameters: + // https://cloud.google.com/apis/docs/system-parameters rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) { option (google.api.http) = { post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}" @@ -409,10 +415,12 @@ message AppendRowsRequest { // request. // // For explicitly created write streams, the format is: - // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` + // + // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` // // For the special default stream, the format is: - // `projects/{project}/datasets/{dataset}/tables/{table}/_default`. + // + // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. string write_stream = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -499,7 +507,10 @@ message BatchCommitWriteStreamsRequest { // Required. Parent table that all the streams should belong to, in the form of // `projects/{project}/datasets/{dataset}/tables/{table}`. string parent = 1 [ - (google.api.field_behavior) = REQUIRED + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigquery.googleapis.com/Table" + } ]; // Required. The group of streams that will be committed atomically. @@ -594,6 +605,12 @@ message StorageError { // There is a schema mismatch and it is caused by user schema has extra // field than bigquery schema. SCHEMA_MISMATCH_EXTRA_FIELDS = 7; + + // Offset already exists. + OFFSET_ALREADY_EXISTS = 8; + + // Offset out of range. + OFFSET_OUT_OF_RANGE = 9; } // BigQuery Storage specific error code. diff --git a/protos/google/cloud/bigquery/storage/v1/stream.proto b/protos/google/cloud/bigquery/storage/v1/stream.proto index 0b0bc1ad..bd1fa2ce 100644 --- a/protos/google/cloud/bigquery/storage/v1/stream.proto +++ b/protos/google/cloud/bigquery/storage/v1/stream.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -132,6 +132,14 @@ message ReadSession { // all streams are completely consumed. This estimate is based on // metadata from the table which might be incomplete or stale. int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. ID set by client to annotate a session identity. This does not need + // to be strictly unique, but instead the same ID should be used to group + // logically connected sessions (e.g. All using the same ID for all sessions + // needed to complete a Spark SQL query is reasonable). + // + // Maximum length is 256 bytes. 
+ string trace_id = 13 [(google.api.field_behavior) = OPTIONAL]; } // Information about a single stream that gets data out of the storage system. diff --git a/protos/google/cloud/bigquery/storage/v1/table.proto b/protos/google/cloud/bigquery/storage/v1/table.proto index a8c6f844..545f6292 100644 --- a/protos/google/cloud/bigquery/storage/v1/table.proto +++ b/protos/google/cloud/bigquery/storage/v1/table.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/protos/google/cloud/bigquery/storage/v1beta1/storage.proto b/protos/google/cloud/bigquery/storage/v1beta1/storage.proto index 81e77c73..0d311418 100644 --- a/protos/google/cloud/bigquery/storage/v1beta1/storage.proto +++ b/protos/google/cloud/bigquery/storage/v1beta1/storage.proto @@ -37,7 +37,6 @@ service BigQueryStorage { option (google.api.default_host) = "bigquerystorage.googleapis.com"; option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/bigquery," - "https://www.googleapis.com/auth/bigquery.readonly," "https://www.googleapis.com/auth/cloud-platform"; // Creates a new read session. A read session divides the contents of a diff --git a/protos/protos.d.ts b/protos/protos.d.ts index 12ded42b..a82f4076 100644 --- a/protos/protos.d.ts +++ b/protos/protos.d.ts @@ -2984,7 +2984,9 @@ export namespace google { INVALID_STREAM_TYPE = 4, INVALID_STREAM_STATE = 5, STREAM_FINALIZED = 6, - SCHEMA_MISMATCH_EXTRA_FIELDS = 7 + SCHEMA_MISMATCH_EXTRA_FIELDS = 7, + OFFSET_ALREADY_EXISTS = 8, + OFFSET_OUT_OF_RANGE = 9 } } @@ -3027,6 +3029,9 @@ export namespace google { /** ReadSession estimatedTotalBytesScanned */ estimatedTotalBytesScanned?: (number|Long|string|null); + + /** ReadSession traceId */ + traceId?: (string|null); } /** Represents a ReadSession. */ @@ -3068,6 +3073,9 @@ export namespace google { /** ReadSession estimatedTotalBytesScanned. */ public estimatedTotalBytesScanned: (number|Long|string); + /** ReadSession traceId. */ + public traceId: string; + /** ReadSession schema. 
*/ public schema?: ("avroSchema"|"arrowSchema"); diff --git a/protos/protos.js b/protos/protos.js index 930ac609..1d0989c2 100644 --- a/protos/protos.js +++ b/protos/protos.js @@ -6611,6 +6611,8 @@ case 5: case 6: case 7: + case 8: + case 9: break; } if (message.entity != null && message.hasOwnProperty("entity")) @@ -6667,6 +6669,14 @@ case 7: message.code = 7; break; + case "OFFSET_ALREADY_EXISTS": + case 8: + message.code = 8; + break; + case "OFFSET_OUT_OF_RANGE": + case 9: + message.code = 9; + break; } if (object.entity != null) message.entity = String(object.entity); @@ -6725,6 +6735,8 @@ * @property {number} INVALID_STREAM_STATE=5 INVALID_STREAM_STATE value * @property {number} STREAM_FINALIZED=6 STREAM_FINALIZED value * @property {number} SCHEMA_MISMATCH_EXTRA_FIELDS=7 SCHEMA_MISMATCH_EXTRA_FIELDS value + * @property {number} OFFSET_ALREADY_EXISTS=8 OFFSET_ALREADY_EXISTS value + * @property {number} OFFSET_OUT_OF_RANGE=9 OFFSET_OUT_OF_RANGE value */ StorageError.StorageErrorCode = (function() { var valuesById = {}, values = Object.create(valuesById); @@ -6736,6 +6748,8 @@ values[valuesById[5] = "INVALID_STREAM_STATE"] = 5; values[valuesById[6] = "STREAM_FINALIZED"] = 6; values[valuesById[7] = "SCHEMA_MISMATCH_EXTRA_FIELDS"] = 7; + values[valuesById[8] = "OFFSET_ALREADY_EXISTS"] = 8; + values[valuesById[9] = "OFFSET_OUT_OF_RANGE"] = 9; return values; })(); @@ -6774,6 +6788,7 @@ * @property {google.cloud.bigquery.storage.v1.ReadSession.ITableReadOptions|null} [readOptions] ReadSession readOptions * @property {Array.|null} [streams] ReadSession streams * @property {number|Long|null} [estimatedTotalBytesScanned] ReadSession estimatedTotalBytesScanned + * @property {string|null} [traceId] ReadSession traceId */ /** @@ -6872,6 +6887,14 @@ */ ReadSession.prototype.estimatedTotalBytesScanned = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** + * ReadSession traceId. 
+ * @member {string} traceId + * @memberof google.cloud.bigquery.storage.v1.ReadSession + * @instance + */ + ReadSession.prototype.traceId = ""; + // OneOf field names bound to virtual getters and setters var $oneOfFields; @@ -6931,6 +6954,8 @@ $root.google.cloud.bigquery.storage.v1.ReadStream.encode(message.streams[i], writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); if (message.estimatedTotalBytesScanned != null && Object.hasOwnProperty.call(message, "estimatedTotalBytesScanned")) writer.uint32(/* id 12, wireType 0 =*/96).int64(message.estimatedTotalBytesScanned); + if (message.traceId != null && Object.hasOwnProperty.call(message, "traceId")) + writer.uint32(/* id 13, wireType 2 =*/106).string(message.traceId); return writer; }; @@ -6997,6 +7022,9 @@ case 12: message.estimatedTotalBytesScanned = reader.int64(); break; + case 13: + message.traceId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -7093,6 +7121,9 @@ if (message.estimatedTotalBytesScanned != null && message.hasOwnProperty("estimatedTotalBytesScanned")) if (!$util.isInteger(message.estimatedTotalBytesScanned) && !(message.estimatedTotalBytesScanned && $util.isInteger(message.estimatedTotalBytesScanned.low) && $util.isInteger(message.estimatedTotalBytesScanned.high))) return "estimatedTotalBytesScanned: integer|Long expected"; + if (message.traceId != null && message.hasOwnProperty("traceId")) + if (!$util.isString(message.traceId)) + return "traceId: string expected"; return null; }; @@ -7170,6 +7201,8 @@ message.estimatedTotalBytesScanned = object.estimatedTotalBytesScanned; else if (typeof object.estimatedTotalBytesScanned === "object") message.estimatedTotalBytesScanned = new $util.LongBits(object.estimatedTotalBytesScanned.low >>> 0, object.estimatedTotalBytesScanned.high >>> 0).toNumber(); + if (object.traceId != null) + message.traceId = String(object.traceId); return message; }; @@ -7200,6 +7233,7 @@ object.estimatedTotalBytesScanned = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else object.estimatedTotalBytesScanned = options.longs === String ? "0" : 0; + object.traceId = ""; } if (message.name != null && message.hasOwnProperty("name")) object.name = message.name; @@ -7233,6 +7267,8 @@ object.estimatedTotalBytesScanned = options.longs === String ? String(message.estimatedTotalBytesScanned) : message.estimatedTotalBytesScanned; else object.estimatedTotalBytesScanned = options.longs === String ? $util.Long.prototype.toString.call(message.estimatedTotalBytesScanned) : options.longs === Number ? 
new $util.LongBits(message.estimatedTotalBytesScanned.low >>> 0, message.estimatedTotalBytesScanned.high >>> 0).toNumber() : message.estimatedTotalBytesScanned; + if (message.traceId != null && message.hasOwnProperty("traceId")) + object.traceId = message.traceId; return object; }; diff --git a/protos/protos.json b/protos/protos.json index 651ae7e0..a892cd68 100644 --- a/protos/protos.json +++ b/protos/protos.json @@ -36,7 +36,10 @@ }, "rowCount": { "type": "int64", - "id": 2 + "id": 2, + "options": { + "deprecated": true + } } } }, @@ -73,7 +76,10 @@ }, "rowCount": { "type": "int64", - "id": 2 + "id": 2, + "options": { + "deprecated": true + } } } }, @@ -97,7 +103,7 @@ "BigQueryRead": { "options": { "(google.api.default_host)": "bigquerystorage.googleapis.com", - "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/bigquery,https://www.googleapis.com/auth/bigquery.readonly,https://www.googleapis.com/auth/cloud-platform" + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/bigquery,https://www.googleapis.com/auth/cloud-platform" }, "methods": { "CreateReadSession": { @@ -548,7 +554,8 @@ "type": "string", "id": 1, "options": { - "(google.api.field_behavior)": "REQUIRED" + "(google.api.field_behavior)": "REQUIRED", + "(google.api.resource_reference).type": "bigquery.googleapis.com/Table" } }, "writeStreams": { @@ -643,7 +650,9 @@ "INVALID_STREAM_TYPE": 4, "INVALID_STREAM_STATE": 5, "STREAM_FINALIZED": 6, - "SCHEMA_MISMATCH_EXTRA_FIELDS": 7 + "SCHEMA_MISMATCH_EXTRA_FIELDS": 7, + "OFFSET_ALREADY_EXISTS": 8, + "OFFSET_OUT_OF_RANGE": 9 } } } @@ -740,6 +749,13 @@ "options": { "(google.api.field_behavior)": "OUTPUT_ONLY" } + }, + "traceId": { + "type": "string", + "id": 13, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } }, "nested": { @@ -1026,7 +1042,7 @@ "BigQueryStorage": { "options": { "(google.api.default_host)": "bigquerystorage.googleapis.com", - "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/bigquery,https://www.googleapis.com/auth/bigquery.readonly,https://www.googleapis.com/auth/cloud-platform" + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/bigquery,https://www.googleapis.com/auth/cloud-platform" }, "methods": { "CreateReadSession": { diff --git a/samples/generated/v1/big_query_read.create_read_session.js b/samples/generated/v1/big_query_read.create_read_session.js index a14c16b7..f4d2f950 100644 --- a/samples/generated/v1/big_query_read.create_read_session.js +++ b/samples/generated/v1/big_query_read.create_read_session.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + 'use strict'; diff --git a/samples/generated/v1/big_query_read.read_rows.js b/samples/generated/v1/big_query_read.read_rows.js index a51ad69d..f5f781a4 100644 --- a/samples/generated/v1/big_query_read.read_rows.js +++ b/samples/generated/v1/big_query_read.read_rows.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1/big_query_read.split_read_stream.js b/samples/generated/v1/big_query_read.split_read_stream.js index 478f78e1..6e146955 100644 --- a/samples/generated/v1/big_query_read.split_read_stream.js +++ b/samples/generated/v1/big_query_read.split_read_stream.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1/big_query_write.append_rows.js b/samples/generated/v1/big_query_write.append_rows.js index 51db4c07..9cefbb22 100644 --- a/samples/generated/v1/big_query_write.append_rows.js +++ b/samples/generated/v1/big_query_write.append_rows.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + 'use strict'; @@ -26,9 +31,9 @@ function main(writeStream) { * If provided for subsequent requests, it must match the value of the first * request. * For explicitly created write streams, the format is: - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` + * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` * For the special default stream, the format is: - * `projects/{project}/datasets/{dataset}/tables/{table}/_default`. + * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. */ // const writeStream = 'abc123' /** diff --git a/samples/generated/v1/big_query_write.batch_commit_write_streams.js b/samples/generated/v1/big_query_write.batch_commit_write_streams.js index a9f7ea03..9c258a30 100644 --- a/samples/generated/v1/big_query_write.batch_commit_write_streams.js +++ b/samples/generated/v1/big_query_write.batch_commit_write_streams.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1/big_query_write.create_write_stream.js b/samples/generated/v1/big_query_write.create_write_stream.js index 4537cb61..839a8bf6 100644 --- a/samples/generated/v1/big_query_write.create_write_stream.js +++ b/samples/generated/v1/big_query_write.create_write_stream.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + 'use strict'; diff --git a/samples/generated/v1/big_query_write.finalize_write_stream.js b/samples/generated/v1/big_query_write.finalize_write_stream.js index bb660301..3ff3da7a 100644 --- a/samples/generated/v1/big_query_write.finalize_write_stream.js +++ b/samples/generated/v1/big_query_write.finalize_write_stream.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1/big_query_write.flush_rows.js b/samples/generated/v1/big_query_write.flush_rows.js index 94f1f525..751fdebf 100644 --- a/samples/generated/v1/big_query_write.flush_rows.js +++ b/samples/generated/v1/big_query_write.flush_rows.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1/big_query_write.get_write_stream.js b/samples/generated/v1/big_query_write.get_write_stream.js index 4f2caa2a..16507508 100644 --- a/samples/generated/v1/big_query_write.get_write_stream.js +++ b/samples/generated/v1/big_query_write.get_write_stream.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json b/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json new file mode 100644 index 00000000..a847f738 --- /dev/null +++ b/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json @@ -0,0 +1,415 @@ +{ + "clientLibrary": { + "name": "nodejs-storage", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.bigquery.storage.v1", + "version": "v1" + } + ] + }, + "snippets": [ + { + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async", + "title": "BigQueryRead createReadSession Sample", + "origin": "API_DEFINITION", + "description": " Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned. A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read. Data is assigned to each stream such that roughly the same number of rows can be read from each stream. Because the server-side unit for assigning data is collections of rows, the API does not guarantee that each stream will return the same number or rows. Additionally, the limits are enforced based on the number of pre-filtered rows, so some filters can lead to lopsided assignments. Read sessions automatically expire 6 hours after they are created and do not require manual clean-up by the caller.", + "canonical": true, + "file": "big_query_read.create_read_session.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 66, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "read_session", + "type": ".google.cloud.bigquery.storage.v1.ReadSession" + }, + { + "name": "max_stream_count", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.ReadSession", + "client": { + "shortName": "BigQueryReadClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" + }, + "method": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", + "service": { + "shortName": "BigQueryRead", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_ReadRows_async", + "title": "BigQueryRead readRows Sample", + "origin": "API_DEFINITION", + "description": " Reads rows from the stream in the format prescribed by the ReadSession. Each response contains one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to read individual rows larger than 100 MiB will fail. 
Each request also returns a set of stream statistics reflecting the current state of the stream.", + "canonical": true, + "file": "big_query_read.read_rows.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 58, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", + "async": true, + "parameters": [ + { + "name": "read_stream", + "type": "TYPE_STRING" + }, + { + "name": "offset", + "type": "TYPE_INT64" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.ReadRowsResponse", + "client": { + "shortName": "BigQueryReadClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" + }, + "method": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", + "service": { + "shortName": "BigQueryRead", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async", + "title": "BigQueryRead splitReadStream Sample", + "origin": "API_DEFINITION", + "description": " Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are referred to as the primary and the residual streams of the split. The original `ReadStream` can still be read from in the same manner as before. Both of the returned `ReadStream` objects can also be read from, and the rows returned by both child streams will be the same as the rows read from the original stream. Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. Concretely, it is guaranteed that for streams original, primary, and residual, that original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read to completion.", + "canonical": true, + "file": "big_query_read.split_read_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 60, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SplitReadStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + }, + { + "name": "fraction", + "type": "TYPE_DOUBLE" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.SplitReadStreamResponse", + "client": { + "shortName": "BigQueryReadClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" + }, + "method": { + "shortName": "SplitReadStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream", + "service": { + "shortName": "BigQueryRead", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async", + "title": "BigQueryRead createWriteStream Sample", + "origin": "API_DEFINITION", + "description": " Creates a write stream to the given table. Additionally, every table has a special stream named '_default' to which data can be written. This stream doesn't need to be created using CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. 
Data written to this stream is considered committed as soon as an acknowledgement is received.", + "canonical": true, + "file": "big_query_write.create_write_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 56, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateWriteStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "write_stream", + "type": ".google.cloud.bigquery.storage.v1.WriteStream" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.WriteStream", + "client": { + "shortName": "BigQueryWriteClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" + }, + "method": { + "shortName": "CreateWriteStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream", + "service": { + "shortName": "BigQueryWrite", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async", + "title": "BigQueryRead appendRows Sample", + "origin": "API_DEFINITION", + "description": " Appends data to the given stream. If `offset` is specified, the `offset` is checked against the end of stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset beyond the current end of the stream or `ALREADY_EXISTS` if user provides an `offset` that has already been written to. User can retry with adjusted offset within the same RPC connection. If `offset` is not specified, append happens at the end of the stream. The response contains an optional offset at which the append happened. No offset information will be returned for appends to a default stream. Responses are received in the same order in which requests are sent. There will be one response for each successful inserted request. Responses may optionally embed error information if the originating AppendRequest was not successfully processed. The specifics of when successfully appended data is made visible to the table are governed by the type of stream: * For COMMITTED streams (which includes the default stream), data is visible immediately upon successful append. * For BUFFERED streams, data is made visible via a subsequent `FlushRows` rpc which advances a cursor to a newer offset in the stream. * For PENDING streams, data is not made visible until the stream itself is finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly committed via the `BatchCommitWriteStreams` rpc. Note: For users coding against the gRPC api directly, it may be necessary to supply the x-goog-request-params system parameter with `write_stream=`. 
More information about system parameters: https://cloud.google.com/apis/docs/system-parameters", + "canonical": true, + "file": "big_query_write.append_rows.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 77, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "AppendRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows", + "async": true, + "parameters": [ + { + "name": "write_stream", + "type": "TYPE_STRING" + }, + { + "name": "offset", + "type": ".google.protobuf.Int64Value" + }, + { + "name": "proto_rows", + "type": ".google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData" + }, + { + "name": "trace_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.AppendRowsResponse", + "client": { + "shortName": "BigQueryWriteClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" + }, + "method": { + "shortName": "AppendRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows", + "service": { + "shortName": "BigQueryWrite", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async", + "title": "BigQueryRead getWriteStream Sample", + "origin": "API_DEFINITION", + "description": " Gets information about a write stream.", + "canonical": true, + "file": "big_query_write.get_write_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 51, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetWriteStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.WriteStream", + "client": { + "shortName": "BigQueryWriteClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" + }, + "method": { + "shortName": "GetWriteStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream", + "service": { + "shortName": "BigQueryWrite", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async", + "title": "BigQueryRead finalizeWriteStream Sample", + "origin": "API_DEFINITION", + "description": " Finalize a write stream so that no new data can be appended to the stream. 
Finalize is not supported on the '_default' stream.", + "canonical": true, + "file": "big_query_write.finalize_write_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 51, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "FinalizeWriteStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse", + "client": { + "shortName": "BigQueryWriteClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" + }, + "method": { + "shortName": "FinalizeWriteStream", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream", + "service": { + "shortName": "BigQueryWrite", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async", + "title": "BigQueryRead batchCommitWriteStreams Sample", + "origin": "API_DEFINITION", + "description": " Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams must be finalized before commit and cannot be committed multiple times. Once a stream is committed, data in the stream becomes available for read operations.", + "canonical": true, + "file": "big_query_write.batch_commit_write_streams.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 56, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "BatchCommitWriteStreams", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "write_streams", + "type": "TYPE_STRING[]" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse", + "client": { + "shortName": "BigQueryWriteClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" + }, + "method": { + "shortName": "BatchCommitWriteStreams", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams", + "service": { + "shortName": "BigQueryWrite", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async", + "title": "BigQueryRead flushRows Sample", + "origin": "API_DEFINITION", + "description": " Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush operation is required in order for the rows to become available for reading. A Flush operation flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in the request. 
Flush is not supported on the _default stream, since it is not BUFFERED.", + "canonical": true, + "file": "big_query_write.flush_rows.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 55, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "FlushRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", + "async": true, + "parameters": [ + { + "name": "write_stream", + "type": "TYPE_STRING" + }, + { + "name": "offset", + "type": ".google.protobuf.Int64Value" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.FlushRowsResponse", + "client": { + "shortName": "BigQueryWriteClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" + }, + "method": { + "shortName": "FlushRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", + "service": { + "shortName": "BigQueryWrite", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" + } + } + } + } + ] +} diff --git a/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js b/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js index ed64422d..3d7b6d3e 100644 --- a/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js +++ b/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1beta1/big_query_storage.create_read_session.js b/samples/generated/v1beta1/big_query_storage.create_read_session.js index 9cf38ca4..c71a464c 100644 --- a/samples/generated/v1beta1/big_query_storage.create_read_session.js +++ b/samples/generated/v1beta1/big_query_storage.create_read_session.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + 'use strict'; diff --git a/samples/generated/v1beta1/big_query_storage.finalize_stream.js b/samples/generated/v1beta1/big_query_storage.finalize_stream.js index 33ca0da1..51d63d0b 100644 --- a/samples/generated/v1beta1/big_query_storage.finalize_stream.js +++ b/samples/generated/v1beta1/big_query_storage.finalize_stream.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1beta1/big_query_storage.read_rows.js b/samples/generated/v1beta1/big_query_storage.read_rows.js index 5436f8f8..65924b17 100644 --- a/samples/generated/v1beta1/big_query_storage.read_rows.js +++ b/samples/generated/v1beta1/big_query_storage.read_rows.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1beta1/big_query_storage.split_read_stream.js b/samples/generated/v1beta1/big_query_storage.split_read_stream.js index a517cdf5..24ae597d 100644 --- a/samples/generated/v1beta1/big_query_storage.split_read_stream.js +++ b/samples/generated/v1beta1/big_query_storage.split_read_stream.js @@ -1,16 +1,21 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + 'use strict'; diff --git a/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json b/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json new file mode 100644 index 00000000..a36b0694 --- /dev/null +++ b/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json @@ -0,0 +1,247 @@ +{ + "clientLibrary": { + "name": "nodejs-storage", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.bigquery.storage.v1beta1", + "version": "v1beta1" + } + ] + }, + "snippets": [ + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async", + "title": "BigQueryStorage createReadSession Sample", + "origin": "API_DEFINITION", + "description": " Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned. A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read. Read sessions automatically expire 24 hours after they are created and do not require manual clean-up by the caller.", + "canonical": true, + "file": "big_query_storage.create_read_session.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 83, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.CreateReadSession", + "async": true, + "parameters": [ + { + "name": "table_reference", + "type": ".google.cloud.bigquery.storage.v1beta1.TableReference" + }, + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "table_modifiers", + "type": ".google.cloud.bigquery.storage.v1beta1.TableModifiers" + }, + { + "name": "requested_streams", + "type": "TYPE_INT32" + }, + { + "name": "read_options", + "type": ".google.cloud.bigquery.storage.v1beta1.TableReadOptions" + }, + { + "name": "format", + "type": ".google.cloud.bigquery.storage.v1beta1.DataFormat" + }, + { + "name": "sharding_strategy", + "type": ".google.cloud.bigquery.storage.v1beta1.ShardingStrategy" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.ReadSession", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.CreateReadSession", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async", + "title": "BigQueryStorage readRows Sample", + "origin": "API_DEFINITION", + "description": " Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail. Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. 
This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data.", + "canonical": true, + "file": "big_query_storage.read_rows.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 54, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.ReadRows", + "async": true, + "parameters": [ + { + "name": "read_position", + "type": ".google.cloud.bigquery.storage.v1beta1.StreamPosition" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.ReadRowsResponse", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.ReadRows", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async", + "title": "BigQueryStorage batchCreateReadSessionStreams Sample", + "origin": "API_DEFINITION", + "description": " Creates additional streams for a ReadSession. This API can be used to dynamically adjust the parallelism of a batch processing task upwards by adding additional workers.", + "canonical": true, + "file": "big_query_storage.batch_create_read_session_streams.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 58, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "BatchCreateReadSessionStreams", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.BatchCreateReadSessionStreams", + "async": true, + "parameters": [ + { + "name": "session", + "type": ".google.cloud.bigquery.storage.v1beta1.ReadSession" + }, + { + "name": "requested_streams", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "BatchCreateReadSessionStreams", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.BatchCreateReadSessionStreams", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async", + "title": "BigQueryStorage finalizeStream Sample", + "origin": "API_DEFINITION", + "description": " Triggers the graceful termination of a single stream in a ReadSession. This API can be used to dynamically adjust the parallelism of a batch processing task downwards without losing data. This API does not delete the stream -- it remains visible in the ReadSession, and any data processed by the stream is not released to other streams. However, no additional data will be assigned to the stream once this call completes. Callers must continue reading data on the stream until the end of the stream is reached so that data which has already been assigned to the stream will be processed. 
This method will return an error if there are no other live streams in the Session, or if SplitReadStream() has been called on the given Stream.", + "canonical": true, + "file": "big_query_storage.finalize_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 50, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "FinalizeStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.FinalizeStream", + "async": true, + "parameters": [ + { + "name": "stream", + "type": ".google.cloud.bigquery.storage.v1beta1.Stream" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "FinalizeStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.FinalizeStream", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async", + "title": "BigQueryStorage splitReadStream Sample", + "origin": "API_DEFINITION", + "description": " Splits a given read stream into two Streams. These streams are referred to as the primary and the residual of the split. The original stream can still be read from in the same manner as before. Both of the returned streams can also be read from, and the total rows return by both child streams will be the same as the rows read from the original stream. Moreover, the two child streams will be allocated back to back in the original Stream. Concretely, it is guaranteed that for streams Original, Primary, and Residual, that Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been read to completion. 
This method is guaranteed to be idempotent.", + "canonical": true, + "file": "big_query_storage.split_read_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 60, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SplitReadStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.SplitReadStream", + "async": true, + "parameters": [ + { + "name": "original_stream", + "type": ".google.cloud.bigquery.storage.v1beta1.Stream" + }, + { + "name": "fraction", + "type": "TYPE_FLOAT" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "SplitReadStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.SplitReadStream", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + } + ] +} diff --git a/src/v1/big_query_read_client.ts b/src/v1/big_query_read_client.ts index f133143b..a0246aef 100644 --- a/src/v1/big_query_read_client.ts +++ b/src/v1/big_query_read_client.ts @@ -18,8 +18,15 @@ /* global window */ import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; - +import { + Callback, + CallOptions, + Descriptors, + ClientOptions, + GoogleError, +} from 'google-gax'; + +import {PassThrough} from 'stream'; import * as protos from '../../protos/protos'; import jsonProtos = require('../../protos/protos.json'); /** @@ -241,6 +248,16 @@ export class BigQueryReadClient { stub => (...args: Array<{}>) => { if (this._terminated) { + if (methodName in this.descriptors.stream) { + const stream = new PassThrough(); + setImmediate(() => { + stream.emit( + 'error', + new GoogleError('The client has already been closed.') + ); + }); + return stream; + } return Promise.reject('The client has already been closed.'); } const func = stub[methodName]; @@ -297,7 +314,6 @@ export class BigQueryReadClient { static get scopes() { return [ 'https://www.googleapis.com/auth/bigquery', - 'https://www.googleapis.com/auth/bigquery.readonly', 'https://www.googleapis.com/auth/cloud-platform', ]; } @@ -885,9 +901,8 @@ export class BigQueryReadClient { * @returns {Promise} A promise that resolves when the client is closed. 
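 *
 * A minimal usage sketch; the `client` variable and the top-level `await`
 * are illustrative, not part of this change. With the new guard, close()
 * resolves even when the stub was never created, so closing an unused
 * client no longer triggers initialization first.
 * @example
 * const client = new BigQueryReadClient();
 * // bigQueryReadStub is still undefined here; close() now checks for the
 * // stub instead of calling initialize(), so it resolves immediately.
 * await client.close();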
*/ close(): Promise<void> { - this.initialize(); - if (!this._terminated) { - return this.bigQueryReadStub!.then(stub => { + if (this.bigQueryReadStub && !this._terminated) { + return this.bigQueryReadStub.then(stub => { this._terminated = true; stub.close(); }); diff --git a/src/v1/big_query_write_client.ts b/src/v1/big_query_write_client.ts index b775bf6f..1cb67db7 100644 --- a/src/v1/big_query_write_client.ts +++ b/src/v1/big_query_write_client.ts @@ -18,8 +18,15 @@ /* global window */ import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; +import { + Callback, + CallOptions, + Descriptors, + ClientOptions, + GoogleError, +} from 'google-gax'; +import {PassThrough} from 'stream'; import * as protos from '../../protos/protos'; import jsonProtos = require('../../protos/protos.json'); /** @@ -247,6 +254,16 @@ export class BigQueryWriteClient { stub => (...args: Array<{}>) => { if (this._terminated) { + if (methodName in this.descriptors.stream) { + const stream = new PassThrough(); + setImmediate(() => { + stream.emit( + 'error', + new GoogleError('The client has already been closed.') + ); + }); + return stream; + } return Promise.reject('The client has already been closed.'); } const func = stub[methodName]; @@ -873,6 +890,13 @@ export class BigQueryWriteClient { * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly * committed via the `BatchCommitWriteStreams` rpc. * + * Note: For users coding against the gRPC api directly, it may be + * necessary to supply the x-goog-request-params system parameter + * with `write_stream=<full_write_stream_name>`. + * + * More information about system parameters: + * https://cloud.google.com/apis/docs/system-parameters + * * @param {object} [options] * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. * @returns {Stream} */ appendRows(options?: CallOptions): gax.CancellableStream { this.initialize(); - return this.innerApiCalls.appendRows(options); + return this.innerApiCalls.appendRows(null, options); } // -------------------- @@ -1167,9 +1191,8 @@ export class BigQueryWriteClient { * @returns {Promise} A promise that resolves when the client is closed.
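 *
 * A short sketch of the closed-client behavior this change introduces; the
 * sequencing below mirrors the updated unit tests, and the `client` name is
 * illustrative. Streaming methods on a closed client now return a stream
 * that emits a GoogleError rather than returning a rejected promise.
 * @example
 * client.initialize();
 * await client.close();
 * client.appendRows().on('error', err => {
 *   console.error(err.message); // 'The client has already been closed.'
 * });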
*/ close(): Promise<void> { - this.initialize(); - if (!this._terminated) { - return this.bigQueryWriteStub!.then(stub => { + if (this.bigQueryWriteStub && !this._terminated) { + return this.bigQueryWriteStub.then(stub => { this._terminated = true; stub.close(); }); diff --git a/src/v1beta1/big_query_storage_client.ts b/src/v1beta1/big_query_storage_client.ts index 72b13b1e..342ef1a6 100644 --- a/src/v1beta1/big_query_storage_client.ts +++ b/src/v1beta1/big_query_storage_client.ts @@ -18,8 +18,15 @@ /* global window */ import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; - +import { + Callback, + CallOptions, + Descriptors, + ClientOptions, + GoogleError, +} from 'google-gax'; + +import {PassThrough} from 'stream'; import * as protos from '../../protos/protos'; import jsonProtos = require('../../protos/protos.json'); /** @@ -238,6 +245,16 @@ export class BigQueryStorageClient { stub => (...args: Array<{}>) => { if (this._terminated) { + if (methodName in this.descriptors.stream) { + const stream = new PassThrough(); + setImmediate(() => { + stream.emit( + 'error', + new GoogleError('The client has already been closed.') + ); + }); + return stream; + } return Promise.reject('The client has already been closed.'); } const func = stub[methodName]; @@ -294,7 +311,6 @@ export class BigQueryStorageClient { static get scopes() { return [ 'https://www.googleapis.com/auth/bigquery', - 'https://www.googleapis.com/auth/bigquery.readonly', 'https://www.googleapis.com/auth/cloud-platform', ]; } @@ -962,9 +978,8 @@ export class BigQueryStorageClient { * @returns {Promise} A promise that resolves when the client is closed. */ close(): Promise<void> { - this.initialize(); - if (!this._terminated) { - return this.bigQueryStorageStub!.then(stub => { + if (this.bigQueryStorageStub && !this._terminated) { + return this.bigQueryStorageStub.then(stub => { this._terminated = true; stub.close(); }); diff --git a/test/gapic_big_query_read_v1.ts b/test/gapic_big_query_read_v1.ts index a5c551d3..793ecd88 100644 --- a/test/gapic_big_query_read_v1.ts +++ b/test/gapic_big_query_read_v1.ts @@ -111,12 +111,27 @@ describe('v1.BigQueryReadClient', () => { assert(client.bigQueryReadStub); }); - it('has close method', () => { + it('has close method for the initialized client', done => { const client = new bigqueryreadModule.v1.BigQueryReadClient({ credentials: {client_email: 'bogus', private_key: 'bogus'}, projectId: 'bogus', }); - client.close(); + client.initialize(); + assert(client.bigQueryReadStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.bigQueryReadStub, undefined); + client.close().then(() => { + done(); + }); }); it('has getProjectId method', async () => { @@ -265,6 +280,22 @@ describe('v1.BigQueryReadClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes createReadSession with closed client', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest() + ); + request.readSession = {}; + request.readSession.table = ''; + const
expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createReadSession(request), expectedError); + }); }); describe('splitReadStream', () => { @@ -376,6 +407,21 @@ describe('v1.BigQueryReadClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes splitReadStream with closed client', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest() + ); + request.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.splitReadStream(request), expectedError); + }); }); describe('readRows', () => { @@ -468,6 +514,35 @@ describe('v1.BigQueryReadClient', () => { .calledWith(request, expectedOptions) ); }); + + it('invokes readRows with closed client', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.ReadRowsRequest() + ); + request.readStream = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on( + 'data', + ( + response: protos.google.cloud.bigquery.storage.v1.ReadRowsResponse + ) => { + resolve(response); + } + ); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + }); }); describe('Path templates', () => { diff --git a/test/gapic_big_query_storage_v1beta1.ts b/test/gapic_big_query_storage_v1beta1.ts index 3365f629..005e5489 100644 --- a/test/gapic_big_query_storage_v1beta1.ts +++ b/test/gapic_big_query_storage_v1beta1.ts @@ -113,12 +113,27 @@ describe('v1beta1.BigQueryStorageClient', () => { assert(client.bigQueryStorageStub); }); - it('has close method', () => { + it('has close method for the initialized client', done => { const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ credentials: {client_email: 'bogus', private_key: 'bogus'}, projectId: 'bogus', }); - client.close(); + client.initialize(); + assert(client.bigQueryStorageStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.bigQueryStorageStub, undefined); + client.close().then(() => { + done(); + }); }); it('has getProjectId method', async () => { @@ -276,6 +291,24 @@ describe('v1beta1.BigQueryStorageClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes createReadSession with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest() + ); + request.tableReference = {}; + request.tableReference.projectId = ''; 
+ request.tableReference = {}; + request.tableReference.datasetId = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createReadSession(request), expectedError); + }); }); describe('batchCreateReadSessionStreams', () => { @@ -394,6 +427,25 @@ describe('v1beta1.BigQueryStorageClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes batchCreateReadSessionStreams with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest() + ); + request.session = {}; + request.session.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects( + client.batchCreateReadSessionStreams(request), + expectedError + ); + }); }); describe('finalizeStream', () => { @@ -508,6 +560,22 @@ describe('v1beta1.BigQueryStorageClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes finalizeStream with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest() + ); + request.stream = {}; + request.stream.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.finalizeStream(request), expectedError); + }); }); describe('splitReadStream', () => { @@ -622,6 +690,22 @@ describe('v1beta1.BigQueryStorageClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes splitReadStream with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest() + ); + request.originalStream = {}; + request.originalStream.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.splitReadStream(request), expectedError); + }); }); describe('readRows', () => { @@ -718,6 +802,37 @@ describe('v1beta1.BigQueryStorageClient', () => { .calledWith(request, expectedOptions) ); }); + + it('invokes readRows with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsRequest() + ); + request.readPosition = {}; + request.readPosition.stream = {}; + request.readPosition.stream.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on( + 'data', + ( + response: protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse + ) => { + resolve(response); 
+ } + ); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + }); }); describe('Path templates', () => { diff --git a/test/gapic_big_query_write_v1.ts b/test/gapic_big_query_write_v1.ts index 608b6497..44fdb98d 100644 --- a/test/gapic_big_query_write_v1.ts +++ b/test/gapic_big_query_write_v1.ts @@ -104,12 +104,27 @@ describe('v1.BigQueryWriteClient', () => { assert(client.bigQueryWriteStub); }); - it('has close method', () => { + it('has close method for the initialized client', done => { const client = new bigquerywriteModule.v1.BigQueryWriteClient({ credentials: {client_email: 'bogus', private_key: 'bogus'}, projectId: 'bogus', }); - client.close(); + client.initialize(); + assert(client.bigQueryWriteStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.bigQueryWriteStub, undefined); + client.close().then(() => { + done(); + }); }); it('has getProjectId method', async () => { @@ -255,6 +270,21 @@ describe('v1.BigQueryWriteClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes createWriteStream with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest() + ); + request.parent = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createWriteStream(request), expectedError); + }); }); describe('getWriteStream', () => { @@ -366,6 +396,21 @@ describe('v1.BigQueryWriteClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes getWriteStream with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest() + ); + request.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getWriteStream(request), expectedError); + }); }); describe('finalizeWriteStream', () => { @@ -478,6 +523,21 @@ describe('v1.BigQueryWriteClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes finalizeWriteStream with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest() + ); + request.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.finalizeWriteStream(request), expectedError); + }); }); describe('batchCommitWriteStreams', () => { @@ -593,6 +653,24 @@ describe('v1.BigQueryWriteClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes batchCommitWriteStreams with closed 
client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest() + ); + request.parent = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects( + client.batchCommitWriteStreams(request), + expectedError + ); + }); }); describe('flushRows', () => { @@ -701,6 +779,21 @@ describe('v1.BigQueryWriteClient', () => { .calledWith(request, expectedOptions, undefined) ); }); + + it('invokes flushRows with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest() + ); + request.writeStream = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.flushRows(request), expectedError); + }); }); describe('appendRows', () => { @@ -738,7 +831,7 @@ describe('v1.BigQueryWriteClient', () => { assert( (client.innerApiCalls.appendRows as SinonStub) .getCall(0) - .calledWithExactly(undefined) + .calledWith(null) ); assert.deepStrictEqual( ((stream as unknown as PassThrough)._transform as SinonStub).getCall(0) @@ -756,8 +849,6 @@ describe('v1.BigQueryWriteClient', () => { const request = generateSampleMessage( new protos.google.cloud.bigquery.storage.v1.AppendRowsRequest() ); - request.writeStream = ''; - const expectedHeaderRequestParams = 'write_stream='; const expectedError = new Error('expected'); client.innerApiCalls.appendRows = stubBidiStreamingCall( undefined, @@ -783,7 +874,7 @@ describe('v1.BigQueryWriteClient', () => { assert( (client.innerApiCalls.appendRows as SinonStub) .getCall(0) - .calledWithExactly(undefined) + .calledWith(null) ); assert.deepStrictEqual( ((stream as unknown as PassThrough)._transform as SinonStub).getCall(0)