diff --git a/baselines/bigquery-storage/.gitignore.baseline b/baselines/bigquery-storage/.gitignore.baseline new file mode 100644 index 000000000..5d32b2378 --- /dev/null +++ b/baselines/bigquery-storage/.gitignore.baseline @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/baselines/bigquery-storage/.jsdoc.js.baseline b/baselines/bigquery-storage/.jsdoc.js.baseline new file mode 100644 index 000000000..86397b2c7 --- /dev/null +++ b/baselines/bigquery-storage/.jsdoc.js.baseline @@ -0,0 +1,55 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2019 Google, LLC.', + includeDate: false, + sourceFiles: false, + systemName: 'storage', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/baselines/bigquery-storage/.mocharc.json.baseline b/baselines/bigquery-storage/.mocharc.json.baseline new file mode 100644 index 000000000..670c5e2c2 --- /dev/null +++ b/baselines/bigquery-storage/.mocharc.json.baseline @@ -0,0 +1,5 @@ +{ + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} diff --git a/baselines/bigquery-storage/README.md.baseline b/baselines/bigquery-storage/README.md.baseline new file mode 100644 index 000000000..f5dcfbaf5 --- /dev/null +++ b/baselines/bigquery-storage/README.md.baseline @@ -0,0 +1 @@ +Storage: Nodejs Client diff --git a/baselines/bigquery-storage/linkinator.config.json.baseline b/baselines/bigquery-storage/linkinator.config.json.baseline new file mode 100644 index 000000000..b555215ca --- /dev/null +++ b/baselines/bigquery-storage/linkinator.config.json.baseline @@ -0,0 +1,8 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io" + ] +} diff --git a/baselines/bigquery-storage/package.json b/baselines/bigquery-storage/package.json new file mode 100644 index 000000000..7e767c200 --- /dev/null +++ b/baselines/bigquery-storage/package.json @@ -0,0 +1,49 @@ +{ + "name": "storage", + "version": "0.1.0", + "description": "Storage client for Node.js", + "repository": "googleapis/nodejs-storage", + "license": "Apache-2.0", + "author": "Google LLC", + "files": [ + "build/src", + "build/protos" + ], 
+ "main": "build/src/index.js", + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . && cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "predocs-test": "npm run docs", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^1.14.2" + }, + "devDependencies": { + "@types/mocha": "^5.2.7", + "@types/node": "^12.12.29", + "c8": "^7.1.0", + "gts": "^1.1.2", + "jsdoc": "^3.6.3", + "jsdoc-fresh": "^1.0.2", + "jsdoc-region-tag": "^1.0.4", + "linkinator": "^2.0.3", + "mocha": "^6.2.2", + "pack-n-play": "^1.0.0-2", + "null-loader": "^3.0.0", + "ts-loader": "^6.2.1", + "typescript": "~3.7.5", + "webpack": "^4.42.0", + "webpack-cli": "^3.3.11" + }, + "engines": { + "node": ">=8.13.0" + } +} diff --git a/baselines/bigquery-storage/package.json.baseline b/baselines/bigquery-storage/package.json.baseline new file mode 120000 index 000000000..2ff8622f1 --- /dev/null +++ b/baselines/bigquery-storage/package.json.baseline @@ -0,0 +1 @@ +package.json \ No newline at end of file diff --git a/baselines/bigquery-storage/proto.list.baseline b/baselines/bigquery-storage/proto.list.baseline new file mode 100644 index 000000000..5451a6d74 --- /dev/null +++ b/baselines/bigquery-storage/proto.list.baseline @@ -0,0 +1,13 @@ +google/cloud/bigquery/storage/v1beta1/arrow.proto +google/cloud/bigquery/storage/v1beta1/avro.proto +google/cloud/bigquery/storage/v1beta1/read_options.proto +google/api/http.proto +google/protobuf/descriptor.proto +google/api/annotations.proto +google/api/client.proto +google/api/field_behavior.proto +google/api/resource.proto +google/protobuf/timestamp.proto +google/cloud/bigquery/storage/v1beta1/table_reference.proto +google/protobuf/empty.proto +google/cloud/bigquery/storage/v1beta1/storage.proto diff --git a/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto.baseline b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto.baseline new file mode 100644 index 000000000..3003de444 --- /dev/null +++ b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto.baseline @@ -0,0 +1,37 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_outer_classname = "ArrowProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Arrow schema. +message ArrowSchema { + // IPC serialized Arrow schema. + bytes serialized_schema = 1; +} + +// Arrow RecordBatch. +message ArrowRecordBatch { + // IPC serialized Arrow RecordBatch. + bytes serialized_record_batch = 1; + + // The count of rows in the returning block. 
+ int64 row_count = 2; +} diff --git a/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/avro.proto.baseline b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/avro.proto.baseline new file mode 100644 index 000000000..021d8e44f --- /dev/null +++ b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/avro.proto.baseline @@ -0,0 +1,38 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_outer_classname = "AvroProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Avro schema. +message AvroSchema { + // Json serialized schema, as described at + // https://avro.apache.org/docs/1.8.1/spec.html + string schema = 1; +} + +// Avro rows. +message AvroRows { + // Binary serialized rows in a block. + bytes serialized_binary_rows = 1; + + // The count of rows in the returning block. + int64 row_count = 2; +} diff --git a/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto.baseline b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto.baseline new file mode 100644 index 000000000..9591deba7 --- /dev/null +++ b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto.baseline @@ -0,0 +1,41 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Options dictating how we read a table. +message TableReadOptions { + // Optional. Names of the fields in the table that should be read. If empty, + // all fields will be read. If the specified field is a nested field, all the + // sub-fields in the field will be selected. The output field order is + // unrelated to the order of fields in selected_fields. + repeated string selected_fields = 1; + + // Optional. SQL text filtering statement, similar to a WHERE clause in + // a query. Currently, only a single predicate that is a comparison between + // a column and a constant value is supported. Aggregates are not supported. 
+ // + // Examples: "int_field > 5" + // "date_field = CAST('2014-9-27' as DATE)" + // "nullable_field is not NULL" + // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" + // "numeric_field BETWEEN 1.0 AND 5.0" + string row_restriction = 2; +} diff --git a/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/storage.proto.baseline b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/storage.proto.baseline new file mode 100644 index 000000000..22f742fbb --- /dev/null +++ b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/storage.proto.baseline @@ -0,0 +1,405 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1beta1/arrow.proto"; +import "google/cloud/bigquery/storage/v1beta1/avro.proto"; +import "google/cloud/bigquery/storage/v1beta1/read_options.proto"; +import "google/cloud/bigquery/storage/v1beta1/table_reference.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// BigQuery storage API. +// +// The BigQuery storage API can be used to read data stored in BigQuery. +service BigQueryStorage { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/bigquery.readonly," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a new read session. A read session divides the contents of a + // BigQuery table into one or more streams, which can then be used to read + // data from the table. The read session also specifies properties of the + // data to be read, such as a list of columns or a push-down filter describing + // the rows to be returned. + // + // A particular row can be read by at most one stream. When the caller has + // reached the end of each stream in the session, then all the data in the + // table has been read. + // + // Read sessions automatically expire 24 hours after they are created and do + // not require manual clean-up by the caller. + rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) { + option (google.api.http) = { + post: "/v1beta1/{table_reference.project_id=projects/*}" + body: "*" + additional_bindings { + post: "/v1beta1/{table_reference.dataset_id=projects/*/datasets/*}" + body: "*" + } + }; + option (google.api.method_signature) = "table_reference,parent,requested_streams"; + } + + // Reads rows from the table in the format prescribed by the read session. 
+  // Each response contains one or more table rows, up to a maximum of 10 MiB
+  // per response; read requests which attempt to read individual rows larger
+  // than this will fail.
+  //
+  // Each request also returns a set of stream statistics reflecting the
+  // estimated total number of rows in the read stream. This number is computed
+  // based on the total table size and the number of active streams in the read
+  // session, and may change as other streams continue to read data.
+  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{read_position.stream.name=projects/*/streams/*}"
+    };
+    option (google.api.method_signature) = "read_position";
+  }
+
+  // Creates additional streams for a ReadSession. This API can be used to
+  // dynamically adjust the parallelism of a batch processing task upwards by
+  // adding additional workers.
+  rpc BatchCreateReadSessionStreams(BatchCreateReadSessionStreamsRequest) returns (BatchCreateReadSessionStreamsResponse) {
+    option (google.api.http) = {
+      post: "/v1beta1/{session.name=projects/*/sessions/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "session,requested_streams";
+  }
+
+  // Triggers the graceful termination of a single stream in a ReadSession. This
+  // API can be used to dynamically adjust the parallelism of a batch processing
+  // task downwards without losing data.
+  //
+  // This API does not delete the stream -- it remains visible in the
+  // ReadSession, and any data processed by the stream is not released to other
+  // streams. However, no additional data will be assigned to the stream once
+  // this call completes. Callers must continue reading data on the stream until
+  // the end of the stream is reached so that data which has already been
+  // assigned to the stream will be processed.
+  //
+  // This method will return an error if there are no other live streams
+  // in the Session, or if SplitReadStream() has been called on the given
+  // Stream.
+  rpc FinalizeStream(FinalizeStreamRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      post: "/v1beta1/{stream.name=projects/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "stream";
+  }
+
+  // Splits a given read stream into two Streams. These streams are referred to
+  // as the primary and the residual of the split. The original stream can still
+  // be read from in the same manner as before. Both of the returned streams can
+  // also be read from, and the total rows returned by both child streams will be
+  // the same as the rows read from the original stream.
+  //
+  // Moreover, the two child streams will be allocated back to back in the
+  // original Stream. Concretely, it is guaranteed that for streams Original,
+  // Primary, and Residual, that Original[0-j] = Primary[0-j] and
+  // Original[j-n] = Residual[0-m] once the streams have been read to
+  // completion.
+  //
+  // This method is guaranteed to be idempotent.
+  rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{original_stream.name=projects/*/streams/*}"
+    };
+    option (google.api.method_signature) = "original_stream";
+  }
+}
+
+// Information about a single data stream within a read session.
+message Stream {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/Stream"
+    pattern: "projects/{project}/locations/{location}/streams/{stream}"
+  };
+
+  // Name of the stream, in the form
+  // `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+  string name = 1;
+}
+
+// Expresses a point within a given stream using an offset position.
+message StreamPosition {
+  // Identifier for a given Stream.
+  Stream stream = 1;
+
+  // Position in the stream.
+  int64 offset = 2;
+}
+
+// Information returned from a `CreateReadSession` request.
+message ReadSession {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/ReadSession"
+    pattern: "projects/{project}/locations/{location}/sessions/{session}"
+  };
+
+  // Unique identifier for the session, in the form
+  // `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+  string name = 1;
+
+  // Time at which the session becomes invalid. After this time, subsequent
+  // requests to read this Session will return errors.
+  google.protobuf.Timestamp expire_time = 2;
+
+  // The schema for the read. If read_options.selected_fields is set, the
+  // schema may be different from the table schema as it will only contain
+  // the selected fields.
+  oneof schema {
+    // Avro schema.
+    AvroSchema avro_schema = 5;
+
+    // Arrow schema.
+    ArrowSchema arrow_schema = 6;
+  }
+
+  // Streams associated with this session.
+  repeated Stream streams = 4;
+
+  // Table that this ReadSession is reading from.
+  TableReference table_reference = 7;
+
+  // Any modifiers which are applied when reading from the specified table.
+  TableModifiers table_modifiers = 8;
+
+  // The strategy to use for distributing data among the streams.
+  ShardingStrategy sharding_strategy = 9;
+}
+
+// Creates a new read session, which may include additional options such as
+// requested parallelism, projection filters and constraints.
+message CreateReadSessionRequest {
+  // Required. Reference to the table to read.
+  TableReference table_reference = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. String of the form `projects/{project_id}` indicating the
+  // project this ReadSession is associated with. This is the project that will
+  // be billed for usage.
+  string parent = 6 [(google.api.field_behavior) = REQUIRED];
+
+  // Any modifiers to the Table (e.g. snapshot timestamp).
+  TableModifiers table_modifiers = 2;
+
+  // Initial number of streams. If unset or 0, we will
+  // provide a value of streams so as to produce reasonable throughput. Must be
+  // non-negative. The number of streams may be lower than the requested number,
+  // depending on the amount of parallelism that is reasonable for the table and
+  // the maximum amount of parallelism allowed by the system.
+  //
+  // Streams must be read starting from offset 0.
+  int32 requested_streams = 3;
+
+  // Read options for this session (e.g. column selection, filters).
+  TableReadOptions read_options = 4;
+
+  // Data output format. Currently defaults to Avro.
+  DataFormat format = 5;
+
+  // The strategy to use for distributing data among multiple streams. Currently
+  // defaults to liquid sharding.
+  ShardingStrategy sharding_strategy = 7;
+}
+
+// Data format for input or output data.
+enum DataFormat {
+  // Data format is unspecified.
+  DATA_FORMAT_UNSPECIFIED = 0;
+
+  // Avro is a standard open source row-based file format.
+  // See https://avro.apache.org/ for more details.
+  AVRO = 1;
+
+  ARROW = 3;
+}
+
+// Strategy for distributing data among multiple streams in a read session.
+enum ShardingStrategy {
+  // Same as LIQUID.
+  SHARDING_STRATEGY_UNSPECIFIED = 0;
+
+  // Assigns data to each stream based on the client's read rate. The faster the
+  // client reads from a stream, the more data is assigned to the stream. In
+  // this strategy, it's possible to read all data from a single stream even if
+  // there are other streams present.
+  LIQUID = 1;
+
+  // Assigns data to each stream such that roughly the same number of rows can
+  // be read from each stream. Because the server-side unit for assigning data
+  // is collections of rows, the API does not guarantee that each stream will
+  // return the same number of rows. Additionally, the limits are enforced based
+  // on the number of pre-filtering rows, so some filters can lead to lopsided
+  // assignments.
+  BALANCED = 2;
+}
+
+// Requesting row data via `ReadRows` must provide Stream position information.
+message ReadRowsRequest {
+  // Required. Identifier of the position in the stream to start reading from.
+  // The offset requested must be less than the last row read from ReadRows.
+  // Requesting a larger offset is undefined.
+  StreamPosition read_position = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Progress information for a given Stream.
+message StreamStatus {
+  // Number of estimated rows in the current stream. May change over time as
+  // different readers in the stream progress at rates which are relatively fast
+  // or slow.
+  int64 estimated_row_count = 1;
+
+  // A value in the range [0.0, 1.0] that represents the fraction of rows
+  // assigned to this stream that have been processed by the server. In the
+  // presence of read filters, the server may process more rows than it returns,
+  // so this value reflects progress through the pre-filtering rows.
+  //
+  // This value is only populated for sessions created through the BALANCED
+  // sharding strategy.
+  float fraction_consumed = 2;
+
+  // Represents the progress of the current stream.
+  //
+  // Note: This value is under development and should not be used. Use
+  // `fraction_consumed` instead.
+  Progress progress = 4;
+
+  // Whether this stream can be split. For sessions that use the LIQUID sharding
+  // strategy, this value is always false. For BALANCED sessions, this value is
+  // false when enough data has been read such that no more splits are possible
+  // at that point or beyond. For small tables or streams that are the result of
+  // a chain of splits, this value may never be true.
+  bool is_splittable = 3;
+}
+
+message Progress {
+  // The fraction of rows assigned to the stream that have been processed by the
+  // server so far, not including the rows in the current response message.
+  //
+  // This value, along with `at_response_end`, can be used to interpolate the
+  // progress made as the rows in the message are being processed using the
+  // following formula: `at_response_start + (at_response_end -
+  // at_response_start) * rows_processed_from_response / rows_in_response`.
+  //
+  // Note that if a filter is provided, the `at_response_end` value of the
+  // previous response may not necessarily be equal to the `at_response_start`
+  // value of the current response.
+  float at_response_start = 1;
+
+  // Similar to `at_response_start`, except that this value includes the rows in
+  // the current response.
+  float at_response_end = 2;
+}
+
+// Information on whether the current connection is being throttled.
+message ThrottleStatus {
+  // How much this connection is being throttled.
+  // 0 is no throttling, 100 is completely throttled.
+  int32 throttle_percent = 1;
+}
+
+// Response from calling `ReadRows` may include row data, progress and
+// throttling information.
+message ReadRowsResponse {
+  // Row data is returned in format specified during session creation.
+  oneof rows {
+    // Serialized row data in AVRO format.
+    AvroRows avro_rows = 3;
+
+    // Serialized row data in Arrow RecordBatch format.
+    ArrowRecordBatch arrow_record_batch = 4;
+  }
+
+  // Number of serialized rows in the rows block. This value is recorded here,
+  // in addition to the row_count values in the output-specific messages in
+  // `rows`, so that code which needs to record progress through the stream can
+  // do so in an output format-independent way.
+  int64 row_count = 6;
+
+  // Estimated stream statistics.
+  StreamStatus status = 2;
+
+  // Throttling status. If unset, the latest response still describes
+  // the current throttling status.
+  ThrottleStatus throttle_status = 5;
+}
+
+// Information needed to request additional streams for an established read
+// session.
+message BatchCreateReadSessionStreamsRequest {
+  // Required. Must be a non-expired session obtained from a call to
+  // CreateReadSession. Only the name field needs to be set.
+  ReadSession session = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Number of new streams requested. Must be positive.
+  // Number of added streams may be less than this, see CreateReadSessionRequest
+  // for more information.
+  int32 requested_streams = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The response from `BatchCreateReadSessionStreams` returns the stream
+// identifiers for the newly created streams.
+message BatchCreateReadSessionStreamsResponse {
+  // Newly added streams.
+  repeated Stream streams = 1;
+}
+
+// Request information for invoking `FinalizeStream`.
+message FinalizeStreamRequest {
+  // Stream to finalize.
+  Stream stream = 2;
+}
+
+// Request information for `SplitReadStream`.
+message SplitReadStreamRequest {
+  // Stream to split.
+  Stream original_stream = 1;
+
+  // A value in the range (0.0, 1.0) that specifies the fractional point at
+  // which the original stream should be split. The actual split point is
+  // evaluated on pre-filtered rows, so if a filter is provided, then there is
+  // no guarantee that the division of the rows between the new child streams
+  // will be proportional to this fractional value. Additionally, because the
+  // server-side unit for assigning data is collections of rows, this fraction
+  // will always map to a data storage boundary on the server side.
+  float fraction = 2;
+}
+
+// Response from `SplitReadStream`.
+message SplitReadStreamResponse {
+  // Primary stream, which contains the beginning portion of
+  // |original_stream|. An empty value indicates that the original stream can no
+  // longer be split.
+  Stream primary_stream = 1;
+
+  // Remainder stream, which contains the tail of |original_stream|. An empty
+  // value indicates that the original stream can no longer be split.
+  Stream remainder_stream = 2;
+}
diff --git a/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto.baseline b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto.baseline
new file mode 100644
index 000000000..a55dc48eb
--- /dev/null
+++ b/baselines/bigquery-storage/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto.baseline
@@ -0,0 +1,43 @@
+// Copyright 2019 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta1;
+
+import "google/api/resource.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
+option java_outer_classname = "TableReferenceProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta1";
+
+// Table reference that includes just the 3 strings needed to identify a table.
+message TableReference {
+  // The assigned project ID of the project.
+  string project_id = 1;
+
+  // The ID of the dataset in the above project.
+  string dataset_id = 2;
+
+  // The ID of the table in the above dataset.
+  string table_id = 3;
+}
+
+// All fields in this message are optional.
+message TableModifiers {
+  // The snapshot time of the table. If not set, interpreted as now.
+  google.protobuf.Timestamp snapshot_time = 1;
+}
diff --git a/baselines/bigquery-storage/src/index.ts.baseline b/baselines/bigquery-storage/src/index.ts.baseline
new file mode 100644
index 000000000..dc46c23fa
--- /dev/null
+++ b/baselines/bigquery-storage/src/index.ts.baseline
@@ -0,0 +1,26 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as v1beta1 from './v1beta1';
+const BigQueryStorageClient = v1beta1.BigQueryStorageClient;
+export {v1beta1, BigQueryStorageClient};
+// For compatibility with JavaScript libraries we need to provide this default export:
+// tslint:disable-next-line no-default-export
+export default {v1beta1, BigQueryStorageClient};
+import * as protos from '../protos/protos';
+export {protos}
diff --git a/baselines/bigquery-storage/src/v1beta1/big_query_storage_client.ts.baseline b/baselines/bigquery-storage/src/v1beta1/big_query_storage_client.ts.baseline
new file mode 100644
index 000000000..878cdeb02
--- /dev/null
+++ b/baselines/bigquery-storage/src/v1beta1/big_query_storage_client.ts.baseline
@@ -0,0 +1,720 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as gax from 'google-gax';
+import {APICallback, Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax';
+import * as path from 'path';
+
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './big_query_storage_client_config.json';
+
+const version = require('../../../package.json').version;
+
+/**
+ * BigQuery storage API.
+ *
+ * The BigQuery storage API can be used to read data stored in BigQuery.
+ * @class
+ * @memberof v1beta1
+ */
+export class BigQueryStorageClient {
+  private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+  private _innerApiCalls: {[name: string]: Function};
+  private _pathTemplates: {[name: string]: gax.PathTemplate};
+  private _terminated = false;
+  auth: gax.GoogleAuth;
+  bigQueryStorageStub: Promise<{[name: string]: Function}>;
+
+  /**
+   * Construct an instance of BigQueryStorageClient.
+   *
+   * @param {object} [options] - The configuration object. See the subsequent
+   *   parameters for more details.
+   * @param {object} [options.credentials] - Credentials object.
+   * @param {string} [options.credentials.client_email]
+   * @param {string} [options.credentials.private_key]
+   * @param {string} [options.email] - Account email address. Required when
+   *     using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+   *     .p12 key downloaded from the Google Developers Console. If you provide
+   *     a path to a JSON file, the projectId option below is not necessary.
+   *     NOTE: .pem and .p12 require you to specify options.email as well.
+   * @param {number} [options.port] - The port on which to connect to
+   *     the remote host.
+   * @param {string} [options.projectId] - The project ID from the Google
+   *     Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+   *     the environment variable GCLOUD_PROJECT for your project ID.
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {function} [options.promise] - Custom promise module to use instead + * of native Promises. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + */ + + constructor(opts?: ClientOptions) { + // Ensure that options include the service address and port. + const staticMembers = this.constructor as typeof BigQueryStorageClient; + const servicePath = opts && opts.servicePath ? + opts.servicePath : + ((opts && opts.apiEndpoint) ? opts.apiEndpoint : + staticMembers.servicePath); + const port = opts && opts.port ? opts.port : staticMembers.port; + + if (!opts) { + opts = {servicePath, port}; + } + opts.servicePath = opts.servicePath || servicePath; + opts.port = opts.port || port; + opts.clientConfig = opts.clientConfig || {}; + + const isBrowser = (typeof window !== 'undefined'); + if (isBrowser){ + opts.fallback = true; + } + // If we are in browser, we are already using fallback because of the + // "browser" field in package.json. + // But if we were explicitly requested to use fallback, let's do it now. + const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options + // sent to the client. + opts.scopes = (this.constructor as typeof BigQueryStorageClient).scopes; + const gaxGrpc = new gaxModule.GrpcClient(opts); + + // Save the auth object to the client, for use by other methods. + this.auth = (gaxGrpc.auth as gax.GoogleAuth); + + // Determine the client header string. + const clientHeader = [ + `gax/${gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + // For Node.js, pass the path to JSON proto file. + // For browsers, pass the JSON content. + + const nodejsProtoPath = path.join(__dirname, '..', '..', 'protos', 'protos.json'); + const protos = gaxGrpc.loadProto( + opts.fallback ? + require("../../protos/protos.json") : + nodejsProtoPath + ); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this._pathTemplates = { + readSessionPathTemplate: new gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/sessions/{session}' + ), + streamPathTemplate: new gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/streams/{stream}' + ), + }; + + // Some of the methods on this service provide streaming responses. + // Provide descriptors for these. + this._descriptors.stream = { + readRows: new gaxModule.StreamDescriptor(gax.StreamType.SERVER_STREAMING) + }; + + // Put together the default options sent with requests. 
+    const defaults = gaxGrpc.constructSettings(
+        'google.cloud.bigquery.storage.v1beta1.BigQueryStorage', gapicConfig as gax.ClientConfig,
+        opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')});
+
+    // Set up a dictionary of "inner API calls"; the core implementation
+    // of calling the API is handled in `google-gax`, with this code
+    // merely providing the destination and request information.
+    this._innerApiCalls = {};
+
+    // Put together the "service stub" for
+    // google.cloud.bigquery.storage.v1beta1.BigQueryStorage.
+    this.bigQueryStorageStub = gaxGrpc.createStub(
+        opts.fallback ?
+          (protos as protobuf.Root).lookupService('google.cloud.bigquery.storage.v1beta1.BigQueryStorage') :
+          // tslint:disable-next-line no-any
+          (protos as any).google.cloud.bigquery.storage.v1beta1.BigQueryStorage,
+        opts) as Promise<{[method: string]: Function}>;
+
+    // Iterate over each of the methods that the service provides
+    // and create an API call method for each.
+    const bigQueryStorageStubMethods =
+        ['createReadSession', 'readRows', 'batchCreateReadSessionStreams', 'finalizeStream', 'splitReadStream'];
+
+    for (const methodName of bigQueryStorageStubMethods) {
+      const innerCallPromise = this.bigQueryStorageStub.then(
+        stub => (...args: Array<{}>) => {
+          if (this._terminated) {
+            return Promise.reject('The client has already been closed.');
+          }
+          return stub[methodName].apply(stub, args);
+        },
+        (err: Error|null|undefined) => () => {
+          throw err;
+        });
+
+      const apiCall = gaxModule.createApiCall(
+        innerCallPromise,
+        defaults[methodName],
+        this._descriptors.page[methodName] ||
+            this._descriptors.stream[methodName] ||
+            this._descriptors.longrunning[methodName]
+      );
+
+      this._innerApiCalls[methodName] = (
+        argument: {},
+        callOptions?: CallOptions,
+        callback?: APICallback
+      ) => {
+        return apiCall(argument, callOptions, callback);
+      };
+    }
+  }
+
+  /**
+   * The DNS address for this API service.
+   */
+  static get servicePath() {
+    return 'bigquerystorage.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service - same as servicePath(),
+   * exists for compatibility reasons.
+   */
+  static get apiEndpoint() {
+    return 'bigquerystorage.googleapis.com';
+  }
+
+  /**
+   * The port for this API service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/bigquery.readonly',
+      'https://www.googleapis.com/auth/cloud-platform'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @param {function(Error, string)} callback - the callback to
+   *   be called with the current project Id.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+  createReadSession(
+      request: protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest,
+      options?: gax.CallOptions):
+      Promise<[
+        protosTypes.google.cloud.bigquery.storage.v1beta1.IReadSession,
+        protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, {}|undefined
+      ]>;
+  createReadSession(
+      request: protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest,
+      options: gax.CallOptions,
+      callback: Callback<
+          protosTypes.google.cloud.bigquery.storage.v1beta1.IReadSession,
+          protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined,
+          {}|undefined>): void;
+/**
+ * Creates a new read session. A read session divides the contents of a
+ * BigQuery table into one or more streams, which can then be used to read
+ * data from the table. The read session also specifies properties of the
+ * data to be read, such as a list of columns or a push-down filter describing
+ * the rows to be returned.
+ *
+ * A particular row can be read by at most one stream. When the caller has
+ * reached the end of each stream in the session, then all the data in the
+ * table has been read.
+ *
+ * Read sessions automatically expire 24 hours after they are created and do
+ * not require manual clean-up by the caller.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {google.cloud.bigquery.storage.v1beta1.TableReference} request.tableReference
+ *   Required. Reference to the table to read.
+ * @param {string} request.parent
+ *   Required. String of the form `projects/{project_id}` indicating the
+ *   project this ReadSession is associated with. This is the project that will
+ *   be billed for usage.
+ * @param {google.cloud.bigquery.storage.v1beta1.TableModifiers} request.tableModifiers
+ *   Any modifiers to the Table (e.g. snapshot timestamp).
+ * @param {number} request.requestedStreams
+ *   Initial number of streams. If unset or 0, we will
+ *   provide a value of streams so as to produce reasonable throughput. Must be
+ *   non-negative. The number of streams may be lower than the requested number,
+ *   depending on the amount of parallelism that is reasonable for the table and
+ *   the maximum amount of parallelism allowed by the system.
+ *
+ *   Streams must be read starting from offset 0.
+ * @param {google.cloud.bigquery.storage.v1beta1.TableReadOptions} request.readOptions
+ *   Read options for this session (e.g. column selection, filters).
+ * @param {google.cloud.bigquery.storage.v1beta1.DataFormat} request.format
+ *   Data output format. Currently defaults to Avro.
+ * @param {google.cloud.bigquery.storage.v1beta1.ShardingStrategy} request.shardingStrategy
+ *   The strategy to use for distributing data among multiple streams. Currently
+ *   defaults to liquid sharding.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1beta1.ReadSession}.
+ *   The promise has a method named "cancel" which cancels the ongoing API call.
+ */ + createReadSession( + request: protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, + optionsOrCallback?: gax.CallOptions|Callback< + protosTypes.google.cloud.bigquery.storage.v1beta1.IReadSession, + protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, {}|undefined>, + callback?: Callback< + protosTypes.google.cloud.bigquery.storage.v1beta1.IReadSession, + protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, + {}|undefined>): + Promise<[ + protosTypes.google.cloud.bigquery.storage.v1beta1.IReadSession, + protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: gax.CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as gax.CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'table_reference.project_id': request.tableReference!.projectId || '', + 'table_reference.dataset_id': request.tableReference!.datasetId || '', + }); + return this._innerApiCalls.createReadSession(request, options, callback); + } + batchCreateReadSessionStreams( + request: protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, + options?: gax.CallOptions): + Promise<[ + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, {}|undefined + ]>; + batchCreateReadSessionStreams( + request: protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, + options: gax.CallOptions, + callback: Callback< + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, + {}|undefined>): void; +/** + * Creates additional streams for a ReadSession. This API can be used to + * dynamically adjust the parallelism of a batch processing task upwards by + * adding additional workers. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.ReadSession} request.session + * Required. Must be a non-expired session obtained from a call to + * CreateReadSession. Only the name field needs to be set. + * @param {number} request.requestedStreams + * Required. Number of new streams requested. Must be positive. + * Number of added streams may be less than this, see CreateReadSessionRequest + * for more information. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [BatchCreateReadSessionStreamsResponse]{@link google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse}. + * The promise has a method named "cancel" which cancels the ongoing API call. 
+ */ + batchCreateReadSessionStreams( + request: protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, + optionsOrCallback?: gax.CallOptions|Callback< + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, {}|undefined>, + callback?: Callback< + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, + {}|undefined>): + Promise<[ + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: gax.CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as gax.CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'session.name': request.session!.name || '', + }); + return this._innerApiCalls.batchCreateReadSessionStreams(request, options, callback); + } + finalizeStream( + request: protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, + options?: gax.CallOptions): + Promise<[ + protosTypes.google.protobuf.IEmpty, + protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, {}|undefined + ]>; + finalizeStream( + request: protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, + options: gax.CallOptions, + callback: Callback< + protosTypes.google.protobuf.IEmpty, + protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, + {}|undefined>): void; +/** + * Triggers the graceful termination of a single stream in a ReadSession. This + * API can be used to dynamically adjust the parallelism of a batch processing + * task downwards without losing data. + * + * This API does not delete the stream -- it remains visible in the + * ReadSession, and any data processed by the stream is not released to other + * streams. However, no additional data will be assigned to the stream once + * this call completes. Callers must continue reading data on the stream until + * the end of the stream is reached so that data which has already been + * assigned to the stream will be processed. + * + * This method will return an error if there are no other live streams + * in the Session, or if SplitReadStream() has been called on the given + * Stream. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.Stream} request.stream + * Stream to finalize. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. + * The promise has a method named "cancel" which cancels the ongoing API call. 
+ */
+  finalizeStream(
+      request: protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest,
+      optionsOrCallback?: gax.CallOptions|Callback<
+          protosTypes.google.protobuf.IEmpty,
+          protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, {}|undefined>,
+      callback?: Callback<
+          protosTypes.google.protobuf.IEmpty,
+          protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined,
+          {}|undefined>):
+      Promise<[
+        protosTypes.google.protobuf.IEmpty,
+        protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: gax.CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as gax.CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = gax.routingHeader.fromParams({
+      'stream.name': request.stream!.name || '',
+    });
+    return this._innerApiCalls.finalizeStream(request, options, callback);
+  }
+  splitReadStream(
+      request: protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest,
+      options?: gax.CallOptions):
+      Promise<[
+        protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse,
+        protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, {}|undefined
+      ]>;
+  splitReadStream(
+      request: protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest,
+      options: gax.CallOptions,
+      callback: Callback<
+          protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse,
+          protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined,
+          {}|undefined>): void;
+/**
+ * Splits a given read stream into two Streams. These streams are referred to
+ * as the primary and the residual of the split. The original stream can still
+ * be read from in the same manner as before. Both of the returned streams can
+ * also be read from, and the total rows returned by both child streams will be
+ * the same as the rows read from the original stream.
+ *
+ * Moreover, the two child streams will be allocated back to back in the
+ * original Stream. Concretely, it is guaranteed that for streams Original,
+ * Primary, and Residual, that Original[0-j] = Primary[0-j] and
+ * Original[j-n] = Residual[0-m] once the streams have been read to
+ * completion.
+ *
+ * This method is guaranteed to be idempotent.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {google.cloud.bigquery.storage.v1beta1.Stream} request.originalStream
+ *   Stream to split.
+ * @param {number} request.fraction
+ *   A value in the range (0.0, 1.0) that specifies the fractional point at
+ *   which the original stream should be split. The actual split point is
+ *   evaluated on pre-filtered rows, so if a filter is provided, then there is
+ *   no guarantee that the division of the rows between the new child streams
+ *   will be proportional to this fractional value. Additionally, because the
+ *   server-side unit for assigning data is collections of rows, this fraction
+ *   will always map to a data storage boundary on the server side.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse}. + * The promise has a method named "cancel" which cancels the ongoing API call. + */ + splitReadStream( + request: protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, + optionsOrCallback?: gax.CallOptions|Callback< + protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, {}|undefined>, + callback?: Callback< + protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, + {}|undefined>): + Promise<[ + protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: gax.CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as gax.CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'original_stream.name': request.originalStream!.name || '', + }); + return this._innerApiCalls.splitReadStream(request, options, callback); + } + +/** + * Reads rows from the table in the format prescribed by the read session. + * Each response contains one or more table rows, up to a maximum of 10 MiB + * per response; read requests which attempt to read individual rows larger + * than this will fail. + * + * Each request also returns a set of stream statistics reflecting the + * estimated total number of rows in the read stream. This number is computed + * based on the total table size and the number of active streams in the read + * session, and may change as other streams continue to read data. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.StreamPosition} request.readPosition + * Required. Identifier of the position in the stream to start reading from. + * The offset requested must be less than the last row read from ReadRows. + * Requesting a larger offset is undefined. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits [ReadRowsResponse]{@link google.cloud.bigquery.storage.v1beta1.ReadRowsResponse} on 'data' event. 
+ */ + readRows( + request?: protosTypes.google.cloud.bigquery.storage.v1beta1.IReadRowsRequest, + options?: gax.CallOptions): + gax.CancellableStream{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'read_position.stream.name': request.readPosition!.stream!.name || '', + }); + return this._innerApiCalls.readRows(request, options); + } + + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified readSession resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} session + * @returns {string} Resource name string. + */ + readSessionPath(project:string,location:string,session:string) { + return this._pathTemplates.readSessionPathTemplate.render({ + project: project, + location: location, + session: session, + }); + } + + /** + * Parse the project from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the project. + */ + matchProjectFromReadSessionName(readSessionName: string) { + return this._pathTemplates.readSessionPathTemplate.match(readSessionName).project; + } + + /** + * Parse the location from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the location. + */ + matchLocationFromReadSessionName(readSessionName: string) { + return this._pathTemplates.readSessionPathTemplate.match(readSessionName).location; + } + + /** + * Parse the session from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the session. + */ + matchSessionFromReadSessionName(readSessionName: string) { + return this._pathTemplates.readSessionPathTemplate.match(readSessionName).session; + } + + /** + * Return a fully-qualified stream resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} stream + * @returns {string} Resource name string. + */ + streamPath(project:string,location:string,stream:string) { + return this._pathTemplates.streamPathTemplate.render({ + project: project, + location: location, + stream: stream, + }); + } + + /** + * Parse the project from Stream resource. + * + * @param {string} streamName + * A fully-qualified path representing Stream resource. + * @returns {string} A string representing the project. + */ + matchProjectFromStreamName(streamName: string) { + return this._pathTemplates.streamPathTemplate.match(streamName).project; + } + + /** + * Parse the location from Stream resource. + * + * @param {string} streamName + * A fully-qualified path representing Stream resource. + * @returns {string} A string representing the location. + */ + matchLocationFromStreamName(streamName: string) { + return this._pathTemplates.streamPathTemplate.match(streamName).location; + } + + /** + * Parse the stream from Stream resource. + * + * @param {string} streamName + * A fully-qualified path representing Stream resource. + * @returns {string} A string representing the stream. 
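+   * @example
+   * // A round-trip sketch of the path helpers above, not generator output;
+   * // the IDs are hypothetical.
+   * const name = client.streamPath('my-project', 'us-central1', 'my-stream');
+   * // name === 'projects/my-project/locations/us-central1/streams/my-stream'
+   * const stream = client.matchStreamFromStreamName(name); // 'my-stream'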
+   */
+  matchStreamFromStreamName(streamName: string) {
+    return this._pathTemplates.streamPathTemplate.match(streamName).stream;
+  }
+
+  /**
+   * Terminate the GRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   */
+  close(): Promise<void> {
+    if (!this._terminated) {
+      return this.bigQueryStorageStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-storage/src/v1beta1/big_query_storage_client_config.json.baseline b/baselines/bigquery-storage/src/v1beta1/big_query_storage_client_config.json.baseline
new file mode 100644
index 000000000..d687cae81
--- /dev/null
+++ b/baselines/bigquery-storage/src/v1beta1/big_query_storage_client_config.json.baseline
@@ -0,0 +1,46 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.storage.v1beta1.BigQueryStorage": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "CreateReadSession": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "ReadRows": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "BatchCreateReadSessionStreams": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "FinalizeStream": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "SplitReadStream": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-storage/src/v1beta1/big_query_storage_proto_list.json.baseline b/baselines/bigquery-storage/src/v1beta1/big_query_storage_proto_list.json.baseline
new file mode 100644
index 000000000..0b8010758
--- /dev/null
+++ b/baselines/bigquery-storage/src/v1beta1/big_query_storage_proto_list.json.baseline
@@ -0,0 +1,7 @@
+[
+  "../../protos/google/cloud/bigquery/storage/v1beta1/arrow.proto",
+  "../../protos/google/cloud/bigquery/storage/v1beta1/avro.proto",
+  "../../protos/google/cloud/bigquery/storage/v1beta1/read_options.proto",
+  "../../protos/google/cloud/bigquery/storage/v1beta1/storage.proto",
+  "../../protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto"
+]
diff --git a/baselines/bigquery-storage/src/v1beta1/index.ts.baseline b/baselines/bigquery-storage/src/v1beta1/index.ts.baseline
new file mode 100644
index 000000000..7346292b3
--- /dev/null
+++ b/baselines/bigquery-storage/src/v1beta1/index.ts.baseline
@@ -0,0 +1,19 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript.
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {BigQueryStorageClient} from './big_query_storage_client'; diff --git a/baselines/bigquery-storage/system-test/fixtures/sample/src/index.js.baseline b/baselines/bigquery-storage/system-test/fixtures/sample/src/index.js.baseline new file mode 100644 index 000000000..ef6bea63a --- /dev/null +++ b/baselines/bigquery-storage/system-test/fixtures/sample/src/index.js.baseline @@ -0,0 +1,27 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const storage = require('storage'); + +function main() { + const bigQueryStorageClient = new storage.BigQueryStorageClient(); +} + +main(); diff --git a/baselines/bigquery-storage/system-test/fixtures/sample/src/index.ts.baseline b/baselines/bigquery-storage/system-test/fixtures/sample/src/index.ts.baseline new file mode 100644 index 000000000..c0ea7baad --- /dev/null +++ b/baselines/bigquery-storage/system-test/fixtures/sample/src/index.ts.baseline @@ -0,0 +1,25 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {BigQueryStorageClient} from 'storage'; + +function main() { + const bigQueryStorageClient = new BigQueryStorageClient(); +} + +main(); diff --git a/baselines/bigquery-storage/system-test/install.ts.baseline b/baselines/bigquery-storage/system-test/install.ts.baseline new file mode 100644 index 000000000..140852a85 --- /dev/null +++ b/baselines/bigquery-storage/system-test/install.ts.baseline @@ -0,0 +1,49 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import { packNTest } from 'pack-n-play';
+import { readFileSync } from 'fs';
+import { describe, it } from 'mocha';
+
+describe('typescript consumer tests', () => {
+
+  it('should have correct type signature for typescript users', async function() {
+    this.timeout(300000);
+    const options = {
+      packageDir: process.cwd(), // path to your module.
+      sample: {
+        description: 'typescript based user can use the type definitions',
+        ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString()
+      }
+    };
+    await packNTest(options); // will throw upon error.
+  });
+
+  it('should have correct type signature for javascript users', async function() {
+    this.timeout(300000);
+    const options = {
+      packageDir: process.cwd(), // path to your module.
+      sample: {
+        description: 'javascript based user can use the type definitions',
+        ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString()
+      }
+    };
+    await packNTest(options); // will throw upon error.
+  });
+
+});
diff --git a/baselines/bigquery-storage/test/gapic-big_query_storage-v1beta1.ts.baseline b/baselines/bigquery-storage/test/gapic-big_query_storage-v1beta1.ts.baseline
new file mode 100644
index 000000000..f21cc6581
--- /dev/null
+++ b/baselines/bigquery-storage/test/gapic-big_query_storage-v1beta1.ts.baseline
@@ -0,0 +1,355 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
+import { describe, it } from 'mocha';
+const bigquerystorageModule = require('../src');
+
+import {PassThrough} from 'stream';
+
+
+const FAKE_STATUS_CODE = 1;
+class FakeError{
+  name: string;
+  message: string;
+  code: number;
+  constructor(n: number){
+    this.name = 'fakeName';
+    this.message = 'fake message';
+    this.code = n;
+  }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+  (err: FakeError|null, response?: {} | null): void;
+}
+
+export class Operation{
+  constructor(){};
+  promise() {};
+}
+function mockSimpleGrpcMethod(expectedRequest: {}, response: {} | null, error: FakeError | null) {
+  return (actualRequest: {}, options: {}, callback: Callback) => {
+    assert.deepStrictEqual(actualRequest, expectedRequest);
+    if (error) {
+      callback(error);
+    } else if (response) {
+      callback(null, response);
+    } else {
+      callback(null);
+    }
+  };
+}
+function mockServerStreamingGrpcMethod(expectedRequest: {}, response: {} | null, error: FakeError | null) {
+  return (actualRequest: {}) => {
+    assert.deepStrictEqual(actualRequest, expectedRequest);
+    const mockStream = new PassThrough({
+      objectMode: true,
+      transform: (chunk: {}, enc: {}, callback: Callback) => {
+        if (error) {
+          callback(error);
+        }
+        else {
+          callback(null, response);
+        }
+      }
+    });
+    return mockStream;
+  };
+}
+describe('v1beta1.BigQueryStorageClient', () => {
+  it('has servicePath', () => {
+    const servicePath = bigquerystorageModule.v1beta1.BigQueryStorageClient.servicePath;
+    assert(servicePath);
+  });
+  it('has apiEndpoint', () => {
+    const apiEndpoint = bigquerystorageModule.v1beta1.BigQueryStorageClient.apiEndpoint;
+    assert(apiEndpoint);
+  });
+  it('has port', () => {
+    const port = bigquerystorageModule.v1beta1.BigQueryStorageClient.port;
+    assert(port);
+    assert(typeof port === 'number');
+  });
+  it('should create a client with no option', () => {
+    const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient();
+    assert(client);
+  });
+  it('should create a client with gRPC fallback', () => {
+    const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+      fallback: true,
+    });
+    assert(client);
+  });
+  describe('createReadSession', () => {
+    it('invokes createReadSession without error', done => {
+      const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      // Mock request
+      const request: protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest = {};
+      request.tableReference = {};
+      request.tableReference.projectId = '';
+      request.tableReference.datasetId = '';
+      // Mock response
+      const expectedResponse = {};
+      // Mock gRPC layer
+      client._innerApiCalls.createReadSession = mockSimpleGrpcMethod(
+        request,
+        expectedResponse,
+        null
+      );
+      client.createReadSession(request, (err: {}, response: {}) => {
+        assert.ifError(err);
+        assert.deepStrictEqual(response, expectedResponse);
+        done();
+      })
+    });
+
+    it('invokes createReadSession with error', done => {
+      const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      // Mock request
+      const request: protosTypes.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest = {};
+      request.tableReference = {};
+      request.tableReference.projectId = '';
+      request.tableReference.datasetId = '';
+      // Mock response
+      const expectedResponse = {};
+      // Mock gRPC layer
+      client._innerApiCalls.createReadSession = mockSimpleGrpcMethod(
+        request,
+        null,
+        error
+      );
+      client.createReadSession(request, (err: FakeError, response: {}) => {
+        assert(err instanceof FakeError);
+        assert.strictEqual(err.code, FAKE_STATUS_CODE);
+        assert(typeof response === 'undefined');
+        done();
+      })
+    });
+  });
+  describe('batchCreateReadSessionStreams', () => {
+    it('invokes batchCreateReadSessionStreams without error', done => {
+      const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      // Mock request
+      const request: protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest = {};
+      request.session = {};
+      request.session.name = '';
+      // Mock response
+      const expectedResponse = {};
+      // Mock gRPC layer
+      client._innerApiCalls.batchCreateReadSessionStreams = mockSimpleGrpcMethod(
+        request,
+        expectedResponse,
+        null
+      );
+      client.batchCreateReadSessionStreams(request, (err: {}, response: {}) => {
+        assert.ifError(err);
+        assert.deepStrictEqual(response, expectedResponse);
+        done();
+      })
+    });
+
+    it('invokes batchCreateReadSessionStreams with error', done => {
+      const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      // Mock request
+      const request: protosTypes.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest = {};
+      request.session = {};
+      request.session.name = '';
+      // Mock response
+      const expectedResponse = {};
+      // Mock gRPC layer
+      client._innerApiCalls.batchCreateReadSessionStreams = mockSimpleGrpcMethod(
+        request,
+        null,
+        error
+      );
+      client.batchCreateReadSessionStreams(request, (err: FakeError, response: {}) => {
+        assert(err instanceof FakeError);
+        assert.strictEqual(err.code, FAKE_STATUS_CODE);
+        assert(typeof response === 'undefined');
+        done();
+      })
+    });
+  });
+  describe('finalizeStream', () => {
+    it('invokes finalizeStream without error', done => {
+      const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      // Mock request
+      const request: protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest = {};
+      request.stream = {};
+      request.stream.name = '';
+      // Mock response
+      const expectedResponse = {};
+      // Mock gRPC layer
+      client._innerApiCalls.finalizeStream = mockSimpleGrpcMethod(
+        request,
+        expectedResponse,
+        null
+      );
+      client.finalizeStream(request, (err: {}, response: {}) => {
+        assert.ifError(err);
+        assert.deepStrictEqual(response, expectedResponse);
+        done();
+      })
+    });
+
+    it('invokes finalizeStream with error', done => {
+      const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      // Mock request
+      const request: protosTypes.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest = {};
+      request.stream = {};
+      request.stream.name = '';
+      // Mock response
+      const expectedResponse = {};
+      // Mock gRPC layer
+      client._innerApiCalls.finalizeStream = mockSimpleGrpcMethod(
+        request,
+        null,
+        error
+      );
+      client.finalizeStream(request, (err: FakeError, response: {}) => {
+        assert(err instanceof FakeError);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE); + assert(typeof response === 'undefined'); + done(); + }) + }); + }); + describe('splitReadStream', () => { + it('invokes splitReadStream without error', done => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + // Mock request + const request: protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest = {}; + request.originalStream = {}; + request.originalStream.name = ''; + // Mock response + const expectedResponse = {}; + // Mock gRPC layer + client._innerApiCalls.splitReadStream = mockSimpleGrpcMethod( + request, + expectedResponse, + null + ); + client.splitReadStream(request, (err: {}, response: {}) => { + assert.ifError(err); + assert.deepStrictEqual(response, expectedResponse); + done(); + }) + }); + + it('invokes splitReadStream with error', done => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + // Mock request + const request: protosTypes.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest = {}; + request.originalStream = {}; + request.originalStream.name = ''; + // Mock response + const expectedResponse = {}; + // Mock gRPC layer + client._innerApiCalls.splitReadStream = mockSimpleGrpcMethod( + request, + null, + error + ); + client.splitReadStream(request, (err: FakeError, response: {}) => { + assert(err instanceof FakeError); + assert.strictEqual(err.code, FAKE_STATUS_CODE); + assert(typeof response === 'undefined'); + done(); + }) + }); + }); + describe('readRows', () => { + it('invokes readRows without error', done => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + // Mock request + const request: protosTypes.google.cloud.bigquery.storage.v1beta1.IReadRowsRequest = {}; + request.readPosition = {}; + request.readPosition.stream = {}; + request.readPosition.stream.name = ''; + // Mock response + const expectedResponse = {}; + // Mock gRPC layer + client._innerApiCalls.readRows = mockServerStreamingGrpcMethod(request, expectedResponse, null); + const stream = client.readRows(request); + stream.on('data', (response: {}) =>{ + assert.deepStrictEqual(response, expectedResponse); + done(); + }); + stream.on('error', (err: FakeError) => { + done(err); + }); + stream.write(); + }); + it('invokes readRows with error', done => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + // Mock request + const request: protosTypes.google.cloud.bigquery.storage.v1beta1.IReadRowsRequest = {}; + request.readPosition = {}; + request.readPosition.stream = {}; + request.readPosition.stream.name = ''; + // Mock response + const expectedResponse = {}; + // Mock gRPC layer + client._innerApiCalls.readRows = mockServerStreamingGrpcMethod(request, null, error); + const stream = client.readRows(request); + stream.on('data', () =>{ + assert.fail(); + }); + stream.on('error', (err: FakeError) => { + assert(err instanceof FakeError); + assert.strictEqual(err.code, FAKE_STATUS_CODE); + done(); + }); + stream.write(); + }); + }); +}); diff --git a/baselines/bigquery-storage/tsconfig.json.baseline b/baselines/bigquery-storage/tsconfig.json.baseline new file mode 100644 
index 000000000..613d35597 --- /dev/null +++ b/baselines/bigquery-storage/tsconfig.json.baseline @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2016", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/baselines/bigquery-storage/tslint.json.baseline b/baselines/bigquery-storage/tslint.json.baseline new file mode 100644 index 000000000..617dc975b --- /dev/null +++ b/baselines/bigquery-storage/tslint.json.baseline @@ -0,0 +1,3 @@ +{ + "extends": "gts/tslint.json" +} diff --git a/baselines/bigquery-storage/webpack.config.js.baseline b/baselines/bigquery-storage/webpack.config.js.baseline new file mode 100644 index 000000000..ad9db41c8 --- /dev/null +++ b/baselines/bigquery-storage/webpack.config.js.baseline @@ -0,0 +1,64 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'BigQueryStorage', + filename: './big-query-storage.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/baselines/showcase/test/gapic-echo-v1beta1.ts.baseline b/baselines/showcase/test/gapic-echo-v1beta1.ts.baseline index 4f5b41ea6..80b20ef95 100644 --- a/baselines/showcase/test/gapic-echo-v1beta1.ts.baseline +++ b/baselines/showcase/test/gapic-echo-v1beta1.ts.baseline @@ -284,7 +284,7 @@ describe('v1beta1.EchoClient', () => { projectId: 'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IExpandRequest = {}; // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -305,7 +305,7 @@ describe('v1beta1.EchoClient', () => { projectId: 'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IExpandRequest = {}; // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -329,7 +329,7 @@ describe('v1beta1.EchoClient', () => { projectId: 'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IEchoRequest = {}; // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -348,7 +348,7 @@ describe('v1beta1.EchoClient', () => { projectId: 
'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IEchoRequest = {}; // Mock response const expectedResponse = {}; // Mock gRPC layer diff --git a/baselines/showcase/test/gapic-messaging-v1beta1.ts.baseline b/baselines/showcase/test/gapic-messaging-v1beta1.ts.baseline index ffcc6c9cc..2d4a2deed 100644 --- a/baselines/showcase/test/gapic-messaging-v1beta1.ts.baseline +++ b/baselines/showcase/test/gapic-messaging-v1beta1.ts.baseline @@ -580,7 +580,8 @@ describe('v1beta1.MessagingClient', () => { projectId: 'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IStreamBlurbsRequest = {}; + request.name = ''; // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -601,7 +602,8 @@ describe('v1beta1.MessagingClient', () => { projectId: 'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IStreamBlurbsRequest = {}; + request.name = ''; // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -625,7 +627,7 @@ describe('v1beta1.MessagingClient', () => { projectId: 'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IConnectRequest = {}; // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -644,7 +646,7 @@ describe('v1beta1.MessagingClient', () => { projectId: 'bogus', }); // Mock request - const request = {}; + const request: protosTypes.google.showcase.v1beta1.IConnectRequest = {}; // Mock response const expectedResponse = {}; // Mock gRPC layer diff --git a/templates/typescript_gapic/test/gapic-$service-$version.ts.njk b/templates/typescript_gapic/test/gapic-$service-$version.ts.njk index 460eeda86..84f3e4969 100644 --- a/templates/typescript_gapic/test/gapic-$service-$version.ts.njk +++ b/templates/typescript_gapic/test/gapic-$service-$version.ts.njk @@ -254,7 +254,7 @@ describe('{{ api.naming.version }}.{{ service.name }}Client', () => { projectId: 'bogus', }); // Mock request - const request = {}; + {{ util.initRequestWithHeaderParam(method) }} // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -275,7 +275,7 @@ describe('{{ api.naming.version }}.{{ service.name }}Client', () => { projectId: 'bogus', }); // Mock request - const request = {}; + {{ util.initRequestWithHeaderParam(method) }} // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -301,7 +301,7 @@ describe('{{ api.naming.version }}.{{ service.name }}Client', () => { projectId: 'bogus', }); // Mock request - const request = {}; + {{ util.initRequestWithHeaderParam(method) }} // Mock response const expectedResponse = {}; // Mock gRPC layer @@ -320,7 +320,7 @@ describe('{{ api.naming.version }}.{{ service.name }}Client', () => { projectId: 'bogus', }); // Mock request - const request = {}; + {{ util.initRequestWithHeaderParam(method) }} // Mock response const expectedResponse = {}; // Mock gRPC layer diff --git a/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto new file mode 100644 index 000000000..3003de444 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto @@ -0,0 +1,37 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta1;
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
+option java_outer_classname = "ArrowProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta1";
+
+// Arrow schema.
+message ArrowSchema {
+  // IPC serialized Arrow schema.
+  bytes serialized_schema = 1;
+}
+
+// Arrow RecordBatch.
+message ArrowRecordBatch {
+  // IPC serialized Arrow RecordBatch.
+  bytes serialized_record_batch = 1;
+
+  // The count of rows in the returned block.
+  int64 row_count = 2;
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/avro.proto b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/avro.proto
new file mode 100644
index 000000000..021d8e44f
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/avro.proto
@@ -0,0 +1,38 @@
+// Copyright 2019 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta1;
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
+option java_outer_classname = "AvroProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta1";
+
+// Avro schema.
+message AvroSchema {
+  // JSON serialized schema, as described at
+  // https://avro.apache.org/docs/1.8.1/spec.html
+  string schema = 1;
+}
+
+// Avro rows.
+message AvroRows {
+  // Binary serialized rows in a block.
+  bytes serialized_binary_rows = 1;
+
+  // The count of rows in the returned block.
+  int64 row_count = 2;
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto
new file mode 100644
index 000000000..9591deba7
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto
@@ -0,0 +1,41 @@
+// Copyright 2019 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Options dictating how we read a table. +message TableReadOptions { + // Optional. Names of the fields in the table that should be read. If empty, + // all fields will be read. If the specified field is a nested field, all the + // sub-fields in the field will be selected. The output field order is + // unrelated to the order of fields in selected_fields. + repeated string selected_fields = 1; + + // Optional. SQL text filtering statement, similar to a WHERE clause in + // a query. Currently, only a single predicate that is a comparison between + // a column and a constant value is supported. Aggregates are not supported. + // + // Examples: "int_field > 5" + // "date_field = CAST('2014-9-27' as DATE)" + // "nullable_field is not NULL" + // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" + // "numeric_field BETWEEN 1.0 AND 5.0" + string row_restriction = 2; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/storage.proto b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/storage.proto new file mode 100644 index 000000000..22f742fbb --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/storage.proto @@ -0,0 +1,405 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1beta1/arrow.proto"; +import "google/cloud/bigquery/storage/v1beta1/avro.proto"; +import "google/cloud/bigquery/storage/v1beta1/read_options.proto"; +import "google/cloud/bigquery/storage/v1beta1/table_reference.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// BigQuery storage API. +// +// The BigQuery storage API can be used to read data stored in BigQuery. +service BigQueryStorage { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/bigquery.readonly," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a new read session. A read session divides the contents of a + // BigQuery table into one or more streams, which can then be used to read + // data from the table. The read session also specifies properties of the + // data to be read, such as a list of columns or a push-down filter describing + // the rows to be returned. 
+  //
+  // A particular row can be read by at most one stream. When the caller has
+  // reached the end of each stream in the session, then all the data in the
+  // table has been read.
+  //
+  // Read sessions automatically expire 24 hours after they are created and do
+  // not require manual clean-up by the caller.
+  rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) {
+    option (google.api.http) = {
+      post: "/v1beta1/{table_reference.project_id=projects/*}"
+      body: "*"
+      additional_bindings {
+        post: "/v1beta1/{table_reference.dataset_id=projects/*/datasets/*}"
+        body: "*"
+      }
+    };
+    option (google.api.method_signature) = "table_reference,parent,requested_streams";
+  }
+
+  // Reads rows from the table in the format prescribed by the read session.
+  // Each response contains one or more table rows, up to a maximum of 10 MiB
+  // per response; read requests which attempt to read individual rows larger
+  // than this will fail.
+  //
+  // Each request also returns a set of stream statistics reflecting the
+  // estimated total number of rows in the read stream. This number is computed
+  // based on the total table size and the number of active streams in the read
+  // session, and may change as other streams continue to read data.
+  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{read_position.stream.name=projects/*/streams/*}"
+    };
+    option (google.api.method_signature) = "read_position";
+  }
+
+  // Creates additional streams for a ReadSession. This API can be used to
+  // dynamically adjust the parallelism of a batch processing task upwards by
+  // adding additional workers.
+  rpc BatchCreateReadSessionStreams(BatchCreateReadSessionStreamsRequest) returns (BatchCreateReadSessionStreamsResponse) {
+    option (google.api.http) = {
+      post: "/v1beta1/{session.name=projects/*/sessions/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "session,requested_streams";
+  }
+
+  // Triggers the graceful termination of a single stream in a ReadSession. This
+  // API can be used to dynamically adjust the parallelism of a batch processing
+  // task downwards without losing data.
+  //
+  // This API does not delete the stream -- it remains visible in the
+  // ReadSession, and any data processed by the stream is not released to other
+  // streams. However, no additional data will be assigned to the stream once
+  // this call completes. Callers must continue reading data on the stream until
+  // the end of the stream is reached so that data which has already been
+  // assigned to the stream will be processed.
+  //
+  // This method will return an error if there are no other live streams
+  // in the Session, or if SplitReadStream() has been called on the given
+  // Stream.
+  rpc FinalizeStream(FinalizeStreamRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      post: "/v1beta1/{stream.name=projects/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "stream";
+  }
+
+  // Splits a given read stream into two Streams. These streams are referred to
+  // as the primary and the residual of the split. The original stream can still
+  // be read from in the same manner as before. Both of the returned streams can
+  // also be read from, and the total rows returned by both child streams will be
+  // the same as the rows read from the original stream.
+  //
+  // Moreover, the two child streams will be allocated back to back in the
+  // original Stream. Concretely, it is guaranteed that for streams Original,
+  // Primary, and Residual, that Original[0-j] = Primary[0-j] and
+  // Original[j-n] = Residual[0-m] once the streams have been read to
+  // completion.
+  //
+  // This method is guaranteed to be idempotent.
+  rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{original_stream.name=projects/*/streams/*}"
+    };
+    option (google.api.method_signature) = "original_stream";
+  }
+}
+
+// Information about a single data stream within a read session.
+message Stream {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/Stream"
+    pattern: "projects/{project}/locations/{location}/streams/{stream}"
+  };
+
+  // Name of the stream, in the form
+  // `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+  string name = 1;
+}
+
+// Expresses a point within a given stream using an offset position.
+message StreamPosition {
+  // Identifier for a given Stream.
+  Stream stream = 1;
+
+  // Position in the stream.
+  int64 offset = 2;
+}
+
+// Information returned from a `CreateReadSession` request.
+message ReadSession {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/ReadSession"
+    pattern: "projects/{project}/locations/{location}/sessions/{session}"
+  };
+
+  // Unique identifier for the session, in the form
+  // `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+  string name = 1;
+
+  // Time at which the session becomes invalid. After this time, subsequent
+  // requests to read this Session will return errors.
+  google.protobuf.Timestamp expire_time = 2;
+
+  // The schema for the read. If read_options.selected_fields is set, the
+  // schema may be different from the table schema as it will only contain
+  // the selected fields.
+  oneof schema {
+    // Avro schema.
+    AvroSchema avro_schema = 5;
+
+    // Arrow schema.
+    ArrowSchema arrow_schema = 6;
+  }
+
+  // Streams associated with this session.
+  repeated Stream streams = 4;
+
+  // Table that this ReadSession is reading from.
+  TableReference table_reference = 7;
+
+  // Any modifiers which are applied when reading from the specified table.
+  TableModifiers table_modifiers = 8;
+
+  // The strategy to use for distributing data among the streams.
+  ShardingStrategy sharding_strategy = 9;
+}
+
+// Creates a new read session, which may include additional options such as
+// requested parallelism, projection filters and constraints.
+message CreateReadSessionRequest {
+  // Required. Reference to the table to read.
+  TableReference table_reference = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. String of the form `projects/{project_id}` indicating the
+  // project this ReadSession is associated with. This is the project that will
+  // be billed for usage.
+  string parent = 6 [(google.api.field_behavior) = REQUIRED];
+
+  // Any modifiers to the Table (e.g. snapshot timestamp).
+  TableModifiers table_modifiers = 2;
+
+  // Initial number of streams. If unset or 0, we will
+  // provide a value of streams so as to produce reasonable throughput. Must be
+  // non-negative. The number of streams may be lower than the requested number,
+  // depending on the amount of parallelism that is reasonable for the table and
+  // the maximum amount of parallelism allowed by the system.
+  //
+  // Streams must be read starting from offset 0.
+  int32 requested_streams = 3;
+
+  // Read options for this session (e.g. column selection, filters).
+  TableReadOptions read_options = 4;
+
+  // Data output format. Currently defaults to Avro.
+  DataFormat format = 5;
+
+  // The strategy to use for distributing data among multiple streams. Currently
+  // defaults to liquid sharding.
+  ShardingStrategy sharding_strategy = 7;
+}
+
+// Data format for input or output data.
+enum DataFormat {
+  // Data format is unspecified.
+  DATA_FORMAT_UNSPECIFIED = 0;
+
+  // Avro is a standard open source row based file format.
+  // See https://avro.apache.org/ for more details.
+  AVRO = 1;
+
+  ARROW = 3;
+}
+
+// Strategy for distributing data among multiple streams in a read session.
+enum ShardingStrategy {
+  // Same as LIQUID.
+  SHARDING_STRATEGY_UNSPECIFIED = 0;
+
+  // Assigns data to each stream based on the client's read rate. The faster the
+  // client reads from a stream, the more data is assigned to the stream. In
+  // this strategy, it's possible to read all data from a single stream even if
+  // there are other streams present.
+  LIQUID = 1;
+
+  // Assigns data to each stream such that roughly the same number of rows can
+  // be read from each stream. Because the server-side unit for assigning data
+  // is collections of rows, the API does not guarantee that each stream will
+  // return the same number of rows. Additionally, the limits are enforced based
+  // on the number of pre-filtering rows, so some filters can lead to lopsided
+  // assignments.
+  BALANCED = 2;
+}
+
+// Requests to read row data via `ReadRows` must provide Stream position
+// information.
+message ReadRowsRequest {
+  // Required. Identifier of the position in the stream to start reading from.
+  // The offset requested must be less than the last row read from ReadRows.
+  // Requesting a larger offset is undefined.
+  StreamPosition read_position = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Progress information for a given Stream.
+message StreamStatus {
+  // Number of estimated rows in the current stream. May change over time as
+  // different readers in the stream progress at rates which are relatively fast
+  // or slow.
+  int64 estimated_row_count = 1;
+
+  // A value in the range [0.0, 1.0] that represents the fraction of rows
+  // assigned to this stream that have been processed by the server. In the
+  // presence of read filters, the server may process more rows than it returns,
+  // so this value reflects progress through the pre-filtering rows.
+  //
+  // This value is only populated for sessions created through the BALANCED
+  // sharding strategy.
+  float fraction_consumed = 2;
+
+  // Represents the progress of the current stream.
+  //
+  // Note: This value is under development and should not be used. Use
+  // `fraction_consumed` instead.
+  Progress progress = 4;
+
+  // Whether this stream can be split. For sessions that use the LIQUID sharding
+  // strategy, this value is always false. For BALANCED sessions, this value is
+  // false when enough data have been read such that no more splits are possible
+  // at that point or beyond. For small tables or streams that are the result of
+  // a chain of splits, this value may never be true.
+  bool is_splittable = 3;
+}
+
+message Progress {
+  // The fraction of rows assigned to the stream that have been processed by the
+  // server so far, not including the rows in the current response message.
+  //
+  // This value, along with `at_response_end`, can be used to interpolate the
+  // progress made as the rows in the message are being processed using the
+  // following formula: `at_response_start + (at_response_end -
+  // at_response_start) * rows_processed_from_response / rows_in_response`.
+  //
+  // Note that if a filter is provided, the `at_response_end` value of the
+  // previous response may not necessarily be equal to the `at_response_start`
+  // value of the current response.
+  float at_response_start = 1;
+
+  // Similar to `at_response_start`, except that this value includes the rows in
+  // the current response.
+  float at_response_end = 2;
+}
+
+// Information on whether the current connection is being throttled.
+message ThrottleStatus {
+  // How much this connection is being throttled.
+  // 0 is no throttling, 100 is completely throttled.
+  int32 throttle_percent = 1;
+}
+
+// Response from calling `ReadRows` may include row data, progress and
+// throttling information.
+message ReadRowsResponse {
+  // Row data is returned in the format specified during session creation.
+  oneof rows {
+    // Serialized row data in AVRO format.
+    AvroRows avro_rows = 3;
+
+    // Serialized row data in Arrow RecordBatch format.
+    ArrowRecordBatch arrow_record_batch = 4;
+  }
+
+  // Number of serialized rows in the rows block. This value is recorded here,
+  // in addition to the row_count values in the output-specific messages in
+  // `rows`, so that code which needs to record progress through the stream can
+  // do so in an output format-independent way.
+  int64 row_count = 6;
+
+  // Estimated stream statistics.
+  StreamStatus status = 2;
+
+  // Throttling status. If unset, the latest response still describes
+  // the current throttling status.
+  ThrottleStatus throttle_status = 5;
+}
+
+// Information needed to request additional streams for an established read
+// session.
+message BatchCreateReadSessionStreamsRequest {
+  // Required. Must be a non-expired session obtained from a call to
+  // CreateReadSession. Only the name field needs to be set.
+  ReadSession session = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Number of new streams requested. Must be positive.
+  // Number of added streams may be less than this, see CreateReadSessionRequest
+  // for more information.
+  int32 requested_streams = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The response from `BatchCreateReadSessionStreams` returns the stream
+// identifiers for the newly created streams.
+message BatchCreateReadSessionStreamsResponse {
+  // Newly added streams.
+  repeated Stream streams = 1;
+}
+
+// Request information for invoking `FinalizeStream`.
+message FinalizeStreamRequest {
+  // Stream to finalize.
+  Stream stream = 2;
+}
+
+// Request information for `SplitReadStream`.
+message SplitReadStreamRequest {
+  // Stream to split.
+  Stream original_stream = 1;
+
+  // A value in the range (0.0, 1.0) that specifies the fractional point at
+  // which the original stream should be split. The actual split point is
+  // evaluated on pre-filtered rows, so if a filter is provided, then there is
+  // no guarantee that the division of the rows between the new child streams
+  // will be proportional to this fractional value. Additionally, because the
+  // server-side unit for assigning data is collections of rows, this fraction
+  // will always map to a data storage boundary on the server side.
+  float fraction = 2;
+}
+
+// Response from `SplitReadStream`.
+message SplitReadStreamResponse {
+  // Primary stream, which contains the beginning portion of
+  // |original_stream|. An empty value indicates that the original stream can no
+  // longer be split.
+  Stream primary_stream = 1;
+
+  // Remainder stream, which contains the tail of |original_stream|. An empty
+  // value indicates that the original stream can no longer be split.
+  Stream remainder_stream = 2;
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto
new file mode 100644
index 000000000..a55dc48eb
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto
@@ -0,0 +1,43 @@
+// Copyright 2019 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta1;
+
+import "google/api/resource.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
+option java_outer_classname = "TableReferenceProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta1";
+
+// Table reference that includes just the 3 strings needed to identify a table.
+message TableReference {
+  // The assigned project ID of the project.
+  string project_id = 1;
+
+  // The ID of the dataset in the above project.
+  string dataset_id = 2;
+
+  // The ID of the table in the above dataset.
+  string table_id = 3;
+}
+
+// All fields in this message are optional.
+message TableModifiers {
+  // The snapshot time of the table. If not set, interpreted as now.
+  google.protobuf.Timestamp snapshot_time = 1;
+}
diff --git a/typescript/test/unit/baselines.ts b/typescript/test/unit/baselines.ts
index 8c1c94d4e..136d03a63 100644
--- a/typescript/test/unit/baselines.ts
+++ b/typescript/test/unit/baselines.ts
@@ -71,4 +71,11 @@ describe('Baseline tests', () => {
     protoPath: 'google/cloud/translate/v3beta1/*.proto',
     useCommonProto: true,
   });
+
+  runBaselineTest({
+    baselineName: 'bigquery-storage',
+    outputDir: '.test-out-bigquery-storage',
+    protoPath: 'google/cloud/bigquery/storage/v1beta1/*.proto',
+    useCommonProto: false,
+  });
 });