diff --git a/packages/google-cloud-videointelligence/.gitignore b/packages/google-cloud-videointelligence/.gitignore
index 38a485caf5f..5d32b23782f 100644
--- a/packages/google-cloud-videointelligence/.gitignore
+++ b/packages/google-cloud-videointelligence/.gitignore
@@ -1,13 +1,14 @@
**/*.log
**/node_modules
.coverage
+coverage
.nyc_output
docs/
out/
+build/
system-test/secrets.js
system-test/*key.json
*.lock
+.DS_Store
package-lock.json
-.vscode
__pycache__
-*.code-workspace
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/.jsdoc.js b/packages/google-cloud-videointelligence/.jsdoc.js
index 34cb55e9a47..38161bb575d 100644
--- a/packages/google-cloud-videointelligence/.jsdoc.js
+++ b/packages/google-cloud-videointelligence/.jsdoc.js
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
'use strict';
@@ -31,7 +34,8 @@ module.exports = {
source: {
excludePattern: '(^|\\/|\\\\)[._]',
include: [
- 'src'
+ 'build/src',
+ 'protos'
],
includePattern: '\\.js$'
},
@@ -42,7 +46,7 @@ module.exports = {
systemName: '@google-cloud/video-intelligence',
theme: 'lumen',
default: {
- "outputSourceFiles": false
+ outputSourceFiles: false
}
},
markdown: {
diff --git a/packages/google-cloud-videointelligence/package.json b/packages/google-cloud-videointelligence/package.json
index be0524a3073..ed93f1d410b 100644
--- a/packages/google-cloud-videointelligence/package.json
+++ b/packages/google-cloud-videointelligence/package.json
@@ -8,11 +8,10 @@
"node": ">=8.10.0"
},
"repository": "googleapis/nodejs-video-intelligence",
- "main": "src/index.js",
+ "main": "build/src/index.js",
"files": [
- "protos",
- "src",
- "AUTHORS",
+ "build/protos",
+ "build/src",
"LICENSE"
],
"keywords": [
@@ -30,31 +29,42 @@
],
"scripts": {
"docs": "jsdoc -c .jsdoc.js",
- "lint": "eslint '**/*.js'",
- "samples-test": "cd samples/ && npm link ../ && npm test && cd ../",
- "system-test": "mocha system-test/*.js --timeout 600000",
- "test": "c8 mocha",
+ "lint": "gts fix && eslint --fix samples/*.js",
+ "samples-test": "cd samples/ && npm link ../ && npm install && npm test && cd ../",
+ "system-test": "c8 mocha build/system-test --timeout 600000",
+ "test": "c8 mocha build/test",
"fix": "eslint --fix '**/*.js'",
"docs-test": "linkinator docs",
"predocs-test": "npm run docs",
- "prelint": "cd samples; npm link ../; npm i"
+ "prelint": "cd samples; npm link ../; npm i",
+ "clean": "gts clean",
+ "compile": "tsc -p . && cp -r protos build/",
+ "compile-protos": "compileProtos src",
+ "prepare": "npm run compile"
},
"dependencies": {
- "google-gax": "^1.7.5",
- "protobufjs": "^6.8.8"
+ "google-gax": "^1.14.2"
},
"devDependencies": {
- "codecov": "^3.4.0",
+ "@types/mocha": "^5.2.7",
+ "@types/node": "^12.12.29",
+ "c8": "^7.0.0",
"eslint": "^6.0.0",
"eslint-config-prettier": "^6.0.0",
"eslint-plugin-node": "^11.0.0",
"eslint-plugin-prettier": "^3.1.0",
- "jsdoc": "^3.6.2",
- "jsdoc-fresh": "^1.0.1",
- "jsdoc-region-tag": "^1.0.2",
- "linkinator": "^2.0.0",
- "mocha": "^7.0.0",
- "c8": "^7.0.0",
- "prettier": "^1.17.1"
+ "gts": "^1.1.2",
+ "jsdoc": "^3.6.3",
+ "jsdoc-fresh": "^1.0.2",
+ "jsdoc-region-tag": "^1.0.4",
+ "linkinator": "^2.0.3",
+ "mocha": "^7.1.0",
+ "null-loader": "^3.0.0",
+ "pack-n-play": "^1.0.0-2",
+ "prettier": "^1.17.1",
+ "ts-loader": "^6.2.1",
+ "typescript": "^3.8.3",
+ "webpack": "^4.42.0",
+ "webpack-cli": "^3.3.11"
}
}
diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto
index 6611e9eb2c6..ee7d618fbc2 100644
--- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto
+++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto
@@ -62,7 +62,7 @@ message AnnotateVideoRequest {
// supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](/storage/docs/reference-uris).
+ // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
// A video URI may include wildcards in `object-id`, and thus identify
// multiple videos. Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
@@ -85,7 +85,7 @@ message AnnotateVideoRequest {
// URIs are supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](/storage/docs/reference-uris).
+ // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Cloud region where annotation should take place. Supported cloud
diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto
index 8e80640e05c..690099751da 100644
--- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto
+++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto
@@ -64,7 +64,7 @@ message AnnotateVideoRequest {
// supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
- // more information, see [Request URIs](/storage/docs/reference-uris). A video
+ // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
// URI may include wildcards in `object-id`, and thus identify multiple
// videos. Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
@@ -87,7 +87,7 @@ message AnnotateVideoRequest {
// URIs are supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
- // more information, see [Request URIs](/storage/docs/reference-uris).
+ // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Cloud region where annotation should take place. Supported cloud
diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto
index 44d3ca64162..a54bddd07d0 100644
--- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto
+++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto
@@ -62,7 +62,7 @@ message AnnotateVideoRequest {
// supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](/storage/docs/reference-uris).
+ // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
// A video URI may include wildcards in `object-id`, and thus identify
// multiple videos. Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
@@ -85,7 +85,7 @@ message AnnotateVideoRequest {
// URIs are supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](/storage/docs/reference-uris).
+ // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Cloud region where annotation should take place. Supported cloud
diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto
index 044233b09d2..3b1d51cd775 100644
--- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto
+++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto
@@ -62,7 +62,7 @@ message AnnotateVideoRequest {
// supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](/storage/docs/reference-uris).
+ // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
// A video URI may include wildcards in `object-id`, and thus identify
// multiple videos. Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
@@ -85,7 +85,7 @@ message AnnotateVideoRequest {
// URIs are supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](/storage/docs/reference-uris).
+ // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Cloud region where annotation should take place. Supported cloud
diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto
index 942f63be8a9..3d418e2ff64 100644
--- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto
+++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto
@@ -76,7 +76,7 @@ message AnnotateVideoRequest {
// supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
- // more information, see [Request URIs](/storage/docs/reference-uris). A video
+ // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
// URI may include wildcards in `object-id`, and thus identify multiple
// videos. Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
@@ -99,7 +99,7 @@ message AnnotateVideoRequest {
// URIs are supported, which must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
- // more information, see [Request URIs](/storage/docs/reference-uris).
+ // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Cloud region where annotation should take place. Supported cloud
diff --git a/packages/google-cloud-videointelligence/src/index.js b/packages/google-cloud-videointelligence/src/index.js
deleted file mode 100644
index 45ce8cf6828..00000000000
--- a/packages/google-cloud-videointelligence/src/index.js
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2017, Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @namespace google.protobuf
- */
-/**
- * @namespace google.rpc
- */
-/**
- * @namespace google.longrunning
- */
-/**
- * @namespace google.cloud.videointelligence
- */
-/**
- * @namespace google.cloud.videointelligence.v1
- */
-/**
- * @namespace google.cloud.videointelligence.v1beta2
- */
-/**
- * @namespace google.cloud.videointelligence.v1p1beta1
- */
-/**
- * @namespace google.cloud.videointelligence.v1p2beta1
- */
-/**
- * @namespace google.cloud.videointelligence.v1p3beta1
- */
-
-'use strict';
-
-// Import the clients for each version supported by this package.
-const gapic = Object.freeze({
- v1: require('./v1'),
- v1beta2: require('./v1beta2'),
- v1p1beta1: require('./v1p1beta1'),
- v1p2beta1: require('./v1p2beta1'),
- v1p3beta1: require('./v1p3beta1'),
-});
-
-/**
- * The `@google-cloud/video-intelligence` package has the following named exports:
- *
- * - `VideoIntelligenceServiceClient` - Reference to
- * {@link v1.VideoIntelligenceServiceClient}
- * - `v1` - This is used for selecting or pinning a
- * particular backend service version. It exports:
- * - `VideoIntelligenceServiceClient` - Reference to
- * {@link v1.VideoIntelligenceServiceClient}
- * - `v1beta2` - This is used for selecting or pinning a
- * particular backend service version. It exports:
- * - `VideoIntelligenceServiceClient` - Reference to
- * {@link v1beta2.VideoIntelligenceServiceClient}
- * - `v1p1beta1` - This is used for selecting or pinning a
- * particular backend service version. It exports:
- * - `VideoIntelligenceServiceClient` - Reference to
- * {@link v1p1beta1.VideoIntelligenceServiceClient}
- * - `v1p2beta1` - This is used for selecting or pinning a
- * particular backend service version. It exports:
- * - `VideoIntelligenceServiceClient` - Reference to
- * {@link v1p2beta1.VideoIntelligenceServiceClient}
- * - `v1p3beta1` - This is used for selecting or pinning a
- * particular backend service version. It exports:
- * - `VideoIntelligenceServiceClient` - Reference to
- * {@link v1p3beta1.VideoIntelligenceServiceClient}
- *
- * @module {object} @google-cloud/video-intelligence
- * @alias nodejs-video-intelligence
- *
- * @example Install the client library with npm:
- * npm install --save @google-cloud/video-intelligence
- *
- * @example Import the client library:
- * const videoIntelligence = require('@google-cloud/video-intelligence');
- *
- * @example Create a client that uses Application Default Credentials (ADC):
- * const client = new videoIntelligence.VideoIntelligenceServiceClient();
- *
- * @example Create a client with explicit credentials:
- * const client = new videoIntelligence.VideoIntelligenceServiceClient({
- * projectId: 'your-project-id',
- * keyFilename: '/path/to/keyfile.json',
- * });
- *
- * @example include:samples/quickstart.js
- * region_tag:video_quickstart
- * Full quickstart example:
- */
-
-/**
- * @type {object}
- * @property {constructor} VideoIntelligenceServiceClient
- * Reference to {@link v1.VideoIntelligenceServiceClient}
- */
-module.exports = gapic.v1;
-
-/**
- * @type {object}
- * @property {constructor} VideoIntelligenceServiceClient
- * Reference to {@link v1.VideoIntelligenceServiceClient}
- */
-module.exports.v1 = gapic.v1;
-
-/**
- * @type {object}
- * @property {constructor} VideoIntelligenceServiceClient
- * Reference to {@link v1beta2.VideoIntelligenceServiceClient}
- */
-module.exports.v1beta2 = gapic.v1beta2;
-
-/**
- * @type {object}
- * @property {constructor} VideoIntelligenceServiceClient
- * Reference to {@link v1p1beta1.VideoIntelligenceServiceClient}
- */
-module.exports.v1p1beta1 = gapic.v1p1beta1;
-
-/**
- * @type {object}
- * @property {constructor} VideoIntelligenceServiceClient
- * Reference to {@link v1p2beta1.VideoIntelligenceServiceClient}
- */
-module.exports.v1p2beta1 = gapic.v1p2beta1;
-
-/**
- * @type {object}
- * @property {constructor} VideoIntelligenceServiceClient
- * Reference to {@link v1p3beta1.VideoIntelligenceServiceClient}
- */
-module.exports.v1p3beta1 = gapic.v1p3beta1;
-
-// Alias `module.exports` as `module.exports.default`, for future-proofing.
-module.exports.default = Object.assign({}, module.exports);
diff --git a/packages/google-cloud-videointelligence/src/index.ts b/packages/google-cloud-videointelligence/src/index.ts
new file mode 100644
index 00000000000..5def8117af1
--- /dev/null
+++ b/packages/google-cloud-videointelligence/src/index.ts
@@ -0,0 +1,46 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as v1 from './v1';
+import * as v1beta2 from './v1beta2';
+import * as v1p1beta1 from './v1p1beta1';
+import * as v1p2beta1 from './v1p2beta1';
+import * as v1p3beta1 from './v1p3beta1';
+const VideoIntelligenceServiceClient = v1.VideoIntelligenceServiceClient;
+const StreamingVideoIntelligenceServiceClient =
+ v1p3beta1.StreamingVideoIntelligenceServiceClient;
+export {
+ v1,
+ v1beta2,
+ v1p1beta1,
+ v1p2beta1,
+ v1p3beta1,
+ VideoIntelligenceServiceClient,
+ StreamingVideoIntelligenceServiceClient,
+};
+// For compatibility with JavaScript libraries we need to provide this default export:
+// tslint:disable-next-line no-default-export
+export default {
+ v1,
+ v1beta2,
+ v1p1beta1,
+ v1p2beta1,
+ v1p3beta1,
+ VideoIntelligenceServiceClient,
+ StreamingVideoIntelligenceServiceClient,
+};
diff --git a/packages/google-cloud-videointelligence/src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js
deleted file mode 100644
index e7ecaf471eb..00000000000
--- a/packages/google-cloud-videointelligence/src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js
+++ /dev/null
@@ -1,1159 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * Video annotation request.
- *
- * @property {string} inputUri
- * Input video location. Currently, only
- * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
- * supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- * A video URI may include wildcards in `object-id`, and thus identify
- * multiple videos. Supported wildcards: '*' to match 0 or more characters;
- * '?' to match 1 character. If unset, the input video should be embedded
- * in the request as `input_content`. If set, `input_content` should be unset.
- *
- * @property {Buffer} inputContent
- * The video data bytes.
- * If unset, the input video(s) should be specified via `input_uri`.
- * If set, `input_uri` should be unset.
- *
- * @property {number[]} features
- * Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1.Feature}
- *
- * @property {Object} videoContext
- * Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1.VideoContext}
- *
- * @property {string} outputUri
- * Optional. Location where the output (in JSON format) should be stored.
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
- * URIs are supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- *
- * @property {string} locationId
- * Optional. Cloud region where annotation should take place. Supported cloud
- * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
- * is specified, a region will be determined based on video file location.
- *
- * @typedef AnnotateVideoRequest
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.AnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const AnnotateVideoRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video context and/or feature-specific parameters.
- *
- * @property {Object[]} segments
- * Video segments to annotate. The segments may overlap and are not required
- * to be contiguous or span the whole video. If unspecified, each video is
- * treated as a single segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @property {Object} labelDetectionConfig
- * Config for LABEL_DETECTION.
- *
- * This object should have the same structure as [LabelDetectionConfig]{@link google.cloud.videointelligence.v1.LabelDetectionConfig}
- *
- * @property {Object} shotChangeDetectionConfig
- * Config for SHOT_CHANGE_DETECTION.
- *
- * This object should have the same structure as [ShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1.ShotChangeDetectionConfig}
- *
- * @property {Object} explicitContentDetectionConfig
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1.ExplicitContentDetectionConfig}
- *
- * @property {Object} faceDetectionConfig
- * Config for FACE_DETECTION.
- *
- * This object should have the same structure as [FaceDetectionConfig]{@link google.cloud.videointelligence.v1.FaceDetectionConfig}
- *
- * @property {Object} speechTranscriptionConfig
- * Config for SPEECH_TRANSCRIPTION.
- *
- * This object should have the same structure as [SpeechTranscriptionConfig]{@link google.cloud.videointelligence.v1.SpeechTranscriptionConfig}
- *
- * @property {Object} textDetectionConfig
- * Config for TEXT_DETECTION.
- *
- * This object should have the same structure as [TextDetectionConfig]{@link google.cloud.videointelligence.v1.TextDetectionConfig}
- *
- * @property {Object} objectTrackingConfig
- * Config for OBJECT_TRACKING.
- *
- * This object should have the same structure as [ObjectTrackingConfig]{@link google.cloud.videointelligence.v1.ObjectTrackingConfig}
- *
- * @typedef VideoContext
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const VideoContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for LABEL_DETECTION.
- *
- * @property {number} labelDetectionMode
- * What labels should be detected with LABEL_DETECTION, in addition to
- * video-level labels or segment-level labels.
- * If unspecified, defaults to `SHOT_MODE`.
- *
- * The number should be among the values of [LabelDetectionMode]{@link google.cloud.videointelligence.v1.LabelDetectionMode}
- *
- * @property {boolean} stationaryCamera
- * Whether the video has been shot from a stationary (i.e. non-moving) camera.
- * When set to true, might improve detection accuracy for moving objects.
- * Should be used with `SHOT_AND_FRAME_MODE` enabled.
- *
- * @property {string} model
- * Model to use for label detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @property {number} frameConfidenceThreshold
- * The confidence threshold we perform filtering on the labels from
- * frame-level detection. If not set, it is set to 0.4 by default. The valid
- * range for this threshold is [0.1, 0.9]. Any value set outside of this
- * range will be clipped.
- * Note: for best results please follow the default threshold. We will update
- * the default threshold everytime when we release a new model.
- *
- * @property {number} videoConfidenceThreshold
- * The confidence threshold we perform filtering on the labels from
- * video-level and shot-level detections. If not set, it is set to 0.3 by
- * default. The valid range for this threshold is [0.1, 0.9]. Any value set
- * outside of this range will be clipped.
- * Note: for best results please follow the default threshold. We will update
- * the default threshold everytime when we release a new model.
- *
- * @typedef LabelDetectionConfig
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const LabelDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SHOT_CHANGE_DETECTION.
- *
- * @property {string} model
- * Model to use for shot change detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ShotChangeDetectionConfig
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.ShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const ShotChangeDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for OBJECT_TRACKING.
- *
- * @property {string} model
- * Model to use for object tracking.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ObjectTrackingConfig
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.ObjectTrackingConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const ObjectTrackingConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for FACE_DETECTION.
- *
- * @property {string} model
- * Model to use for face detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @property {boolean} includeBoundingBoxes
- * Whether bounding boxes be included in the face annotation output.
- *
- * @typedef FaceDetectionConfig
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.FaceDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const FaceDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * @property {string} model
- * Model to use for explicit content detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ExplicitContentDetectionConfig
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.ExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const ExplicitContentDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for TEXT_DETECTION.
- *
- * @property {string[]} languageHints
- * Language hint can be specified if the language to be detected is known a
- * priori. It can increase the accuracy of the detection. Language hint must
- * be language code in BCP-47 format.
- *
- * Automatic language detection is performed if no hint is provided.
- *
- * @property {string} model
- * Model to use for text detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef TextDetectionConfig
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.TextDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const TextDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment.
- *
- * @property {Object} startTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the start of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the end of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef VideoSegment
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.VideoSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const VideoSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for label detection.
- *
- * @property {Object} segment
- * Video segment where a label was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelSegment
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.LabelSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const LabelSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for label detection.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelFrame
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.LabelFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const LabelFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Detected entity from video analysis.
- *
- * @property {string} entityId
- * Opaque entity ID. Some IDs may be available in
- * [Google Knowledge Graph Search
- * API](https://developers.google.com/knowledge-graph/).
- *
- * @property {string} description
- * Textual description, e.g. `Fixed-gear bicycle`.
- *
- * @property {string} languageCode
- * Language code for `description` in BCP-47 format.
- *
- * @typedef Entity
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const Entity = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Label annotation.
- *
- * @property {Object} entity
- * Detected entity.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1.Entity}
- *
- * @property {Object[]} categoryEntities
- * Common categories for the detected entity.
- * E.g. when the label is `Terrier` the category is likely `dog`. And in some
- * cases there might be more than one categories e.g. `Terrier` could also be
- * a `pet`.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1.Entity}
- *
- * @property {Object[]} segments
- * All video segments where a label was detected.
- *
- * This object should have the same structure as [LabelSegment]{@link google.cloud.videointelligence.v1.LabelSegment}
- *
- * @property {Object[]} frames
- * All video frames where a label was detected.
- *
- * This object should have the same structure as [LabelFrame]{@link google.cloud.videointelligence.v1.LabelFrame}
- *
- * @typedef LabelAnnotation
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.LabelAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const LabelAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for explicit content.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} pornographyLikelihood
- * Likelihood of the pornography content..
- *
- * The number should be among the values of [Likelihood]{@link google.cloud.videointelligence.v1.Likelihood}
- *
- * @typedef ExplicitContentFrame
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.ExplicitContentFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const ExplicitContentFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Explicit content annotation (based on per-frame visual signals only).
- * If no explicit content has been detected in a frame, no annotations are
- * present for that frame.
- *
- * @property {Object[]} frames
- * All video frames where explicit content was detected.
- *
- * This object should have the same structure as [ExplicitContentFrame]{@link google.cloud.videointelligence.v1.ExplicitContentFrame}
- *
- * @typedef ExplicitContentAnnotation
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.ExplicitContentAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const ExplicitContentAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Normalized bounding box.
- * The normalized vertex coordinates are relative to the original image.
- * Range: [0, 1].
- *
- * @property {number} left
- * Left X coordinate.
- *
- * @property {number} top
- * Top Y coordinate.
- *
- * @property {number} right
- * Right X coordinate.
- *
- * @property {number} bottom
- * Bottom Y coordinate.
- *
- * @typedef NormalizedBoundingBox
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.NormalizedBoundingBox definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const NormalizedBoundingBox = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for face detection.
- *
- * @property {Object} segment
- * Video segment where a face was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @typedef FaceSegment
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.FaceSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const FaceSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for face detection.
- *
- * @property {Object[]} normalizedBoundingBoxes
- * Normalized Bounding boxes in a frame.
- * There can be more than one boxes if the same face is detected in multiple
- * locations within the current frame.
- *
- * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1.NormalizedBoundingBox}
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef FaceFrame
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.FaceFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const FaceFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Face annotation.
- *
- * @property {Buffer} thumbnail
- * Thumbnail of a representative face view (in JPEG format).
- *
- * @property {Object[]} segments
- * All video segments where a face was detected.
- *
- * This object should have the same structure as [FaceSegment]{@link google.cloud.videointelligence.v1.FaceSegment}
- *
- * @property {Object[]} frames
- * All video frames where a face was detected.
- *
- * This object should have the same structure as [FaceFrame]{@link google.cloud.videointelligence.v1.FaceFrame}
- *
- * @typedef FaceAnnotation
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.FaceAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const FaceAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation results for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {Object} segment
- * Video segment on which the annotation is run.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @property {Object[]} segmentLabelAnnotations
- * Topical label annotations on video level or user specified segment level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1.LabelAnnotation}
- *
- * @property {Object[]} segmentPresenceLabelAnnotations
- * Presence label annotations on video level or user specified segment level.
- * There is exactly one element for each unique label. Compared to the
- * existing topical `segment_label_annotations`, this field presents more
- * fine-grained, segment-level labels detected in video content and is made
- * available only when the client sets `LabelDetectionConfig.model` to
- * "builtin/latest" in the request.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1.LabelAnnotation}
- *
- * @property {Object[]} shotLabelAnnotations
- * Topical label annotations on shot level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1.LabelAnnotation}
- *
- * @property {Object[]} shotPresenceLabelAnnotations
- * Presence label annotations on shot level. There is exactly one element for
- * each unique label. Compared to the existing topical
- * `shot_label_annotations`, this field presents more fine-grained, shot-level
- * labels detected in video content and is made available only when the client
- * sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1.LabelAnnotation}
- *
- * @property {Object[]} frameLabelAnnotations
- * Label annotations on frame level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1.LabelAnnotation}
- *
- * @property {Object[]} faceAnnotations
- * Face annotations. There is exactly one element for each unique face.
- *
- * This object should have the same structure as [FaceAnnotation]{@link google.cloud.videointelligence.v1.FaceAnnotation}
- *
- * @property {Object[]} shotAnnotations
- * Shot annotations. Each shot is represented as a video segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @property {Object} explicitAnnotation
- * Explicit content annotation.
- *
- * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1.ExplicitContentAnnotation}
- *
- * @property {Object[]} speechTranscriptions
- * Speech transcription.
- *
- * This object should have the same structure as [SpeechTranscription]{@link google.cloud.videointelligence.v1.SpeechTranscription}
- *
- * @property {Object[]} textAnnotations
- * OCR text detection and tracking.
- * Annotations for list of detected text snippets. Each will have list of
- * frame information associated with it.
- *
- * This object should have the same structure as [TextAnnotation]{@link google.cloud.videointelligence.v1.TextAnnotation}
- *
- * @property {Object[]} objectAnnotations
- * Annotations for list of objects detected and tracked in video.
- *
- * This object should have the same structure as [ObjectTrackingAnnotation]{@link google.cloud.videointelligence.v1.ObjectTrackingAnnotation}
- *
- * @property {Object} error
- * If set, indicates an error. Note that for a single `AnnotateVideoRequest`
- * some videos may succeed and some may fail.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @typedef VideoAnnotationResults
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.VideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const VideoAnnotationResults = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation response. Included in the `response`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationResults
- * Annotation results for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationResults]{@link google.cloud.videointelligence.v1.VideoAnnotationResults}
- *
- * @typedef AnnotateVideoResponse
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.AnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const AnnotateVideoResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation progress for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {number} progressPercent
- * Approximate percentage processed thus far. Guaranteed to be
- * 100 when fully processed.
- *
- * @property {Object} startTime
- * Time when the request was received.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {Object} updateTime
- * Time of the most recent update.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {number} feature
- * Specifies which feature is being tracked if the request contains more than
- * one features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1.Feature}
- *
- * @property {Object} segment
- * Specifies which segment is being tracked if the request contains more than
- * one segments.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @typedef VideoAnnotationProgress
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.VideoAnnotationProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const VideoAnnotationProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation progress. Included in the `metadata`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationProgress
- * Progress metadata for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationProgress]{@link google.cloud.videointelligence.v1.VideoAnnotationProgress}
- *
- * @typedef AnnotateVideoProgress
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.AnnotateVideoProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const AnnotateVideoProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SPEECH_TRANSCRIPTION.
- *
- * @property {string} languageCode
- * Required. *Required* The language of the supplied audio as a
- * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- * Example: "en-US".
- * See [Language Support](https://cloud.google.com/speech/docs/languages)
- * for a list of the currently supported language codes.
- *
- * @property {number} maxAlternatives
- * Optional. Maximum number of recognition hypotheses to be returned.
- * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- * within each `SpeechTranscription`. The server may return fewer than
- * `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
- * return a maximum of one. If omitted, will return a maximum of one.
- *
- * @property {boolean} filterProfanity
- * Optional. If set to `true`, the server will attempt to filter out
- * profanities, replacing all but the initial character in each filtered word
- * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- * won't be filtered out.
- *
- * @property {Object[]} speechContexts
- * Optional. A means to provide context to assist the speech recognition.
- *
- * This object should have the same structure as [SpeechContext]{@link google.cloud.videointelligence.v1.SpeechContext}
- *
- * @property {boolean} enableAutomaticPunctuation
- * Optional. If 'true', adds punctuation to recognition result hypotheses.
- * This feature is only available in select languages. Setting this for
- * requests in other languages has no effect at all. The default 'false' value
- * does not add punctuation to result hypotheses. NOTE: "This is currently
- * offered as an experimental service, complimentary to all users. In the
- * future this may be exclusively available as a premium feature."
- *
- * @property {number[]} audioTracks
- * Optional. For file formats, such as MXF or MKV, supporting multiple audio
- * tracks, specify up to two tracks. Default: track 0.
- *
- * @property {boolean} enableSpeakerDiarization
- * Optional. If 'true', enables speaker detection for each recognized word in
- * the top alternative of the recognition result using a speaker_tag provided
- * in the WordInfo.
- * Note: When this is true, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- *
- * @property {number} diarizationSpeakerCount
- * Optional. If set, specifies the estimated number of speakers in the conversation.
- * If not set, defaults to '2'.
- * Ignored unless enable_speaker_diarization is set to true.
- *
- * @property {boolean} enableWordConfidence
- * Optional. If `true`, the top result includes a list of words and the
- * confidence for those words. If `false`, no word-level confidence
- * information is returned. The default is `false`.
- *
- * @typedef SpeechTranscriptionConfig
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.SpeechTranscriptionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const SpeechTranscriptionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Provides "hints" to the speech recognizer to favor specific words and phrases
- * in the results.
- *
- * @property {string[]} phrases
- * Optional. A list of strings containing words and phrases "hints" so that
- * the speech recognition is more likely to recognize them. This can be used
- * to improve the accuracy for specific words and phrases, for example, if
- * specific commands are typically spoken by the user. This can also be used
- * to add additional words to the vocabulary of the recognizer. See
- * [usage limits](https://cloud.google.com/speech/limits#content).
- *
- * @typedef SpeechContext
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.SpeechContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const SpeechContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A speech recognition result corresponding to a portion of the audio.
- *
- * @property {Object[]} alternatives
- * May contain one or more recognition hypotheses (up to the maximum specified
- * in `max_alternatives`). These alternatives are ordered in terms of
- * accuracy, with the top (first) alternative being the most probable, as
- * ranked by the recognizer.
- *
- * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.videointelligence.v1.SpeechRecognitionAlternative}
- *
- * @property {string} languageCode
- * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
- * the language in this result. This language code was detected to have the
- * most likelihood of being spoken in the audio.
- *
- * @typedef SpeechTranscription
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.SpeechTranscription definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const SpeechTranscription = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Alternative hypotheses (a.k.a. n-best list).
- *
- * @property {string} transcript
- * Transcript text representing the words that the user spoke.
- *
- * @property {number} confidence
- * Output only. The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative.
- * This field is not guaranteed to be accurate and users should not rely on it
- * to be always provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {Object[]} words
- * Output only. A list of word-specific information for each recognized word.
- * Note: When `enable_speaker_diarization` is true, you will see all the words
- * from the beginning of the audio.
- *
- * This object should have the same structure as [WordInfo]{@link google.cloud.videointelligence.v1.WordInfo}
- *
- * @typedef SpeechRecognitionAlternative
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.SpeechRecognitionAlternative definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const SpeechRecognitionAlternative = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Word-specific information for recognized words. Word information is only
- * included in the response when certain request parameters are set, such
- * as `enable_word_time_offsets`.
- *
- * @property {Object} startTime
- * Time offset relative to the beginning of the audio, and
- * corresponding to the start of the spoken word. This field is only set if
- * `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- * experimental feature and the accuracy of the time offset can vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTime
- * Time offset relative to the beginning of the audio, and
- * corresponding to the end of the spoken word. This field is only set if
- * `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- * experimental feature and the accuracy of the time offset can vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {string} word
- * The word corresponding to this set of information.
- *
- * @property {number} confidence
- * Output only. The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative.
- * This field is not guaranteed to be accurate and users should not rely on it
- * to be always provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {number} speakerTag
- * Output only. A distinct integer value is assigned for every speaker within
- * the audio. This field specifies which one of those speakers was detected to
- * have spoken this word. Value ranges from 1 up to diarization_speaker_count,
- * and is only set if speaker diarization is enabled.
- *
- * @typedef WordInfo
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.WordInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const WordInfo = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A vertex represents a 2D point in the image.
- * NOTE: the normalized vertex coordinates are relative to the original image
- * and range from 0 to 1.
- *
- * @property {number} x
- * X coordinate.
- *
- * @property {number} y
- * Y coordinate.
- *
- * @typedef NormalizedVertex
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.NormalizedVertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const NormalizedVertex = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Normalized bounding polygon for text (that might not be aligned with axis).
- * Contains list of the corner points in clockwise order starting from
- * top-left corner. For example, for a rectangular bounding box:
- * When the text is horizontal it might look like:
- * 0----1
- * | |
- * 3----2
- *
- * When it's clockwise rotated 180 degrees around the top-left corner it
- * becomes:
- * 2----3
- * | |
- * 1----0
- *
- * and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- * than 0, or greater than 1 due to trignometric calculations for location of
- * the box.
- *
- * @property {Object[]} vertices
- * Normalized vertices of the bounding polygon.
- *
- * This object should have the same structure as [NormalizedVertex]{@link google.cloud.videointelligence.v1.NormalizedVertex}
- *
- * @typedef NormalizedBoundingPoly
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.NormalizedBoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const NormalizedBoundingPoly = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for text detection.
- *
- * @property {Object} segment
- * Video segment where a text snippet was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @property {number} confidence
- * Confidence for the track of detected text. It is calculated as the highest
- * over all frames where OCR detected text appears.
- *
- * @property {Object[]} frames
- * Information related to the frames where OCR detected text appears.
- *
- * This object should have the same structure as [TextFrame]{@link google.cloud.videointelligence.v1.TextFrame}
- *
- * @typedef TextSegment
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.TextSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const TextSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for text annotation (OCR).
- * Contains information regarding timestamp and bounding box locations for the
- * frames containing detected OCR text snippets.
- *
- * @property {Object} rotatedBoundingBox
- * Bounding polygon of the detected text for this frame.
- *
- * This object should have the same structure as [NormalizedBoundingPoly]{@link google.cloud.videointelligence.v1.NormalizedBoundingPoly}
- *
- * @property {Object} timeOffset
- * Timestamp of this frame.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef TextFrame
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.TextFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const TextFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotations related to one detected OCR text snippet. This will contain the
- * corresponding text, confidence value, and frame level information for each
- * detection.
- *
- * @property {string} text
- * The detected text.
- *
- * @property {Object[]} segments
- * All video segments where OCR detected text appears.
- *
- * This object should have the same structure as [TextSegment]{@link google.cloud.videointelligence.v1.TextSegment}
- *
- * @typedef TextAnnotation
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const TextAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotations for object detection and tracking. This field
- * stores per frame location, time offset, and confidence.
- *
- * @property {Object} normalizedBoundingBox
- * The normalized bounding box location of this object track for the frame.
- *
- * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1.NormalizedBoundingBox}
- *
- * @property {Object} timeOffset
- * The timestamp of the frame in microseconds.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef ObjectTrackingFrame
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.ObjectTrackingFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const ObjectTrackingFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotations corresponding to one tracked object.
- *
- * @property {Object} segment
- * Non-streaming batch mode ONLY.
- * Each object track corresponds to one video segment where it appears.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1.VideoSegment}
- *
- * @property {number} trackId
- * Streaming mode ONLY.
- * In streaming mode, we do not know the end time of a tracked object
- * before it is completed. Hence, there is no VideoSegment info returned.
- * Instead, we provide a unique identifiable integer track_id so that
- * the customers can correlate the results of the ongoing
- * ObjectTrackAnnotation of the same track_id over time.
- *
- * @property {Object} entity
- * Entity to specify the object category that this track is labeled as.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1.Entity}
- *
- * @property {number} confidence
- * Object category's labeling confidence of this track.
- *
- * @property {Object[]} frames
- * Information corresponding to all frames where this object track appears.
- * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
- * messages in frames.
- * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
- *
- * This object should have the same structure as [ObjectTrackingFrame]{@link google.cloud.videointelligence.v1.ObjectTrackingFrame}
- *
- * @typedef ObjectTrackingAnnotation
- * @memberof google.cloud.videointelligence.v1
- * @see [google.cloud.videointelligence.v1.ObjectTrackingAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto}
- */
-const ObjectTrackingAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation feature.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1
- */
-const Feature = {
-
- /**
- * Unspecified.
- */
- FEATURE_UNSPECIFIED: 0,
-
- /**
- * Label detection. Detect objects, such as dog or flower.
- */
- LABEL_DETECTION: 1,
-
- /**
- * Shot change detection.
- */
- SHOT_CHANGE_DETECTION: 2,
-
- /**
- * Explicit content detection.
- */
- EXPLICIT_CONTENT_DETECTION: 3,
-
- /**
- * Human face detection and tracking.
- */
- FACE_DETECTION: 4,
-
- /**
- * Speech transcription.
- */
- SPEECH_TRANSCRIPTION: 6,
-
- /**
- * OCR text detection and tracking.
- */
- TEXT_DETECTION: 7,
-
- /**
- * Object detection and tracking.
- */
- OBJECT_TRACKING: 9
-};
-
-/**
- * Label detection mode.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1
- */
-const LabelDetectionMode = {
-
- /**
- * Unspecified.
- */
- LABEL_DETECTION_MODE_UNSPECIFIED: 0,
-
- /**
- * Detect shot-level labels.
- */
- SHOT_MODE: 1,
-
- /**
- * Detect frame-level labels.
- */
- FRAME_MODE: 2,
-
- /**
- * Detect both shot-level and frame-level labels.
- */
- SHOT_AND_FRAME_MODE: 3
-};
-
-/**
- * Bucketized representation of likelihood.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1
- */
-const Likelihood = {
-
- /**
- * Unspecified likelihood.
- */
- LIKELIHOOD_UNSPECIFIED: 0,
-
- /**
- * Very unlikely.
- */
- VERY_UNLIKELY: 1,
-
- /**
- * Unlikely.
- */
- UNLIKELY: 2,
-
- /**
- * Possible.
- */
- POSSIBLE: 3,
-
- /**
- * Likely.
- */
- LIKELY: 4,
-
- /**
- * Very likely.
- */
- VERY_LIKELY: 5
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1/doc/google/longrunning/doc_operations.js b/packages/google-cloud-videointelligence/src/v1/doc/google/longrunning/doc_operations.js
deleted file mode 100644
index 099e418d620..00000000000
--- a/packages/google-cloud-videointelligence/src/v1/doc/google/longrunning/doc_operations.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * This resource represents a long-running operation that is the result of a
- * network API call.
- *
- * @property {string} name
- * The server-assigned name, which is only unique within the same service that
- * originally returns it. If you use the default HTTP mapping, the
- * `name` should be a resource name ending with `operations/{unique_id}`.
- *
- * @property {Object} metadata
- * Service-specific metadata associated with the operation. It typically
- * contains progress information and common metadata such as create time.
- * Some services might not provide such metadata. Any method that returns a
- * long-running operation should document the metadata type, if any.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @property {boolean} done
- * If the value is `false`, it means the operation is still in progress.
- * If `true`, the operation is completed, and either `error` or `response` is
- * available.
- *
- * @property {Object} error
- * The error result of the operation in case of failure or cancellation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} response
- * The normal response of the operation in case of success. If the original
- * method returns no data on success, such as `Delete`, the response is
- * `google.protobuf.Empty`. If the original method is standard
- * `Get`/`Create`/`Update`, the response should be the resource. For other
- * methods, the response should have the type `XxxResponse`, where `Xxx`
- * is the original method name. For example, if the original method name
- * is `TakeSnapshot()`, the inferred response type is
- * `TakeSnapshotResponse`.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Operation
- * @memberof google.longrunning
- * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
- */
-const Operation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1/doc/google/protobuf/doc_any.js b/packages/google-cloud-videointelligence/src/v1/doc/google/protobuf/doc_any.js
deleted file mode 100644
index 813682aa336..00000000000
--- a/packages/google-cloud-videointelligence/src/v1/doc/google/protobuf/doc_any.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * `Any` contains an arbitrary serialized protocol buffer message along with a
- * URL that describes the type of the serialized message.
- *
- * Protobuf library provides support to pack/unpack Any values in the form
- * of utility functions or additional generated methods of the Any type.
- *
- * Example 1: Pack and unpack a message in C++.
- *
- * Foo foo = ...;
- * Any any;
- * any.PackFrom(foo);
- * ...
- * if (any.UnpackTo(&foo)) {
- * ...
- * }
- *
- * Example 2: Pack and unpack a message in Java.
- *
- * Foo foo = ...;
- * Any any = Any.pack(foo);
- * ...
- * if (any.is(Foo.class)) {
- * foo = any.unpack(Foo.class);
- * }
- *
- * Example 3: Pack and unpack a message in Python.
- *
- * foo = Foo(...)
- * any = Any()
- * any.Pack(foo)
- * ...
- * if any.Is(Foo.DESCRIPTOR):
- * any.Unpack(foo)
- * ...
- *
- * Example 4: Pack and unpack a message in Go
- *
- * foo := &pb.Foo{...}
- * any, err := ptypes.MarshalAny(foo)
- * ...
- * foo := &pb.Foo{}
- * if err := ptypes.UnmarshalAny(any, foo); err != nil {
- * ...
- * }
- *
- * The pack methods provided by protobuf library will by default use
- * 'type.googleapis.com/full.type.name' as the type URL and the unpack
- * methods only use the fully qualified type name after the last '/'
- * in the type URL, for example "foo.bar.com/x/y.z" will yield type
- * name "y.z".
- *
- *
- * # JSON
- *
- * The JSON representation of an `Any` value uses the regular
- * representation of the deserialized, embedded message, with an
- * additional field `@type` which contains the type URL. Example:
- *
- * package google.profile;
- * message Person {
- * string first_name = 1;
- * string last_name = 2;
- * }
- *
- * {
- * "@type": "type.googleapis.com/google.profile.Person",
- * "firstName": ,
- * "lastName":
- * }
- *
- * If the embedded message type is well-known and has a custom JSON
- * representation, that representation will be embedded adding a field
- * `value` which holds the custom JSON in addition to the `@type`
- * field. Example (for message google.protobuf.Duration):
- *
- * {
- * "@type": "type.googleapis.com/google.protobuf.Duration",
- * "value": "1.212s"
- * }
- *
- * @property {string} typeUrl
- * A URL/resource name that uniquely identifies the type of the serialized
- * protocol buffer message. This string must contain at least
- * one "/" character. The last segment of the URL's path must represent
- * the fully qualified name of the type (as in
- * `path/google.protobuf.Duration`). The name should be in a canonical form
- * (e.g., leading "." is not accepted).
- *
- * In practice, teams usually precompile into the binary all types that they
- * expect it to use in the context of Any. However, for URLs which use the
- * scheme `http`, `https`, or no scheme, one can optionally set up a type
- * server that maps type URLs to message definitions as follows:
- *
- * * If no scheme is provided, `https` is assumed.
- * * An HTTP GET on the URL must yield a google.protobuf.Type
- * value in binary format, or produce an error.
- * * Applications are allowed to cache lookup results based on the
- * URL, or have them precompiled into a binary to avoid any
- * lookup. Therefore, binary compatibility needs to be preserved
- * on changes to types. (Use versioned type names to manage
- * breaking changes.)
- *
- * Note: this functionality is not currently available in the official
- * protobuf release, and it is not used for type URLs beginning with
- * type.googleapis.com.
- *
- * Schemes other than `http`, `https` (or the empty scheme) might be
- * used with implementation specific semantics.
- *
- * @property {Buffer} value
- * Must be a valid serialized protocol buffer of the above specified type.
- *
- * @typedef Any
- * @memberof google.protobuf
- * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
- */
-const Any = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1/doc/google/protobuf/doc_duration.js b/packages/google-cloud-videointelligence/src/v1/doc/google/protobuf/doc_duration.js
deleted file mode 100644
index bd4b4ee6067..00000000000
--- a/packages/google-cloud-videointelligence/src/v1/doc/google/protobuf/doc_duration.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * A Duration represents a signed, fixed-length span of time represented
- * as a count of seconds and fractions of seconds at nanosecond
- * resolution. It is independent of any calendar and concepts like "day"
- * or "month". It is related to Timestamp in that the difference between
- * two Timestamp values is a Duration and it can be added or subtracted
- * from a Timestamp. Range is approximately +-10,000 years.
- *
- * # Examples
- *
- * Example 1: Compute Duration from two Timestamps in pseudo code.
- *
- * Timestamp start = ...;
- * Timestamp end = ...;
- * Duration duration = ...;
- *
- * duration.seconds = end.seconds - start.seconds;
- * duration.nanos = end.nanos - start.nanos;
- *
- * if (duration.seconds < 0 && duration.nanos > 0) {
- * duration.seconds += 1;
- * duration.nanos -= 1000000000;
- * } else if (durations.seconds > 0 && duration.nanos < 0) {
- * duration.seconds -= 1;
- * duration.nanos += 1000000000;
- * }
- *
- * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
- *
- * Timestamp start = ...;
- * Duration duration = ...;
- * Timestamp end = ...;
- *
- * end.seconds = start.seconds + duration.seconds;
- * end.nanos = start.nanos + duration.nanos;
- *
- * if (end.nanos < 0) {
- * end.seconds -= 1;
- * end.nanos += 1000000000;
- * } else if (end.nanos >= 1000000000) {
- * end.seconds += 1;
- * end.nanos -= 1000000000;
- * }
- *
- * Example 3: Compute Duration from datetime.timedelta in Python.
- *
- * td = datetime.timedelta(days=3, minutes=10)
- * duration = Duration()
- * duration.FromTimedelta(td)
- *
- * # JSON Mapping
- *
- * In JSON format, the Duration type is encoded as a string rather than an
- * object, where the string ends in the suffix "s" (indicating seconds) and
- * is preceded by the number of seconds, with nanoseconds expressed as
- * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
- * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
- * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
- * microsecond should be expressed in JSON format as "3.000001s".
- *
- * @property {number} seconds
- * Signed seconds of the span of time. Must be from -315,576,000,000
- * to +315,576,000,000 inclusive. Note: these bounds are computed from:
- * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- *
- * @property {number} nanos
- * Signed fractions of a second at nanosecond resolution of the span
- * of time. Durations less than one second are represented with a 0
- * `seconds` field and a positive or negative `nanos` field. For durations
- * of one second or more, a non-zero value for the `nanos` field must be
- * of the same sign as the `seconds` field. Must be from -999,999,999
- * to +999,999,999 inclusive.
- *
- * @typedef Duration
- * @memberof google.protobuf
- * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
- */
-const Duration = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1/doc/google/rpc/doc_status.js b/packages/google-cloud-videointelligence/src/v1/doc/google/rpc/doc_status.js
deleted file mode 100644
index 750e0af7689..00000000000
--- a/packages/google-cloud-videointelligence/src/v1/doc/google/rpc/doc_status.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The `Status` type defines a logical error model that is suitable for
- * different programming environments, including REST APIs and RPC APIs. It is
- * used by [gRPC](https://github.com/grpc). Each `Status` message contains
- * three pieces of data: error code, error message, and error details.
- *
- * You can find out more about this error model and how to work with it in the
- * [API Design Guide](https://cloud.google.com/apis/design/errors).
- *
- * @property {number} code
- * The status code, which should be an enum value of google.rpc.Code.
- *
- * @property {string} message
- * A developer-facing error message, which should be in English. Any
- * user-facing error message should be localized and sent in the
- * google.rpc.Status.details field, or localized by the client.
- *
- * @property {Object[]} details
- * A list of messages that carry the error details. There is a common set of
- * message types for APIs to use.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Status
- * @memberof google.rpc
- * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
- */
-const Status = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/browser.js b/packages/google-cloud-videointelligence/src/v1/index.ts
similarity index 64%
rename from packages/google-cloud-videointelligence/src/browser.js
rename to packages/google-cloud-videointelligence/src/v1/index.ts
index 68dc62d25d6..b4969ebdd1f 100644
--- a/packages/google-cloud-videointelligence/src/browser.js
+++ b/packages/google-cloud-videointelligence/src/v1/index.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,11 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-// Set a flag that we are running in a browser bundle.
-global.isBrowser = true;
-
-// Re-export all exports from ./index.js.
-module.exports = require('./index');
+export {VideoIntelligenceServiceClient} from './video_intelligence_service_client';
diff --git a/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client.ts
similarity index 54%
rename from packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client.js
rename to packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client.ts
index ac23daa002f..48b8adde745 100644
--- a/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client.js
+++ b/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,22 +11,40 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+ LROperation,
+} from 'google-gax';
+import * as path from 'path';
-const gapicConfig = require('./video_intelligence_service_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './video_intelligence_service_client_config.json';
-const VERSION = require('../../package.json').version;
+const version = require('../../../package.json').version;
/**
- * Service that implements Google Cloud Video Intelligence API.
- *
+ * Service that implements Google Cloud Video Intelligence API.
* @class
* @memberof v1
*/
-class VideoIntelligenceServiceClient {
+export class VideoIntelligenceServiceClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+ operationsClient: gax.OperationsClient;
+ videoIntelligenceServiceStub: Promise<{[name: string]: Function}>;
+
/**
* Construct an instance of VideoIntelligenceServiceClient.
*
@@ -54,58 +72,57 @@ class VideoIntelligenceServiceClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this
+ .constructor as typeof VideoIntelligenceServiceClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this
+ .constructor as typeof VideoIntelligenceServiceClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -121,24 +138,25 @@ class VideoIntelligenceServiceClient {
opts.fallback ? require('../../protos/protos.json') : nodejsProtoPath
);
- const protoFilesRoot = opts.fallback
- ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
- : gaxModule.protobuf.loadSync(nodejsProtoPath);
-
// This API contains "long-running operations", which return a
// an Operation object that allows for tracking of the operation,
// rather than holding a request open.
- this.operationsClient = new gaxModule.lro({
- auth: gaxGrpc.auth,
- grpc: gaxGrpc.grpc,
- }).operationsClient(opts);
+ const protoFilesRoot = opts.fallback
+ ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
+ : gaxModule.protobuf.loadSync(nodejsProtoPath);
+ this.operationsClient = gaxModule
+ .lro({
+ auth: this.auth,
+ grpc: 'grpc' in gaxGrpc ? gaxGrpc.grpc : undefined,
+ })
+ .operationsClient(opts);
const annotateVideoResponse = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1.AnnotateVideoResponse'
- );
+ '.google.cloud.videointelligence.v1.AnnotateVideoResponse'
+ ) as gax.protobuf.Type;
const annotateVideoMetadata = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1.AnnotateVideoProgress'
- );
+ '.google.cloud.videointelligence.v1.AnnotateVideoProgress'
+ ) as gax.protobuf.Type;
this._descriptors.longrunning = {
annotateVideo: new gaxModule.LongrunningDescriptor(
@@ -151,8 +169,8 @@ class VideoIntelligenceServiceClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.videointelligence.v1.VideoIntelligenceService',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -163,32 +181,49 @@ class VideoIntelligenceServiceClient {
// Put together the "service stub" for
// google.cloud.videointelligence.v1.VideoIntelligenceService.
- const videoIntelligenceServiceStub = gaxGrpc.createStub(
+ this.videoIntelligenceServiceStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService(
+ ? (protos as protobuf.Root).lookupService(
'google.cloud.videointelligence.v1.VideoIntelligenceService'
)
- : protos.google.cloud.videointelligence.v1.VideoIntelligenceService,
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.videointelligence.v1
+ .VideoIntelligenceService,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
const videoIntelligenceServiceStubMethods = ['annotateVideo'];
+
for (const methodName of videoIntelligenceServiceStubMethods) {
- const innerCallPromise = videoIntelligenceServiceStub.then(
- stub => (...args) => {
+ const innerCallPromise = this.videoIntelligenceServiceStub.then(
+ stub => (...args: Array<{}>) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.longrunning[methodName]
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
+ this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ return apiCall(argument, callOptions, callback);
+ };
}
}
@@ -222,19 +257,52 @@ class VideoIntelligenceServiceClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+ getProjectId(): Promise;
+ getProjectId(callback: Callback): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+ getProjectId(
+ callback?: Callback
+ ): Promise | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
// -- Service calls --
// -------------------
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ >;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs asynchronous video annotation. Progress and results can be
* retrieved through the `google.longrunning.Operations` interface.
@@ -243,141 +311,96 @@ class VideoIntelligenceServiceClient {
*
* @param {Object} request
* The request object that will be sent.
- * @param {number[]} request.features
- * Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1.Feature}
- * @param {string} [request.inputUri]
+ * @param {string} request.inputUri
* Input video location. Currently, only
* [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
* supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* A video URI may include wildcards in `object-id`, and thus identify
* multiple videos. Supported wildcards: '*' to match 0 or more characters;
* '?' to match 1 character. If unset, the input video should be embedded
* in the request as `input_content`. If set, `input_content` should be unset.
- * @param {Buffer} [request.inputContent]
+ * @param {Buffer} request.inputContent
* The video data bytes.
* If unset, the input video(s) should be specified via `input_uri`.
* If set, `input_uri` should be unset.
- * @param {Object} [request.videoContext]
+ * @param {number[]} request.features
+ * Required. Requested video annotation features.
+ * @param {google.cloud.videointelligence.v1.VideoContext} request.videoContext
* Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1.VideoContext}
* @param {string} [request.outputUri]
* Optional. Location where the output (in JSON format) should be stored.
* Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
* URIs are supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* @param {string} [request.locationId]
* Optional. Cloud region where annotation should take place. Supported cloud
* regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
* is specified, a region will be determined based on video file location.
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
- * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const videoIntelligence = require('@google-cloud/video-intelligence');
- *
- * const client = new videoIntelligence.v1.VideoIntelligenceServiceClient({
- * // optional auth parameters.
- * });
- *
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const request = {
- * features: features,
- * inputUri: inputUri,
- * };
- *
- * // Handle the operation using the promise pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Operation#promise starts polling for the completion of the LRO.
- * return operation.promise();
- * })
- * .then(responses => {
- * const result = responses[0];
- * const metadata = responses[1];
- * const finalApiResponse = responses[2];
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const request = {
- * features: features,
- * inputUri: inputUri,
- * };
- *
- * // Handle the operation using the event emitter pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Adding a listener for the "complete" event starts polling for the
- * // completion of the operation.
- * operation.on('complete', (result, metadata, finalApiResponse) => {
- * // doSomethingWith(result);
- * });
- *
- * // Adding a listener for the "progress" event causes the callback to be
- * // called on any change in metadata when the operation is polled.
- * operation.on('progress', (metadata, apiResponse) => {
- * // doSomethingWith(metadata)
- * });
- *
- * // Adding a listener for the "error" event handles any errors found during polling.
- * operation.on('error', err => {
- * // throw(err);
- * });
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const request = {
- * features: features,
- * inputUri: inputUri,
- * };
- *
- * // Handle the operation using the await pattern.
- * const [operation] = await client.annotateVideo(request);
- *
- * const [response] = await operation.promise();
*/
- annotateVideo(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.annotateVideo(request, options, callback);
}
-}
-module.exports = VideoIntelligenceServiceClient;
+ /**
+ * Terminate the GRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ */
+ close(): Promise {
+ if (!this._terminated) {
+ return this.videoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client_config.json
index 2a907f4060f..49091879c8e 100644
--- a/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client_config.json
+++ b/packages/google-cloud-videointelligence/src/v1/video_intelligence_service_client_config.json
@@ -2,28 +2,37 @@
"interfaces": {
"google.cloud.videointelligence.v1.VideoIntelligenceService": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
- "initial_rpc_timeout_millis": 20000,
- "rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 20000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ },
+ "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": {
+ "initial_retry_delay_millis": 1000,
+ "retry_delay_multiplier": 2.5,
+ "max_retry_delay_millis": 120000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000
}
},
"methods": {
"AnnotateVideo": {
- "timeout_millis": 60000,
- "retry_codes_name": "non_idempotent",
- "retry_params_name": "default"
+ "timeout_millis": 600000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3"
}
}
}
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/cloud/videointelligence/v1beta2/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1beta2/doc/google/cloud/videointelligence/v1beta2/doc_video_intelligence.js
deleted file mode 100644
index 4019788cd52..00000000000
--- a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/cloud/videointelligence/v1beta2/doc_video_intelligence.js
+++ /dev/null
@@ -1,652 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * Video annotation request.
- *
- * @property {string} inputUri
- * Input video location. Currently, only
- * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
- * supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video
- * URI may include wildcards in `object-id`, and thus identify multiple
- * videos. Supported wildcards: '*' to match 0 or more characters;
- * '?' to match 1 character. If unset, the input video should be embedded
- * in the request as `input_content`. If set, `input_content` should be unset.
- *
- * @property {Buffer} inputContent
- * The video data bytes.
- * If unset, the input video(s) should be specified via `input_uri`.
- * If set, `input_uri` should be unset.
- *
- * @property {number[]} features
- * Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1beta2.Feature}
- *
- * @property {Object} videoContext
- * Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1beta2.VideoContext}
- *
- * @property {string} outputUri
- * Optional. Location where the output (in JSON format) should be stored.
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
- * URIs are supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- *
- * @property {string} locationId
- * Optional. Cloud region where annotation should take place. Supported cloud
- * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
- * is specified, a region will be determined based on video file location.
- *
- * @typedef AnnotateVideoRequest
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.AnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const AnnotateVideoRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video context and/or feature-specific parameters.
- *
- * @property {Object[]} segments
- * Video segments to annotate. The segments may overlap and are not required
- * to be contiguous or span the whole video. If unspecified, each video is
- * treated as a single segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1beta2.VideoSegment}
- *
- * @property {Object} labelDetectionConfig
- * Config for LABEL_DETECTION.
- *
- * This object should have the same structure as [LabelDetectionConfig]{@link google.cloud.videointelligence.v1beta2.LabelDetectionConfig}
- *
- * @property {Object} shotChangeDetectionConfig
- * Config for SHOT_CHANGE_DETECTION.
- *
- * This object should have the same structure as [ShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig}
- *
- * @property {Object} explicitContentDetectionConfig
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig}
- *
- * @property {Object} faceDetectionConfig
- * Config for FACE_DETECTION.
- *
- * This object should have the same structure as [FaceDetectionConfig]{@link google.cloud.videointelligence.v1beta2.FaceDetectionConfig}
- *
- * @typedef VideoContext
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const VideoContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for LABEL_DETECTION.
- *
- * @property {number} labelDetectionMode
- * What labels should be detected with LABEL_DETECTION, in addition to
- * video-level labels or segment-level labels.
- * If unspecified, defaults to `SHOT_MODE`.
- *
- * The number should be among the values of [LabelDetectionMode]{@link google.cloud.videointelligence.v1beta2.LabelDetectionMode}
- *
- * @property {boolean} stationaryCamera
- * Whether the video has been shot from a stationary (i.e. non-moving) camera.
- * When set to true, might improve detection accuracy for moving objects.
- * Should be used with `SHOT_AND_FRAME_MODE` enabled.
- *
- * @property {string} model
- * Model to use for label detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef LabelDetectionConfig
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const LabelDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SHOT_CHANGE_DETECTION.
- *
- * @property {string} model
- * Model to use for shot change detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ShotChangeDetectionConfig
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const ShotChangeDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * @property {string} model
- * Model to use for explicit content detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ExplicitContentDetectionConfig
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const ExplicitContentDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for FACE_DETECTION.
- *
- * @property {string} model
- * Model to use for face detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @property {boolean} includeBoundingBoxes
- * Whether bounding boxes be included in the face annotation output.
- *
- * @typedef FaceDetectionConfig
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.FaceDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const FaceDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment.
- *
- * @property {Object} startTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the start of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the end of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef VideoSegment
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.VideoSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const VideoSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for label detection.
- *
- * @property {Object} segment
- * Video segment where a label was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1beta2.VideoSegment}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelSegment
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.LabelSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const LabelSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for label detection.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelFrame
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.LabelFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const LabelFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Detected entity from video analysis.
- *
- * @property {string} entityId
- * Opaque entity ID. Some IDs may be available in
- * [Google Knowledge Graph Search
- * API](https://developers.google.com/knowledge-graph/).
- *
- * @property {string} description
- * Textual description, e.g. `Fixed-gear bicycle`.
- *
- * @property {string} languageCode
- * Language code for `description` in BCP-47 format.
- *
- * @typedef Entity
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const Entity = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Label annotation.
- *
- * @property {Object} entity
- * Detected entity.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1beta2.Entity}
- *
- * @property {Object[]} categoryEntities
- * Common categories for the detected entity.
- * E.g. when the label is `Terrier` the category is likely `dog`. And in some
- * cases there might be more than one categories e.g. `Terrier` could also be
- * a `pet`.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1beta2.Entity}
- *
- * @property {Object[]} segments
- * All video segments where a label was detected.
- *
- * This object should have the same structure as [LabelSegment]{@link google.cloud.videointelligence.v1beta2.LabelSegment}
- *
- * @property {Object[]} frames
- * All video frames where a label was detected.
- *
- * This object should have the same structure as [LabelFrame]{@link google.cloud.videointelligence.v1beta2.LabelFrame}
- *
- * @typedef LabelAnnotation
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.LabelAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const LabelAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for explicit content.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} pornographyLikelihood
- * Likelihood of the pornography content..
- *
- * The number should be among the values of [Likelihood]{@link google.cloud.videointelligence.v1beta2.Likelihood}
- *
- * @typedef ExplicitContentFrame
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.ExplicitContentFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const ExplicitContentFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Explicit content annotation (based on per-frame visual signals only).
- * If no explicit content has been detected in a frame, no annotations are
- * present for that frame.
- *
- * @property {Object[]} frames
- * All video frames where explicit content was detected.
- *
- * This object should have the same structure as [ExplicitContentFrame]{@link google.cloud.videointelligence.v1beta2.ExplicitContentFrame}
- *
- * @typedef ExplicitContentAnnotation
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const ExplicitContentAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Normalized bounding box.
- * The normalized vertex coordinates are relative to the original image.
- * Range: [0, 1].
- *
- * @property {number} left
- * Left X coordinate.
- *
- * @property {number} top
- * Top Y coordinate.
- *
- * @property {number} right
- * Right X coordinate.
- *
- * @property {number} bottom
- * Bottom Y coordinate.
- *
- * @typedef NormalizedBoundingBox
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.NormalizedBoundingBox definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const NormalizedBoundingBox = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for face detection.
- *
- * @property {Object} segment
- * Video segment where a face was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1beta2.VideoSegment}
- *
- * @typedef FaceSegment
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.FaceSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const FaceSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for face detection.
- *
- * @property {Object[]} normalizedBoundingBoxes
- * Normalized Bounding boxes in a frame.
- * There can be more than one boxes if the same face is detected in multiple
- * locations within the current frame.
- *
- * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1beta2.NormalizedBoundingBox}
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef FaceFrame
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.FaceFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const FaceFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Face annotation.
- *
- * @property {Buffer} thumbnail
- * Thumbnail of a representative face view (in JPEG format).
- *
- * @property {Object[]} segments
- * All video segments where a face was detected.
- *
- * This object should have the same structure as [FaceSegment]{@link google.cloud.videointelligence.v1beta2.FaceSegment}
- *
- * @property {Object[]} frames
- * All video frames where a face was detected.
- *
- * This object should have the same structure as [FaceFrame]{@link google.cloud.videointelligence.v1beta2.FaceFrame}
- *
- * @typedef FaceAnnotation
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.FaceAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const FaceAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation results for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {Object[]} segmentLabelAnnotations
- * Label annotations on video level or user specified segment level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1beta2.LabelAnnotation}
- *
- * @property {Object[]} shotLabelAnnotations
- * Label annotations on shot level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1beta2.LabelAnnotation}
- *
- * @property {Object[]} frameLabelAnnotations
- * Label annotations on frame level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1beta2.LabelAnnotation}
- *
- * @property {Object[]} faceAnnotations
- * Face annotations. There is exactly one element for each unique face.
- *
- * This object should have the same structure as [FaceAnnotation]{@link google.cloud.videointelligence.v1beta2.FaceAnnotation}
- *
- * @property {Object[]} shotAnnotations
- * Shot annotations. Each shot is represented as a video segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1beta2.VideoSegment}
- *
- * @property {Object} explicitAnnotation
- * Explicit content annotation.
- *
- * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation}
- *
- * @property {Object} error
- * If set, indicates an error. Note that for a single `AnnotateVideoRequest`
- * some videos may succeed and some may fail.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @typedef VideoAnnotationResults
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.VideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const VideoAnnotationResults = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation response. Included in the `response`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationResults
- * Annotation results for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationResults]{@link google.cloud.videointelligence.v1beta2.VideoAnnotationResults}
- *
- * @typedef AnnotateVideoResponse
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.AnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const AnnotateVideoResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation progress for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {number} progressPercent
- * Approximate percentage processed thus far.
- * Guaranteed to be 100 when fully processed.
- *
- * @property {Object} startTime
- * Time when the request was received.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {Object} updateTime
- * Time of the most recent update.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @typedef VideoAnnotationProgress
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.VideoAnnotationProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const VideoAnnotationProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation progress. Included in the `metadata`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationProgress
- * Progress metadata for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationProgress]{@link google.cloud.videointelligence.v1beta2.VideoAnnotationProgress}
- *
- * @typedef AnnotateVideoProgress
- * @memberof google.cloud.videointelligence.v1beta2
- * @see [google.cloud.videointelligence.v1beta2.AnnotateVideoProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
- */
-const AnnotateVideoProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation feature.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1beta2
- */
-const Feature = {
-
- /**
- * Unspecified.
- */
- FEATURE_UNSPECIFIED: 0,
-
- /**
- * Label detection. Detect objects, such as dog or flower.
- */
- LABEL_DETECTION: 1,
-
- /**
- * Shot change detection.
- */
- SHOT_CHANGE_DETECTION: 2,
-
- /**
- * Explicit content detection.
- */
- EXPLICIT_CONTENT_DETECTION: 3,
-
- /**
- * Human face detection and tracking.
- */
- FACE_DETECTION: 4
-};
-
-/**
- * Label detection mode.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1beta2
- */
-const LabelDetectionMode = {
-
- /**
- * Unspecified.
- */
- LABEL_DETECTION_MODE_UNSPECIFIED: 0,
-
- /**
- * Detect shot-level labels.
- */
- SHOT_MODE: 1,
-
- /**
- * Detect frame-level labels.
- */
- FRAME_MODE: 2,
-
- /**
- * Detect both shot-level and frame-level labels.
- */
- SHOT_AND_FRAME_MODE: 3
-};
-
-/**
- * Bucketized representation of likelihood.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1beta2
- */
-const Likelihood = {
-
- /**
- * Unspecified likelihood.
- */
- LIKELIHOOD_UNSPECIFIED: 0,
-
- /**
- * Very unlikely.
- */
- VERY_UNLIKELY: 1,
-
- /**
- * Unlikely.
- */
- UNLIKELY: 2,
-
- /**
- * Possible.
- */
- POSSIBLE: 3,
-
- /**
- * Likely.
- */
- LIKELY: 4,
-
- /**
- * Very likely.
- */
- VERY_LIKELY: 5
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/longrunning/doc_operations.js b/packages/google-cloud-videointelligence/src/v1beta2/doc/google/longrunning/doc_operations.js
deleted file mode 100644
index 099e418d620..00000000000
--- a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/longrunning/doc_operations.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * This resource represents a long-running operation that is the result of a
- * network API call.
- *
- * @property {string} name
- * The server-assigned name, which is only unique within the same service that
- * originally returns it. If you use the default HTTP mapping, the
- * `name` should be a resource name ending with `operations/{unique_id}`.
- *
- * @property {Object} metadata
- * Service-specific metadata associated with the operation. It typically
- * contains progress information and common metadata such as create time.
- * Some services might not provide such metadata. Any method that returns a
- * long-running operation should document the metadata type, if any.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @property {boolean} done
- * If the value is `false`, it means the operation is still in progress.
- * If `true`, the operation is completed, and either `error` or `response` is
- * available.
- *
- * @property {Object} error
- * The error result of the operation in case of failure or cancellation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} response
- * The normal response of the operation in case of success. If the original
- * method returns no data on success, such as `Delete`, the response is
- * `google.protobuf.Empty`. If the original method is standard
- * `Get`/`Create`/`Update`, the response should be the resource. For other
- * methods, the response should have the type `XxxResponse`, where `Xxx`
- * is the original method name. For example, if the original method name
- * is `TakeSnapshot()`, the inferred response type is
- * `TakeSnapshotResponse`.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Operation
- * @memberof google.longrunning
- * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
- */
-const Operation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/protobuf/doc_any.js b/packages/google-cloud-videointelligence/src/v1beta2/doc/google/protobuf/doc_any.js
deleted file mode 100644
index 813682aa336..00000000000
--- a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/protobuf/doc_any.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * `Any` contains an arbitrary serialized protocol buffer message along with a
- * URL that describes the type of the serialized message.
- *
- * Protobuf library provides support to pack/unpack Any values in the form
- * of utility functions or additional generated methods of the Any type.
- *
- * Example 1: Pack and unpack a message in C++.
- *
- * Foo foo = ...;
- * Any any;
- * any.PackFrom(foo);
- * ...
- * if (any.UnpackTo(&foo)) {
- * ...
- * }
- *
- * Example 2: Pack and unpack a message in Java.
- *
- * Foo foo = ...;
- * Any any = Any.pack(foo);
- * ...
- * if (any.is(Foo.class)) {
- * foo = any.unpack(Foo.class);
- * }
- *
- * Example 3: Pack and unpack a message in Python.
- *
- * foo = Foo(...)
- * any = Any()
- * any.Pack(foo)
- * ...
- * if any.Is(Foo.DESCRIPTOR):
- * any.Unpack(foo)
- * ...
- *
- * Example 4: Pack and unpack a message in Go
- *
- * foo := &pb.Foo{...}
- * any, err := ptypes.MarshalAny(foo)
- * ...
- * foo := &pb.Foo{}
- * if err := ptypes.UnmarshalAny(any, foo); err != nil {
- * ...
- * }
- *
- * The pack methods provided by protobuf library will by default use
- * 'type.googleapis.com/full.type.name' as the type URL and the unpack
- * methods only use the fully qualified type name after the last '/'
- * in the type URL, for example "foo.bar.com/x/y.z" will yield type
- * name "y.z".
- *
- *
- * # JSON
- *
- * The JSON representation of an `Any` value uses the regular
- * representation of the deserialized, embedded message, with an
- * additional field `@type` which contains the type URL. Example:
- *
- * package google.profile;
- * message Person {
- * string first_name = 1;
- * string last_name = 2;
- * }
- *
- * {
- * "@type": "type.googleapis.com/google.profile.Person",
- * "firstName": ,
- * "lastName":
- * }
- *
- * If the embedded message type is well-known and has a custom JSON
- * representation, that representation will be embedded adding a field
- * `value` which holds the custom JSON in addition to the `@type`
- * field. Example (for message google.protobuf.Duration):
- *
- * {
- * "@type": "type.googleapis.com/google.protobuf.Duration",
- * "value": "1.212s"
- * }
- *
- * @property {string} typeUrl
- * A URL/resource name that uniquely identifies the type of the serialized
- * protocol buffer message. This string must contain at least
- * one "/" character. The last segment of the URL's path must represent
- * the fully qualified name of the type (as in
- * `path/google.protobuf.Duration`). The name should be in a canonical form
- * (e.g., leading "." is not accepted).
- *
- * In practice, teams usually precompile into the binary all types that they
- * expect it to use in the context of Any. However, for URLs which use the
- * scheme `http`, `https`, or no scheme, one can optionally set up a type
- * server that maps type URLs to message definitions as follows:
- *
- * * If no scheme is provided, `https` is assumed.
- * * An HTTP GET on the URL must yield a google.protobuf.Type
- * value in binary format, or produce an error.
- * * Applications are allowed to cache lookup results based on the
- * URL, or have them precompiled into a binary to avoid any
- * lookup. Therefore, binary compatibility needs to be preserved
- * on changes to types. (Use versioned type names to manage
- * breaking changes.)
- *
- * Note: this functionality is not currently available in the official
- * protobuf release, and it is not used for type URLs beginning with
- * type.googleapis.com.
- *
- * Schemes other than `http`, `https` (or the empty scheme) might be
- * used with implementation specific semantics.
- *
- * @property {Buffer} value
- * Must be a valid serialized protocol buffer of the above specified type.
- *
- * @typedef Any
- * @memberof google.protobuf
- * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
- */
-const Any = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/protobuf/doc_duration.js b/packages/google-cloud-videointelligence/src/v1beta2/doc/google/protobuf/doc_duration.js
deleted file mode 100644
index bd4b4ee6067..00000000000
--- a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/protobuf/doc_duration.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * A Duration represents a signed, fixed-length span of time represented
- * as a count of seconds and fractions of seconds at nanosecond
- * resolution. It is independent of any calendar and concepts like "day"
- * or "month". It is related to Timestamp in that the difference between
- * two Timestamp values is a Duration and it can be added or subtracted
- * from a Timestamp. Range is approximately +-10,000 years.
- *
- * # Examples
- *
- * Example 1: Compute Duration from two Timestamps in pseudo code.
- *
- * Timestamp start = ...;
- * Timestamp end = ...;
- * Duration duration = ...;
- *
- * duration.seconds = end.seconds - start.seconds;
- * duration.nanos = end.nanos - start.nanos;
- *
- * if (duration.seconds < 0 && duration.nanos > 0) {
- * duration.seconds += 1;
- * duration.nanos -= 1000000000;
- * } else if (durations.seconds > 0 && duration.nanos < 0) {
- * duration.seconds -= 1;
- * duration.nanos += 1000000000;
- * }
- *
- * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
- *
- * Timestamp start = ...;
- * Duration duration = ...;
- * Timestamp end = ...;
- *
- * end.seconds = start.seconds + duration.seconds;
- * end.nanos = start.nanos + duration.nanos;
- *
- * if (end.nanos < 0) {
- * end.seconds -= 1;
- * end.nanos += 1000000000;
- * } else if (end.nanos >= 1000000000) {
- * end.seconds += 1;
- * end.nanos -= 1000000000;
- * }
- *
- * Example 3: Compute Duration from datetime.timedelta in Python.
- *
- * td = datetime.timedelta(days=3, minutes=10)
- * duration = Duration()
- * duration.FromTimedelta(td)
- *
- * # JSON Mapping
- *
- * In JSON format, the Duration type is encoded as a string rather than an
- * object, where the string ends in the suffix "s" (indicating seconds) and
- * is preceded by the number of seconds, with nanoseconds expressed as
- * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
- * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
- * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
- * microsecond should be expressed in JSON format as "3.000001s".
- *
- * @property {number} seconds
- * Signed seconds of the span of time. Must be from -315,576,000,000
- * to +315,576,000,000 inclusive. Note: these bounds are computed from:
- * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- *
- * @property {number} nanos
- * Signed fractions of a second at nanosecond resolution of the span
- * of time. Durations less than one second are represented with a 0
- * `seconds` field and a positive or negative `nanos` field. For durations
- * of one second or more, a non-zero value for the `nanos` field must be
- * of the same sign as the `seconds` field. Must be from -999,999,999
- * to +999,999,999 inclusive.
- *
- * @typedef Duration
- * @memberof google.protobuf
- * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
- */
-const Duration = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/rpc/doc_status.js b/packages/google-cloud-videointelligence/src/v1beta2/doc/google/rpc/doc_status.js
deleted file mode 100644
index 750e0af7689..00000000000
--- a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/rpc/doc_status.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The `Status` type defines a logical error model that is suitable for
- * different programming environments, including REST APIs and RPC APIs. It is
- * used by [gRPC](https://github.com/grpc). Each `Status` message contains
- * three pieces of data: error code, error message, and error details.
- *
- * You can find out more about this error model and how to work with it in the
- * [API Design Guide](https://cloud.google.com/apis/design/errors).
- *
- * @property {number} code
- * The status code, which should be an enum value of google.rpc.Code.
- *
- * @property {string} message
- * A developer-facing error message, which should be in English. Any
- * user-facing error message should be localized and sent in the
- * google.rpc.Status.details field, or localized by the client.
- *
- * @property {Object[]} details
- * A list of messages that carry the error details. There is a common set of
- * message types for APIs to use.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Status
- * @memberof google.rpc
- * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
- */
-const Status = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1/index.js b/packages/google-cloud-videointelligence/src/v1beta2/index.ts
similarity index 64%
rename from packages/google-cloud-videointelligence/src/v1/index.js
rename to packages/google-cloud-videointelligence/src/v1beta2/index.ts
index 9bdd2dd7138..b4969ebdd1f 100644
--- a/packages/google-cloud-videointelligence/src/v1/index.js
+++ b/packages/google-cloud-videointelligence/src/v1beta2/index.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,9 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const VideoIntelligenceServiceClient = require('./video_intelligence_service_client');
-
-module.exports.VideoIntelligenceServiceClient = VideoIntelligenceServiceClient;
+export {VideoIntelligenceServiceClient} from './video_intelligence_service_client';
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.ts
similarity index 55%
rename from packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.js
rename to packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.ts
index 46a2571d732..f28cf0ae54b 100644
--- a/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.js
+++ b/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,22 +11,40 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+ LROperation,
+} from 'google-gax';
+import * as path from 'path';
-const gapicConfig = require('./video_intelligence_service_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './video_intelligence_service_client_config.json';
-const VERSION = require('../../package.json').version;
+const version = require('../../../package.json').version;
/**
- * Service that implements Google Cloud Video Intelligence API.
- *
+ * Service that implements Google Cloud Video Intelligence API.
* @class
* @memberof v1beta2
*/
-class VideoIntelligenceServiceClient {
+export class VideoIntelligenceServiceClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+ operationsClient: gax.OperationsClient;
+ videoIntelligenceServiceStub: Promise<{[name: string]: Function}>;
+
/**
* Construct an instance of VideoIntelligenceServiceClient.
*
@@ -54,58 +72,57 @@ class VideoIntelligenceServiceClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this
+ .constructor as typeof VideoIntelligenceServiceClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this
+ .constructor as typeof VideoIntelligenceServiceClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -121,24 +138,25 @@ class VideoIntelligenceServiceClient {
opts.fallback ? require('../../protos/protos.json') : nodejsProtoPath
);
- const protoFilesRoot = opts.fallback
- ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
- : gaxModule.protobuf.loadSync(nodejsProtoPath);
-
// This API contains "long-running operations", which return a
// an Operation object that allows for tracking of the operation,
// rather than holding a request open.
- this.operationsClient = new gaxModule.lro({
- auth: gaxGrpc.auth,
- grpc: gaxGrpc.grpc,
- }).operationsClient(opts);
+ const protoFilesRoot = opts.fallback
+ ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
+ : gaxModule.protobuf.loadSync(nodejsProtoPath);
+ this.operationsClient = gaxModule
+ .lro({
+ auth: this.auth,
+ grpc: 'grpc' in gaxGrpc ? gaxGrpc.grpc : undefined,
+ })
+ .operationsClient(opts);
const annotateVideoResponse = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1beta2.AnnotateVideoResponse'
- );
+ '.google.cloud.videointelligence.v1beta2.AnnotateVideoResponse'
+ ) as gax.protobuf.Type;
const annotateVideoMetadata = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1beta2.AnnotateVideoProgress'
- );
+ '.google.cloud.videointelligence.v1beta2.AnnotateVideoProgress'
+ ) as gax.protobuf.Type;
this._descriptors.longrunning = {
annotateVideo: new gaxModule.LongrunningDescriptor(
@@ -151,8 +169,8 @@ class VideoIntelligenceServiceClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.videointelligence.v1beta2.VideoIntelligenceService',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -163,33 +181,49 @@ class VideoIntelligenceServiceClient {
// Put together the "service stub" for
// google.cloud.videointelligence.v1beta2.VideoIntelligenceService.
- const videoIntelligenceServiceStub = gaxGrpc.createStub(
+ this.videoIntelligenceServiceStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService(
+ ? (protos as protobuf.Root).lookupService(
'google.cloud.videointelligence.v1beta2.VideoIntelligenceService'
)
- : protos.google.cloud.videointelligence.v1beta2
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.videointelligence.v1beta2
.VideoIntelligenceService,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
const videoIntelligenceServiceStubMethods = ['annotateVideo'];
+
for (const methodName of videoIntelligenceServiceStubMethods) {
- const innerCallPromise = videoIntelligenceServiceStub.then(
- stub => (...args) => {
+ const innerCallPromise = this.videoIntelligenceServiceStub.then(
+ stub => (...args: Array<{}>) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.longrunning[methodName]
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
+ this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ return apiCall(argument, callOptions, callback);
+ };
}
}
@@ -223,19 +257,52 @@ class VideoIntelligenceServiceClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+ getProjectId(): Promise;
+ getProjectId(callback: Callback): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+ getProjectId(
+ callback?: Callback
+ ): Promise | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
// -- Service calls --
// -------------------
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ >;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs asynchronous video annotation. Progress and results can be
* retrieved through the `google.longrunning.Operations` interface.
@@ -244,141 +311,96 @@ class VideoIntelligenceServiceClient {
*
* @param {Object} request
* The request object that will be sent.
- * @param {string} [request.inputUri]
+ * @param {string} request.inputUri
* Input video location. Currently, only
* [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
* supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
* URI may include wildcards in `object-id`, and thus identify multiple
* videos. Supported wildcards: '*' to match 0 or more characters;
* '?' to match 1 character. If unset, the input video should be embedded
* in the request as `input_content`. If set, `input_content` should be unset.
- * @param {Buffer} [request.inputContent]
+ * @param {Buffer} request.inputContent
* The video data bytes.
* If unset, the input video(s) should be specified via `input_uri`.
* If set, `input_uri` should be unset.
- * @param {number[]} [request.features]
+ * @param {number[]} request.features
* Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1beta2.Feature}
- * @param {Object} [request.videoContext]
+ * @param {google.cloud.videointelligence.v1beta2.VideoContext} request.videoContext
* Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1beta2.VideoContext}
* @param {string} [request.outputUri]
* Optional. Location where the output (in JSON format) should be stored.
* Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
* URIs are supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* @param {string} [request.locationId]
* Optional. Cloud region where annotation should take place. Supported cloud
* regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
* is specified, a region will be determined based on video file location.
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
- * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const videoIntelligence = require('@google-cloud/video-intelligence');
- *
- * const client = new videoIntelligence.v1beta2.VideoIntelligenceServiceClient({
- * // optional auth parameters.
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the promise pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Operation#promise starts polling for the completion of the LRO.
- * return operation.promise();
- * })
- * .then(responses => {
- * const result = responses[0];
- * const metadata = responses[1];
- * const finalApiResponse = responses[2];
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the event emitter pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Adding a listener for the "complete" event starts polling for the
- * // completion of the operation.
- * operation.on('complete', (result, metadata, finalApiResponse) => {
- * // doSomethingWith(result);
- * });
- *
- * // Adding a listener for the "progress" event causes the callback to be
- * // called on any change in metadata when the operation is polled.
- * operation.on('progress', (metadata, apiResponse) => {
- * // doSomethingWith(metadata)
- * });
- *
- * // Adding a listener for the "error" event handles any errors found during polling.
- * operation.on('error', err => {
- * // throw(err);
- * });
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the await pattern.
- * const [operation] = await client.annotateVideo(request);
- *
- * const [response] = await operation.promise();
*/
- annotateVideo(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.annotateVideo(request, options, callback);
}
-}
-module.exports = VideoIntelligenceServiceClient;
+ /**
+ * Terminate the GRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ */
+ close(): Promise {
+ if (!this._terminated) {
+ return this.videoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client_config.json
index b52c46ea68c..f1fd51a88e2 100644
--- a/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client_config.json
+++ b/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client_config.json
@@ -2,28 +2,37 @@
"interfaces": {
"google.cloud.videointelligence.v1beta2.VideoIntelligenceService": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ },
+ "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
- "rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000
}
},
"methods": {
"AnnotateVideo": {
- "timeout_millis": 60000,
+ "timeout_millis": 600000,
"retry_codes_name": "idempotent",
- "retry_params_name": "default"
+ "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3"
}
}
}
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/doc_video_intelligence.js
deleted file mode 100644
index 0ab262f5f3b..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/doc_video_intelligence.js
+++ /dev/null
@@ -1,690 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * Video annotation request.
- *
- * @property {string} inputUri
- * Input video location. Currently, only
- * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
- * supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- * A video URI may include wildcards in `object-id`, and thus identify
- * multiple videos. Supported wildcards: '*' to match 0 or more characters;
- * '?' to match 1 character. If unset, the input video should be embedded
- * in the request as `input_content`. If set, `input_content` should be unset.
- *
- * @property {Buffer} inputContent
- * The video data bytes.
- * If unset, the input video(s) should be specified via `input_uri`.
- * If set, `input_uri` should be unset.
- *
- * @property {number[]} features
- * Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p1beta1.Feature}
- *
- * @property {Object} videoContext
- * Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p1beta1.VideoContext}
- *
- * @property {string} outputUri
- * Optional. Location where the output (in JSON format) should be stored.
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
- * URIs are supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- *
- * @property {string} locationId
- * Optional. Cloud region where annotation should take place. Supported cloud
- * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
- * is specified, a region will be determined based on video file location.
- *
- * @typedef AnnotateVideoRequest
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const AnnotateVideoRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video context and/or feature-specific parameters.
- *
- * @property {Object[]} segments
- * Video segments to annotate. The segments may overlap and are not required
- * to be contiguous or span the whole video. If unspecified, each video is
- * treated as a single segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p1beta1.VideoSegment}
- *
- * @property {Object} labelDetectionConfig
- * Config for LABEL_DETECTION.
- *
- * This object should have the same structure as [LabelDetectionConfig]{@link google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig}
- *
- * @property {Object} shotChangeDetectionConfig
- * Config for SHOT_CHANGE_DETECTION.
- *
- * This object should have the same structure as [ShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig}
- *
- * @property {Object} explicitContentDetectionConfig
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig}
- *
- * @property {Object} speechTranscriptionConfig
- * Config for SPEECH_TRANSCRIPTION.
- *
- * This object should have the same structure as [SpeechTranscriptionConfig]{@link google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig}
- *
- * @typedef VideoContext
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const VideoContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for LABEL_DETECTION.
- *
- * @property {number} labelDetectionMode
- * What labels should be detected with LABEL_DETECTION, in addition to
- * video-level labels or segment-level labels.
- * If unspecified, defaults to `SHOT_MODE`.
- *
- * The number should be among the values of [LabelDetectionMode]{@link google.cloud.videointelligence.v1p1beta1.LabelDetectionMode}
- *
- * @property {boolean} stationaryCamera
- * Whether the video has been shot from a stationary (i.e. non-moving) camera.
- * When set to true, might improve detection accuracy for moving objects.
- * Should be used with `SHOT_AND_FRAME_MODE` enabled.
- *
- * @property {string} model
- * Model to use for label detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef LabelDetectionConfig
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const LabelDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SHOT_CHANGE_DETECTION.
- *
- * @property {string} model
- * Model to use for shot change detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ShotChangeDetectionConfig
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const ShotChangeDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * @property {string} model
- * Model to use for explicit content detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ExplicitContentDetectionConfig
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const ExplicitContentDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment.
- *
- * @property {Object} startTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the start of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the end of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef VideoSegment
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.VideoSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const VideoSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for label detection.
- *
- * @property {Object} segment
- * Video segment where a label was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p1beta1.VideoSegment}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelSegment
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.LabelSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const LabelSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for label detection.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelFrame
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.LabelFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const LabelFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Detected entity from video analysis.
- *
- * @property {string} entityId
- * Opaque entity ID. Some IDs may be available in
- * [Google Knowledge Graph Search
- * API](https://developers.google.com/knowledge-graph/).
- *
- * @property {string} description
- * Textual description, e.g. `Fixed-gear bicycle`.
- *
- * @property {string} languageCode
- * Language code for `description` in BCP-47 format.
- *
- * @typedef Entity
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const Entity = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Label annotation.
- *
- * @property {Object} entity
- * Detected entity.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p1beta1.Entity}
- *
- * @property {Object[]} categoryEntities
- * Common categories for the detected entity.
- * E.g. when the label is `Terrier` the category is likely `dog`. And in some
- * cases there might be more than one categories e.g. `Terrier` could also be
- * a `pet`.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p1beta1.Entity}
- *
- * @property {Object[]} segments
- * All video segments where a label was detected.
- *
- * This object should have the same structure as [LabelSegment]{@link google.cloud.videointelligence.v1p1beta1.LabelSegment}
- *
- * @property {Object[]} frames
- * All video frames where a label was detected.
- *
- * This object should have the same structure as [LabelFrame]{@link google.cloud.videointelligence.v1p1beta1.LabelFrame}
- *
- * @typedef LabelAnnotation
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.LabelAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const LabelAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for explicit content.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} pornographyLikelihood
- * Likelihood of the pornography content..
- *
- * The number should be among the values of [Likelihood]{@link google.cloud.videointelligence.v1p1beta1.Likelihood}
- *
- * @typedef ExplicitContentFrame
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const ExplicitContentFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Explicit content annotation (based on per-frame visual signals only).
- * If no explicit content has been detected in a frame, no annotations are
- * present for that frame.
- *
- * @property {Object[]} frames
- * All video frames where explicit content was detected.
- *
- * This object should have the same structure as [ExplicitContentFrame]{@link google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame}
- *
- * @typedef ExplicitContentAnnotation
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const ExplicitContentAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation results for a single video.
- *
- * @property {string} inputUri
- * Output only. Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {Object[]} segmentLabelAnnotations
- * Label annotations on video level or user specified segment level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p1beta1.LabelAnnotation}
- *
- * @property {Object[]} shotLabelAnnotations
- * Label annotations on shot level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p1beta1.LabelAnnotation}
- *
- * @property {Object[]} frameLabelAnnotations
- * Label annotations on frame level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p1beta1.LabelAnnotation}
- *
- * @property {Object[]} shotAnnotations
- * Shot annotations. Each shot is represented as a video segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p1beta1.VideoSegment}
- *
- * @property {Object} explicitAnnotation
- * Explicit content annotation.
- *
- * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation}
- *
- * @property {Object[]} speechTranscriptions
- * Speech transcription.
- *
- * This object should have the same structure as [SpeechTranscription]{@link google.cloud.videointelligence.v1p1beta1.SpeechTranscription}
- *
- * @property {Object} error
- * Output only. If set, indicates an error. Note that for a single
- * `AnnotateVideoRequest` some videos may succeed and some may fail.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @typedef VideoAnnotationResults
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const VideoAnnotationResults = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation response. Included in the `response`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationResults
- * Annotation results for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationResults]{@link google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults}
- *
- * @typedef AnnotateVideoResponse
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const AnnotateVideoResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation progress for a single video.
- *
- * @property {string} inputUri
- * Output only. Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {number} progressPercent
- * Output only. Approximate percentage processed thus far. Guaranteed to be
- * 100 when fully processed.
- *
- * @property {Object} startTime
- * Output only. Time when the request was received.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {Object} updateTime
- * Output only. Time of the most recent update.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @typedef VideoAnnotationProgress
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const VideoAnnotationProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation progress. Included in the `metadata`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationProgress
- * Progress metadata for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationProgress]{@link google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress}
- *
- * @typedef AnnotateVideoProgress
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const AnnotateVideoProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SPEECH_TRANSCRIPTION.
- *
- * @property {string} languageCode
- * Required. *Required* The language of the supplied audio as a
- * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- * Example: "en-US".
- * See [Language Support](https://cloud.google.com/speech/docs/languages)
- * for a list of the currently supported language codes.
- *
- * @property {number} maxAlternatives
- * Optional. Maximum number of recognition hypotheses to be returned.
- * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- * within each `SpeechTranscription`. The server may return fewer than
- * `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
- * return a maximum of one. If omitted, will return a maximum of one.
- *
- * @property {boolean} filterProfanity
- * Optional. If set to `true`, the server will attempt to filter out
- * profanities, replacing all but the initial character in each filtered word
- * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- * won't be filtered out.
- *
- * @property {Object[]} speechContexts
- * Optional. A means to provide context to assist the speech recognition.
- *
- * This object should have the same structure as [SpeechContext]{@link google.cloud.videointelligence.v1p1beta1.SpeechContext}
- *
- * @property {boolean} enableAutomaticPunctuation
- * Optional. If 'true', adds punctuation to recognition result hypotheses.
- * This feature is only available in select languages. Setting this for
- * requests in other languages has no effect at all. The default 'false' value
- * does not add punctuation to result hypotheses. NOTE: "This is currently
- * offered as an experimental service, complimentary to all users. In the
- * future this may be exclusively available as a premium feature."
- *
- * @property {number[]} audioTracks
- * Optional. For file formats, such as MXF or MKV, supporting multiple audio
- * tracks, specify up to two tracks. Default: track 0.
- *
- * @typedef SpeechTranscriptionConfig
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const SpeechTranscriptionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Provides "hints" to the speech recognizer to favor specific words and phrases
- * in the results.
- *
- * @property {string[]} phrases
- * Optional. A list of strings containing words and phrases "hints" so that
- * the speech recognition is more likely to recognize them. This can be used
- * to improve the accuracy for specific words and phrases, for example, if
- * specific commands are typically spoken by the user. This can also be used
- * to add additional words to the vocabulary of the recognizer. See
- * [usage limits](https://cloud.google.com/speech/limits#content).
- *
- * @typedef SpeechContext
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.SpeechContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const SpeechContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A speech recognition result corresponding to a portion of the audio.
- *
- * @property {Object[]} alternatives
- * May contain one or more recognition hypotheses (up to the maximum specified
- * in `max_alternatives`). These alternatives are ordered in terms of
- * accuracy, with the top (first) alternative being the most probable, as
- * ranked by the recognizer.
- *
- * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative}
- *
- * @typedef SpeechTranscription
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.SpeechTranscription definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const SpeechTranscription = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Alternative hypotheses (a.k.a. n-best list).
- *
- * @property {string} transcript
- * Output only. Transcript text representing the words that the user spoke.
- *
- * @property {number} confidence
- * Output only. The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative.
- * This field is not guaranteed to be accurate and users should not rely on it
- * to be always provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {Object[]} words
- * Output only. A list of word-specific information for each recognized word.
- *
- * This object should have the same structure as [WordInfo]{@link google.cloud.videointelligence.v1p1beta1.WordInfo}
- *
- * @typedef SpeechRecognitionAlternative
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const SpeechRecognitionAlternative = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Word-specific information for recognized words. Word information is only
- * included in the response when certain request parameters are set, such
- * as `enable_word_time_offsets`.
- *
- * @property {Object} startTime
- * Output only. Time offset relative to the beginning of the audio, and
- * corresponding to the start of the spoken word. This field is only set if
- * `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- * experimental feature and the accuracy of the time offset can vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTime
- * Output only. Time offset relative to the beginning of the audio, and
- * corresponding to the end of the spoken word. This field is only set if
- * `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- * experimental feature and the accuracy of the time offset can vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {string} word
- * Output only. The word corresponding to this set of information.
- *
- * @typedef WordInfo
- * @memberof google.cloud.videointelligence.v1p1beta1
- * @see [google.cloud.videointelligence.v1p1beta1.WordInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto}
- */
-const WordInfo = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation feature.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p1beta1
- */
-const Feature = {
-
- /**
- * Unspecified.
- */
- FEATURE_UNSPECIFIED: 0,
-
- /**
- * Label detection. Detect objects, such as dog or flower.
- */
- LABEL_DETECTION: 1,
-
- /**
- * Shot change detection.
- */
- SHOT_CHANGE_DETECTION: 2,
-
- /**
- * Explicit content detection.
- */
- EXPLICIT_CONTENT_DETECTION: 3,
-
- /**
- * Speech transcription.
- */
- SPEECH_TRANSCRIPTION: 6
-};
-
-/**
- * Label detection mode.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p1beta1
- */
-const LabelDetectionMode = {
-
- /**
- * Unspecified.
- */
- LABEL_DETECTION_MODE_UNSPECIFIED: 0,
-
- /**
- * Detect shot-level labels.
- */
- SHOT_MODE: 1,
-
- /**
- * Detect frame-level labels.
- */
- FRAME_MODE: 2,
-
- /**
- * Detect both shot-level and frame-level labels.
- */
- SHOT_AND_FRAME_MODE: 3
-};
-
-/**
- * Bucketized representation of likelihood.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p1beta1
- */
-const Likelihood = {
-
- /**
- * Unspecified likelihood.
- */
- LIKELIHOOD_UNSPECIFIED: 0,
-
- /**
- * Very unlikely.
- */
- VERY_UNLIKELY: 1,
-
- /**
- * Unlikely.
- */
- UNLIKELY: 2,
-
- /**
- * Possible.
- */
- POSSIBLE: 3,
-
- /**
- * Likely.
- */
- LIKELY: 4,
-
- /**
- * Very likely.
- */
- VERY_LIKELY: 5
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/longrunning/doc_operations.js b/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/longrunning/doc_operations.js
deleted file mode 100644
index 099e418d620..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/longrunning/doc_operations.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * This resource represents a long-running operation that is the result of a
- * network API call.
- *
- * @property {string} name
- * The server-assigned name, which is only unique within the same service that
- * originally returns it. If you use the default HTTP mapping, the
- * `name` should be a resource name ending with `operations/{unique_id}`.
- *
- * @property {Object} metadata
- * Service-specific metadata associated with the operation. It typically
- * contains progress information and common metadata such as create time.
- * Some services might not provide such metadata. Any method that returns a
- * long-running operation should document the metadata type, if any.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @property {boolean} done
- * If the value is `false`, it means the operation is still in progress.
- * If `true`, the operation is completed, and either `error` or `response` is
- * available.
- *
- * @property {Object} error
- * The error result of the operation in case of failure or cancellation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} response
- * The normal response of the operation in case of success. If the original
- * method returns no data on success, such as `Delete`, the response is
- * `google.protobuf.Empty`. If the original method is standard
- * `Get`/`Create`/`Update`, the response should be the resource. For other
- * methods, the response should have the type `XxxResponse`, where `Xxx`
- * is the original method name. For example, if the original method name
- * is `TakeSnapshot()`, the inferred response type is
- * `TakeSnapshotResponse`.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Operation
- * @memberof google.longrunning
- * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
- */
-const Operation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/protobuf/doc_any.js b/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/protobuf/doc_any.js
deleted file mode 100644
index 813682aa336..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/protobuf/doc_any.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * `Any` contains an arbitrary serialized protocol buffer message along with a
- * URL that describes the type of the serialized message.
- *
- * Protobuf library provides support to pack/unpack Any values in the form
- * of utility functions or additional generated methods of the Any type.
- *
- * Example 1: Pack and unpack a message in C++.
- *
- * Foo foo = ...;
- * Any any;
- * any.PackFrom(foo);
- * ...
- * if (any.UnpackTo(&foo)) {
- * ...
- * }
- *
- * Example 2: Pack and unpack a message in Java.
- *
- * Foo foo = ...;
- * Any any = Any.pack(foo);
- * ...
- * if (any.is(Foo.class)) {
- * foo = any.unpack(Foo.class);
- * }
- *
- * Example 3: Pack and unpack a message in Python.
- *
- * foo = Foo(...)
- * any = Any()
- * any.Pack(foo)
- * ...
- * if any.Is(Foo.DESCRIPTOR):
- * any.Unpack(foo)
- * ...
- *
- * Example 4: Pack and unpack a message in Go
- *
- * foo := &pb.Foo{...}
- * any, err := ptypes.MarshalAny(foo)
- * ...
- * foo := &pb.Foo{}
- * if err := ptypes.UnmarshalAny(any, foo); err != nil {
- * ...
- * }
- *
- * The pack methods provided by protobuf library will by default use
- * 'type.googleapis.com/full.type.name' as the type URL and the unpack
- * methods only use the fully qualified type name after the last '/'
- * in the type URL, for example "foo.bar.com/x/y.z" will yield type
- * name "y.z".
- *
- *
- * # JSON
- *
- * The JSON representation of an `Any` value uses the regular
- * representation of the deserialized, embedded message, with an
- * additional field `@type` which contains the type URL. Example:
- *
- * package google.profile;
- * message Person {
- * string first_name = 1;
- * string last_name = 2;
- * }
- *
- * {
- * "@type": "type.googleapis.com/google.profile.Person",
- * "firstName": ,
- * "lastName":
- * }
- *
- * If the embedded message type is well-known and has a custom JSON
- * representation, that representation will be embedded adding a field
- * `value` which holds the custom JSON in addition to the `@type`
- * field. Example (for message google.protobuf.Duration):
- *
- * {
- * "@type": "type.googleapis.com/google.protobuf.Duration",
- * "value": "1.212s"
- * }
- *
- * @property {string} typeUrl
- * A URL/resource name that uniquely identifies the type of the serialized
- * protocol buffer message. This string must contain at least
- * one "/" character. The last segment of the URL's path must represent
- * the fully qualified name of the type (as in
- * `path/google.protobuf.Duration`). The name should be in a canonical form
- * (e.g., leading "." is not accepted).
- *
- * In practice, teams usually precompile into the binary all types that they
- * expect it to use in the context of Any. However, for URLs which use the
- * scheme `http`, `https`, or no scheme, one can optionally set up a type
- * server that maps type URLs to message definitions as follows:
- *
- * * If no scheme is provided, `https` is assumed.
- * * An HTTP GET on the URL must yield a google.protobuf.Type
- * value in binary format, or produce an error.
- * * Applications are allowed to cache lookup results based on the
- * URL, or have them precompiled into a binary to avoid any
- * lookup. Therefore, binary compatibility needs to be preserved
- * on changes to types. (Use versioned type names to manage
- * breaking changes.)
- *
- * Note: this functionality is not currently available in the official
- * protobuf release, and it is not used for type URLs beginning with
- * type.googleapis.com.
- *
- * Schemes other than `http`, `https` (or the empty scheme) might be
- * used with implementation specific semantics.
- *
- * @property {Buffer} value
- * Must be a valid serialized protocol buffer of the above specified type.
- *
- * @typedef Any
- * @memberof google.protobuf
- * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
- */
-const Any = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/protobuf/doc_duration.js b/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/protobuf/doc_duration.js
deleted file mode 100644
index bd4b4ee6067..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/protobuf/doc_duration.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * A Duration represents a signed, fixed-length span of time represented
- * as a count of seconds and fractions of seconds at nanosecond
- * resolution. It is independent of any calendar and concepts like "day"
- * or "month". It is related to Timestamp in that the difference between
- * two Timestamp values is a Duration and it can be added or subtracted
- * from a Timestamp. Range is approximately +-10,000 years.
- *
- * # Examples
- *
- * Example 1: Compute Duration from two Timestamps in pseudo code.
- *
- * Timestamp start = ...;
- * Timestamp end = ...;
- * Duration duration = ...;
- *
- * duration.seconds = end.seconds - start.seconds;
- * duration.nanos = end.nanos - start.nanos;
- *
- * if (duration.seconds < 0 && duration.nanos > 0) {
- * duration.seconds += 1;
- * duration.nanos -= 1000000000;
- * } else if (durations.seconds > 0 && duration.nanos < 0) {
- * duration.seconds -= 1;
- * duration.nanos += 1000000000;
- * }
- *
- * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
- *
- * Timestamp start = ...;
- * Duration duration = ...;
- * Timestamp end = ...;
- *
- * end.seconds = start.seconds + duration.seconds;
- * end.nanos = start.nanos + duration.nanos;
- *
- * if (end.nanos < 0) {
- * end.seconds -= 1;
- * end.nanos += 1000000000;
- * } else if (end.nanos >= 1000000000) {
- * end.seconds += 1;
- * end.nanos -= 1000000000;
- * }
- *
- * Example 3: Compute Duration from datetime.timedelta in Python.
- *
- * td = datetime.timedelta(days=3, minutes=10)
- * duration = Duration()
- * duration.FromTimedelta(td)
- *
- * # JSON Mapping
- *
- * In JSON format, the Duration type is encoded as a string rather than an
- * object, where the string ends in the suffix "s" (indicating seconds) and
- * is preceded by the number of seconds, with nanoseconds expressed as
- * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
- * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
- * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
- * microsecond should be expressed in JSON format as "3.000001s".
- *
- * @property {number} seconds
- * Signed seconds of the span of time. Must be from -315,576,000,000
- * to +315,576,000,000 inclusive. Note: these bounds are computed from:
- * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- *
- * @property {number} nanos
- * Signed fractions of a second at nanosecond resolution of the span
- * of time. Durations less than one second are represented with a 0
- * `seconds` field and a positive or negative `nanos` field. For durations
- * of one second or more, a non-zero value for the `nanos` field must be
- * of the same sign as the `seconds` field. Must be from -999,999,999
- * to +999,999,999 inclusive.
- *
- * @typedef Duration
- * @memberof google.protobuf
- * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
- */
-const Duration = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/rpc/doc_status.js b/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/rpc/doc_status.js
deleted file mode 100644
index 750e0af7689..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/rpc/doc_status.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The `Status` type defines a logical error model that is suitable for
- * different programming environments, including REST APIs and RPC APIs. It is
- * used by [gRPC](https://github.com/grpc). Each `Status` message contains
- * three pieces of data: error code, error message, and error details.
- *
- * You can find out more about this error model and how to work with it in the
- * [API Design Guide](https://cloud.google.com/apis/design/errors).
- *
- * @property {number} code
- * The status code, which should be an enum value of google.rpc.Code.
- *
- * @property {string} message
- * A developer-facing error message, which should be in English. Any
- * user-facing error message should be localized and sent in the
- * google.rpc.Status.details field, or localized by the client.
- *
- * @property {Object[]} details
- * A list of messages that carry the error details. There is a common set of
- * message types for APIs to use.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Status
- * @memberof google.rpc
- * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
- */
-const Status = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1beta2/index.js b/packages/google-cloud-videointelligence/src/v1p1beta1/index.ts
similarity index 64%
rename from packages/google-cloud-videointelligence/src/v1beta2/index.js
rename to packages/google-cloud-videointelligence/src/v1p1beta1/index.ts
index 9bdd2dd7138..b4969ebdd1f 100644
--- a/packages/google-cloud-videointelligence/src/v1beta2/index.js
+++ b/packages/google-cloud-videointelligence/src/v1p1beta1/index.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,9 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const VideoIntelligenceServiceClient = require('./video_intelligence_service_client');
-
-module.exports.VideoIntelligenceServiceClient = VideoIntelligenceServiceClient;
+export {VideoIntelligenceServiceClient} from './video_intelligence_service_client';
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.ts
similarity index 54%
rename from packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.js
rename to packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.ts
index bea8f82bc97..cee76bc72cf 100644
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.js
+++ b/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,22 +11,40 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+ LROperation,
+} from 'google-gax';
+import * as path from 'path';
-const gapicConfig = require('./video_intelligence_service_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './video_intelligence_service_client_config.json';
-const VERSION = require('../../package.json').version;
+const version = require('../../../package.json').version;
/**
- * Service that implements Google Cloud Video Intelligence API.
- *
+ * Service that implements Google Cloud Video Intelligence API.
* @class
* @memberof v1p1beta1
*/
-class VideoIntelligenceServiceClient {
+export class VideoIntelligenceServiceClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+ operationsClient: gax.OperationsClient;
+ videoIntelligenceServiceStub: Promise<{[name: string]: Function}>;
+
/**
* Construct an instance of VideoIntelligenceServiceClient.
*
@@ -54,58 +72,57 @@ class VideoIntelligenceServiceClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this
+ .constructor as typeof VideoIntelligenceServiceClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this
+ .constructor as typeof VideoIntelligenceServiceClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -121,24 +138,25 @@ class VideoIntelligenceServiceClient {
opts.fallback ? require('../../protos/protos.json') : nodejsProtoPath
);
- const protoFilesRoot = opts.fallback
- ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
- : gaxModule.protobuf.loadSync(nodejsProtoPath);
-
// This API contains "long-running operations", which return a
// an Operation object that allows for tracking of the operation,
// rather than holding a request open.
- this.operationsClient = new gaxModule.lro({
- auth: gaxGrpc.auth,
- grpc: gaxGrpc.grpc,
- }).operationsClient(opts);
+ const protoFilesRoot = opts.fallback
+ ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
+ : gaxModule.protobuf.loadSync(nodejsProtoPath);
+ this.operationsClient = gaxModule
+ .lro({
+ auth: this.auth,
+ grpc: 'grpc' in gaxGrpc ? gaxGrpc.grpc : undefined,
+ })
+ .operationsClient(opts);
const annotateVideoResponse = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse'
- );
+ '.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse'
+ ) as gax.protobuf.Type;
const annotateVideoMetadata = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress'
- );
+ '.google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress'
+ ) as gax.protobuf.Type;
this._descriptors.longrunning = {
annotateVideo: new gaxModule.LongrunningDescriptor(
@@ -151,8 +169,8 @@ class VideoIntelligenceServiceClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -163,33 +181,49 @@ class VideoIntelligenceServiceClient {
// Put together the "service stub" for
// google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService.
- const videoIntelligenceServiceStub = gaxGrpc.createStub(
+ this.videoIntelligenceServiceStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService(
+ ? (protos as protobuf.Root).lookupService(
'google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService'
)
- : protos.google.cloud.videointelligence.v1p1beta1
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.videointelligence.v1p1beta1
.VideoIntelligenceService,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
const videoIntelligenceServiceStubMethods = ['annotateVideo'];
+
for (const methodName of videoIntelligenceServiceStubMethods) {
- const innerCallPromise = videoIntelligenceServiceStub.then(
- stub => (...args) => {
+ const innerCallPromise = this.videoIntelligenceServiceStub.then(
+ stub => (...args: Array<{}>) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.longrunning[methodName]
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
+ this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ return apiCall(argument, callOptions, callback);
+ };
}
}
@@ -223,19 +257,52 @@ class VideoIntelligenceServiceClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+ getProjectId(): Promise;
+ getProjectId(callback: Callback): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+ getProjectId(
+ callback?: Callback
+ ): Promise | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
// -- Service calls --
// -------------------
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ >;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs asynchronous video annotation. Progress and results can be
* retrieved through the `google.longrunning.Operations` interface.
@@ -244,141 +311,96 @@ class VideoIntelligenceServiceClient {
*
* @param {Object} request
* The request object that will be sent.
- * @param {string} [request.inputUri]
+ * @param {string} request.inputUri
* Input video location. Currently, only
* [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
* supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* A video URI may include wildcards in `object-id`, and thus identify
* multiple videos. Supported wildcards: '*' to match 0 or more characters;
* '?' to match 1 character. If unset, the input video should be embedded
* in the request as `input_content`. If set, `input_content` should be unset.
- * @param {Buffer} [request.inputContent]
+ * @param {Buffer} request.inputContent
* The video data bytes.
* If unset, the input video(s) should be specified via `input_uri`.
* If set, `input_uri` should be unset.
- * @param {number[]} [request.features]
+ * @param {number[]} request.features
* Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p1beta1.Feature}
- * @param {Object} [request.videoContext]
+ * @param {google.cloud.videointelligence.v1p1beta1.VideoContext} request.videoContext
* Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p1beta1.VideoContext}
* @param {string} [request.outputUri]
* Optional. Location where the output (in JSON format) should be stored.
* Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
* URIs are supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* @param {string} [request.locationId]
* Optional. Cloud region where annotation should take place. Supported cloud
* regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
* is specified, a region will be determined based on video file location.
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
- * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const videoIntelligence = require('@google-cloud/video-intelligence');
- *
- * const client = new videoIntelligence.v1p1beta1.VideoIntelligenceServiceClient({
- * // optional auth parameters.
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the promise pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Operation#promise starts polling for the completion of the LRO.
- * return operation.promise();
- * })
- * .then(responses => {
- * const result = responses[0];
- * const metadata = responses[1];
- * const finalApiResponse = responses[2];
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the event emitter pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Adding a listener for the "complete" event starts polling for the
- * // completion of the operation.
- * operation.on('complete', (result, metadata, finalApiResponse) => {
- * // doSomethingWith(result);
- * });
- *
- * // Adding a listener for the "progress" event causes the callback to be
- * // called on any change in metadata when the operation is polled.
- * operation.on('progress', (metadata, apiResponse) => {
- * // doSomethingWith(metadata)
- * });
- *
- * // Adding a listener for the "error" event handles any errors found during polling.
- * operation.on('error', err => {
- * // throw(err);
- * });
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the await pattern.
- * const [operation] = await client.annotateVideo(request);
- *
- * const [response] = await operation.promise();
*/
- annotateVideo(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.annotateVideo(request, options, callback);
}
-}
-module.exports = VideoIntelligenceServiceClient;
+ /**
+ * Terminate the GRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ */
+ close(): Promise {
+ if (!this._terminated) {
+ return this.videoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client_config.json
index e701c4af9d9..5d0c24dbde6 100644
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client_config.json
+++ b/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client_config.json
@@ -2,20 +2,29 @@
"interfaces": {
"google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ },
+ "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
- "rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000
}
},
@@ -23,7 +32,7 @@
"AnnotateVideo": {
"timeout_millis": 600000,
"retry_codes_name": "idempotent",
- "retry_params_name": "default"
+ "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3"
}
}
}
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/doc_video_intelligence.js
deleted file mode 100644
index 31a4ea321b3..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/doc_video_intelligence.js
+++ /dev/null
@@ -1,770 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * Video annotation request.
- *
- * @property {string} inputUri
- * Input video location. Currently, only
- * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
- * supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- * A video URI may include wildcards in `object-id`, and thus identify
- * multiple videos. Supported wildcards: '*' to match 0 or more characters;
- * '?' to match 1 character. If unset, the input video should be embedded
- * in the request as `input_content`. If set, `input_content` should be unset.
- *
- * @property {Buffer} inputContent
- * The video data bytes.
- * If unset, the input video(s) should be specified via `input_uri`.
- * If set, `input_uri` should be unset.
- *
- * @property {number[]} features
- * Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p2beta1.Feature}
- *
- * @property {Object} videoContext
- * Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p2beta1.VideoContext}
- *
- * @property {string} outputUri
- * Optional. Location where the output (in JSON format) should be stored.
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
- * URIs are supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- *
- * @property {string} locationId
- * Optional. Cloud region where annotation should take place. Supported cloud
- * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
- * is specified, a region will be determined based on video file location.
- *
- * @typedef AnnotateVideoRequest
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const AnnotateVideoRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video context and/or feature-specific parameters.
- *
- * @property {Object[]} segments
- * Video segments to annotate. The segments may overlap and are not required
- * to be contiguous or span the whole video. If unspecified, each video is
- * treated as a single segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p2beta1.VideoSegment}
- *
- * @property {Object} labelDetectionConfig
- * Config for LABEL_DETECTION.
- *
- * This object should have the same structure as [LabelDetectionConfig]{@link google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig}
- *
- * @property {Object} shotChangeDetectionConfig
- * Config for SHOT_CHANGE_DETECTION.
- *
- * This object should have the same structure as [ShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig}
- *
- * @property {Object} explicitContentDetectionConfig
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig}
- *
- * @property {Object} textDetectionConfig
- * Config for TEXT_DETECTION.
- *
- * This object should have the same structure as [TextDetectionConfig]{@link google.cloud.videointelligence.v1p2beta1.TextDetectionConfig}
- *
- * @typedef VideoContext
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const VideoContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for LABEL_DETECTION.
- *
- * @property {number} labelDetectionMode
- * What labels should be detected with LABEL_DETECTION, in addition to
- * video-level labels or segment-level labels.
- * If unspecified, defaults to `SHOT_MODE`.
- *
- * The number should be among the values of [LabelDetectionMode]{@link google.cloud.videointelligence.v1p2beta1.LabelDetectionMode}
- *
- * @property {boolean} stationaryCamera
- * Whether the video has been shot from a stationary (i.e. non-moving) camera.
- * When set to true, might improve detection accuracy for moving objects.
- * Should be used with `SHOT_AND_FRAME_MODE` enabled.
- *
- * @property {string} model
- * Model to use for label detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef LabelDetectionConfig
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const LabelDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SHOT_CHANGE_DETECTION.
- *
- * @property {string} model
- * Model to use for shot change detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ShotChangeDetectionConfig
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const ShotChangeDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * @property {string} model
- * Model to use for explicit content detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ExplicitContentDetectionConfig
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const ExplicitContentDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for TEXT_DETECTION.
- *
- * @property {string[]} languageHints
- * Language hint can be specified if the language to be detected is known a
- * priori. It can increase the accuracy of the detection. Language hint must
- * be language code in BCP-47 format.
- *
- * Automatic language detection is performed if no hint is provided.
- *
- * @typedef TextDetectionConfig
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.TextDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const TextDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment.
- *
- * @property {Object} startTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the start of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the end of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef VideoSegment
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.VideoSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const VideoSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for label detection.
- *
- * @property {Object} segment
- * Video segment where a label was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p2beta1.VideoSegment}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelSegment
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.LabelSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const LabelSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for label detection.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelFrame
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.LabelFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const LabelFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Detected entity from video analysis.
- *
- * @property {string} entityId
- * Opaque entity ID. Some IDs may be available in
- * [Google Knowledge Graph Search
- * API](https://developers.google.com/knowledge-graph/).
- *
- * @property {string} description
- * Textual description, e.g. `Fixed-gear bicycle`.
- *
- * @property {string} languageCode
- * Language code for `description` in BCP-47 format.
- *
- * @typedef Entity
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const Entity = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Label annotation.
- *
- * @property {Object} entity
- * Detected entity.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p2beta1.Entity}
- *
- * @property {Object[]} categoryEntities
- * Common categories for the detected entity.
- * E.g. when the label is `Terrier` the category is likely `dog`. And in some
- * cases there might be more than one categories e.g. `Terrier` could also be
- * a `pet`.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p2beta1.Entity}
- *
- * @property {Object[]} segments
- * All video segments where a label was detected.
- *
- * This object should have the same structure as [LabelSegment]{@link google.cloud.videointelligence.v1p2beta1.LabelSegment}
- *
- * @property {Object[]} frames
- * All video frames where a label was detected.
- *
- * This object should have the same structure as [LabelFrame]{@link google.cloud.videointelligence.v1p2beta1.LabelFrame}
- *
- * @typedef LabelAnnotation
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.LabelAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const LabelAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for explicit content.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} pornographyLikelihood
- * Likelihood of the pornography content..
- *
- * The number should be among the values of [Likelihood]{@link google.cloud.videointelligence.v1p2beta1.Likelihood}
- *
- * @typedef ExplicitContentFrame
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const ExplicitContentFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Explicit content annotation (based on per-frame visual signals only).
- * If no explicit content has been detected in a frame, no annotations are
- * present for that frame.
- *
- * @property {Object[]} frames
- * All video frames where explicit content was detected.
- *
- * This object should have the same structure as [ExplicitContentFrame]{@link google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame}
- *
- * @typedef ExplicitContentAnnotation
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const ExplicitContentAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Normalized bounding box.
- * The normalized vertex coordinates are relative to the original image.
- * Range: [0, 1].
- *
- * @property {number} left
- * Left X coordinate.
- *
- * @property {number} top
- * Top Y coordinate.
- *
- * @property {number} right
- * Right X coordinate.
- *
- * @property {number} bottom
- * Bottom Y coordinate.
- *
- * @typedef NormalizedBoundingBox
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const NormalizedBoundingBox = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation results for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {Object[]} segmentLabelAnnotations
- * Label annotations on video level or user specified segment level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p2beta1.LabelAnnotation}
- *
- * @property {Object[]} shotLabelAnnotations
- * Label annotations on shot level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p2beta1.LabelAnnotation}
- *
- * @property {Object[]} frameLabelAnnotations
- * Label annotations on frame level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p2beta1.LabelAnnotation}
- *
- * @property {Object[]} shotAnnotations
- * Shot annotations. Each shot is represented as a video segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p2beta1.VideoSegment}
- *
- * @property {Object} explicitAnnotation
- * Explicit content annotation.
- *
- * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation}
- *
- * @property {Object[]} textAnnotations
- * OCR text detection and tracking.
- * Annotations for list of detected text snippets. Each will have list of
- * frame information associated with it.
- *
- * This object should have the same structure as [TextAnnotation]{@link google.cloud.videointelligence.v1p2beta1.TextAnnotation}
- *
- * @property {Object[]} objectAnnotations
- * Annotations for list of objects detected and tracked in video.
- *
- * This object should have the same structure as [ObjectTrackingAnnotation]{@link google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation}
- *
- * @property {Object} error
- * If set, indicates an error. Note that for a single `AnnotateVideoRequest`
- * some videos may succeed and some may fail.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @typedef VideoAnnotationResults
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const VideoAnnotationResults = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation response. Included in the `response`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationResults
- * Annotation results for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationResults]{@link google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults}
- *
- * @typedef AnnotateVideoResponse
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const AnnotateVideoResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation progress for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {number} progressPercent
- * Approximate percentage processed thus far. Guaranteed to be
- * 100 when fully processed.
- *
- * @property {Object} startTime
- * Time when the request was received.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {Object} updateTime
- * Time of the most recent update.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @typedef VideoAnnotationProgress
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const VideoAnnotationProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation progress. Included in the `metadata`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationProgress
- * Progress metadata for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationProgress]{@link google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress}
- *
- * @typedef AnnotateVideoProgress
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const AnnotateVideoProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A vertex represents a 2D point in the image.
- * NOTE: the normalized vertex coordinates are relative to the original image
- * and range from 0 to 1.
- *
- * @property {number} x
- * X coordinate.
- *
- * @property {number} y
- * Y coordinate.
- *
- * @typedef NormalizedVertex
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.NormalizedVertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const NormalizedVertex = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Normalized bounding polygon for text (that might not be aligned with axis).
- * Contains list of the corner points in clockwise order starting from
- * top-left corner. For example, for a rectangular bounding box:
- * When the text is horizontal it might look like:
- * 0----1
- * | |
- * 3----2
- *
- * When it's clockwise rotated 180 degrees around the top-left corner it
- * becomes:
- * 2----3
- * | |
- * 1----0
- *
- * and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- * than 0, or greater than 1 due to trignometric calculations for location of
- * the box.
- *
- * @property {Object[]} vertices
- * Normalized vertices of the bounding polygon.
- *
- * This object should have the same structure as [NormalizedVertex]{@link google.cloud.videointelligence.v1p2beta1.NormalizedVertex}
- *
- * @typedef NormalizedBoundingPoly
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const NormalizedBoundingPoly = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for text detection.
- *
- * @property {Object} segment
- * Video segment where a text snippet was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p2beta1.VideoSegment}
- *
- * @property {number} confidence
- * Confidence for the track of detected text. It is calculated as the highest
- * over all frames where OCR detected text appears.
- *
- * @property {Object[]} frames
- * Information related to the frames where OCR detected text appears.
- *
- * This object should have the same structure as [TextFrame]{@link google.cloud.videointelligence.v1p2beta1.TextFrame}
- *
- * @typedef TextSegment
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.TextSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const TextSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for text annotation (OCR).
- * Contains information regarding timestamp and bounding box locations for the
- * frames containing detected OCR text snippets.
- *
- * @property {Object} rotatedBoundingBox
- * Bounding polygon of the detected text for this frame.
- *
- * This object should have the same structure as [NormalizedBoundingPoly]{@link google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly}
- *
- * @property {Object} timeOffset
- * Timestamp of this frame.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef TextFrame
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.TextFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const TextFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotations related to one detected OCR text snippet. This will contain the
- * corresponding text, confidence value, and frame level information for each
- * detection.
- *
- * @property {string} text
- * The detected text.
- *
- * @property {Object[]} segments
- * All video segments where OCR detected text appears.
- *
- * This object should have the same structure as [TextSegment]{@link google.cloud.videointelligence.v1p2beta1.TextSegment}
- *
- * @typedef TextAnnotation
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const TextAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotations for object detection and tracking. This field
- * stores per frame location, time offset, and confidence.
- *
- * @property {Object} normalizedBoundingBox
- * The normalized bounding box location of this object track for the frame.
- *
- * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox}
- *
- * @property {Object} timeOffset
- * The timestamp of the frame in microseconds.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef ObjectTrackingFrame
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const ObjectTrackingFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotations corresponding to one tracked object.
- *
- * @property {Object} entity
- * Entity to specify the object category that this track is labeled as.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p2beta1.Entity}
- *
- * @property {number} confidence
- * Object category's labeling confidence of this track.
- *
- * @property {Object[]} frames
- * Information corresponding to all frames where this object track appears.
- *
- * This object should have the same structure as [ObjectTrackingFrame]{@link google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame}
- *
- * @property {Object} segment
- * Each object track corresponds to one video segment where it appears.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p2beta1.VideoSegment}
- *
- * @typedef ObjectTrackingAnnotation
- * @memberof google.cloud.videointelligence.v1p2beta1
- * @see [google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto}
- */
-const ObjectTrackingAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation feature.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p2beta1
- */
-const Feature = {
-
- /**
- * Unspecified.
- */
- FEATURE_UNSPECIFIED: 0,
-
- /**
- * Label detection. Detect objects, such as dog or flower.
- */
- LABEL_DETECTION: 1,
-
- /**
- * Shot change detection.
- */
- SHOT_CHANGE_DETECTION: 2,
-
- /**
- * Explicit content detection.
- */
- EXPLICIT_CONTENT_DETECTION: 3,
-
- /**
- * OCR text detection and tracking.
- */
- TEXT_DETECTION: 7,
-
- /**
- * Object detection and tracking.
- */
- OBJECT_TRACKING: 9
-};
-
-/**
- * Label detection mode.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p2beta1
- */
-const LabelDetectionMode = {
-
- /**
- * Unspecified.
- */
- LABEL_DETECTION_MODE_UNSPECIFIED: 0,
-
- /**
- * Detect shot-level labels.
- */
- SHOT_MODE: 1,
-
- /**
- * Detect frame-level labels.
- */
- FRAME_MODE: 2,
-
- /**
- * Detect both shot-level and frame-level labels.
- */
- SHOT_AND_FRAME_MODE: 3
-};
-
-/**
- * Bucketized representation of likelihood.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p2beta1
- */
-const Likelihood = {
-
- /**
- * Unspecified likelihood.
- */
- LIKELIHOOD_UNSPECIFIED: 0,
-
- /**
- * Very unlikely.
- */
- VERY_UNLIKELY: 1,
-
- /**
- * Unlikely.
- */
- UNLIKELY: 2,
-
- /**
- * Possible.
- */
- POSSIBLE: 3,
-
- /**
- * Likely.
- */
- LIKELY: 4,
-
- /**
- * Very likely.
- */
- VERY_LIKELY: 5
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/longrunning/doc_operations.js b/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/longrunning/doc_operations.js
deleted file mode 100644
index 099e418d620..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/longrunning/doc_operations.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * This resource represents a long-running operation that is the result of a
- * network API call.
- *
- * @property {string} name
- * The server-assigned name, which is only unique within the same service that
- * originally returns it. If you use the default HTTP mapping, the
- * `name` should be a resource name ending with `operations/{unique_id}`.
- *
- * @property {Object} metadata
- * Service-specific metadata associated with the operation. It typically
- * contains progress information and common metadata such as create time.
- * Some services might not provide such metadata. Any method that returns a
- * long-running operation should document the metadata type, if any.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @property {boolean} done
- * If the value is `false`, it means the operation is still in progress.
- * If `true`, the operation is completed, and either `error` or `response` is
- * available.
- *
- * @property {Object} error
- * The error result of the operation in case of failure or cancellation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} response
- * The normal response of the operation in case of success. If the original
- * method returns no data on success, such as `Delete`, the response is
- * `google.protobuf.Empty`. If the original method is standard
- * `Get`/`Create`/`Update`, the response should be the resource. For other
- * methods, the response should have the type `XxxResponse`, where `Xxx`
- * is the original method name. For example, if the original method name
- * is `TakeSnapshot()`, the inferred response type is
- * `TakeSnapshotResponse`.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Operation
- * @memberof google.longrunning
- * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
- */
-const Operation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/protobuf/doc_any.js b/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/protobuf/doc_any.js
deleted file mode 100644
index 813682aa336..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/protobuf/doc_any.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * `Any` contains an arbitrary serialized protocol buffer message along with a
- * URL that describes the type of the serialized message.
- *
- * Protobuf library provides support to pack/unpack Any values in the form
- * of utility functions or additional generated methods of the Any type.
- *
- * Example 1: Pack and unpack a message in C++.
- *
- * Foo foo = ...;
- * Any any;
- * any.PackFrom(foo);
- * ...
- * if (any.UnpackTo(&foo)) {
- * ...
- * }
- *
- * Example 2: Pack and unpack a message in Java.
- *
- * Foo foo = ...;
- * Any any = Any.pack(foo);
- * ...
- * if (any.is(Foo.class)) {
- * foo = any.unpack(Foo.class);
- * }
- *
- * Example 3: Pack and unpack a message in Python.
- *
- * foo = Foo(...)
- * any = Any()
- * any.Pack(foo)
- * ...
- * if any.Is(Foo.DESCRIPTOR):
- * any.Unpack(foo)
- * ...
- *
- * Example 4: Pack and unpack a message in Go
- *
- * foo := &pb.Foo{...}
- * any, err := ptypes.MarshalAny(foo)
- * ...
- * foo := &pb.Foo{}
- * if err := ptypes.UnmarshalAny(any, foo); err != nil {
- * ...
- * }
- *
- * The pack methods provided by protobuf library will by default use
- * 'type.googleapis.com/full.type.name' as the type URL and the unpack
- * methods only use the fully qualified type name after the last '/'
- * in the type URL, for example "foo.bar.com/x/y.z" will yield type
- * name "y.z".
- *
- *
- * # JSON
- *
- * The JSON representation of an `Any` value uses the regular
- * representation of the deserialized, embedded message, with an
- * additional field `@type` which contains the type URL. Example:
- *
- * package google.profile;
- * message Person {
- * string first_name = 1;
- * string last_name = 2;
- * }
- *
- * {
- * "@type": "type.googleapis.com/google.profile.Person",
- * "firstName": ,
- * "lastName":
- * }
- *
- * If the embedded message type is well-known and has a custom JSON
- * representation, that representation will be embedded adding a field
- * `value` which holds the custom JSON in addition to the `@type`
- * field. Example (for message google.protobuf.Duration):
- *
- * {
- * "@type": "type.googleapis.com/google.protobuf.Duration",
- * "value": "1.212s"
- * }
- *
- * @property {string} typeUrl
- * A URL/resource name that uniquely identifies the type of the serialized
- * protocol buffer message. This string must contain at least
- * one "/" character. The last segment of the URL's path must represent
- * the fully qualified name of the type (as in
- * `path/google.protobuf.Duration`). The name should be in a canonical form
- * (e.g., leading "." is not accepted).
- *
- * In practice, teams usually precompile into the binary all types that they
- * expect it to use in the context of Any. However, for URLs which use the
- * scheme `http`, `https`, or no scheme, one can optionally set up a type
- * server that maps type URLs to message definitions as follows:
- *
- * * If no scheme is provided, `https` is assumed.
- * * An HTTP GET on the URL must yield a google.protobuf.Type
- * value in binary format, or produce an error.
- * * Applications are allowed to cache lookup results based on the
- * URL, or have them precompiled into a binary to avoid any
- * lookup. Therefore, binary compatibility needs to be preserved
- * on changes to types. (Use versioned type names to manage
- * breaking changes.)
- *
- * Note: this functionality is not currently available in the official
- * protobuf release, and it is not used for type URLs beginning with
- * type.googleapis.com.
- *
- * Schemes other than `http`, `https` (or the empty scheme) might be
- * used with implementation specific semantics.
- *
- * @property {Buffer} value
- * Must be a valid serialized protocol buffer of the above specified type.
- *
- * @typedef Any
- * @memberof google.protobuf
- * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
- */
-const Any = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/protobuf/doc_duration.js b/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/protobuf/doc_duration.js
deleted file mode 100644
index bd4b4ee6067..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/protobuf/doc_duration.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * A Duration represents a signed, fixed-length span of time represented
- * as a count of seconds and fractions of seconds at nanosecond
- * resolution. It is independent of any calendar and concepts like "day"
- * or "month". It is related to Timestamp in that the difference between
- * two Timestamp values is a Duration and it can be added or subtracted
- * from a Timestamp. Range is approximately +-10,000 years.
- *
- * # Examples
- *
- * Example 1: Compute Duration from two Timestamps in pseudo code.
- *
- * Timestamp start = ...;
- * Timestamp end = ...;
- * Duration duration = ...;
- *
- * duration.seconds = end.seconds - start.seconds;
- * duration.nanos = end.nanos - start.nanos;
- *
- * if (duration.seconds < 0 && duration.nanos > 0) {
- * duration.seconds += 1;
- * duration.nanos -= 1000000000;
- * } else if (durations.seconds > 0 && duration.nanos < 0) {
- * duration.seconds -= 1;
- * duration.nanos += 1000000000;
- * }
- *
- * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
- *
- * Timestamp start = ...;
- * Duration duration = ...;
- * Timestamp end = ...;
- *
- * end.seconds = start.seconds + duration.seconds;
- * end.nanos = start.nanos + duration.nanos;
- *
- * if (end.nanos < 0) {
- * end.seconds -= 1;
- * end.nanos += 1000000000;
- * } else if (end.nanos >= 1000000000) {
- * end.seconds += 1;
- * end.nanos -= 1000000000;
- * }
- *
- * Example 3: Compute Duration from datetime.timedelta in Python.
- *
- * td = datetime.timedelta(days=3, minutes=10)
- * duration = Duration()
- * duration.FromTimedelta(td)
- *
- * # JSON Mapping
- *
- * In JSON format, the Duration type is encoded as a string rather than an
- * object, where the string ends in the suffix "s" (indicating seconds) and
- * is preceded by the number of seconds, with nanoseconds expressed as
- * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
- * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
- * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
- * microsecond should be expressed in JSON format as "3.000001s".
- *
- * @property {number} seconds
- * Signed seconds of the span of time. Must be from -315,576,000,000
- * to +315,576,000,000 inclusive. Note: these bounds are computed from:
- * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- *
- * @property {number} nanos
- * Signed fractions of a second at nanosecond resolution of the span
- * of time. Durations less than one second are represented with a 0
- * `seconds` field and a positive or negative `nanos` field. For durations
- * of one second or more, a non-zero value for the `nanos` field must be
- * of the same sign as the `seconds` field. Must be from -999,999,999
- * to +999,999,999 inclusive.
- *
- * @typedef Duration
- * @memberof google.protobuf
- * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
- */
-const Duration = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/rpc/doc_status.js b/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/rpc/doc_status.js
deleted file mode 100644
index 750e0af7689..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/rpc/doc_status.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The `Status` type defines a logical error model that is suitable for
- * different programming environments, including REST APIs and RPC APIs. It is
- * used by [gRPC](https://github.com/grpc). Each `Status` message contains
- * three pieces of data: error code, error message, and error details.
- *
- * You can find out more about this error model and how to work with it in the
- * [API Design Guide](https://cloud.google.com/apis/design/errors).
- *
- * @property {number} code
- * The status code, which should be an enum value of google.rpc.Code.
- *
- * @property {string} message
- * A developer-facing error message, which should be in English. Any
- * user-facing error message should be localized and sent in the
- * google.rpc.Status.details field, or localized by the client.
- *
- * @property {Object[]} details
- * A list of messages that carry the error details. There is a common set of
- * message types for APIs to use.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Status
- * @memberof google.rpc
- * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
- */
-const Status = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/index.js b/packages/google-cloud-videointelligence/src/v1p2beta1/index.js
deleted file mode 100644
index 9bdd2dd7138..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/index.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-'use strict';
-
-const VideoIntelligenceServiceClient = require('./video_intelligence_service_client');
-
-module.exports.VideoIntelligenceServiceClient = VideoIntelligenceServiceClient;
diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/index.js b/packages/google-cloud-videointelligence/src/v1p2beta1/index.ts
similarity index 64%
rename from packages/google-cloud-videointelligence/src/v1p1beta1/index.js
rename to packages/google-cloud-videointelligence/src/v1p2beta1/index.ts
index 9bdd2dd7138..b4969ebdd1f 100644
--- a/packages/google-cloud-videointelligence/src/v1p1beta1/index.js
+++ b/packages/google-cloud-videointelligence/src/v1p2beta1/index.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,9 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const VideoIntelligenceServiceClient = require('./video_intelligence_service_client');
-
-module.exports.VideoIntelligenceServiceClient = VideoIntelligenceServiceClient;
+export {VideoIntelligenceServiceClient} from './video_intelligence_service_client';
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.ts
similarity index 54%
rename from packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.js
rename to packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.ts
index 1b9511df368..f0c9dce78b2 100644
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.js
+++ b/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,22 +11,40 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+ LROperation,
+} from 'google-gax';
+import * as path from 'path';
-const gapicConfig = require('./video_intelligence_service_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './video_intelligence_service_client_config.json';
-const VERSION = require('../../package.json').version;
+const version = require('../../../package.json').version;
/**
- * Service that implements Google Cloud Video Intelligence API.
- *
+ * Service that implements Google Cloud Video Intelligence API.
* @class
* @memberof v1p2beta1
*/
-class VideoIntelligenceServiceClient {
+export class VideoIntelligenceServiceClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+ operationsClient: gax.OperationsClient;
+ videoIntelligenceServiceStub: Promise<{[name: string]: Function}>;
+
/**
* Construct an instance of VideoIntelligenceServiceClient.
*
@@ -54,58 +72,57 @@ class VideoIntelligenceServiceClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this
+ .constructor as typeof VideoIntelligenceServiceClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this
+ .constructor as typeof VideoIntelligenceServiceClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -121,24 +138,25 @@ class VideoIntelligenceServiceClient {
opts.fallback ? require('../../protos/protos.json') : nodejsProtoPath
);
- const protoFilesRoot = opts.fallback
- ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
- : gaxModule.protobuf.loadSync(nodejsProtoPath);
-
// This API contains "long-running operations", which return a
// an Operation object that allows for tracking of the operation,
// rather than holding a request open.
- this.operationsClient = new gaxModule.lro({
- auth: gaxGrpc.auth,
- grpc: gaxGrpc.grpc,
- }).operationsClient(opts);
+ const protoFilesRoot = opts.fallback
+ ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
+ : gaxModule.protobuf.loadSync(nodejsProtoPath);
+ this.operationsClient = gaxModule
+ .lro({
+ auth: this.auth,
+ grpc: 'grpc' in gaxGrpc ? gaxGrpc.grpc : undefined,
+ })
+ .operationsClient(opts);
const annotateVideoResponse = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse'
- );
+ '.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse'
+ ) as gax.protobuf.Type;
const annotateVideoMetadata = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress'
- );
+ '.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress'
+ ) as gax.protobuf.Type;
this._descriptors.longrunning = {
annotateVideo: new gaxModule.LongrunningDescriptor(
@@ -151,8 +169,8 @@ class VideoIntelligenceServiceClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -163,33 +181,49 @@ class VideoIntelligenceServiceClient {
// Put together the "service stub" for
// google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService.
- const videoIntelligenceServiceStub = gaxGrpc.createStub(
+ this.videoIntelligenceServiceStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService(
+ ? (protos as protobuf.Root).lookupService(
'google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService'
)
- : protos.google.cloud.videointelligence.v1p2beta1
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.videointelligence.v1p2beta1
.VideoIntelligenceService,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
const videoIntelligenceServiceStubMethods = ['annotateVideo'];
+
for (const methodName of videoIntelligenceServiceStubMethods) {
- const innerCallPromise = videoIntelligenceServiceStub.then(
- stub => (...args) => {
+ const innerCallPromise = this.videoIntelligenceServiceStub.then(
+ stub => (...args: Array<{}>) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.longrunning[methodName]
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
+ this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ return apiCall(argument, callOptions, callback);
+ };
}
}
@@ -223,19 +257,52 @@ class VideoIntelligenceServiceClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+ getProjectId(): Promise;
+ getProjectId(callback: Callback): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+ getProjectId(
+ callback?: Callback
+ ): Promise | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
// -- Service calls --
// -------------------
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ >;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs asynchronous video annotation. Progress and results can be
* retrieved through the `google.longrunning.Operations` interface.
@@ -244,141 +311,96 @@ class VideoIntelligenceServiceClient {
*
* @param {Object} request
* The request object that will be sent.
- * @param {string} [request.inputUri]
+ * @param {string} request.inputUri
* Input video location. Currently, only
* [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
* supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* A video URI may include wildcards in `object-id`, and thus identify
* multiple videos. Supported wildcards: '*' to match 0 or more characters;
* '?' to match 1 character. If unset, the input video should be embedded
* in the request as `input_content`. If set, `input_content` should be unset.
- * @param {Buffer} [request.inputContent]
+ * @param {Buffer} request.inputContent
* The video data bytes.
* If unset, the input video(s) should be specified via `input_uri`.
* If set, `input_uri` should be unset.
- * @param {number[]} [request.features]
+ * @param {number[]} request.features
* Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p2beta1.Feature}
- * @param {Object} [request.videoContext]
+ * @param {google.cloud.videointelligence.v1p2beta1.VideoContext} request.videoContext
* Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p2beta1.VideoContext}
* @param {string} [request.outputUri]
* Optional. Location where the output (in JSON format) should be stored.
* Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
* URIs are supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* @param {string} [request.locationId]
* Optional. Cloud region where annotation should take place. Supported cloud
* regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
* is specified, a region will be determined based on video file location.
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
- * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const videoIntelligence = require('@google-cloud/video-intelligence');
- *
- * const client = new videoIntelligence.v1p2beta1.VideoIntelligenceServiceClient({
- * // optional auth parameters.
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the promise pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Operation#promise starts polling for the completion of the LRO.
- * return operation.promise();
- * })
- * .then(responses => {
- * const result = responses[0];
- * const metadata = responses[1];
- * const finalApiResponse = responses[2];
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the event emitter pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Adding a listener for the "complete" event starts polling for the
- * // completion of the operation.
- * operation.on('complete', (result, metadata, finalApiResponse) => {
- * // doSomethingWith(result);
- * });
- *
- * // Adding a listener for the "progress" event causes the callback to be
- * // called on any change in metadata when the operation is polled.
- * operation.on('progress', (metadata, apiResponse) => {
- * // doSomethingWith(metadata)
- * });
- *
- * // Adding a listener for the "error" event handles any errors found during polling.
- * operation.on('error', err => {
- * // throw(err);
- * });
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the await pattern.
- * const [operation] = await client.annotateVideo(request);
- *
- * const [response] = await operation.promise();
*/
- annotateVideo(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.annotateVideo(request, options, callback);
}
-}
-module.exports = VideoIntelligenceServiceClient;
+ /**
+ * Terminate the GRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ */
+ close(): Promise {
+ if (!this._terminated) {
+ return this.videoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client_config.json
index 01c3c0a8933..20e275281ce 100644
--- a/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client_config.json
+++ b/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client_config.json
@@ -2,20 +2,29 @@
"interfaces": {
"google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ },
+ "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
- "rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000
}
},
@@ -23,7 +32,7 @@
"AnnotateVideo": {
"timeout_millis": 600000,
"retry_codes_name": "idempotent",
- "retry_params_name": "default"
+ "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3"
}
}
}
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js
deleted file mode 100644
index ae450b25b92..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js
+++ /dev/null
@@ -1,1704 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * Video annotation request.
- *
- * @property {string} inputUri
- * Input video location. Currently, only
- * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
- * supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video
- * URI may include wildcards in `object-id`, and thus identify multiple
- * videos. Supported wildcards: '*' to match 0 or more characters;
- * '?' to match 1 character. If unset, the input video should be embedded
- * in the request as `input_content`. If set, `input_content` should be unset.
- *
- * @property {Buffer} inputContent
- * The video data bytes.
- * If unset, the input video(s) should be specified via `input_uri`.
- * If set, `input_uri` should be unset.
- *
- * @property {number[]} features
- * Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p3beta1.Feature}
- *
- * @property {Object} videoContext
- * Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p3beta1.VideoContext}
- *
- * @property {string} outputUri
- * Optional. Location where the output (in JSON format) should be stored.
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
- * URIs are supported, which must be specified in the following format:
- * `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- *
- * @property {string} locationId
- * Optional. Cloud region where annotation should take place. Supported cloud
- * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
- * is specified, a region will be determined based on video file location.
- *
- * @typedef AnnotateVideoRequest
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const AnnotateVideoRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video context and/or feature-specific parameters.
- *
- * @property {Object[]} segments
- * Video segments to annotate. The segments may overlap and are not required
- * to be contiguous or span the whole video. If unspecified, each video is
- * treated as a single segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {Object} labelDetectionConfig
- * Config for LABEL_DETECTION.
- *
- * This object should have the same structure as [LabelDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig}
- *
- * @property {Object} shotChangeDetectionConfig
- * Config for SHOT_CHANGE_DETECTION.
- *
- * This object should have the same structure as [ShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig}
- *
- * @property {Object} explicitContentDetectionConfig
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig}
- *
- * @property {Object} faceDetectionConfig
- * Config for FACE_DETECTION.
- *
- * This object should have the same structure as [FaceDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig}
- *
- * @property {Object} speechTranscriptionConfig
- * Config for SPEECH_TRANSCRIPTION.
- *
- * This object should have the same structure as [SpeechTranscriptionConfig]{@link google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig}
- *
- * @property {Object} textDetectionConfig
- * Config for TEXT_DETECTION.
- *
- * This object should have the same structure as [TextDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig}
- *
- * @property {Object} personDetectionConfig
- * Config for PERSON_DETECTION.
- *
- * This object should have the same structure as [PersonDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig}
- *
- * @property {Object} objectTrackingConfig
- * Config for OBJECT_TRACKING.
- *
- * This object should have the same structure as [ObjectTrackingConfig]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig}
- *
- * @typedef VideoContext
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const VideoContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for LABEL_DETECTION.
- *
- * @property {number} labelDetectionMode
- * What labels should be detected with LABEL_DETECTION, in addition to
- * video-level labels or segment-level labels.
- * If unspecified, defaults to `SHOT_MODE`.
- *
- * The number should be among the values of [LabelDetectionMode]{@link google.cloud.videointelligence.v1p3beta1.LabelDetectionMode}
- *
- * @property {boolean} stationaryCamera
- * Whether the video has been shot from a stationary (i.e. non-moving) camera.
- * When set to true, might improve detection accuracy for moving objects.
- * Should be used with `SHOT_AND_FRAME_MODE` enabled.
- *
- * @property {string} model
- * Model to use for label detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @property {number} frameConfidenceThreshold
- * The confidence threshold we perform filtering on the labels from
- * frame-level detection. If not set, it is set to 0.4 by default. The valid
- * range for this threshold is [0.1, 0.9]. Any value set outside of this
- * range will be clipped.
- * Note: for best results please follow the default threshold. We will update
- * the default threshold everytime when we release a new model.
- *
- * @property {number} videoConfidenceThreshold
- * The confidence threshold we perform filtering on the labels from
- * video-level and shot-level detections. If not set, it is set to 0.3 by
- * default. The valid range for this threshold is [0.1, 0.9]. Any value set
- * outside of this range will be clipped.
- * Note: for best results please follow the default threshold. We will update
- * the default threshold everytime when we release a new model.
- *
- * @typedef LabelDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const LabelDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SHOT_CHANGE_DETECTION.
- *
- * @property {string} model
- * Model to use for shot change detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ShotChangeDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const ShotChangeDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for OBJECT_TRACKING.
- *
- * @property {string} model
- * Model to use for object tracking.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ObjectTrackingConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const ObjectTrackingConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for EXPLICIT_CONTENT_DETECTION.
- *
- * @property {string} model
- * Model to use for explicit content detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef ExplicitContentDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const ExplicitContentDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for FACE_DETECTION.
- *
- * @property {string} model
- * Model to use for face detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @property {boolean} includeBoundingBoxes
- * Whether bounding boxes be included in the face annotation output.
- *
- * @property {boolean} includeAttributes
- * Whether to enable face attributes detection, such as glasses, dark_glasses,
- * mouth_open etc. Ignored if 'include_bounding_boxes' is false.
- *
- * @typedef FaceDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const FaceDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for PERSON_DETECTION.
- *
- * @property {boolean} includeBoundingBoxes
- * Whether bounding boxes be included in the person detection annotation
- * output.
- *
- * @property {boolean} includePoseLandmarks
- * Whether to enable pose landmarks detection. Ignored if
- * 'include_bounding_boxes' is false.
- *
- * @property {boolean} includeAttributes
- * Whether to enable person attributes detection, such as cloth color (black,
- * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair
- * color (black, blonde, etc), hair length (long, short, bald), etc.
- * Ignored if 'include_bounding_boxes' is false.
- *
- * @typedef PersonDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const PersonDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for TEXT_DETECTION.
- *
- * @property {string[]} languageHints
- * Language hint can be specified if the language to be detected is known a
- * priori. It can increase the accuracy of the detection. Language hint must
- * be language code in BCP-47 format.
- *
- * Automatic language detection is performed if no hint is provided.
- *
- * @property {string} model
- * Model to use for text detection.
- * Supported values: "builtin/stable" (the default if unset) and
- * "builtin/latest".
- *
- * @typedef TextDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.TextDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const TextDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment.
- *
- * @property {Object} startTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the start of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTimeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the end of the segment (inclusive).
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef VideoSegment
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.VideoSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const VideoSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for label detection.
- *
- * @property {Object} segment
- * Video segment where a label was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelSegment
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.LabelSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const LabelSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for label detection.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} confidence
- * Confidence that the label is accurate. Range: [0, 1].
- *
- * @typedef LabelFrame
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.LabelFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const LabelFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Detected entity from video analysis.
- *
- * @property {string} entityId
- * Opaque entity ID. Some IDs may be available in
- * [Google Knowledge Graph Search
- * API](https://developers.google.com/knowledge-graph/).
- *
- * @property {string} description
- * Textual description, e.g. `Fixed-gear bicycle`.
- *
- * @property {string} languageCode
- * Language code for `description` in BCP-47 format.
- *
- * @typedef Entity
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const Entity = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Label annotation.
- *
- * @property {Object} entity
- * Detected entity.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity}
- *
- * @property {Object[]} categoryEntities
- * Common categories for the detected entity.
- * E.g. when the label is `Terrier` the category is likely `dog`. And in some
- * cases there might be more than one categories e.g. `Terrier` could also be
- * a `pet`.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity}
- *
- * @property {Object[]} segments
- * All video segments where a label was detected.
- *
- * This object should have the same structure as [LabelSegment]{@link google.cloud.videointelligence.v1p3beta1.LabelSegment}
- *
- * @property {Object[]} frames
- * All video frames where a label was detected.
- *
- * This object should have the same structure as [LabelFrame]{@link google.cloud.videointelligence.v1p3beta1.LabelFrame}
- *
- * @typedef LabelAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.LabelAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const LabelAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for explicit content.
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video, corresponding to the
- * video frame for this location.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} pornographyLikelihood
- * Likelihood of the pornography content..
- *
- * The number should be among the values of [Likelihood]{@link google.cloud.videointelligence.v1p3beta1.Likelihood}
- *
- * @typedef ExplicitContentFrame
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const ExplicitContentFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Explicit content annotation (based on per-frame visual signals only).
- * If no explicit content has been detected in a frame, no annotations are
- * present for that frame.
- *
- * @property {Object[]} frames
- * All video frames where explicit content was detected.
- *
- * This object should have the same structure as [ExplicitContentFrame]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame}
- *
- * @typedef ExplicitContentAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const ExplicitContentAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Normalized bounding box.
- * The normalized vertex coordinates are relative to the original image.
- * Range: [0, 1].
- *
- * @property {number} left
- * Left X coordinate.
- *
- * @property {number} top
- * Top Y coordinate.
- *
- * @property {number} right
- * Right X coordinate.
- *
- * @property {number} bottom
- * Bottom Y coordinate.
- *
- * @typedef NormalizedBoundingBox
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const NormalizedBoundingBox = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * For tracking related features.
- * An object at time_offset with attributes, and located with
- * normalized_bounding_box.
- *
- * @property {Object} normalizedBoundingBox
- * Normalized Bounding box in a frame, where the object is located.
- *
- * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox}
- *
- * @property {Object} timeOffset
- * Time-offset, relative to the beginning of the video,
- * corresponding to the video frame for this object.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object[]} attributes
- * Optional. The attributes of the object in the bounding box.
- *
- * This object should have the same structure as [DetectedAttribute]{@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute}
- *
- * @property {Object[]} landmarks
- * Optional. The detected landmarks.
- *
- * This object should have the same structure as [DetectedLandmark]{@link google.cloud.videointelligence.v1p3beta1.DetectedLandmark}
- *
- * @typedef TimestampedObject
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.TimestampedObject definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const TimestampedObject = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A track of an object instance.
- *
- * @property {Object} segment
- * Video segment of a track.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {Object[]} timestampedObjects
- * The object with timestamp and attributes per frame in the track.
- *
- * This object should have the same structure as [TimestampedObject]{@link google.cloud.videointelligence.v1p3beta1.TimestampedObject}
- *
- * @property {Object[]} attributes
- * Optional. Attributes in the track level.
- *
- * This object should have the same structure as [DetectedAttribute]{@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute}
- *
- * @property {number} confidence
- * Optional. The confidence score of the tracked object.
- *
- * @typedef Track
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.Track definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const Track = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A generic detected attribute represented by name in string format.
- *
- * @property {string} name
- * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
- * A full list of supported type names will be provided in the document.
- *
- * @property {number} confidence
- * Detected attribute confidence. Range [0, 1].
- *
- * @property {string} value
- * Text value of the detection result. For example, the value for "HairColor"
- * can be "black", "blonde", etc.
- *
- * @typedef DetectedAttribute
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.DetectedAttribute definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const DetectedAttribute = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Celebrity definition.
- *
- * @property {string} name
- * The resource name of the celebrity. Have the format
- * `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery.
- * kg-mid is the id in Google knowledge graph, which is unique for the
- * celebrity.
- *
- * @property {string} displayName
- * The celebrity name.
- *
- * @property {string} description
- * Textual description of additional information about the celebrity, if
- * applicable.
- *
- * @typedef Celebrity
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.Celebrity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const Celebrity = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The annotation result of a celebrity face track. RecognizedCelebrity field
- * could be empty if the face track does not have any matched celebrities.
- *
- * @property {Object[]} celebrities
- * Top N match of the celebrities for the face in this track.
- *
- * This object should have the same structure as [RecognizedCelebrity]{@link google.cloud.videointelligence.v1p3beta1.RecognizedCelebrity}
- *
- * @property {Object} faceTrack
- * A track of a person's face.
- *
- * This object should have the same structure as [Track]{@link google.cloud.videointelligence.v1p3beta1.Track}
- *
- * @typedef CelebrityTrack
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.CelebrityTrack definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const CelebrityTrack = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-
- /**
- * The recognized celebrity with confidence score.
- *
- * @property {Object} celebrity
- * The recognized celebrity.
- *
- * This object should have the same structure as [Celebrity]{@link google.cloud.videointelligence.v1p3beta1.Celebrity}
- *
- * @property {number} confidence
- * Recognition confidence. Range [0, 1].
- *
- * @typedef RecognizedCelebrity
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
- RecognizedCelebrity: {
- // This is for documentation. Actual contents will be loaded by gRPC.
- }
-};
-
-/**
- * Celebrity recognition annotation per video.
- *
- * @property {Object[]} celebrityTracks
- * The tracks detected from the input video, including recognized celebrities
- * and other detected faces in the video.
- *
- * This object should have the same structure as [CelebrityTrack]{@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack}
- *
- * @typedef CelebrityRecognitionAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const CelebrityRecognitionAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A generic detected landmark represented by name in string format and a 2D
- * location.
- *
- * @property {string} name
- * The name of this landmark, i.e. left_hand, right_shoulder.
- *
- * @property {Object} point
- * The 2D point of the detected landmark using the normalized image
- * coordindate system. The normalized coordinates have the range from 0 to 1.
- *
- * This object should have the same structure as [NormalizedVertex]{@link google.cloud.videointelligence.v1p3beta1.NormalizedVertex}
- *
- * @property {number} confidence
- * The confidence score of the detected landmark. Range [0, 1].
- *
- * @typedef DetectedLandmark
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.DetectedLandmark definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const DetectedLandmark = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Face detection annotation.
- *
- * @property {Object[]} tracks
- * The face tracks with attributes.
- *
- * This object should have the same structure as [Track]{@link google.cloud.videointelligence.v1p3beta1.Track}
- *
- * @property {Buffer} thumbnail
- * The thumbnail of a person's face.
- *
- * @typedef FaceDetectionAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const FaceDetectionAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Person detection annotation per video.
- *
- * @property {Object[]} tracks
- * The trackes that a person is detected.
- *
- * This object should have the same structure as [Track]{@link google.cloud.videointelligence.v1p3beta1.Track}
- *
- * @typedef PersonDetectionAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const PersonDetectionAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation results for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {Object} segment
- * Video segment on which the annotation is run.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {Object[]} segmentLabelAnnotations
- * Topical label annotations on video level or user specified segment level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
- *
- * @property {Object[]} segmentPresenceLabelAnnotations
- * Presence label annotations on video level or user specified segment level.
- * There is exactly one element for each unique label. Compared to the
- * existing topical `segment_label_annotations`, this field presents more
- * fine-grained, segment-level labels detected in video content and is made
- * available only when the client sets `LabelDetectionConfig.model` to
- * "builtin/latest" in the request.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
- *
- * @property {Object[]} shotLabelAnnotations
- * Topical label annotations on shot level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
- *
- * @property {Object[]} shotPresenceLabelAnnotations
- * Presence label annotations on shot level. There is exactly one element for
- * each unique label. Compared to the existing topical
- * `shot_label_annotations`, this field presents more fine-grained, shot-level
- * labels detected in video content and is made available only when the client
- * sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
- *
- * @property {Object[]} frameLabelAnnotations
- * Label annotations on frame level.
- * There is exactly one element for each unique label.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
- *
- * @property {Object[]} faceDetectionAnnotations
- * Face detection annotations.
- *
- * This object should have the same structure as [FaceDetectionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation}
- *
- * @property {Object[]} shotAnnotations
- * Shot annotations. Each shot is represented as a video segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {Object} explicitAnnotation
- * Explicit content annotation.
- *
- * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation}
- *
- * @property {Object[]} speechTranscriptions
- * Speech transcription.
- *
- * This object should have the same structure as [SpeechTranscription]{@link google.cloud.videointelligence.v1p3beta1.SpeechTranscription}
- *
- * @property {Object[]} textAnnotations
- * OCR text detection and tracking.
- * Annotations for list of detected text snippets. Each will have list of
- * frame information associated with it.
- *
- * This object should have the same structure as [TextAnnotation]{@link google.cloud.videointelligence.v1p3beta1.TextAnnotation}
- *
- * @property {Object[]} objectAnnotations
- * Annotations for list of objects detected and tracked in video.
- *
- * This object should have the same structure as [ObjectTrackingAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation}
- *
- * @property {Object[]} logoRecognitionAnnotations
- * Annotations for list of logos detected, tracked and recognized in video.
- *
- * This object should have the same structure as [LogoRecognitionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation}
- *
- * @property {Object[]} personDetectionAnnotations
- * Person detection annotations.
- *
- * This object should have the same structure as [PersonDetectionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation}
- *
- * @property {Object} celebrityRecognitionAnnotations
- * Celebrity recognition annotations.
- *
- * This object should have the same structure as [CelebrityRecognitionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation}
- *
- * @property {Object} error
- * If set, indicates an error. Note that for a single `AnnotateVideoRequest`
- * some videos may succeed and some may fail.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @typedef VideoAnnotationResults
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const VideoAnnotationResults = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation response. Included in the `response`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationResults
- * Annotation results for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationResults]{@link google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults}
- *
- * @typedef AnnotateVideoResponse
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const AnnotateVideoResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation progress for a single video.
- *
- * @property {string} inputUri
- * Video file location in
- * [Google Cloud Storage](https://cloud.google.com/storage/).
- *
- * @property {number} progressPercent
- * Approximate percentage processed thus far. Guaranteed to be
- * 100 when fully processed.
- *
- * @property {Object} startTime
- * Time when the request was received.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {Object} updateTime
- * Time of the most recent update.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {number} feature
- * Specifies which feature is being tracked if the request contains more than
- * one features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p3beta1.Feature}
- *
- * @property {Object} segment
- * Specifies which segment is being tracked if the request contains more than
- * one segments.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @typedef VideoAnnotationProgress
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const VideoAnnotationProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation progress. Included in the `metadata`
- * field of the `Operation` returned by the `GetOperation`
- * call of the `google::longrunning::Operations` service.
- *
- * @property {Object[]} annotationProgress
- * Progress metadata for all videos specified in `AnnotateVideoRequest`.
- *
- * This object should have the same structure as [VideoAnnotationProgress]{@link google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress}
- *
- * @typedef AnnotateVideoProgress
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const AnnotateVideoProgress = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for SPEECH_TRANSCRIPTION.
- *
- * @property {string} languageCode
- * Required. *Required* The language of the supplied audio as a
- * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- * Example: "en-US".
- * See [Language Support](https://cloud.google.com/speech/docs/languages)
- * for a list of the currently supported language codes.
- *
- * @property {number} maxAlternatives
- * Optional. Maximum number of recognition hypotheses to be returned.
- * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- * within each `SpeechTranscription`. The server may return fewer than
- * `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
- * return a maximum of one. If omitted, will return a maximum of one.
- *
- * @property {boolean} filterProfanity
- * Optional. If set to `true`, the server will attempt to filter out
- * profanities, replacing all but the initial character in each filtered word
- * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- * won't be filtered out.
- *
- * @property {Object[]} speechContexts
- * Optional. A means to provide context to assist the speech recognition.
- *
- * This object should have the same structure as [SpeechContext]{@link google.cloud.videointelligence.v1p3beta1.SpeechContext}
- *
- * @property {boolean} enableAutomaticPunctuation
- * Optional. If 'true', adds punctuation to recognition result hypotheses.
- * This feature is only available in select languages. Setting this for
- * requests in other languages has no effect at all. The default 'false' value
- * does not add punctuation to result hypotheses. NOTE: "This is currently
- * offered as an experimental service, complimentary to all users. In the
- * future this may be exclusively available as a premium feature."
- *
- * @property {number[]} audioTracks
- * Optional. For file formats, such as MXF or MKV, supporting multiple audio
- * tracks, specify up to two tracks. Default: track 0.
- *
- * @property {boolean} enableSpeakerDiarization
- * Optional. If 'true', enables speaker detection for each recognized word in
- * the top alternative of the recognition result using a speaker_tag provided
- * in the WordInfo.
- * Note: When this is true, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- *
- * @property {number} diarizationSpeakerCount
- * Optional. If set, specifies the estimated number of speakers in the
- * conversation. If not set, defaults to '2'. Ignored unless
- * enable_speaker_diarization is set to true.
- *
- * @property {boolean} enableWordConfidence
- * Optional. If `true`, the top result includes a list of words and the
- * confidence for those words. If `false`, no word-level confidence
- * information is returned. The default is `false`.
- *
- * @typedef SpeechTranscriptionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const SpeechTranscriptionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Provides "hints" to the speech recognizer to favor specific words and phrases
- * in the results.
- *
- * @property {string[]} phrases
- * Optional. A list of strings containing words and phrases "hints" so that
- * the speech recognition is more likely to recognize them. This can be used
- * to improve the accuracy for specific words and phrases, for example, if
- * specific commands are typically spoken by the user. This can also be used
- * to add additional words to the vocabulary of the recognizer. See
- * [usage limits](https://cloud.google.com/speech/limits#content).
- *
- * @typedef SpeechContext
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.SpeechContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const SpeechContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A speech recognition result corresponding to a portion of the audio.
- *
- * @property {Object[]} alternatives
- * May contain one or more recognition hypotheses (up to the maximum specified
- * in `max_alternatives`). These alternatives are ordered in terms of
- * accuracy, with the top (first) alternative being the most probable, as
- * ranked by the recognizer.
- *
- * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative}
- *
- * @property {string} languageCode
- * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
- * language tag of the language in this result. This language code was
- * detected to have the most likelihood of being spoken in the audio.
- *
- * @typedef SpeechTranscription
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.SpeechTranscription definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const SpeechTranscription = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Alternative hypotheses (a.k.a. n-best list).
- *
- * @property {string} transcript
- * Transcript text representing the words that the user spoke.
- *
- * @property {number} confidence
- * Output only. The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative.
- * This field is not guaranteed to be accurate and users should not rely on it
- * to be always provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {Object[]} words
- * Output only. A list of word-specific information for each recognized word.
- * Note: When `enable_speaker_diarization` is true, you will see all the words
- * from the beginning of the audio.
- *
- * This object should have the same structure as [WordInfo]{@link google.cloud.videointelligence.v1p3beta1.WordInfo}
- *
- * @typedef SpeechRecognitionAlternative
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const SpeechRecognitionAlternative = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Word-specific information for recognized words. Word information is only
- * included in the response when certain request parameters are set, such
- * as `enable_word_time_offsets`.
- *
- * @property {Object} startTime
- * Time offset relative to the beginning of the audio, and
- * corresponding to the start of the spoken word. This field is only set if
- * `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- * experimental feature and the accuracy of the time offset can vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTime
- * Time offset relative to the beginning of the audio, and
- * corresponding to the end of the spoken word. This field is only set if
- * `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- * experimental feature and the accuracy of the time offset can vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {string} word
- * The word corresponding to this set of information.
- *
- * @property {number} confidence
- * Output only. The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative.
- * This field is not guaranteed to be accurate and users should not rely on it
- * to be always provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {number} speakerTag
- * Output only. A distinct integer value is assigned for every speaker within
- * the audio. This field specifies which one of those speakers was detected to
- * have spoken this word. Value ranges from 1 up to diarization_speaker_count,
- * and is only set if speaker diarization is enabled.
- *
- * @typedef WordInfo
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.WordInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const WordInfo = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A vertex represents a 2D point in the image.
- * NOTE: the normalized vertex coordinates are relative to the original image
- * and range from 0 to 1.
- *
- * @property {number} x
- * X coordinate.
- *
- * @property {number} y
- * Y coordinate.
- *
- * @typedef NormalizedVertex
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.NormalizedVertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const NormalizedVertex = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Normalized bounding polygon for text (that might not be aligned with axis).
- * Contains list of the corner points in clockwise order starting from
- * top-left corner. For example, for a rectangular bounding box:
- * When the text is horizontal it might look like:
- * 0----1
- * | |
- * 3----2
- *
- * When it's clockwise rotated 180 degrees around the top-left corner it
- * becomes:
- * 2----3
- * | |
- * 1----0
- *
- * and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- * than 0, or greater than 1 due to trignometric calculations for location of
- * the box.
- *
- * @property {Object[]} vertices
- * Normalized vertices of the bounding polygon.
- *
- * This object should have the same structure as [NormalizedVertex]{@link google.cloud.videointelligence.v1p3beta1.NormalizedVertex}
- *
- * @typedef NormalizedBoundingPoly
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const NormalizedBoundingPoly = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video segment level annotation results for text detection.
- *
- * @property {Object} segment
- * Video segment where a text snippet was detected.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {number} confidence
- * Confidence for the track of detected text. It is calculated as the highest
- * over all frames where OCR detected text appears.
- *
- * @property {Object[]} frames
- * Information related to the frames where OCR detected text appears.
- *
- * This object should have the same structure as [TextFrame]{@link google.cloud.videointelligence.v1p3beta1.TextFrame}
- *
- * @typedef TextSegment
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.TextSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const TextSegment = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotation results for text annotation (OCR).
- * Contains information regarding timestamp and bounding box locations for the
- * frames containing detected OCR text snippets.
- *
- * @property {Object} rotatedBoundingBox
- * Bounding polygon of the detected text for this frame.
- *
- * This object should have the same structure as [NormalizedBoundingPoly]{@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly}
- *
- * @property {Object} timeOffset
- * Timestamp of this frame.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef TextFrame
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.TextFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const TextFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotations related to one detected OCR text snippet. This will contain the
- * corresponding text, confidence value, and frame level information for each
- * detection.
- *
- * @property {string} text
- * The detected text.
- *
- * @property {Object[]} segments
- * All video segments where OCR detected text appears.
- *
- * This object should have the same structure as [TextSegment]{@link google.cloud.videointelligence.v1p3beta1.TextSegment}
- *
- * @typedef TextAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const TextAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video frame level annotations for object detection and tracking. This field
- * stores per frame location, time offset, and confidence.
- *
- * @property {Object} normalizedBoundingBox
- * The normalized bounding box location of this object track for the frame.
- *
- * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox}
- *
- * @property {Object} timeOffset
- * The timestamp of the frame in microseconds.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @typedef ObjectTrackingFrame
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const ObjectTrackingFrame = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotations corresponding to one tracked object.
- *
- * @property {Object} entity
- * Entity to specify the object category that this track is labeled as.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity}
- *
- * @property {number} confidence
- * Object category's labeling confidence of this track.
- *
- * @property {Object[]} frames
- * Information corresponding to all frames where this object track appears.
- * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
- * messages in frames.
- * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
- *
- * This object should have the same structure as [ObjectTrackingFrame]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame}
- *
- * @property {Object} segment
- * Non-streaming batch mode ONLY.
- * Each object track corresponds to one video segment where it appears.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {number} trackId
- * Streaming mode ONLY.
- * In streaming mode, we do not know the end time of a tracked object
- * before it is completed. Hence, there is no VideoSegment info returned.
- * Instead, we provide a unique identifiable integer track_id so that
- * the customers can correlate the results of the ongoing
- * ObjectTrackAnnotation of the same track_id over time.
- *
- * @typedef ObjectTrackingAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const ObjectTrackingAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Annotation corresponding to one detected, tracked and recognized logo class.
- *
- * @property {Object} entity
- * Entity category information to specify the logo class that all the logo
- * tracks within this LogoRecognitionAnnotation are recognized as.
- *
- * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity}
- *
- * @property {Object[]} tracks
- * All logo tracks where the recognized logo appears. Each track corresponds
- * to one logo instance appearing in consecutive frames.
- *
- * This object should have the same structure as [Track]{@link google.cloud.videointelligence.v1p3beta1.Track}
- *
- * @property {Object[]} segments
- * All video segments where the recognized logo appears. There might be
- * multiple instances of the same logo class appearing in one VideoSegment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @typedef LogoRecognitionAnnotation
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const LogoRecognitionAnnotation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The top-level message sent by the client for the `StreamingAnnotateVideo`
- * method. Multiple `StreamingAnnotateVideoRequest` messages are sent.
- * The first message must only contain a `StreamingVideoConfig` message.
- * All subsequent messages must only contain `input_content` data.
- *
- * @property {Object} videoConfig
- * Provides information to the annotator, specifing how to process the
- * request. The first `AnnotateStreamingVideoRequest` message must only
- * contain a `video_config` message.
- *
- * This object should have the same structure as [StreamingVideoConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig}
- *
- * @property {Buffer} inputContent
- * The video data to be annotated. Chunks of video data are sequentially
- * sent in `StreamingAnnotateVideoRequest` messages. Except the initial
- * `StreamingAnnotateVideoRequest` message containing only
- * `video_config`, all subsequent `AnnotateStreamingVideoRequest`
- * messages must only contain `input_content` field.
- * Note: as with all bytes fields, protobuffers use a pure binary
- * representation (not base64).
- *
- * @typedef StreamingAnnotateVideoRequest
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingAnnotateVideoRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * `StreamingAnnotateVideoResponse` is the only message returned to the client
- * by `StreamingAnnotateVideo`. A series of zero or more
- * `StreamingAnnotateVideoResponse` messages are streamed back to the client.
- *
- * @property {Object} error
- * If set, returns a google.rpc.Status message that
- * specifies the error for the operation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} annotationResults
- * Streaming annotation results.
- *
- * This object should have the same structure as [StreamingVideoAnnotationResults]{@link google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults}
- *
- * @property {string} annotationResultsUri
- * GCS URI that stores annotation results of one streaming session.
- * It is a directory that can hold multiple files in JSON format.
- * Example uri format:
- * gs://bucket_id/object_id/cloud_project_name-session_id
- *
- * @typedef StreamingAnnotateVideoResponse
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingAnnotateVideoResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for STREAMING_AUTOML_CLASSIFICATION.
- *
- * @property {string} modelName
- * Resource name of AutoML model.
- * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
- *
- * @typedef StreamingAutomlClassificationConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingAutomlClassificationConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for STREAMING_AUTOML_OBJECT_TRACKING.
- *
- * @property {string} modelName
- * Resource name of AutoML model.
- * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
- *
- * @typedef StreamingAutomlObjectTrackingConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingAutomlObjectTrackingConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
- * @typedef StreamingExplicitContentDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingExplicitContentDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for STREAMING_LABEL_DETECTION.
- *
- * @property {boolean} stationaryCamera
- * Whether the video has been captured from a stationary (i.e. non-moving)
- * camera. When set to true, might improve detection accuracy for moving
- * objects. Default: false.
- *
- * @typedef StreamingLabelDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingLabelDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for STREAMING_OBJECT_TRACKING.
- * @typedef StreamingObjectTrackingConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingObjectTrackingConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for STREAMING_SHOT_CHANGE_DETECTION.
- * @typedef StreamingShotChangeDetectionConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingShotChangeDetectionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Config for streaming storage option.
- *
- * @property {boolean} enableStorageAnnotationResult
- * Enable streaming storage. Default: false.
- *
- * @property {string} annotationResultStorageDirectory
- * GCS URI to store all annotation results for one client. Client should
- * specify this field as the top-level storage directory. Annotation results
- * of different sessions will be put into different sub-directories denoted
- * by project_name and session_id. All sub-directories will be auto generated
- * by program and will be made accessible to client in response proto.
- * URIs must be specified in the following format: `gs://bucket-id/object-id`
- * `bucket-id` should be a valid GCS bucket created by client and bucket
- * permission shall also be configured properly. `object-id` can be arbitrary
- * string that make sense to client. Other URI formats will return error and
- * cause GCS write failure.
- *
- * @typedef StreamingStorageConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingStorageConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Streaming annotation results corresponding to a portion of the video
- * that is currently being processed.
- *
- * @property {Object[]} shotAnnotations
- * Shot annotation results. Each shot is represented as a video segment.
- *
- * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
- *
- * @property {Object[]} labelAnnotations
- * Label annotation results.
- *
- * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
- *
- * @property {Object} explicitAnnotation
- * Explicit content annotation results.
- *
- * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation}
- *
- * @property {Object[]} objectAnnotations
- * Object tracking results.
- *
- * This object should have the same structure as [ObjectTrackingAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation}
- *
- * @typedef StreamingVideoAnnotationResults
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingVideoAnnotationResults = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Provides information to the annotator that specifies how to process the
- * request.
- *
- * @property {number} feature
- * Requested annotation feature.
- *
- * The number should be among the values of [StreamingFeature]{@link google.cloud.videointelligence.v1p3beta1.StreamingFeature}
- *
- * @property {Object} shotChangeDetectionConfig
- * Config for STREAMING_SHOT_CHANGE_DETECTION.
- *
- * This object should have the same structure as [StreamingShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig}
- *
- * @property {Object} labelDetectionConfig
- * Config for STREAMING_LABEL_DETECTION.
- *
- * This object should have the same structure as [StreamingLabelDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig}
- *
- * @property {Object} explicitContentDetectionConfig
- * Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
- *
- * This object should have the same structure as [StreamingExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig}
- *
- * @property {Object} objectTrackingConfig
- * Config for STREAMING_OBJECT_TRACKING.
- *
- * This object should have the same structure as [StreamingObjectTrackingConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig}
- *
- * @property {Object} automlClassificationConfig
- * Config for STREAMING_AUTOML_CLASSIFICATION.
- *
- * This object should have the same structure as [StreamingAutomlClassificationConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig}
- *
- * @property {Object} automlObjectTrackingConfig
- * Config for STREAMING_AUTOML_OBJECT_TRACKING.
- *
- * This object should have the same structure as [StreamingAutomlObjectTrackingConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig}
- *
- * @property {Object} storageConfig
- * Streaming storage option. By default: storage is disabled.
- *
- * This object should have the same structure as [StreamingStorageConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig}
- *
- * @typedef StreamingVideoConfig
- * @memberof google.cloud.videointelligence.v1p3beta1
- * @see [google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
- */
-const StreamingVideoConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Video annotation feature.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p3beta1
- */
-const Feature = {
-
- /**
- * Unspecified.
- */
- FEATURE_UNSPECIFIED: 0,
-
- /**
- * Label detection. Detect objects, such as dog or flower.
- */
- LABEL_DETECTION: 1,
-
- /**
- * Shot change detection.
- */
- SHOT_CHANGE_DETECTION: 2,
-
- /**
- * Explicit content detection.
- */
- EXPLICIT_CONTENT_DETECTION: 3,
-
- /**
- * Human face detection.
- */
- FACE_DETECTION: 4,
-
- /**
- * Speech transcription.
- */
- SPEECH_TRANSCRIPTION: 6,
-
- /**
- * OCR text detection and tracking.
- */
- TEXT_DETECTION: 7,
-
- /**
- * Object detection and tracking.
- */
- OBJECT_TRACKING: 9,
-
- /**
- * Logo detection, tracking, and recognition.
- */
- LOGO_RECOGNITION: 12,
-
- /**
- * Celebrity recognition.
- */
- CELEBRITY_RECOGNITION: 13,
-
- /**
- * Person detection.
- */
- PERSON_DETECTION: 14
-};
-
-/**
- * Label detection mode.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p3beta1
- */
-const LabelDetectionMode = {
-
- /**
- * Unspecified.
- */
- LABEL_DETECTION_MODE_UNSPECIFIED: 0,
-
- /**
- * Detect shot-level labels.
- */
- SHOT_MODE: 1,
-
- /**
- * Detect frame-level labels.
- */
- FRAME_MODE: 2,
-
- /**
- * Detect both shot-level and frame-level labels.
- */
- SHOT_AND_FRAME_MODE: 3
-};
-
-/**
- * Bucketized representation of likelihood.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p3beta1
- */
-const Likelihood = {
-
- /**
- * Unspecified likelihood.
- */
- LIKELIHOOD_UNSPECIFIED: 0,
-
- /**
- * Very unlikely.
- */
- VERY_UNLIKELY: 1,
-
- /**
- * Unlikely.
- */
- UNLIKELY: 2,
-
- /**
- * Possible.
- */
- POSSIBLE: 3,
-
- /**
- * Likely.
- */
- LIKELY: 4,
-
- /**
- * Very likely.
- */
- VERY_LIKELY: 5
-};
-
-/**
- * Streaming video annotation feature.
- *
- * @enum {number}
- * @memberof google.cloud.videointelligence.v1p3beta1
- */
-const StreamingFeature = {
-
- /**
- * Unspecified.
- */
- STREAMING_FEATURE_UNSPECIFIED: 0,
-
- /**
- * Label detection. Detect objects, such as dog or flower.
- */
- STREAMING_LABEL_DETECTION: 1,
-
- /**
- * Shot change detection.
- */
- STREAMING_SHOT_CHANGE_DETECTION: 2,
-
- /**
- * Explicit content detection.
- */
- STREAMING_EXPLICIT_CONTENT_DETECTION: 3,
-
- /**
- * Object detection and tracking.
- */
- STREAMING_OBJECT_TRACKING: 4,
-
- /**
- * Video classification based on AutoML model.
- */
- STREAMING_AUTOML_CLASSIFICATION: 21,
-
- /**
- * Object detection and tracking based on AutoML model.
- */
- STREAMING_AUTOML_OBJECT_TRACKING: 22
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/longrunning/doc_operations.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/longrunning/doc_operations.js
deleted file mode 100644
index 099e418d620..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/longrunning/doc_operations.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * This resource represents a long-running operation that is the result of a
- * network API call.
- *
- * @property {string} name
- * The server-assigned name, which is only unique within the same service that
- * originally returns it. If you use the default HTTP mapping, the
- * `name` should be a resource name ending with `operations/{unique_id}`.
- *
- * @property {Object} metadata
- * Service-specific metadata associated with the operation. It typically
- * contains progress information and common metadata such as create time.
- * Some services might not provide such metadata. Any method that returns a
- * long-running operation should document the metadata type, if any.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @property {boolean} done
- * If the value is `false`, it means the operation is still in progress.
- * If `true`, the operation is completed, and either `error` or `response` is
- * available.
- *
- * @property {Object} error
- * The error result of the operation in case of failure or cancellation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} response
- * The normal response of the operation in case of success. If the original
- * method returns no data on success, such as `Delete`, the response is
- * `google.protobuf.Empty`. If the original method is standard
- * `Get`/`Create`/`Update`, the response should be the resource. For other
- * methods, the response should have the type `XxxResponse`, where `Xxx`
- * is the original method name. For example, if the original method name
- * is `TakeSnapshot()`, the inferred response type is
- * `TakeSnapshotResponse`.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Operation
- * @memberof google.longrunning
- * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
- */
-const Operation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_any.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_any.js
deleted file mode 100644
index 813682aa336..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_any.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * `Any` contains an arbitrary serialized protocol buffer message along with a
- * URL that describes the type of the serialized message.
- *
- * Protobuf library provides support to pack/unpack Any values in the form
- * of utility functions or additional generated methods of the Any type.
- *
- * Example 1: Pack and unpack a message in C++.
- *
- * Foo foo = ...;
- * Any any;
- * any.PackFrom(foo);
- * ...
- * if (any.UnpackTo(&foo)) {
- * ...
- * }
- *
- * Example 2: Pack and unpack a message in Java.
- *
- * Foo foo = ...;
- * Any any = Any.pack(foo);
- * ...
- * if (any.is(Foo.class)) {
- * foo = any.unpack(Foo.class);
- * }
- *
- * Example 3: Pack and unpack a message in Python.
- *
- * foo = Foo(...)
- * any = Any()
- * any.Pack(foo)
- * ...
- * if any.Is(Foo.DESCRIPTOR):
- * any.Unpack(foo)
- * ...
- *
- * Example 4: Pack and unpack a message in Go
- *
- * foo := &pb.Foo{...}
- * any, err := ptypes.MarshalAny(foo)
- * ...
- * foo := &pb.Foo{}
- * if err := ptypes.UnmarshalAny(any, foo); err != nil {
- * ...
- * }
- *
- * The pack methods provided by protobuf library will by default use
- * 'type.googleapis.com/full.type.name' as the type URL and the unpack
- * methods only use the fully qualified type name after the last '/'
- * in the type URL, for example "foo.bar.com/x/y.z" will yield type
- * name "y.z".
- *
- *
- * # JSON
- *
- * The JSON representation of an `Any` value uses the regular
- * representation of the deserialized, embedded message, with an
- * additional field `@type` which contains the type URL. Example:
- *
- * package google.profile;
- * message Person {
- * string first_name = 1;
- * string last_name = 2;
- * }
- *
- * {
- * "@type": "type.googleapis.com/google.profile.Person",
- * "firstName": ,
- * "lastName":
- * }
- *
- * If the embedded message type is well-known and has a custom JSON
- * representation, that representation will be embedded adding a field
- * `value` which holds the custom JSON in addition to the `@type`
- * field. Example (for message google.protobuf.Duration):
- *
- * {
- * "@type": "type.googleapis.com/google.protobuf.Duration",
- * "value": "1.212s"
- * }
- *
- * @property {string} typeUrl
- * A URL/resource name that uniquely identifies the type of the serialized
- * protocol buffer message. This string must contain at least
- * one "/" character. The last segment of the URL's path must represent
- * the fully qualified name of the type (as in
- * `path/google.protobuf.Duration`). The name should be in a canonical form
- * (e.g., leading "." is not accepted).
- *
- * In practice, teams usually precompile into the binary all types that they
- * expect it to use in the context of Any. However, for URLs which use the
- * scheme `http`, `https`, or no scheme, one can optionally set up a type
- * server that maps type URLs to message definitions as follows:
- *
- * * If no scheme is provided, `https` is assumed.
- * * An HTTP GET on the URL must yield a google.protobuf.Type
- * value in binary format, or produce an error.
- * * Applications are allowed to cache lookup results based on the
- * URL, or have them precompiled into a binary to avoid any
- * lookup. Therefore, binary compatibility needs to be preserved
- * on changes to types. (Use versioned type names to manage
- * breaking changes.)
- *
- * Note: this functionality is not currently available in the official
- * protobuf release, and it is not used for type URLs beginning with
- * type.googleapis.com.
- *
- * Schemes other than `http`, `https` (or the empty scheme) might be
- * used with implementation specific semantics.
- *
- * @property {Buffer} value
- * Must be a valid serialized protocol buffer of the above specified type.
- *
- * @typedef Any
- * @memberof google.protobuf
- * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
- */
-const Any = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_duration.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_duration.js
deleted file mode 100644
index bd4b4ee6067..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_duration.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * A Duration represents a signed, fixed-length span of time represented
- * as a count of seconds and fractions of seconds at nanosecond
- * resolution. It is independent of any calendar and concepts like "day"
- * or "month". It is related to Timestamp in that the difference between
- * two Timestamp values is a Duration and it can be added or subtracted
- * from a Timestamp. Range is approximately +-10,000 years.
- *
- * # Examples
- *
- * Example 1: Compute Duration from two Timestamps in pseudo code.
- *
- * Timestamp start = ...;
- * Timestamp end = ...;
- * Duration duration = ...;
- *
- * duration.seconds = end.seconds - start.seconds;
- * duration.nanos = end.nanos - start.nanos;
- *
- * if (duration.seconds < 0 && duration.nanos > 0) {
- * duration.seconds += 1;
- * duration.nanos -= 1000000000;
- * } else if (durations.seconds > 0 && duration.nanos < 0) {
- * duration.seconds -= 1;
- * duration.nanos += 1000000000;
- * }
- *
- * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
- *
- * Timestamp start = ...;
- * Duration duration = ...;
- * Timestamp end = ...;
- *
- * end.seconds = start.seconds + duration.seconds;
- * end.nanos = start.nanos + duration.nanos;
- *
- * if (end.nanos < 0) {
- * end.seconds -= 1;
- * end.nanos += 1000000000;
- * } else if (end.nanos >= 1000000000) {
- * end.seconds += 1;
- * end.nanos -= 1000000000;
- * }
- *
- * Example 3: Compute Duration from datetime.timedelta in Python.
- *
- * td = datetime.timedelta(days=3, minutes=10)
- * duration = Duration()
- * duration.FromTimedelta(td)
- *
- * # JSON Mapping
- *
- * In JSON format, the Duration type is encoded as a string rather than an
- * object, where the string ends in the suffix "s" (indicating seconds) and
- * is preceded by the number of seconds, with nanoseconds expressed as
- * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
- * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
- * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
- * microsecond should be expressed in JSON format as "3.000001s".
- *
- * @property {number} seconds
- * Signed seconds of the span of time. Must be from -315,576,000,000
- * to +315,576,000,000 inclusive. Note: these bounds are computed from:
- * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- *
- * @property {number} nanos
- * Signed fractions of a second at nanosecond resolution of the span
- * of time. Durations less than one second are represented with a 0
- * `seconds` field and a positive or negative `nanos` field. For durations
- * of one second or more, a non-zero value for the `nanos` field must be
- * of the same sign as the `seconds` field. Must be from -999,999,999
- * to +999,999,999 inclusive.
- *
- * @typedef Duration
- * @memberof google.protobuf
- * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
- */
-const Duration = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/rpc/doc_status.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/rpc/doc_status.js
deleted file mode 100644
index 750e0af7689..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/rpc/doc_status.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The `Status` type defines a logical error model that is suitable for
- * different programming environments, including REST APIs and RPC APIs. It is
- * used by [gRPC](https://github.com/grpc). Each `Status` message contains
- * three pieces of data: error code, error message, and error details.
- *
- * You can find out more about this error model and how to work with it in the
- * [API Design Guide](https://cloud.google.com/apis/design/errors).
- *
- * @property {number} code
- * The status code, which should be an enum value of google.rpc.Code.
- *
- * @property {string} message
- * A developer-facing error message, which should be in English. Any
- * user-facing error message should be localized and sent in the
- * google.rpc.Status.details field, or localized by the client.
- *
- * @property {Object[]} details
- * A list of messages that carry the error details. There is a common set of
- * message types for APIs to use.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Status
- * @memberof google.rpc
- * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
- */
-const Status = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/index.js b/packages/google-cloud-videointelligence/src/v1p3beta1/index.js
deleted file mode 100644
index 612cf0bc4a0..00000000000
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/index.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-'use strict';
-
-const VideoIntelligenceServiceClient = require('./video_intelligence_service_client');
-const StreamingVideoIntelligenceServiceClient = require('./streaming_video_intelligence_service_client');
-
-module.exports.VideoIntelligenceServiceClient = VideoIntelligenceServiceClient;
-module.exports.StreamingVideoIntelligenceServiceClient = StreamingVideoIntelligenceServiceClient;
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/index.ts b/packages/google-cloud-videointelligence/src/v1p3beta1/index.ts
new file mode 100644
index 00000000000..f24651ba907
--- /dev/null
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/index.ts
@@ -0,0 +1,20 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+export {StreamingVideoIntelligenceServiceClient} from './streaming_video_intelligence_service_client';
+export {VideoIntelligenceServiceClient} from './video_intelligence_service_client';
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.ts
similarity index 62%
rename from packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.js
rename to packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.ts
index 7f525168f77..02017302bb7 100644
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.js
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,22 +11,38 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+} from 'google-gax';
+import * as path from 'path';
-const gapicConfig = require('./streaming_video_intelligence_service_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './streaming_video_intelligence_service_client_config.json';
-const VERSION = require('../../package.json').version;
+const version = require('../../../package.json').version;
/**
- * Service that implements streaming Google Cloud Video Intelligence API.
- *
+ * Service that implements streaming Google Cloud Video Intelligence API.
* @class
* @memberof v1p3beta1
*/
-class StreamingVideoIntelligenceServiceClient {
+export class StreamingVideoIntelligenceServiceClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+ streamingVideoIntelligenceServiceStub: Promise<{[name: string]: Function}>;
+
/**
* Construct an instance of StreamingVideoIntelligenceServiceClient.
*
@@ -54,58 +70,57 @@ class StreamingVideoIntelligenceServiceClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this
+ .constructor as typeof StreamingVideoIntelligenceServiceClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this
+ .constructor as typeof StreamingVideoIntelligenceServiceClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -132,8 +147,8 @@ class StreamingVideoIntelligenceServiceClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -144,35 +159,51 @@ class StreamingVideoIntelligenceServiceClient {
// Put together the "service stub" for
// google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService.
- const streamingVideoIntelligenceServiceStub = gaxGrpc.createStub(
+ this.streamingVideoIntelligenceServiceStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService(
+ ? (protos as protobuf.Root).lookupService(
'google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService'
)
- : protos.google.cloud.videointelligence.v1p3beta1
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.videointelligence.v1p3beta1
.StreamingVideoIntelligenceService,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
const streamingVideoIntelligenceServiceStubMethods = [
'streamingAnnotateVideo',
];
+
for (const methodName of streamingVideoIntelligenceServiceStubMethods) {
- const innerCallPromise = streamingVideoIntelligenceServiceStub.then(
- stub => (...args) => {
+ const innerCallPromise = this.streamingVideoIntelligenceServiceStub.then(
+ stub => (...args: Array<{}>) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.stream[methodName]
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
+ this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ return apiCall(argument, callOptions, callback);
+ };
}
}
@@ -206,13 +237,21 @@ class StreamingVideoIntelligenceServiceClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+ getProjectId(): Promise;
+ getProjectId(callback: Callback): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+ getProjectId(
+ callback?: Callback
+ ): Promise | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
@@ -224,34 +263,29 @@ class StreamingVideoIntelligenceServiceClient {
* while sending video/audio bytes.
* This method is only available via the gRPC API (not REST).
*
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Stream}
* An object stream which is both readable and writable. It accepts objects
* representing [StreamingAnnotateVideoRequest]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest} for write() method, and
* will emit objects representing [StreamingAnnotateVideoResponse]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse} on 'data' event asynchronously.
- *
- * @example
- *
- * const videoIntelligence = require('@google-cloud/video-intelligence');
- *
- * const client = new videoIntelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient({
- * // optional auth parameters.
- * });
- *
- * const stream = client.streamingAnnotateVideo().on('data', response => {
- * // doThingsWith(response)
- * });
- * const request = {};
- * // Write request objects.
- * stream.write(request);
*/
- streamingAnnotateVideo(options) {
- options = options || {};
-
+ streamingAnnotateVideo(options?: gax.CallOptions): gax.CancellableStream {
return this._innerApiCalls.streamingAnnotateVideo(options);
}
-}
-module.exports = StreamingVideoIntelligenceServiceClient;
+ /**
+ * Terminate the GRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ */
+ close(): Promise {
+ if (!this._terminated) {
+ return this.streamingVideoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json
index 7638632a36e..b569dbe81f9 100644
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json
@@ -2,21 +2,21 @@
"interfaces": {
"google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
- "initial_rpc_timeout_millis": 10800000,
- "rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 10800000,
- "total_timeout_millis": 10800000
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
}
},
"methods": {
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.ts
similarity index 55%
rename from packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.js
rename to packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.ts
index 166d89d1560..afeef9058a9 100644
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.js
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,22 +11,40 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+ LROperation,
+} from 'google-gax';
+import * as path from 'path';
-const gapicConfig = require('./video_intelligence_service_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './video_intelligence_service_client_config.json';
-const VERSION = require('../../package.json').version;
+const version = require('../../../package.json').version;
/**
- * Service that implements Google Cloud Video Intelligence API.
- *
+ * Service that implements Google Cloud Video Intelligence API.
* @class
* @memberof v1p3beta1
*/
-class VideoIntelligenceServiceClient {
+export class VideoIntelligenceServiceClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+ operationsClient: gax.OperationsClient;
+ videoIntelligenceServiceStub: Promise<{[name: string]: Function}>;
+
/**
* Construct an instance of VideoIntelligenceServiceClient.
*
@@ -54,58 +72,57 @@ class VideoIntelligenceServiceClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this
+ .constructor as typeof VideoIntelligenceServiceClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this
+ .constructor as typeof VideoIntelligenceServiceClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -121,24 +138,25 @@ class VideoIntelligenceServiceClient {
opts.fallback ? require('../../protos/protos.json') : nodejsProtoPath
);
- const protoFilesRoot = opts.fallback
- ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
- : gaxModule.protobuf.loadSync(nodejsProtoPath);
-
// This API contains "long-running operations", which return a
// an Operation object that allows for tracking of the operation,
// rather than holding a request open.
- this.operationsClient = new gaxModule.lro({
- auth: gaxGrpc.auth,
- grpc: gaxGrpc.grpc,
- }).operationsClient(opts);
+ const protoFilesRoot = opts.fallback
+ ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
+ : gaxModule.protobuf.loadSync(nodejsProtoPath);
+ this.operationsClient = gaxModule
+ .lro({
+ auth: this.auth,
+ grpc: 'grpc' in gaxGrpc ? gaxGrpc.grpc : undefined,
+ })
+ .operationsClient(opts);
const annotateVideoResponse = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse'
- );
+ '.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse'
+ ) as gax.protobuf.Type;
const annotateVideoMetadata = protoFilesRoot.lookup(
- 'google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress'
- );
+ '.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress'
+ ) as gax.protobuf.Type;
this._descriptors.longrunning = {
annotateVideo: new gaxModule.LongrunningDescriptor(
@@ -151,8 +169,8 @@ class VideoIntelligenceServiceClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -163,33 +181,49 @@ class VideoIntelligenceServiceClient {
// Put together the "service stub" for
// google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService.
- const videoIntelligenceServiceStub = gaxGrpc.createStub(
+ this.videoIntelligenceServiceStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService(
+ ? (protos as protobuf.Root).lookupService(
'google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService'
)
- : protos.google.cloud.videointelligence.v1p3beta1
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.videointelligence.v1p3beta1
.VideoIntelligenceService,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
const videoIntelligenceServiceStubMethods = ['annotateVideo'];
+
for (const methodName of videoIntelligenceServiceStubMethods) {
- const innerCallPromise = videoIntelligenceServiceStub.then(
- stub => (...args) => {
+ const innerCallPromise = this.videoIntelligenceServiceStub.then(
+ stub => (...args: Array<{}>) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.longrunning[methodName]
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
+ this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ return apiCall(argument, callOptions, callback);
+ };
}
}
@@ -223,19 +257,52 @@ class VideoIntelligenceServiceClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+ getProjectId(): Promise;
+ getProjectId(callback: Callback): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+ getProjectId(
+ callback?: Callback
+ ): Promise | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
// -- Service calls --
// -------------------
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ >;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs asynchronous video annotation. Progress and results can be
* retrieved through the `google.longrunning.Operations` interface.
@@ -244,141 +311,96 @@ class VideoIntelligenceServiceClient {
*
* @param {Object} request
* The request object that will be sent.
- * @param {string} [request.inputUri]
+ * @param {string} request.inputUri
* Input video location. Currently, only
* [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
* supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
* URI may include wildcards in `object-id`, and thus identify multiple
* videos. Supported wildcards: '*' to match 0 or more characters;
* '?' to match 1 character. If unset, the input video should be embedded
* in the request as `input_content`. If set, `input_content` should be unset.
- * @param {Buffer} [request.inputContent]
+ * @param {Buffer} request.inputContent
* The video data bytes.
* If unset, the input video(s) should be specified via `input_uri`.
* If set, `input_uri` should be unset.
- * @param {number[]} [request.features]
+ * @param {number[]} request.features
* Required. Requested video annotation features.
- *
- * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p3beta1.Feature}
- * @param {Object} [request.videoContext]
+ * @param {google.cloud.videointelligence.v1p3beta1.VideoContext} request.videoContext
* Additional video context and/or feature-specific parameters.
- *
- * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p3beta1.VideoContext}
* @param {string} [request.outputUri]
* Optional. Location where the output (in JSON format) should be stored.
* Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
* URIs are supported, which must be specified in the following format:
* `gs://bucket-id/object-id` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For
- * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
* @param {string} [request.locationId]
* Optional. Cloud region where annotation should take place. Supported cloud
* regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
* is specified, a region will be determined based on video file location.
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
- * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const videoIntelligence = require('@google-cloud/video-intelligence');
- *
- * const client = new videoIntelligence.v1p3beta1.VideoIntelligenceServiceClient({
- * // optional auth parameters.
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the promise pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Operation#promise starts polling for the completion of the LRO.
- * return operation.promise();
- * })
- * .then(responses => {
- * const result = responses[0];
- * const metadata = responses[1];
- * const finalApiResponse = responses[2];
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the event emitter pattern.
- * client.annotateVideo(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Adding a listener for the "complete" event starts polling for the
- * // completion of the operation.
- * operation.on('complete', (result, metadata, finalApiResponse) => {
- * // doSomethingWith(result);
- * });
- *
- * // Adding a listener for the "progress" event causes the callback to be
- * // called on any change in metadata when the operation is polled.
- * operation.on('progress', (metadata, apiResponse) => {
- * // doSomethingWith(metadata)
- * });
- *
- * // Adding a listener for the "error" event handles any errors found during polling.
- * operation.on('error', err => {
- * // throw(err);
- * });
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- * const featuresElement = 'LABEL_DETECTION';
- * const features = [featuresElement];
- * const request = {
- * inputUri: inputUri,
- * features: features,
- * };
- *
- * // Handle the operation using the await pattern.
- * const [operation] = await client.annotateVideo(request);
- *
- * const [response] = await operation.promise();
*/
- annotateVideo(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ annotateVideo(
+ request: protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse,
+ protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.annotateVideo(request, options, callback);
}
-}
-module.exports = VideoIntelligenceServiceClient;
+ /**
+ * Terminate the GRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ */
+ close(): Promise {
+ if (!this._terminated) {
+ return this.videoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json
index 2023e52ad84..c9796e48a4a 100644
--- a/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json
@@ -2,20 +2,29 @@
"interfaces": {
"google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ },
+ "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
- "rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000
}
},
@@ -23,7 +32,7 @@
"AnnotateVideo": {
"timeout_millis": 600000,
"retry_codes_name": "idempotent",
- "retry_params_name": "default"
+ "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3"
}
}
}
diff --git a/packages/google-cloud-videointelligence/synth.metadata b/packages/google-cloud-videointelligence/synth.metadata
index 7a2d40faed4..b8c94543e20 100644
--- a/packages/google-cloud-videointelligence/synth.metadata
+++ b/packages/google-cloud-videointelligence/synth.metadata
@@ -1,33 +1,13 @@
{
- "updateTime": "2020-02-13T20:34:22.826443Z",
+ "updateTime": "2020-03-05T22:26:31.988229Z",
"sources": [
- {
- "git": {
- "name": ".",
- "remote": "https://github.com/googleapis/nodejs-video-intelligence.git",
- "sha": "eb9b400c24bdf306d8263ec402922b3235754034"
- }
- },
- {
- "git": {
- "name": "synthtool",
- "remote": "https://github.com/googleapis/synthtool.git",
- "sha": "f320a85654b7c8eedcce5f66e7ccc4fbf8fae6ac"
- }
- },
- {
- "generator": {
- "name": "artman",
- "version": "0.45.0",
- "dockerImage": "googleapis/artman@sha256:6aec9c34db0e4be221cdaf6faba27bdc07cfea846808b3d3b964dfce3a9a0f9b"
- }
- },
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "56b55aa8818cd0a532a7d779f6ef337ba809ccbd",
- "internalRef": "294931650"
+ "sha": "f0b581b5bdf803e45201ecdb3688b60e381628a8",
+ "internalRef": "299181282",
+ "log": "f0b581b5bdf803e45201ecdb3688b60e381628a8\nfix: recommendationengine/v1beta1 update some comments\n\nPiperOrigin-RevId: 299181282\n\n10e9a0a833dc85ff8f05b2c67ebe5ac785fe04ff\nbuild: add generated BUILD file for Routes Preferred API\n\nPiperOrigin-RevId: 299164808\n\n86738c956a8238d7c77f729be78b0ed887a6c913\npublish v1p1beta1: update with absolute address in comments\n\nPiperOrigin-RevId: 299152383\n\n73d9f2ad4591de45c2e1f352bc99d70cbd2a6d95\npublish v1: update with absolute address in comments\n\nPiperOrigin-RevId: 299147194\n\nd2158f24cb77b0b0ccfe68af784c6a628705e3c6\npublish v1beta2: update with absolute address in comments\n\nPiperOrigin-RevId: 299147086\n\n7fca61292c11b4cd5b352cee1a50bf88819dd63b\npublish v1p2beta1: update with absolute address in comments\n\nPiperOrigin-RevId: 299146903\n\n583b7321624736e2c490e328f4b1957335779295\npublish v1p3beta1: update with absolute address in comments\n\nPiperOrigin-RevId: 299146674\n\n638253bf86d1ce1c314108a089b7351440c2f0bf\nfix: add java_multiple_files option for automl text_sentiment.proto\n\nPiperOrigin-RevId: 298971070\n\n373d655703bf914fb8b0b1cc4071d772bac0e0d1\nUpdate Recs AI Beta public bazel file\n\nPiperOrigin-RevId: 298961623\n\ndcc5d00fc8a8d8b56f16194d7c682027b2c66a3b\nfix: add java_multiple_files option for automl classification.proto\n\nPiperOrigin-RevId: 298953301\n\na3f791827266f3496a6a5201d58adc4bb265c2a3\nchore: automl/v1 publish annotations and retry config\n\nPiperOrigin-RevId: 298942178\n\n01c681586d8d6dbd60155289b587aee678530bd9\nMark return_immediately in PullRequest deprecated.\n\nPiperOrigin-RevId: 298893281\n\nc9f5e9c4bfed54bbd09227e990e7bded5f90f31c\nRemove out of date documentation for predicate support on the Storage API\n\nPiperOrigin-RevId: 298883309\n\nfd5b3b8238d783b04692a113ffe07c0363f5de0f\ngenerate webrisk v1 proto\n\nPiperOrigin-RevId: 298847934\n\n"
}
},
{
@@ -44,9 +24,8 @@
"source": "googleapis",
"apiName": "video-intelligence",
"apiVersion": "v1",
- "language": "nodejs",
- "generator": "gapic",
- "config": "google/cloud/videointelligence/artman_videointelligence_v1.yaml"
+ "language": "typescript",
+ "generator": "gapic-generator-typescript"
}
},
{
@@ -54,9 +33,8 @@
"source": "googleapis",
"apiName": "video-intelligence",
"apiVersion": "v1beta2",
- "language": "nodejs",
- "generator": "gapic",
- "config": "google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml"
+ "language": "typescript",
+ "generator": "gapic-generator-typescript"
}
},
{
@@ -64,9 +42,8 @@
"source": "googleapis",
"apiName": "video-intelligence",
"apiVersion": "v1p1beta1",
- "language": "nodejs",
- "generator": "gapic",
- "config": "google/cloud/videointelligence/artman_videointelligence_v1p1beta1.yaml"
+ "language": "typescript",
+ "generator": "gapic-generator-typescript"
}
},
{
@@ -74,9 +51,8 @@
"source": "googleapis",
"apiName": "video-intelligence",
"apiVersion": "v1p2beta1",
- "language": "nodejs",
- "generator": "gapic",
- "config": "google/cloud/videointelligence/artman_videointelligence_v1p2beta1.yaml"
+ "language": "typescript",
+ "generator": "gapic-generator-typescript"
}
},
{
@@ -84,9 +60,8 @@
"source": "googleapis",
"apiName": "video-intelligence",
"apiVersion": "v1p3beta1",
- "language": "nodejs",
- "generator": "gapic",
- "config": "google/cloud/videointelligence/artman_videointelligence_v1p3beta1.yaml"
+ "language": "typescript",
+ "generator": "gapic-generator-typescript"
}
}
]
diff --git a/packages/google-cloud-videointelligence/synth.py b/packages/google-cloud-videointelligence/synth.py
index 949058bdda0..fe5f04950e2 100644
--- a/packages/google-cloud-videointelligence/synth.py
+++ b/packages/google-cloud-videointelligence/synth.py
@@ -5,30 +5,28 @@
logging.basicConfig(level=logging.DEBUG)
-gapic = gcp.GAPICGenerator()
+gapic = gcp.GAPICMicrogenerator()
common_templates = gcp.CommonTemplates()
versions = ["v1", "v1beta2", "v1p1beta1", "v1p2beta1", "v1p3beta1"]
for version in versions:
- library = gapic.node_library(
+ library = gapic.typescript_library(
"video-intelligence",
version,
- config_path="/google/cloud/videointelligence/"
- f"artman_videointelligence_{version}.yaml",
+ generator_args={
+ "grpc-service-config": f"google/cloud/videointelligence/{version}/videointelligence_grpc_service_config.json",
+ "package-name": f"@google-cloud/video-intelligence",
+ "main-service": f"videointelligence",
+ },
+ proto_path=f'/google/cloud/videointelligence/{version}',
)
# skip index, protos, package.json, and README.md
- s.copy(library, excludes=["package.json", "README.md", "src/index.js", "smoke-test/video_intelligence_service_smoke_test.js"])
+ s.copy(library, excludes=["package.json", "README.md", "src/index.ts",
+ "smoke-test/video_intelligence_service_smoke_test.ts"])
-#
-# Generator emitted unused helper mockSimpleGrpcMethod, add a temporary
-# s.replace to remove that function.
-# ref: https://github.com/googleapis/gapic-generator/issues/2120
-#
-s.replace("test/gapic-*.js", "function mockSimpleGrpcMethod.*\n(.*\n)*?}\n", "")
-
-templates = common_templates.node_library()
+templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
# Node.js specific cleanup
diff --git a/packages/google-cloud-videointelligence/system-test/fixtures/sample/src/index.js b/packages/google-cloud-videointelligence/system-test/fixtures/sample/src/index.js
new file mode 100644
index 00000000000..efa1f2db1a6
--- /dev/null
+++ b/packages/google-cloud-videointelligence/system-test/fixtures/sample/src/index.js
@@ -0,0 +1,27 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+/* eslint-disable node/no-missing-require, no-unused-vars */
+const videointelligence = require('@google-cloud/video-intelligence');
+
+function main() {
+ const streamingVideoIntelligenceServiceClient = new videointelligence.StreamingVideoIntelligenceServiceClient();
+ const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient();
+}
+
+main();
diff --git a/packages/google-cloud-videointelligence/system-test/fixtures/sample/src/index.ts b/packages/google-cloud-videointelligence/system-test/fixtures/sample/src/index.ts
new file mode 100644
index 00000000000..479e287cb43
--- /dev/null
+++ b/packages/google-cloud-videointelligence/system-test/fixtures/sample/src/index.ts
@@ -0,0 +1,26 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import {StreamingVideoIntelligenceServiceClient, VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence';
+
+function main() {
+ const streamingVideoIntelligenceServiceClient = new StreamingVideoIntelligenceServiceClient();
+ const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient();
+}
+
+main();
diff --git a/packages/google-cloud-videointelligence/system-test/install.ts b/packages/google-cloud-videointelligence/system-test/install.ts
new file mode 100644
index 00000000000..c9aa74ec221
--- /dev/null
+++ b/packages/google-cloud-videointelligence/system-test/install.ts
@@ -0,0 +1,51 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import {packNTest} from 'pack-n-play';
+import {readFileSync} from 'fs';
+import {describe, it} from 'mocha';
+
+describe('typescript consumer tests', () => {
+ it('should have correct type signature for typescript users', async function() {
+ this.timeout(300000);
+ const options = {
+ packageDir: process.cwd(), // path to your module.
+ sample: {
+ description: 'typescript based user can use the type definitions',
+ ts: readFileSync(
+ './system-test/fixtures/sample/src/index.ts'
+ ).toString(),
+ },
+ };
+ await packNTest(options); // will throw upon error.
+ });
+
+ it('should have correct type signature for javascript users', async function() {
+ this.timeout(300000);
+ const options = {
+ packageDir: process.cwd(), // path to your module.
+ sample: {
+        description: 'javascript based user can use the type definitions',
+ ts: readFileSync(
+ './system-test/fixtures/sample/src/index.js'
+ ).toString(),
+ },
+ };
+ await packNTest(options); // will throw upon error.
+ });
+});
diff --git a/packages/google-cloud-videointelligence/system-test/video_intelligence_service_smoke_test.js b/packages/google-cloud-videointelligence/system-test/video_intelligence_service_smoke_test.ts
similarity index 89%
rename from packages/google-cloud-videointelligence/system-test/video_intelligence_service_smoke_test.js
rename to packages/google-cloud-videointelligence/system-test/video_intelligence_service_smoke_test.ts
index ebe48556bd2..be6d69d3584 100644
--- a/packages/google-cloud-videointelligence/system-test/video_intelligence_service_smoke_test.js
+++ b/packages/google-cloud-videointelligence/system-test/video_intelligence_service_smoke_test.ts
@@ -12,9 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-'use strict';
-
-const {describe, it} = require('mocha');
+import {describe, it} from 'mocha';
+import {Operation} from 'google-gax';
describe('VideoIntelligenceServiceSmokeTest', () => {
it('successfully makes a call to the service', done => {
@@ -30,14 +29,14 @@ describe('VideoIntelligenceServiceSmokeTest', () => {
const featuresElement = 'LABEL_DETECTION';
const features = [featuresElement];
const request = {
- inputUri: inputUri,
- features: features,
+ inputUri,
+ features,
};
// Handle the operation using the promise pattern.
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: Operation[]) => {
const operation = responses[0];
const initialApiResponse = responses[1];
console.log(operation);
@@ -46,7 +45,7 @@ describe('VideoIntelligenceServiceSmokeTest', () => {
// Operation#promise starts polling for the completion of the LRO.
return operation.promise();
})
- .then(responses => {
+ .then((responses: Operation[]) => {
// The final result of the operation.
const result = responses[0];
@@ -77,25 +76,25 @@ describe('VideoIntelligenceServiceSmokeTest', () => {
const featuresElement = 'LABEL_DETECTION';
const features = [featuresElement];
const request = {
- inputUri: inputUri,
- features: features,
+ inputUri,
+ features,
};
// Handle the operation using the event emitter pattern.
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: Operation[]) => {
const operation = responses[0];
// Adding a listener for the "complete" event starts polling for the
// completion of the operation.
- operation.on('complete', result => {
+ operation.on('complete', (result: string) => {
console.log(result);
});
// Adding a listener for the "progress" event causes the callback to be
// called on any change in metadata when the operation is polled.
- operation.on('progress', metadata => {
+ operation.on('progress', (metadata: string) => {
console.log(metadata);
});
diff --git a/packages/google-cloud-videointelligence/test/gapic-streaming_video_intelligence_service-v1p3beta1.ts b/packages/google-cloud-videointelligence/test/gapic-streaming_video_intelligence_service-v1p3beta1.ts
new file mode 100644
index 00000000000..c04259d5cbb
--- /dev/null
+++ b/packages/google-cloud-videointelligence/test/gapic-streaming_video_intelligence_service-v1p3beta1.ts
@@ -0,0 +1,157 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
+import {describe, it} from 'mocha';
+const streamingvideointelligenceserviceModule = require('../src');
+
+import {PassThrough} from 'stream';
+
+const FAKE_STATUS_CODE = 1;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
+
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockBidiStreamingGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error: FakeError | null
+) {
+ return () => {
+ const mockStream = new PassThrough({
+ objectMode: true,
+ transform: (chunk: {}, enc: {}, callback: Callback) => {
+ assert.deepStrictEqual(chunk, expectedRequest);
+ if (error) {
+ callback(error);
+ } else {
+ callback(null, response);
+ }
+ },
+ });
+ return mockStream;
+ };
+}
+describe('v1p3beta1.StreamingVideoIntelligenceServiceClient', () => {
+ it('has servicePath', () => {
+ const servicePath =
+ streamingvideointelligenceserviceModule.v1p3beta1
+ .StreamingVideoIntelligenceServiceClient.servicePath;
+ assert(servicePath);
+ });
+ it('has apiEndpoint', () => {
+ const apiEndpoint =
+ streamingvideointelligenceserviceModule.v1p3beta1
+ .StreamingVideoIntelligenceServiceClient.apiEndpoint;
+ assert(apiEndpoint);
+ });
+ it('has port', () => {
+ const port =
+ streamingvideointelligenceserviceModule.v1p3beta1
+ .StreamingVideoIntelligenceServiceClient.port;
+ assert(port);
+ assert(typeof port === 'number');
+ });
+ it('should create a client with no option', () => {
+ const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient();
+ assert(client);
+ });
+ it('should create a client with gRPC fallback', () => {
+ const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(
+ {
+ fallback: true,
+ }
+ );
+ assert(client);
+ });
+ describe('streamingAnnotateVideo', () => {
+ it('invokes streamingAnnotateVideo without error', done => {
+ const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(
+ {
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ }
+ );
+ // Mock request
+ const request: protosTypes.google.cloud.videointelligence.v1p3beta1.IStreamingAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
+ client._innerApiCalls.streamingAnnotateVideo = mockBidiStreamingGrpcMethod(
+ request,
+ expectedResponse,
+ null
+ );
+ const stream = client
+ .streamingAnnotateVideo()
+ .on('data', (response: {}) => {
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ })
+ .on('error', (err: FakeError) => {
+ done(err);
+ });
+ stream.write(request);
+ });
+ it('invokes streamingAnnotateVideo with error', done => {
+ const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(
+ {
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ }
+ );
+ // Mock request
+ const request: protosTypes.google.cloud.videointelligence.v1p3beta1.IStreamingAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
+ client._innerApiCalls.streamingAnnotateVideo = mockBidiStreamingGrpcMethod(
+ request,
+ null,
+ error
+ );
+ const stream = client
+ .streamingAnnotateVideo()
+ .on('data', () => {
+ assert.fail();
+ })
+ .on('error', (err: FakeError) => {
+ assert(err instanceof FakeError);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ stream.write(request);
+ });
+ });
+});
diff --git a/packages/google-cloud-videointelligence/test/gapic-v1p3beta1.js b/packages/google-cloud-videointelligence/test/gapic-v1p3beta1.js
deleted file mode 100644
index 5d149f65233..00000000000
--- a/packages/google-cloud-videointelligence/test/gapic-v1p3beta1.js
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-'use strict';
-
-const assert = require('assert');
-const {describe, it} = require('mocha');
-const {PassThrough} = require('stream');
-
-const videoIntelligenceModule = require('../src');
-
-const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
-
-describe('VideoIntelligenceServiceClient', () => {
- it('has servicePath', () => {
- const servicePath =
- videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient
- .servicePath;
- assert(servicePath);
- });
-
- it('has apiEndpoint', () => {
- const apiEndpoint =
- videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient
- .apiEndpoint;
- assert(apiEndpoint);
- });
-
- it('has port', () => {
- const port =
- videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient.port;
- assert(port);
- assert(typeof port === 'number');
- });
-
- it('should create a client with no options', () => {
- const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient();
- assert(client);
- });
-
- it('should create a client with gRPC fallback', () => {
- const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient(
- {fallback: true}
- );
- assert(client);
- });
-
- describe('annotateVideo', function() {
- it('invokes annotateVideo without error', done => {
- const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
-
- // Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
- // Mock response
- const expectedResponse = {};
-
- // Mock Grpc layer
- client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
- request,
- expectedResponse
- );
-
- client
- .annotateVideo(request)
- .then(responses => {
- const operation = responses[0];
- return operation.promise();
- })
- .then(responses => {
- assert.deepStrictEqual(responses[0], expectedResponse);
- done();
- })
- .catch(err => {
- done(err);
- });
- });
-
- it('invokes annotateVideo with error', done => {
- const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
-
- // Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
- // Mock Grpc layer
- client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
- request,
- null,
- error
- );
-
- client
- .annotateVideo(request)
- .then(responses => {
- const operation = responses[0];
- return operation.promise();
- })
- .then(() => {
- assert.fail();
- })
- .catch(err => {
- assert(err instanceof Error);
- assert.strictEqual(err.code, FAKE_STATUS_CODE);
- done();
- });
- });
-
- it('has longrunning decoder functions', () => {
- const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
- assert(
- client._descriptors.longrunning.annotateVideo.responseDecoder instanceof
- Function
- );
- assert(
- client._descriptors.longrunning.annotateVideo.metadataDecoder instanceof
- Function
- );
- });
- });
-});
-describe('StreamingVideoIntelligenceServiceClient', () => {
- it('has servicePath', () => {
- const servicePath =
- videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient
- .servicePath;
- assert(servicePath);
- });
-
- it('has apiEndpoint', () => {
- const apiEndpoint =
- videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient
- .apiEndpoint;
- assert(apiEndpoint);
- });
-
- it('has port', () => {
- const port =
- videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient
- .port;
- assert(port);
- assert(typeof port === 'number');
- });
-
- it('should create a client with no options', () => {
- const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient();
- assert(client);
- });
-
- it('should create a client with gRPC fallback', () => {
- const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(
- {fallback: true}
- );
- assert(client);
- });
-
- describe('streamingAnnotateVideo', () => {
- it('invokes streamingAnnotateVideo without error', done => {
- const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
-
- // Mock request
- const request = {};
-
- // Mock response
- const annotationResultsUri = 'annotationResultsUri-238075757';
- const expectedResponse = {
- annotationResultsUri: annotationResultsUri,
- };
-
- // Mock Grpc layer
- client._innerApiCalls.streamingAnnotateVideo = mockBidiStreamingGrpcMethod(
- request,
- expectedResponse
- );
-
- const stream = client
- .streamingAnnotateVideo()
- .on('data', response => {
- assert.deepStrictEqual(response, expectedResponse);
- done();
- })
- .on('error', err => {
- done(err);
- });
-
- stream.write(request);
- });
-
- it('invokes streamingAnnotateVideo with error', done => {
- const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
-
- // Mock request
- const request = {};
-
- // Mock Grpc layer
- client._innerApiCalls.streamingAnnotateVideo = mockBidiStreamingGrpcMethod(
- request,
- null,
- error
- );
-
- const stream = client
- .streamingAnnotateVideo()
- .on('data', () => {
- assert.fail();
- })
- .on('error', err => {
- assert(err instanceof Error);
- assert.strictEqual(err.code, FAKE_STATUS_CODE);
- done();
- });
-
- stream.write(request);
- });
- });
-});
-
-function mockBidiStreamingGrpcMethod(expectedRequest, response, error) {
- return () => {
- const mockStream = new PassThrough({
- objectMode: true,
- transform: (chunk, enc, callback) => {
- assert.deepStrictEqual(chunk, expectedRequest);
- if (error) {
- callback(error);
- } else {
- callback(null, response);
- }
- },
- });
- return mockStream;
- };
-}
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
diff --git a/packages/google-cloud-videointelligence/test/gapic-v1p2beta1.js b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1.ts
similarity index 54%
rename from packages/google-cloud-videointelligence/test/gapic-v1p2beta1.js
rename to packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1.ts
index 1c4f1bd3ad0..f970f74f13a 100644
--- a/packages/google-cloud-videointelligence/test/gapic-v1p2beta1.js
+++ b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,167 +11,151 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const assert = require('assert');
-const {describe, it} = require('mocha');
-
-const videoIntelligenceModule = require('../src');
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
+import {describe, it} from 'mocha';
+const videointelligenceserviceModule = require('../src');
const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
-describe('VideoIntelligenceServiceClient', () => {
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockLongRunningGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error?: {} | null
+) {
+ return (request: {}) => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}
+describe('v1.VideoIntelligenceServiceClient', () => {
it('has servicePath', () => {
const servicePath =
- videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient
+ videointelligenceserviceModule.v1.VideoIntelligenceServiceClient
.servicePath;
assert(servicePath);
});
-
it('has apiEndpoint', () => {
const apiEndpoint =
- videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient
+ videointelligenceserviceModule.v1.VideoIntelligenceServiceClient
.apiEndpoint;
assert(apiEndpoint);
});
-
it('has port', () => {
const port =
- videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient.port;
+ videointelligenceserviceModule.v1.VideoIntelligenceServiceClient.port;
assert(port);
assert(typeof port === 'number');
});
-
- it('should create a client with no options', () => {
- const client = new videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient();
+ it('should create a client with no option', () => {
+ const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient();
assert(client);
});
-
it('should create a client with gRPC fallback', () => {
- const client = new videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient(
- {fallback: true}
+ const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient(
+ {
+ fallback: true,
+ }
);
assert(client);
});
-
- describe('annotateVideo', function() {
+ describe('annotateVideo', () => {
it('invokes annotateVideo without error', done => {
- const client = new videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
+ const request: protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
expectedResponse
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
- .then(responses => {
+ .then((responses: [Operation]) => {
assert.deepStrictEqual(responses[0], expectedResponse);
done();
})
- .catch(err => {
+ .catch((err: {}) => {
done(err);
});
});
it('invokes annotateVideo with error', done => {
- const client = new videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.videointelligence.v1.IAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
null,
error
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
.then(() => {
assert.fail();
})
- .catch(err => {
- assert(err instanceof Error);
+ .catch((err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
});
-
- it('has longrunning decoder functions', () => {
- const client = new videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
- assert(
- client._descriptors.longrunning.annotateVideo.responseDecoder instanceof
- Function
- );
- assert(
- client._descriptors.longrunning.annotateVideo.metadataDecoder instanceof
- Function
- );
- });
});
});
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
diff --git a/packages/google-cloud-videointelligence/test/gapic-v1beta2.js b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1beta2.ts
similarity index 51%
rename from packages/google-cloud-videointelligence/test/gapic-v1beta2.js
rename to packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1beta2.ts
index 761a0cf529e..136b93736db 100644
--- a/packages/google-cloud-videointelligence/test/gapic-v1beta2.js
+++ b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1beta2.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,167 +11,152 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const assert = require('assert');
-const {describe, it} = require('mocha');
-
-const videoIntelligenceModule = require('../src');
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
+import {describe, it} from 'mocha';
+const videointelligenceserviceModule = require('../src');
const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
-describe('VideoIntelligenceServiceClient', () => {
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockLongRunningGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error?: {} | null
+) {
+ return (request: {}) => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}
+describe('v1beta2.VideoIntelligenceServiceClient', () => {
it('has servicePath', () => {
const servicePath =
- videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient
+ videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient
.servicePath;
assert(servicePath);
});
-
it('has apiEndpoint', () => {
const apiEndpoint =
- videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient
+ videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient
.apiEndpoint;
assert(apiEndpoint);
});
-
it('has port', () => {
const port =
- videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient.port;
+ videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient
+ .port;
assert(port);
assert(typeof port === 'number');
});
-
- it('should create a client with no options', () => {
- const client = new videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient();
+ it('should create a client with no option', () => {
+ const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient();
assert(client);
});
-
it('should create a client with gRPC fallback', () => {
- const client = new videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient(
- {fallback: true}
+ const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient(
+ {
+ fallback: true,
+ }
);
assert(client);
});
-
- describe('annotateVideo', function() {
+ describe('annotateVideo', () => {
it('invokes annotateVideo without error', done => {
- const client = new videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
+ const request: protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
expectedResponse
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
- .then(responses => {
+ .then((responses: [Operation]) => {
assert.deepStrictEqual(responses[0], expectedResponse);
done();
})
- .catch(err => {
+ .catch((err: {}) => {
done(err);
});
});
it('invokes annotateVideo with error', done => {
- const client = new videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
null,
error
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
.then(() => {
assert.fail();
})
- .catch(err => {
- assert(err instanceof Error);
+ .catch((err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
});
-
- it('has longrunning decoder functions', () => {
- const client = new videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
- assert(
- client._descriptors.longrunning.annotateVideo.responseDecoder instanceof
- Function
- );
- assert(
- client._descriptors.longrunning.annotateVideo.metadataDecoder instanceof
- Function
- );
- });
});
});
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
diff --git a/packages/google-cloud-videointelligence/test/gapic-v1p1beta1.js b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p1beta1.ts
similarity index 51%
rename from packages/google-cloud-videointelligence/test/gapic-v1p1beta1.js
rename to packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p1beta1.ts
index 2d7ae2d736d..cb41aabab3b 100644
--- a/packages/google-cloud-videointelligence/test/gapic-v1p1beta1.js
+++ b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p1beta1.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,167 +11,152 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const assert = require('assert');
-const {describe, it} = require('mocha');
-
-const videoIntelligenceModule = require('../src');
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
+import {describe, it} from 'mocha';
+const videointelligenceserviceModule = require('../src');
const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
-describe('VideoIntelligenceServiceClient', () => {
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockLongRunningGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error?: {} | null
+) {
+ return (request: {}) => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}
+describe('v1p1beta1.VideoIntelligenceServiceClient', () => {
it('has servicePath', () => {
const servicePath =
- videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient
+ videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient
.servicePath;
assert(servicePath);
});
-
it('has apiEndpoint', () => {
const apiEndpoint =
- videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient
+ videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient
.apiEndpoint;
assert(apiEndpoint);
});
-
it('has port', () => {
const port =
- videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient.port;
+ videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient
+ .port;
assert(port);
assert(typeof port === 'number');
});
-
- it('should create a client with no options', () => {
- const client = new videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient();
+ it('should create a client with no option', () => {
+ const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient();
assert(client);
});
-
it('should create a client with gRPC fallback', () => {
- const client = new videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient(
- {fallback: true}
+ const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient(
+ {
+ fallback: true,
+ }
);
assert(client);
});
-
- describe('annotateVideo', function() {
+ describe('annotateVideo', () => {
it('invokes annotateVideo without error', done => {
- const client = new videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
+ const request: protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
expectedResponse
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
- .then(responses => {
+ .then((responses: [Operation]) => {
assert.deepStrictEqual(responses[0], expectedResponse);
done();
})
- .catch(err => {
+ .catch((err: {}) => {
done(err);
});
});
it('invokes annotateVideo with error', done => {
- const client = new videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const request = {
- inputUri: inputUri,
- features: features,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
null,
error
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
.then(() => {
assert.fail();
})
- .catch(err => {
- assert(err instanceof Error);
+ .catch((err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
});
-
- it('has longrunning decoder functions', () => {
- const client = new videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
- assert(
- client._descriptors.longrunning.annotateVideo.responseDecoder instanceof
- Function
- );
- assert(
- client._descriptors.longrunning.annotateVideo.metadataDecoder instanceof
- Function
- );
- });
});
});
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
diff --git a/packages/google-cloud-videointelligence/test/gapic-v1.js b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p2beta1.ts
similarity index 50%
rename from packages/google-cloud-videointelligence/test/gapic-v1.js
rename to packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p2beta1.ts
index 1c065bd4af6..553893674da 100644
--- a/packages/google-cloud-videointelligence/test/gapic-v1.js
+++ b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p2beta1.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,164 +11,152 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const assert = require('assert');
-const {describe, it} = require('mocha');
-
-const videoIntelligenceModule = require('../src');
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
+import {describe, it} from 'mocha';
+const videointelligenceserviceModule = require('../src');
const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
-describe('VideoIntelligenceServiceClient', () => {
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockLongRunningGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error?: {} | null
+) {
+ return (request: {}) => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}
+describe('v1p2beta1.VideoIntelligenceServiceClient', () => {
it('has servicePath', () => {
const servicePath =
- videoIntelligenceModule.v1.VideoIntelligenceServiceClient.servicePath;
+ videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient
+ .servicePath;
assert(servicePath);
});
-
it('has apiEndpoint', () => {
const apiEndpoint =
- videoIntelligenceModule.v1.VideoIntelligenceServiceClient.apiEndpoint;
+ videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient
+ .apiEndpoint;
assert(apiEndpoint);
});
-
it('has port', () => {
- const port = videoIntelligenceModule.v1.VideoIntelligenceServiceClient.port;
+ const port =
+ videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient
+ .port;
assert(port);
assert(typeof port === 'number');
});
-
- it('should create a client with no options', () => {
- const client = new videoIntelligenceModule.v1.VideoIntelligenceServiceClient();
+ it('should create a client with no option', () => {
+ const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient();
assert(client);
});
-
it('should create a client with gRPC fallback', () => {
- const client = new videoIntelligenceModule.v1.VideoIntelligenceServiceClient(
- {fallback: true}
+ const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient(
+ {
+ fallback: true,
+ }
);
assert(client);
});
-
- describe('annotateVideo', function() {
+ describe('annotateVideo', () => {
it('invokes annotateVideo without error', done => {
- const client = new videoIntelligenceModule.v1.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const request = {
- features: features,
- inputUri: inputUri,
- };
-
+ const request: protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
expectedResponse
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
- .then(responses => {
+ .then((responses: [Operation]) => {
assert.deepStrictEqual(responses[0], expectedResponse);
done();
})
- .catch(err => {
+ .catch((err: {}) => {
done(err);
});
});
it('invokes annotateVideo with error', done => {
- const client = new videoIntelligenceModule.v1.VideoIntelligenceServiceClient(
+ const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient(
{
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
}
);
-
// Mock request
- const featuresElement = 'LABEL_DETECTION';
- const features = [featuresElement];
- const inputUri = 'gs://cloud-samples-data/video/cat.mp4';
- const request = {
- features: features,
- inputUri: inputUri,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
request,
null,
error
);
-
client
.annotateVideo(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
.then(() => {
assert.fail();
})
- .catch(err => {
- assert(err instanceof Error);
+ .catch((err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
});
-
- it('has longrunning decoder functions', () => {
- const client = new videoIntelligenceModule.v1.VideoIntelligenceServiceClient(
- {
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- }
- );
- assert(
- client._descriptors.longrunning.annotateVideo.responseDecoder instanceof
- Function
- );
- assert(
- client._descriptors.longrunning.annotateVideo.metadataDecoder instanceof
- Function
- );
- });
});
});
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
diff --git a/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p3beta1.ts b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p3beta1.ts
new file mode 100644
index 00000000000..9d4acfc7430
--- /dev/null
+++ b/packages/google-cloud-videointelligence/test/gapic-video_intelligence_service-v1p3beta1.ts
@@ -0,0 +1,162 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
+import {describe, it} from 'mocha';
+const videointelligenceserviceModule = require('../src');
+
+const FAKE_STATUS_CODE = 1;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
+
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockLongRunningGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error?: {} | null
+) {
+ return (request: {}) => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}
+describe('v1p3beta1.VideoIntelligenceServiceClient', () => {
+ it('has servicePath', () => {
+ const servicePath =
+ videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient
+ .servicePath;
+ assert(servicePath);
+ });
+ it('has apiEndpoint', () => {
+ const apiEndpoint =
+ videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient
+ .apiEndpoint;
+ assert(apiEndpoint);
+ });
+ it('has port', () => {
+ const port =
+ videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient
+ .port;
+ assert(port);
+ assert(typeof port === 'number');
+ });
+ it('should create a client with no option', () => {
+ const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient();
+ assert(client);
+ });
+ it('should create a client with gRPC fallback', () => {
+ const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient(
+ {
+ fallback: true,
+ }
+ );
+ assert(client);
+ });
+ describe('annotateVideo', () => {
+ it('invokes annotateVideo without error', done => {
+ const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient(
+ {
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ }
+ );
+ // Mock request
+ const request: protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
+ client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
+ request,
+ expectedResponse
+ );
+ client
+ .annotateVideo(request)
+ .then((responses: [Operation]) => {
+ const operation = responses[0];
+ return operation ? operation.promise() : {};
+ })
+ .then((responses: [Operation]) => {
+ assert.deepStrictEqual(responses[0], expectedResponse);
+ done();
+ })
+ .catch((err: {}) => {
+ done(err);
+ });
+ });
+
+ it('invokes annotateVideo with error', done => {
+ const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient(
+ {
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ }
+ );
+ // Mock request
+ const request: protosTypes.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
+ client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod(
+ request,
+ null,
+ error
+ );
+ client
+ .annotateVideo(request)
+ .then((responses: [Operation]) => {
+ const operation = responses[0];
+ return operation ? operation.promise() : {};
+ })
+ .then(() => {
+ assert.fail();
+ })
+ .catch((err: FakeError) => {
+ assert(err instanceof FakeError);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+});
diff --git a/packages/google-cloud-videointelligence/tsconfig.json b/packages/google-cloud-videointelligence/tsconfig.json
new file mode 100644
index 00000000000..613d35597b5
--- /dev/null
+++ b/packages/google-cloud-videointelligence/tsconfig.json
@@ -0,0 +1,19 @@
+{
+ "extends": "./node_modules/gts/tsconfig-google.json",
+ "compilerOptions": {
+ "rootDir": ".",
+ "outDir": "build",
+ "resolveJsonModule": true,
+ "lib": [
+ "es2016",
+ "dom"
+ ]
+ },
+ "include": [
+ "src/*.ts",
+ "src/**/*.ts",
+ "test/*.ts",
+ "test/**/*.ts",
+ "system-test/*.ts"
+ ]
+}
diff --git a/packages/google-cloud-videointelligence/tslint.json b/packages/google-cloud-videointelligence/tslint.json
new file mode 100644
index 00000000000..617dc975bae
--- /dev/null
+++ b/packages/google-cloud-videointelligence/tslint.json
@@ -0,0 +1,3 @@
+{
+ "extends": "gts/tslint.json"
+}
diff --git a/packages/google-cloud-videointelligence/webpack.config.js b/packages/google-cloud-videointelligence/webpack.config.js
index c8b97cca605..8a0a183124c 100644
--- a/packages/google-cloud-videointelligence/webpack.config.js
+++ b/packages/google-cloud-videointelligence/webpack.config.js
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,11 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+const path = require('path');
+
module.exports = {
- entry: './src/browser.js',
+ entry: './src/index.ts',
output: {
- library: 'video-intelligence',
- filename: './video-intelligence.js',
+ library: 'videointelligence',
+ filename: './videointelligence.js',
},
node: {
child_process: 'empty',
@@ -24,20 +26,36 @@ module.exports = {
crypto: 'empty',
},
resolve: {
- extensions: ['.js', '.json'],
+ alias: {
+ '../../../package.json': path.resolve(__dirname, 'package.json'),
+ },
+ extensions: ['.js', '.json', '.ts'],
},
module: {
rules: [
{
- test: /node_modules[\\/]retry-request[\\/]/,
+ test: /\.tsx?$/,
+ use: 'ts-loader',
+ exclude: /node_modules/,
+ },
+ {
+ test: /node_modules[\\/]@grpc[\\/]grpc-js/,
+ use: 'null-loader',
+ },
+ {
+ test: /node_modules[\\/]grpc/,
+ use: 'null-loader',
+ },
+ {
+ test: /node_modules[\\/]retry-request/,
use: 'null-loader',
},
{
- test: /node_modules[\\/]https-proxy-agent[\\/]/,
+ test: /node_modules[\\/]https?-proxy-agent/,
use: 'null-loader',
},
{
- test: /node_modules[\\/]gtoken[\\/]/,
+ test: /node_modules[\\/]gtoken/,
use: 'null-loader',
},
],