From a3406b0238f8de314cc91c359b89f1afca01fdd9 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 26 Jul 2017 10:48:24 -0700 Subject: [PATCH] Vision Partial Veneer (#2298) --- .nycrc | 26 + package.json | 33 +- packages/vision/README.md | 208 +- packages/vision/package.json | 37 +- .../smoke-test/image_annotator_smoke_test.js | 51 + packages/vision/src/helpers.js | 588 +++++ packages/vision/src/index.js | 2128 +---------------- packages/vision/src/v1/doc/doc_geometry.js | 73 + .../src/v1/doc/doc_google_protobuf_any.js | 121 + .../v1/doc/doc_google_protobuf_wrappers.js | 128 + .../src/v1/doc/doc_google_rpc_status.js | 92 + .../src/v1/doc/doc_google_type_color.js | 164 ++ .../src/v1/doc/doc_google_type_latlng.js | 71 + .../vision/src/v1/doc/doc_image_annotator.js | 906 +++++++ .../vision/src/v1/doc/doc_text_annotation.js | 362 +++ .../vision/src/v1/doc/doc_web_detection.js | 108 + .../vision/src/v1/image_annotator_client.js | 21 +- packages/vision/src/v1/index.js | 6 +- packages/vision/system-test/vision.js | 569 +---- packages/vision/test/gapic-v1.js | 82 + packages/vision/test/helpers.test.js | 283 +++ packages/vision/test/index.js | 1868 --------------- packages/vision/test/index.test.js | 50 + scripts/docs/parser.js | 9 +- test/docs.js | 3 + 25 files changed, 3274 insertions(+), 4713 deletions(-) create mode 100644 .nycrc create mode 100644 packages/vision/smoke-test/image_annotator_smoke_test.js create mode 100644 packages/vision/src/helpers.js create mode 100644 packages/vision/src/v1/doc/doc_geometry.js create mode 100644 packages/vision/src/v1/doc/doc_google_protobuf_any.js create mode 100644 packages/vision/src/v1/doc/doc_google_protobuf_wrappers.js create mode 100644 packages/vision/src/v1/doc/doc_google_rpc_status.js create mode 100644 packages/vision/src/v1/doc/doc_google_type_color.js create mode 100644 packages/vision/src/v1/doc/doc_google_type_latlng.js create mode 100644 packages/vision/src/v1/doc/doc_image_annotator.js create mode 100644 packages/vision/src/v1/doc/doc_text_annotation.js create mode 100644 packages/vision/src/v1/doc/doc_web_detection.js create mode 100644 packages/vision/test/gapic-v1.js create mode 100644 packages/vision/test/helpers.test.js delete mode 100644 packages/vision/test/index.js create mode 100644 packages/vision/test/index.test.js diff --git a/.nycrc b/.nycrc new file mode 100644 index 00000000000..19191fb88ad --- /dev/null +++ b/.nycrc @@ -0,0 +1,26 @@ +{ + "report-dir": "./.coverage", + "exclude": [ + "packages/*/src/*{/*,/**/*}.js", + "packages/*/src/*/v*/*.js", + "packages/*/test/**/*.js" + ], + "watermarks": { + "branches": [ + 95, + 100 + ], + "functions": [ + 95, + 100 + ], + "lines": [ + 95, + 100 + ], + "statements": [ + 95, + 100 + ] + } +} diff --git a/package.json b/package.json index c021a006351..42878b76721 100644 --- a/package.json +++ b/package.json @@ -13,6 +13,7 @@ "extend": "^3.0.0", "glob": "^5.0.9", "globby": "^3.0.1", + "intelli-espower-loader": "^1.0.1", "is": "^3.1.0", "jscs": "^2.1.1", "jshint": "^2.9.1", @@ -25,6 +26,7 @@ "multiline": "^1.0.2", "nyc": "^10.3.0", "package-json": "^2.4.0", + "power-assert": "^1.4.2", "propprop": "^0.3.1", "semver": "^5.3.0", "shelljs": "^0.7.3", @@ -41,37 +43,12 @@ "remove-ghpages": "node ./scripts/docs/remove.js", "lint": "jshint scripts/ packages/ system-test/ test/ && jscs packages/ system-test/ test/", "test": "npm run unit-test && npm run docs && npm run snippet-test", - "unit-test": "mocha --timeout 5000 --bail packages/*/test/*.js", + "unit-test": "mocha --timeout 5000 --bail 
--require intelli-espower-loader packages/*/test/*.js", "snippet-test": "mocha --timeout 5000 --bail test/docs.js", - "system-test": "mocha packages/*/system-test/*.js --no-timeouts --bail", - "cover": "nyc --reporter=lcov --reporter=html mocha --no-timeouts packages/*/test/*.js && nyc report", + "system-test": "mocha packages/*/system-test/*.js --require intelli-espower-loader --no-timeouts --bail", + "cover": "nyc --reporter=lcov --reporter=html mocha --require intelli-espower-loader --no-timeouts packages/*/test/*.js && nyc report", "coveralls": "npm run cover && nyc report --reporter=text-lcov | coveralls" }, - "nyc": { - "report-dir": "./.coverage", - "exclude": [ - "packages/*/src/*{/*,/**/*}.js", - "packages/*/test/**/*.js" - ], - "watermarks": { - "branches": [ - 95, - 100 - ], - "functions": [ - 95, - 100 - ], - "lines": [ - 95, - 100 - ], - "statements": [ - 95, - 100 - ] - } - }, "license": "Apache-2.0", "engines": { "node": ">=4.0.0" diff --git a/packages/vision/README.md b/packages/vision/README.md index 6061e4fd54a..f99e475ccd5 100644 --- a/packages/vision/README.md +++ b/packages/vision/README.md @@ -1,168 +1,60 @@ -# @google-cloud/vision ([Beta][versioning]) -> Cloud Vision Client Library for Node.js +# Node.js Client for Google Cloud Vision API ([Beta](https://github.com/GoogleCloudPlatform/google-cloud-node#versioning)) -*Looking for more Google APIs than just Vision? You might want to check out [`google-cloud`][google-cloud].* +[Google Cloud Vision API][Product Documentation]: Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications. +- [Client Library Documentation][] +- [Product Documentation][] -- [API Documentation][gcloud-vision-docs] -- [Official Documentation][cloud-vision-docs] +## Quick Start +In order to use this library, you first need to go through the following steps: +1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project) +2. [Enable the Google Cloud Vision API.](https://console.cloud.google.com/apis/api/vision) +3. [Setup Authentication.](https://googlecloudplatform.github.io/google-cloud-node/#/docs/google-cloud/master/guides/authentication) -```sh -$ npm install --save @google-cloud/vision +### Installation ``` -```js -var vision = require('@google-cloud/vision')({ - projectId: 'grape-spaceship-123', - keyFilename: '/path/to/keyfile.json' -}); - -// Read the text from an image. -vision.detectText('./image.jpg', function(err, text) { - // text = [ - // 'This was text found in the image', - // 'This was more text found in the image' - // ] -}); - -// Detect faces and the locations of their features in an image. 
-vision.detectFaces('./image.jpg', function(err, faces) { - // faces = [ - // { - // angles: {pan,tilt,roll}, - // bounds: { - // head: [{x,y},{x,y},{x,y},{x,y}], - // face: [{x,y},{x,y},{x,y},{x,y}] - // }, - // features: { - // confidence: 34.489909, - // chin: { - // center: {x,y,z}, - // left: {x,y,z}, - // right: {x,y,z} - // }, - // ears: { - // left: {x,y,z}, - // right: {x,y,z} - // }, - // eyebrows: { - // left: { - // left: {x,y,z}, - // right: {x,y,z}, - // top: {x,y,z} - // }, - // right: { - // left: {x,y,z}, - // right: {x,y,z}, - // top: {x,y,z} - // } - // }, - // eyes: { - // left: { - // bottom: {x,y,z}, - // center: {x,y,z}, - // left: {x,y,z}, - // pupil: {x,y,z}, - // right: {x,y,z}, - // top: {x,y,z} - // }, - // right: { - // bottom: {x,y,z}, - // center: {x,y,z}, - // left: {x,y,z}, - // pupil: {x,y,z}, - // right: {x,y,z}, - // top: {x,y,z} - // } - // }, - // forehead: {x,y,z}, - // lips: { - // bottom: {x,y,z}, - // top: {x,y,z} - // }, - // mouth: { - // center: {x,y,z}, - // left: {x,y,z}, - // right: {x,y,z} - // }, - // nose: { - // bottom: { - // center: {x,y,z}, - // left: {x,y,z}, - // right: {x,y,z} - // }, - // tip: {x,y,z}, - // top: {x,y,z} - // } - // }, - // confidence: 56.748849, - // blurry: false, - // dark: false, - // happy: false, - // hat: false, - // mad: false, - // sad: false, - // surprised: false - // } - // ] -}); - -// Promises are also supported by omitting callbacks. -vision.detectFaces('./image.jpg').then(function(data) { - var faces = data[0]; -}); - -// It's also possible to integrate with third-party Promise libraries. -var vision = require('@google-cloud/vision')({ - promise: require('bluebird') -}); -``` - - -## Authentication - -It's incredibly easy to get authenticated and start using Google's APIs. You can set your credentials on a global basis as well as on a per-API basis. See each individual API section below to see how you can auth on a per-API-basis. This is useful if you want to use different accounts for different Cloud services. - -### On Google Cloud Platform - -If you are running this client on Google Cloud Platform, we handle authentication for you with no configuration. You just need to make sure that when you [set up the GCE instance][gce-how-to], you add the correct scopes for the APIs you want to access. - -``` js -var vision = require('@google-cloud/vision')(); -// ...you're good to go! +$ npm install --save @google-cloud/vision ``` -### Elsewhere - -If you are not running this client on Google Cloud Platform, you need a Google Developers service account. To create a service account: - -1. Visit the [Google Developers Console][dev-console]. -2. Create a new project or click on an existing project. -3. Navigate to **APIs & auth** > **APIs section** and turn on the following APIs (you may need to enable billing in order to use these services): - * Google Cloud Vision API -4. Navigate to **APIs & auth** > **Credentials** and then: - * If you want to use a new service account, click on **Create new Client ID** and select **Service account**. After the account is created, you will be prompted to download the JSON key file that the library uses to authenticate your requests. - * If you want to generate a new key for an existing service account, click on **Generate new JSON key** and download the JSON key file. - -``` js -var projectId = process.env.GCLOUD_PROJECT; // E.g. 
'grape-spaceship-123' - -var vision = require('@google-cloud/vision')({ - projectId: projectId, - - // The path to your key file: - keyFilename: '/path/to/keyfile.json' - - // Or the contents of the key file: - credentials: require('./path/to/keyfile.json') -}); - -// ...you're good to go! +### Preview +#### ImageAnnotatorClient +```js + var vision = require('@google-cloud/vision'); + + var client = vision({ + // optional auth parameters. + }); + + var gcsImageUri = 'gs://gapic-toolkit/President_Barack_Obama.jpg'; + var source = { + gcsImageUri : gcsImageUri + }; + var image = { + source : source + }; + var type = vision.v1.types.Feature.Type.FACE_DETECTION; + var featuresElement = { + type : type + }; + var features = [featuresElement]; + var requestsElement = { + image : image, + features : features + }; + var requests = [requestsElement]; + client.batchAnnotateImages({requests: requests}).then(function(responses) { + var response = responses[0]; + // doThingsWith(response) + }) + .catch(function(err) { + console.error(err); + }); ``` +### Next Steps +- Read the [Client Library Documentation][] for Google Cloud Vision API to see other available methods on the client. +- Read the [Google Cloud Vision API Product documentation][Product Documentation] to learn more about the product and see How-to Guides. +- View this [repository's main README](https://github.com/GoogleCloudPlatform/google-cloud-node/blob/master/README.md) to see the full list of Cloud APIs that we cover. -[versioning]: https://github.com/GoogleCloudPlatform/google-cloud-node#versioning -[google-cloud]: https://github.com/GoogleCloudPlatform/google-cloud-node/ -[gce-how-to]: https://cloud.google.com/compute/docs/authentication#using -[dev-console]: https://console.developers.google.com/project -[gcloud-vision-docs]: https://googlecloudplatform.github.io/google-cloud-node/#/docs/vision -[cloud-vision-docs]: https://cloud.google.com/vision/docs +[Client Library Documentation]: https://googlecloudplatform.github.io/google-cloud-node/#/docs/vision +[Product Documentation]: https://cloud.google.com/vision \ No newline at end of file diff --git a/packages/vision/package.json b/packages/vision/package.json index 0d829c0fc4c..a1a1b3dce2b 100644 --- a/packages/vision/package.json +++ b/packages/vision/package.json @@ -1,8 +1,9 @@ { + "repository": "GoogleCloudPlatform/google-cloud-node", "name": "@google-cloud/vision", "version": "0.11.5", - "author": "Google Inc.", - "description": "Cloud Vision Client Library for Node.js", + "author": "Google Inc", + "description": "Google Cloud Vision API client for Node.js", "contributors": [ { "name": "Burcu Dogan", @@ -29,14 +30,12 @@ "email": "sawchuk@gmail.com" } ], - "main": "./src/index.js", + "main": "src/index.js", "files": [ "src", "AUTHORS", - "CONTRIBUTORS", "COPYING" ], - "repository": "googlecloudplatform/google-cloud-node", "keywords": [ "google apis client", "google api client", @@ -47,36 +46,32 @@ "google cloud", "cloud", "google vision", - "vision" + "vision", + "Google Cloud Vision API" ], "dependencies": { "@google-cloud/common": "^0.13.0", - "@google-cloud/common-grpc": "^0.4.0", - "arrify": "^1.0.0", "async": "^2.0.1", "extend": "^3.0.0", - "google-gax": "^0.13.0", + "google-gax": "^0.13.2", "google-proto-files": "^0.12.0", - "is": "^3.0.1", - "prop-assign": "^1.0.0", - "propprop": "^0.3.0", - "rgb-hex": "^1.0.0", - "string-format-obj": "^1.0.0" + "is": "^3.0.1" }, "devDependencies": { "@google-cloud/storage": "*", - "deep-strict-equal": "^0.2.0", + 
"intelli-espower-loader": "^1.0.1", "mocha": "^3.0.1", - "multiline": "^1.0.2", "node-uuid": "^1.4.7", - "normalize-newline": "^2.0.0", - "proxyquire": "^1.7.10", - "tmp": "^0.0.31" + "nyc": "^10.3.0", + "power-assert": "^1.4.2", + "sinon": "^2.2.0" }, "scripts": { + "cover": "nyc --reporter=lcov --reporter=html mocha --no-timeouts --require intelli-espower-loader test/*.js && nyc report", "publish-module": "node ../../scripts/publish.js vision", - "test": "mocha test/*.js", - "system-test": "mocha system-test/*.js --no-timeouts --bail" + "test": "mocha --require intelli-espower-loader test/*.js", + "smoke-test": "mocha smoke-test/*.js --timeout 5000", + "system-test": "mocha system-test/*.js --require intelli-espower-loader --no-timeouts --bail" }, "license": "Apache-2.0", "engines": { diff --git a/packages/vision/smoke-test/image_annotator_smoke_test.js b/packages/vision/smoke-test/image_annotator_smoke_test.js new file mode 100644 index 00000000000..6ee7a38e231 --- /dev/null +++ b/packages/vision/smoke-test/image_annotator_smoke_test.js @@ -0,0 +1,51 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +'use strict'; + +describe('ImageAnnotatorSmokeTest', function() { + + it('successfully makes a call to the service', function(done) { + var vision = require('../src'); + + var client = vision.v1({ + // optional auth parameters. + }); + + var gcsImageUri = 'gs://gapic-toolkit/President_Barack_Obama.jpg'; + var source = { + gcsImageUri : gcsImageUri + }; + var image = { + source : source + }; + var type = vision.v1.types.Feature.Type.FACE_DETECTION; + var featuresElement = { + type : type + }; + var features = [featuresElement]; + var requestsElement = { + image : image, + features : features + }; + var requests = [requestsElement]; + client.batchAnnotateImages({requests: requests}).then(function(responses) { + var response = responses[0]; + console.log(response); + }) + .then(done) + .catch(done); + }); +}); \ No newline at end of file diff --git a/packages/vision/src/helpers.js b/packages/vision/src/helpers.js new file mode 100644 index 00000000000..d71c32412ae --- /dev/null +++ b/packages/vision/src/helpers.js @@ -0,0 +1,588 @@ +/*! + * Copyright 2017 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! 
+ * @module vision/helpers + */ + +'use strict'; + +var fs = require('fs'); +var is = require('is'); + +var promisify = require('@google-cloud/common').util.promisify; +var gax = require('google-gax'); +var protoFiles = require('google-proto-files'); + + +/*! + * Find a given image and fire a callback with the appropriate image structure. + * + * @param {Object} image - An object representing what is known about the + * image. + * @param {Function} callback - The callback to run. + */ +var coerceImage = (image, callback) => { + // If this is a buffer, read it and send the object + // that the Vision API expects. + if (Buffer.isBuffer(image)) { + callback(null, { + content: image.toString('base64') + }); + return; + } + + // File exists on disk. + if (image.source && image.source.filename) { + fs.readFile(image.source.filename, {encoding: 'base64'}, (err, blob) => { + if (err) { + callback(err); + return; + } + callback(null, {content: blob.toString('base64')}); + }); + return; + } + + // No other options were relevant; return the image with no modification. + callback(null, image); + return; +}; + + +/*! + * + * Return a method that calls annotateImage asking for a single feature. + * + * @param {Number} featureValue - The feature being requested. This is taken + * from the Feature.Type enum, and will be an integer. + * + * @return {Function} - The function that, when called, will call annotateImage + * asking for the single feature annotation. + */ +var _createSingleFeatureMethod = featureValue => { + return function(image, options) { + return this.annotateImage({ + image: image, + features: [{type: featureValue}], + }, options); + }; +}; + + +/*! + * Return a dictionary-like object with helpers to augment the Vision + * GAPIC. + * + * @param {string} apiVersion - The API version (e.g. "v1") + * + * @return {Object} - An object with keys and functions which are placed + * onto the pure GAPIC. + */ +module.exports = apiVersion => { + var methods = {}; + + /** + * Annotate a single image with the requested features. + * + * @param {Object=} request + * A representation of the request being sent to the Vision API. + * @param {Object=} request.image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Array} request.features + * An array of the specific annotation features being requested. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. 
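+   *
+   * A callback-style sketch of the same call (illustrative only; it assumes
+   * the `request` object shown in the example below and omits real error
+   * handling):
+   *
+   *     vision.annotateImage(request, function(err, response) {
+   *       if (err) {
+   *         // Handle the error (omitted here).
+   *         return;
+   *       }
+   *       // doThingsWith(response);
+   *     });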
+ * + * @example + * var request = { + * image: {source: {imageUri: 'gs://path/to/image.jpg'}}, + * features: [], + * }; + * vision.annotateImage(request).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.annotateImage = promisify(function(request, options, callback) { + // If a callback was provided and options were skipped, normalize + // the argument names. + if (is.undefined(callback) && is.function(options)) { + callback = options; + options = undefined; + } + + // If there is no image, throw an exception. + if (is.undefined(request.image)) { + throw new Error('Attempted to call `annotateImage` with no image.'); + } + + // If we got a filename for the image, open the file and transform + // it to content. + return coerceImage(request.image, (err, image) => { + if (err) { + return callback(err); + } + request.image = image; + + // Call the GAPIC batch annotation function. + return this.batchAnnotateImages([request], options, (err, r) => { + // If there is an error, handle it. + if (err) { + return callback(err); + } + + // We are guaranteed to only have one response element, since we + // only sent one image. + var response = r.responses[0]; + + // Fire the callback if applicable. + return callback(undefined, response); + }); + }); + }); + + // Get a list of features available on the API. Although we could iterate over + // them and create single-feature methods for each dynamically, for + // documentation purpose, we manually list all the single-feature methods + // below. + const features = gax.grpc().load([{ + root: protoFiles('..'), + file: `google/cloud/vision/${apiVersion}/image_annotator.proto`, + }]).google.cloud.vision[apiVersion].Feature.Type; + + /** + * Annotate a single image with face detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.faceDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.faceDetection = + promisify(_createSingleFeatureMethod(features.FACE_DETECTION)); + + /** + * Annotate a single image with landmark detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). 
+ * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.landmarkDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.landmarkDetection = + promisify(_createSingleFeatureMethod(features.LANDMARK_DETECTION)); + + /** + * Annotate a single image with logo detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.logoDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.logoDetection = + promisify(_createSingleFeatureMethod(features.LOGO_DETECTION)); + + /** + * Annotate a single image with label detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. 
See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.labelDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.labelDetection = + promisify(_createSingleFeatureMethod(features.LABEL_DETECTION)); + + /** + * Annotate a single image with text detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.textDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.textDetection = + promisify(_createSingleFeatureMethod(features.TEXT_DETECTION)); + + /** + * Annotate a single image with document text detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. 
+ * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.documentTextDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.documentTextDetection = + promisify(_createSingleFeatureMethod(features.DOCUMENT_TEXT_DETECTION)); + + /** + * Annotate a single image with safe search detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.safeSearchDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.safeSearchDetection = + promisify(_createSingleFeatureMethod(features.SAFE_SEARCH_DETECTION)); + + /** + * Annotate a single image with image properties. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. 
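+   *
+   * As a sketch of the `content` form described above (it assumes a local
+   * './image.jpg' exists on disk), a Buffer may be passed directly instead
+   * of a `source` object:
+   *
+   *     var fs = require('fs');
+   *     var buffer = fs.readFileSync('./image.jpg');
+   *     vision.imageProperties({content: buffer}).then(function(response) {
+   *       // doThingsWith(response);
+   *     });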
+ * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.imageProperties(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.imageProperties = + promisify(_createSingleFeatureMethod(features.IMAGE_PROPERTIES)); + + /** + * Annotate a single image with crop hints. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. + * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.cropHints(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.cropHints = + promisify(_createSingleFeatureMethod(features.CROP_HINTS)); + + /** + * Annotate a single image with web detection. + * + * @param {Object=} image + * A dictionary-like object representing the image. This should have a + * single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {Object=} options + * Optional parameters. You can override the default settings for this + * call, e.g, timeout, retries, paginations, etc. See + * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing + * API call. 
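+   *
+   * A local file can also be referenced by name; a minimal sketch, assuming
+   * './image.jpg' exists on disk (the helper reads the file and sends its
+   * contents for you):
+   *
+   *     vision.webDetection({source: {filename: './image.jpg'}})
+   *       .then(function(response) {
+   *         // doThingsWith(response);
+   *       });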
+ * + * @example + * var image = { + * source: {imageUri: 'gs://path/to/image.jpg'} + * }; + * vision.webDetection(image).then(response => { + * // doThingsWith(response); + * }).catch(err => { + * console.error(err); + * }); + */ + methods.webDetection = + promisify(_createSingleFeatureMethod(features.WEB_DETECTION)); + + return methods; +}; diff --git a/packages/vision/src/index.js b/packages/vision/src/index.js index d2b55077e91..a001f886c94 100644 --- a/packages/vision/src/index.js +++ b/packages/vision/src/index.js @@ -16,2101 +16,65 @@ /*! * @module vision + * @name Vision */ 'use strict'; -var arrify = require('arrify'); -var async = require('async'); -var common = require('@google-cloud/common'); -var commonGrpc = require('@google-cloud/common-grpc'); -var extend = require('extend'); -var format = require('string-format-obj'); -var fs = require('fs'); var is = require('is'); -var prop = require('propprop'); -var propAssign = require('prop-assign'); -var rgbHex = require('rgb-hex'); - -var v1 = require('./v1'); - -var VERY_UNLIKELY = 0; -var UNLIKELY = 1; -var POSSIBLE = 2; -var LIKELY = 3; -var VERY_LIKELY = 4; - -/** - * The [Cloud Vision API](https://cloud.google.com/vision/docs) allows easy - * integration of vision detection features, including image labeling, face and - * landmark detection, optical character recognition (OCR), and tagging of - * explicit content. - * - * @constructor - * @alias module:vision - * - * @resource [Getting Started]{@link https://cloud.google.com/vision/docs/getting-started} - * @resource [Image Best Practices]{@link https://cloud.google.com/vision/docs/image-best-practices} - * - * @param {object} options - [Configuration object](#/docs). - */ -function Vision(options) { - if (!(this instanceof Vision)) { - options = common.util.normalizeArguments(this, options); - return new Vision(options); - } - - options = extend({}, options, { - libName: 'gccl', - libVersion: require('../package.json').version - }); - - this.api = { - Vision: v1(options).imageAnnotatorClient(options) - }; -} - -Vision.likelihood = { - VERY_UNLIKELY: VERY_UNLIKELY, - UNLIKELY: UNLIKELY, - POSSIBLE: POSSIBLE, - LIKELY: LIKELY, - VERY_LIKELY: VERY_LIKELY -}; - -/** - * Run image detection and annotation for an image or batch of images. - * - * This is an advanced API method that requires raw - * [`AnnotateImageRequest`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#AnnotateImageRequest) - * objects to be provided. If that doesn't sound like what you're looking for, - * you'll probably appreciate {module:vision#detect}. - * - * @resource [images.annotate API Reference]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate} - * - * @param {object|object[]} requests - An `AnnotateImageRequest` or array of - * `AnnotateImageRequest`s. See an - * [`AnnotateImageRequest`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#AnnotateImageRequest). - * @param {function} callback - The callback function. - * @param {?error} callback.err - An error returned while making this request. - * @param {object} callback.annotations - See an - * [`AnnotateImageResponse`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#AnnotateImageResponse). - * @param {object} callback.apiResponse - Raw API response. - * - * @example - * var annotateImageReq = { - * // See the link in the parameters for `AnnotateImageRequest`. 
- * }; - * - * vision.annotate(annotateImageReq, function(err, annotations, apiResponse) { - * // annotations = apiResponse.responses - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.annotate(annotateImageReq).then(function(data) { - * var annotations = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.annotate = function(requests, callback) { - this.api.Vision.batchAnnotateImages({ - requests: arrify(requests) - }, function(err, resp) { - if (err) { - callback(err, null, resp); - return; - } - - callback(null, resp.responses, resp); - }); -}; - -// jscs:disable maximumLineLength -/** - * Detect properties from an image (or images) of one or more types. - * - *
- * <h4>API simplifications</h4>
- * - * The raw API response will return some values in a range from `VERY_UNLIKELY` - * to `VERY_LIKELY`. For simplification, any value less than `LIKELY` is - * converted to `false`. - * - * - **False** - * - `VERY_UNLIKELY` - * - `UNLIKELY` - * - `POSSIBLE` - * - **True** - * - `LIKELY` - * - `VERY_LIKELY` - * - * The API will also return many values represented in a `[0,1]` range. We - * convert these to a `[0,100]` value. E.g, `0.4` is represented as `40`. - * - * For the response in the original format, review the `apiResponse` argument - * your callback receives. - * - * @param {string|string[]|buffer|buffer[]|module:storage/file|module:storage/file[]} images - The - * source image(s) to run the detection on. It can be either a local image - * path, a remote image URL, a Buffer, or a @google-cloud/storage File - * object. - * @param {string[]|object=} options - An array of types or a configuration - * object. - * @param {object=} options.imageContext - See an - * [`ImageContext`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#ImageContext) - * resource. - * @param {number} options.maxResults - The maximum number of results, per type, - * to return in the response. - * @param {string[]} options.types - An array of feature types to detect from - * the provided images. Acceptable values: `crops`, `document`, `faces`, - * `landmarks`, `labels`, `logos`, `properties`, `safeSearch`, `similar`, - * `text`. - * @param {boolean=} options.verbose - Use verbose mode, which returns a less- - * simplistic representation of the annotation (default: `false`). - * @param {function} callback - The callback function. - * @param {?error} callback.err - An error returned while making this request. - * @param {object[]} callback.err.errors - If present, these represent partial - * failures. It's possible for part of your request to be completed - * successfully, while a single feature request was not successful. - * @param {object|object[]} callback.detections - If a single detection type was - * asked for, it will be returned in its raw form; either an object or array - * of objects. If multiple detection types were requested, you will receive - * an object with keys for each detection type (listed above in - * `config.types`). Additionally, if multiple images were provided, you will - * receive an array of detection objects, each representing an image. See - * the examples below for more information. - * @param {object} callback.apiResponse - Raw API response. - * - * @example - * var types = [ - * 'face', - * 'label' - * ]; - * - * vision.detect('image.jpg', types, function(err, detections, apiResponse) { - * // detections = { - * // faces: [...], - * // labels: [...] - * // } - * }); - * - * //- - * // Run feature detection over a remote image. - * //- - * var img = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png'; - * - * vision.detect(img, types, function(err, detection, apiResponse) {}); - * - * //- - * // Run feature detection over a Buffer. - * //- - * var level = require('level'); - * var db = level('./users-database'); - * - * db.get('user-image', { encoding: 'binary' }, function(err, image) { - * if (err) { - * // Error handling omitted. - * } - * - * vision.detect(image, types, function(err, detection, apiResponse) {}); - * }); - * - * //- - * // If you have a base64 string, provide it in a Buffer. 
- * //- - * var myBase64ImageRepresentation = '...'; - * - * var image = new Buffer(myBase64ImageRepresentation, 'base64'); - * - * vision.detect(image, types, function(err, detection, apiResponse) {}); - * - * //- - * // Supply multiple images for feature detection. - * //- - * var images = [ - * 'image.jpg', - * 'image-two.jpg' - * ]; - * - * var types = [ - * 'face', - * 'label' - * ]; - * - * vision.detect(images, types, function(err, detections, apiResponse) { - * // detections = [ - * // // Detections for image.jpg: - * // { - * // faces: [...], - * // labels: [...] - * // }, - * // - * // // Detections for image-two.jpg: - * // { - * // faces: [...], - * // labels: [...] - * // } - * // ] - * }); - * - * //- - * // It's possible for part of your request to be completed successfully, while - * // a single feature request was not successful. - * //- - * vision.detect('malformed-image.jpg', types, function(err, detections) { - * if (err) { - * // An API error or partial failure occurred. - * - * if (err.name === 'PartialFailureError') { - * // err.errors = [ - * // { - * // image: 'malformed-image.jpg', - * // errors: [ - * // { - * // code: 400, - * // message: 'Bad image data', - * // type: 'faces' - * // }, - * // { - * // code: 400, - * // message: 'Bad image data', - * // type: 'labels' - * // } - * // ] - * // } - * // ] - * } - * } - * - * // `detections` will still be populated with all of the results that could - * // be annotated. - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detect('image.jpg', types).then(function(data) { - * var detections = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detect = function(images, options, callback) { - var self = this; - var isSingleImage = !is.array(images) || images.length === 1; - - if (!is.object(options)) { - options = { - types: options - }; - } - - var types = arrify(options.types); - - var typeShortNameToFullName = { - crop: 'CROP_HINTS', - crops: 'CROP_HINTS', - - doc: 'DOCUMENT_TEXT_DETECTION', - document: 'DOCUMENT_TEXT_DETECTION', - - face: 'FACE_DETECTION', - faces: 'FACE_DETECTION', - - label: 'LABEL_DETECTION', - labels: 'LABEL_DETECTION', - - landmark: 'LANDMARK_DETECTION', - landmarks: 'LANDMARK_DETECTION', - - logo: 'LOGO_DETECTION', - logos: 'LOGO_DETECTION', - - properties: 'IMAGE_PROPERTIES', - - safeSearch: 'SAFE_SEARCH_DETECTION', - - similar: 'WEB_DETECTION', - - text: 'TEXT_DETECTION' - }; - - var typeShortNameToRespName = { - crop: 'cropHintsAnnotation', - crops: 'cropHintsAnnotation', - - doc: 'fullTextAnnotation', - document: 'fullTextAnnotation', - - face: 'faceAnnotations', - faces: 'faceAnnotations', - - label: 'labelAnnotations', - labels: 'labelAnnotations', - - landmark: 'landmarkAnnotations', - landmarks: 'landmarkAnnotations', - - logo: 'logoAnnotations', - logos: 'logoAnnotations', - - properties: 'imagePropertiesAnnotation', - - safeSearch: 'safeSearchAnnotation', - - similar: 'webDetection', - - text: 'textAnnotations' - }; - - var typeRespNameToShortName = { - cropHintsAnnotation: 'crops', - faceAnnotations: 'faces', - fullTextAnnotation: 'document', - imagePropertiesAnnotation: 'properties', - labelAnnotations: 'labels', - landmarkAnnotations: 'landmarks', - logoAnnotations: 'logos', - safeSearchAnnotation: 'safeSearch', - textAnnotations: 'text', - webDetection: 'similar' - }; - - Vision.findImages_(images, function(err, foundImages) { - if (err) { - callback(err); - return; - } - - var config = []; - - 
foundImages.forEach(function(image) { - types.forEach(function(type) { - var typeName = typeShortNameToFullName[type]; - - if (!typeName) { - throw new Error('Requested detection feature not found: ' + type); - } - - var cfg = { - image: image, - features: [ - { - type: typeName - } - ] - }; - - if (is.object(options.imageContext)) { - cfg.imageContext = options.imageContext; - } - - if (is.number(options.maxResults)) { - cfg.features.map(propAssign('maxResults', options.maxResults)); - } - - config.push(cfg); - }); - }); - - self.annotate(config, function(err, annotations, resp) { - if (err) { - callback(err, null, resp); - return; - } - - var originalResp = extend(true, {}, resp); - var partialFailureErrors = []; - - var detections = foundImages - .map(groupDetectionsByImage) - .map(removeExtraneousAnnotationObjects) - .map(assignTypeToEmptyAnnotations) - .map(removeDetectionsWithErrors) - .map(flattenAnnotations) - .map(decorateAnnotations); - - if (partialFailureErrors.length > 0) { - err = new common.util.PartialFailureError({ - errors: partialFailureErrors, - response: originalResp - }); - } - - if (isSingleImage && detections.length > 0) { - // If only a single image was given, expose it from the array. - detections = detections[0]; - } - - callback(err, detections, originalResp); - - function groupDetectionsByImage() { - // detections = [ - // // Image one: - // [ - // { - // faceAnnotations: {}, - // labelAnnotations: {}, - // ... - // } - // ], - // - // // Image two: - // [ - // { - // faceAnnotations: {}, - // labelAnnotations: {}, - // ... - // } - // ] - // ] - return annotations.splice(0, types.length); - } - - function removeExtraneousAnnotationObjects(annotations) { - // The API response includes empty annotations for features that weren't - // requested. - // - // Before: - // [ - // { - // faceAnnotations: {}, - // labelAnnotations: {} - // } - // ] - // - // After: - // [ - // { - // faceAnnotations: {} - // } - // ] - return annotations.map(function(annotation, index) { - var requestedAnnotationType = typeShortNameToRespName[types[index]]; - - for (var prop in annotation) { - if (prop !== requestedAnnotationType && prop !== 'error') { - /* istanbul ignore next */ - delete annotation[prop]; - } - } - - return annotation; - }); - } - - function assignTypeToEmptyAnnotations(annotations) { - // Before: - // [ - // {}, // What annotation type was attempted? - // { labelAnnotations: {...} } - // ] - // - // After: - // [ - // { faceAnnotations: [] }, - // { labelAnnotations: {...} } - // ] - return annotations.map(function(annotation, index) { - var detectionType = types[index]; - var typeName = typeShortNameToRespName[detectionType]; - - if (is.empty(annotation) || annotation.error) { - var isPlural = typeName.charAt(typeName.length - 1) === 's'; - annotation[typeName] = isPlural ? 
[] : {}; - } - - return annotation; - }); - } - - function removeDetectionsWithErrors(annotations, index) { - // Before: - // [ - // { - // faceAnnotations: [] - // }, - // { - // error: {...}, - // imagePropertiesAnnotation: {} - // } - // ] - - // After: - // [ - // { - // faceAnnotations: [] - // }, - // undefined - // ] - var errors = []; - - annotations.forEach(function(annotation, index) { - if (!is.empty(annotation.error)) { - var userInputType = types[index]; - var respNameType = typeShortNameToRespName[userInputType]; - annotation.error.type = typeRespNameToShortName[respNameType]; - errors.push(Vision.formatError_(annotation.error)); - } - }); - - if (errors.length > 0) { - partialFailureErrors.push({ - image: isSingleImage ? images : images[index], - errors: errors - }); - - return; - } - - return annotations; - } - - function flattenAnnotations(annotations) { - return extend.apply(null, annotations); - } - - function formatAnnotationBuilder(type) { - return function(annotation) { - if (is.empty(annotation)) { - return annotation; - } - - var formatMethodMap = { - cropHintsAnnotation: Vision.formatCropHintsAnnotation_, - error: Vision.formatError_, - faceAnnotations: Vision.formatFaceAnnotation_, - fullTextAnnotation: Vision.formatFullTextAnnotation_, - imagePropertiesAnnotation: Vision.formatImagePropertiesAnnotation_, - labelAnnotations: Vision.formatEntityAnnotation_, - landmarkAnnotations: Vision.formatEntityAnnotation_, - logoAnnotations: Vision.formatEntityAnnotation_, - safeSearchAnnotation: Vision.formatSafeSearchAnnotation_, - textAnnotations: Vision.formatEntityAnnotation_, - webDetection: Vision.formatWebDetection_ - }; - - var formatMethod = formatMethodMap[type] || function(annotation) { - return annotation; - }; - - return formatMethod(annotation, options); - }; - } - - function decorateAnnotations(annotations) { - for (var annotationType in annotations) { - if (annotations.hasOwnProperty(annotationType)) { - var annotationGroup = arrify(annotations[annotationType]); - - var formattedAnnotationGroup = annotationGroup - .map(formatAnnotationBuilder(annotationType)); - - // An annotation can be singular, e.g. SafeSearch. It is either - // violent or not. Unlike face detection, where there can be - // multiple results. - // - // Be sure the original type (object or array) is preserved and - // not wrapped in an array if it wasn't originally. - if (!is.array(annotations[annotationType])) { - formattedAnnotationGroup = formattedAnnotationGroup[0]; - } - - delete annotations[annotationType]; - var typeShortName = typeRespNameToShortName[annotationType]; - annotations[typeShortName] = formattedAnnotationGroup; - } - } - - if (types.length === 1) { - // Only a single detection type was asked for, so no need to box in - // the results. Make them accessible without using a key. - var key = typeRespNameToShortName[typeShortNameToRespName[types[0]]]; - annotations = annotations[key]; - } - - return annotations; - } - }); - }); -}; - -// jscs:enable maximumLineLength - -/** - * Detect the crop hints within an image. - * - *
- * <h4>Parameters</h4>
- * - * See {module:vision#detect}. - * - * @resource [CropHintsAnnotation JSON respresentation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#CropHintsAnnotation} - * - * @example - * vision.detectCrops('image.jpg', function(err, crops, apiResponse) { - * // crops = [ - * // [ - * // { - * // x: 1 - * // }, - * // { - * // x: 295 - * // }, - * // { - * // x: 295, - * // y: 301 - * // }, - * // { - * // x: 1, - * // y: 301 - * // } - * // ], - * // // ... - * // ] - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var options = { - * verbose: true - * }; - * - * vision.detectCrops('image.jpg', options, function(err, crops, apiResponse) { - * // crops = [ - * // { - * // bounds: [ - * // { - * // x: 1 - * // }, - * // { - * // x: 295 - * // }, - * // { - * // x: 295, - * // y: 301 - * // }, - * // { - * // x: 1, - * // y: 301 - * // } - * // ], - * // confidence: 0.799999995 - * // }, - * // // ... - * // ] - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectCrops('image.jpg').then(function(data) { - * var crops = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectCrops = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['crops'] - }); - - this.detect(images, options, callback); -}; - -/** - * Run face detection against an image. - * - *
- * <h4>Parameters</h4>
- * - * See {module:vision#detect}. - * - * @resource [FaceAnnotation JSON respresentation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#FaceAnnotation} - * - * @example - * vision.detectFaces('image.jpg', function(err, faces, apiResponse) { - * // faces = [ - * // { - * // angles: { - * // pan: -8.1090336, - * // roll: -5.0002542, - * // tilt: 18.012161 - * // }, - * // bounds: { - * // head: [ - * // { - * // x: 1 - * // }, - * // { - * // x: 295 - * // }, - * // { - * // x: 295, - * // y: 301 - * // }, - * // { - * // x: 1, - * // y: 301 - * // } - * // ], - * // face: [ - * // { - * // x: 28, - * // y: 40 - * // }, - * // { - * // x: 250, - * // y: 40 - * // }, - * // { - * // x: 250, - * // y: 262 - * // }, - * // { - * // x: 28, - * // y: 262 - * // } - * // ] - * // }, - * // features: { - * // confidence: 34.489909, - * // chin: { - * // center: { - * // x: 143.34183, - * // y: 262.22998, - * // z: -57.388493 - * // }, - * // left: { - * // x: 63.102425, - * // y: 248.99081, - * // z: 44.207638 - * // }, - * // right: { - * // x: 241.72728, - * // y: 225.53488, - * // z: 19.758242 - * // } - * // }, - * // ears: { - * // left: { - * // x: 54.872219, - * // y: 207.23712, - * // z: 97.030685 - * // }, - * // right: { - * // x: 252.67567, - * // y: 180.43124, - * // z: 70.15992 - * // } - * // }, - * // eyebrows: { - * // left: { - * // left: { - * // x: 58.790176, - * // y: 113.28249, - * // z: 17.89735 - * // }, - * // right: { - * // x: 106.14151, - * // y: 98.593758, - * // z: -13.116687 - * // }, - * // top: { - * // x: 80.248711, - * // y: 94.04303, - * // z: 0.21131183 - * // } - * // }, - * // right: { - * // left: { - * // x: 148.61565, - * // y: 92.294594, - * // z: -18.804882 - * // }, - * // right: { - * // x: 204.40808, - * // y: 94.300117, - * // z: -2.0009689 - * // }, - * // top: { - * // x: 174.70135, - * // y: 81.580917, - * // z: -12.702137 - * // } - * // } - * // }, - * // eyes: { - * // left: { - * // bottom: { - * // x: 84.883934, - * // y: 134.59479, - * // z: -2.8677137 - * // }, - * // center: { - * // x: 83.707092, - * // y: 128.34, - * // z: -0.00013388535 - * // }, - * // left: { - * // x: 72.213913, - * // y: 132.04138, - * // z: 9.6985674 - * // }, - * // pupil: { - * // x: 86.531624, - * // y: 126.49807, - * // z: -2.2496929 - * // }, - * // right: { - * // x: 105.28892, - * // y: 125.57655, - * // z: -2.51554 - * // }, - * // top: { - * // x: 86.706947, - * // y: 119.47144, - * // z: -4.1606765 - * // } - * // }, - * // right: { - * // bottom: { - * // x: 179.30353, - * // y: 121.03307, - * // z: -14.843414 - * // }, - * // center: { - * // x: 181.17694, - * // y: 115.16437, - * // z: -12.82961 - * // }, - * // left: { - * // x: 158.2863, - * // y: 118.491, - * // z: -9.723031 - * // }, - * // pupil: { - * // x: 175.99976, - * // y: 114.64407, - * // z: -14.53744 - * // }, - * // right: { - * // x: 194.59413, - * // y: 115.91954, - * // z: -6.952745 - * // }, - * // top: { - * // x: 173.99446, - * // y: 107.94287, - * // z: -16.050705 - * // } - * // } - * // }, - * // forehead: { - * // x: 126.53813, - * // y: 93.812057, - * // z: -18.863352 - * // }, - * // lips: { - * // bottom: { - * // x: 137.28528, - * // y: 219.23564, - * // z: -56.663128 - * // }, - * // top: { - * // x: 134.74164, - * // y: 192.50438, - * // z: -53.876408 - * // } - * // }, - * // mouth: { - * // center: { - * // x: 136.43481, - * // y: 204.37952, - * // z: -51.620205 - * // }, - * // left: { - * // x: 104.53558, - * // y: 214.05037, - * // z: 
-30.056231 - * // }, - * // right: { - * // x: 173.79134, - * // y: 204.99333, - * // z: -39.725758 - * // } - * // }, - * // nose: { - * // bottom: { - * // center: { - * // x: 133.81947, - * // y: 173.16437, - * // z: -48.287724 - * // }, - * // left: { - * // x: 110.98372, - * // y: 173.61331, - * // z: -29.7784 - * // }, - * // right: { - * // x: 161.31354, - * // y: 168.24527, - * // z: -36.1628 - * // } - * // }, - * // tip: { - * // x: 128.14919, - * // y: 153.68129, - * // z: -63.198204 - * // }, - * // top: { - * // x: 127.83745, - * // y: 110.17557, - * // z: -22.650913 - * // } - * // } - * // }, - * // confidence: 56.748849, - * // anger: false, - * // angerLikelihood: 1, - * // blurred: false, - * // blurredLikelihood: 1, - * // headwear: false, - * // headwearLikelihood: 1, - * // joy: false, - * // joyLikelihood: 1, - * // sorrow: false, - * // sorrowLikelihood: 1, - * // surprise: false, - * // surpriseLikelihood: 1, - * // underExposed: false, - * // underExposedLikelihood: 1 - * // } - * // ] - * }); - * - * //- - * // Our library simplifies the response from the API. Use the map below to see - * // each response name's original name. - * //- - * var shortNameToLongNameMap = { - * chin: { - * center: 'CHIN_GNATHION', - * left: 'CHIN_LEFT_GONION', - * right: 'CHIN_RIGHT_GONION' - * }, - * - * ears: { - * left: 'LEFT_EAR_TRAGION', - * right: 'RIGHT_EAR_TRAGION' - * }, - * - * eyebrows: { - * left: { - * left: 'LEFT_OF_LEFT_EYEBROW', - * right: 'RIGHT_OF_LEFT_EYEBROW', - * top: 'LEFT_EYEBROW_UPPER_MIDPOINT' - * }, - * right: { - * left: 'LEFT_OF_RIGHT_EYEBROW', - * right: 'RIGHT_OF_RIGHT_EYEBROW', - * top: 'RIGHT_EYEBROW_UPPER_MIDPOINT' - * } - * }, - * - * eyes: { - * left: { - * bottom: 'LEFT_EYE_BOTTOM_BOUNDARY', - * center: 'LEFT_EYE', - * left: 'LEFT_EYE_LEFT_CORNER', - * pupil: 'LEFT_EYE_PUPIL', - * right: 'LEFT_EYE_RIGHT_CORNER', - * top: 'LEFT_EYE_TOP_BOUNDARY' - * }, - * right: { - * bottom: 'RIGHT_EYE_BOTTOM_BOUNDARY', - * center: 'RIGHT_EYE', - * left: 'RIGHT_EYE_LEFT_CORNER', - * pupil: 'RIGHT_EYE_PUPIL', - * right: 'RIGHT_EYE_RIGHT_CORNER', - * top: 'RIGHT_EYE_TOP_BOUNDARY' - * } - * }, - * - * forehead: 'FOREHEAD_GLABELLA', - * - * lips: { - * bottom: 'LOWER_LIP', - * top: 'UPPER_LIP' - * }, - * - * mouth: { - * center: 'MOUTH_CENTER', - * left: 'MOUTH_LEFT', - * right: 'MOUTH_RIGHT' - * }, - * - * nose: { - * bottom: { - * center: 'NOSE_BOTTOM_CENTER', - * left: 'NOSE_BOTTOM_LEFT', - * right: 'NOSE_BOTTOM_RIGHT' - * }, - * tip: 'NOSE_TIP', - * top: 'MIDPOINT_BETWEEN_EYES' - * } - * }; - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectFaces('image.jpg').then(function(data) { - * var faces = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectFaces = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['faces'] - }); - - this.detect(images, options, callback); -}; - -/** - * Annotate an image with descriptive labels. - * - *

- * - * See {module:vision#detect}. - * - * @resource [EntityAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#EntityAnnotation} - * - * @example - * vision.detectLabels('image.jpg', function(err, labels, apiResponse) { - * // labels = [ - * // 'classical sculpture', - * // 'statue', - * // 'landmark', - * // 'ancient history', - * // 'artwork' - * // ] - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var opts = { - * verbose: true - * }; - * - * vision.detectLabels('image.jpg', opts, function(err, labels, apiResponse) { - * // labels = [ - * // { - * // desc: 'classical sculpture', - * // id: '/m/095yjj', - * // score: 98.092282 - * // }, - * // { - * // desc: 'statue', - * // id: '/m/013_1c', - * // score: 90.66112 - * // }, - * // // ... - * // ] - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectLabels('image.jpg').then(function(data) { - * var labels = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectLabels = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['labels'] - }); - - this.detect(images, options, callback); -}; - -/** - * Detect the landmarks from an image. - * - *

- * - * See {module:vision#detect}. - * - * @resource [EntityAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#EntityAnnotation} - * - * @example - * vision.detectLandmarks('image.jpg', function(err, landmarks, apiResponse) { - * // landmarks = [ - * // 'Mount Rushmore' - * // ] - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var image = 'image.jpg'; - * - * var opts = { - * verbose: true - * }; - * - * vision.detectLandmarks(image, opts, function(err, landmarks, apiResponse) { - * // landmarks = [ - * // { - * // desc: 'Mount Rushmore', - * // id: '/m/019dvv', - * // score: 28.651705, - * // bounds: [ - * // { - * // x: 79, - * // y: 130 - * // }, - * // { - * // x: 284, - * // y: 130 - * // }, - * // { - * // x: 284, - * // y: 226 - * // }, - * // { - * // x: 79, - * // y: 226 - * // } - * // ], - * // locations: [ - * // { - * // latitude: 43.878264, - * // longitude: -103.45700740814209 - * // } - * // ] - * // } - * // ] - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectLandmarks('image.jpg').then(function(data) { - * var landmarks = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectLandmarks = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['landmarks'] - }); - - this.detect(images, options, callback); -}; - -/** - * Detect the logos from an image. - * - *

- * - * See {module:vision#detect}. - * - * @resource [EntityAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#EntityAnnotation} - * - * @example - * vision.detectLogos('image.jpg', function(err, logos, apiResponse) { - * // logos = [ - * // 'Google' - * // ] - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var options = { - * verbose: true - * }; - * - * vision.detectLogos('image.jpg', options, function(err, logos, apiResponse) { - * // logos = [ - * // { - * // desc: 'Google', - * // id: '/m/045c7b', - * // score: 64.35439, - * // bounds: [ - * // { - * // x: 11, - * // y: 11 - * // }, - * // { - * // x: 330, - * // y: 11 - * // }, - * // { - * // x: 330, - * // y: 72 - * // }, - * // { - * // x: 11, - * // y: 72 - * // } - * // ] - * // } - * // ] - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectLogos('image.jpg').then(function(data) { - * var logos = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectLogos = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['logos'] - }); - - this.detect(images, options, callback); -}; - -/** - * Get a set of properties about an image, such as its dominant colors. - * - *

- * - * See {module:vision#detect}. - * - * @resource [ImageProperties JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#ImageProperties} - * - * @example - * vision.detectProperties('image.jpg', function(err, props, apiResponse) { - * // props = { - * // colors: [ - * // '3b3027', - * // '727d81', - * // '3f2f22', - * // '838e92', - * // '482a16', - * // '5f4f3c', - * // '261b14', - * // 'b39b7f', - * // '51473f', - * // '2c1e12' - * // ] - * // } - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var image = 'image.jpg'; - * - * var options = { - * verbose: true - * }; - * - * vision.detectProperties(image, options, function(err, props, apiResponse) { - * // props = { - * // colors: [ - * // { - * // red: 59, - * // green: 48, - * // blue: 39, - * // score: 26.618013, - * // coverage: 15.948276, - * // hex: '3b3027' - * // }, - * // { - * // red: 114, - * // green: 125, - * // blue: 129, - * // score: 10.319714, - * // coverage: 8.3977409, - * // hex: '727d81' - * // }, - * // // ... - * // ] - * // } - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectProperties('image.jpg').then(function(data) { - * var props = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectProperties = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['properties'] - }); - - this.detect(images, options, callback); -}; - -/** - * Detect the SafeSearch flags from an image. - * - *

- * - * See {module:vision#detect}. - * - * @resource [SafeSearch JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#SafeSearchAnnotation} - * - * @example - * vision.detectSafeSearch('image.jpg', function(err, safeSearch, apiResponse) { - * // safeSearch = { - * // adult: false, - * // medical: false, - * // spoof: false, - * // violence: true - * // } - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectSafeSearch('image.jpg').then(function(data) { - * var safeSearch = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectSafeSearch = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['safeSearch'] - }); - - this.detect(images, options, callback); -}; - -/** - * Detect similar images from the internet. - * - *

- * - * See {module:vision#detect}. - * - * @resource [WebAnnotation JSON representation]{@link https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#WebAnnotation} - * - * @example - * vision.detectSimilar('image.jpg', function(err, images, apiResponse) { - * // images = [ - * // 'http://www.example.com/most-similar-image', - * // // ... - * // 'http://www.example.com/least-similar-image' - * // ] - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var opts = { - * verbose: true - * }; - * - * vision.detectSimilar('image.jpg', opts, function(err, similar, apiResponse) { - * // similar = { - * // entities: [ - * // 'Logo', - * // // ... - * // ], - * // fullMatches: [ - * // 'http://www.example.com/most-similar-image', - * // // ... - * // 'http://www.example.com/least-similar-image' - * // ], - * // partialMatches: [ - * // 'http://www.example.com/most-similar-image', - * // // ... - * // 'http://www.example.com/least-similar-image' - * // ], - * // pages: [ - * // 'http://www.example.com/page-with-most-similar-image', - * // // ... - * // 'http://www.example.com/page-with-least-similar-image' - * // ] - * // } - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectSimilar('image.jpg').then(function(data) { - * var images = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectSimilar = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['similar'] - }); - - this.detect(images, options, callback); -}; - -/** - * Detect the text within an image. - * - *

- * - * See {module:vision#detect}. - * - * @example - * vision.detectText('image.jpg', function(err, text, apiResponse) { - * // text = [ - * // 'This was text found in the image' - * // ] - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var options = { - * verbose: true - * }; - * - * vision.detectText('image.jpg', options, function(err, text, apiResponse) { - * // text = [ - * // { - * // desc: 'This was text found in the image', - * // bounds: [ - * // { - * // x: 4, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 89 - * // }, - * // { - * // x: 4, - * // y: 89 - * // } - * // ] - * // } - * // ] - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.detectText('image.jpg').then(function(data) { - * var text = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.detectText = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['text'] - }); - - this.detect(images, options, callback); -}; - -/** - * Annotate a document. - * - *

- * - * See {module:vision#detect}. - * - * @resource [FullTextAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#FullTextAnnotation} - * - * @example - * vision.readDocument('image.jpg', function(err, text, apiResponse) { - * // text = 'This paragraph was extracted from image.jpg'; - * }); - * - * //- - * // Activate `verbose` mode for a more detailed response. - * //- - * var opts = { - * verbose: true - * }; - * - * vision.readDocument('image.jpg', opts, function(err, pages, apiResponse) { - * // pages = [ - * // { - * // languages: [ - * // 'en' - * // ], - * // width: 688, - * // height: 1096, - * // blocks: [ - * // { - * // type: 'TEXT', - * // bounds: [ - * // { - * // x: 4, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 89 - * // }, - * // { - * // x: 4, - * // y: 89 - * // } - * // ], - * // paragraphs: [ - * // { - * // bounds: [ - * // { - * // x: 4, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 89 - * // }, - * // { - * // x: 4, - * // y: 89 - * // } - * // ], - * // words: [ - * // { - * // bounds: [ - * // { - * // x: 4, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 89 - * // }, - * // { - * // x: 4, - * // y: 89 - * // } - * // ], - * // symbols: [ - * // { - * // bounds: [ - * // { - * // x: 4, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 5 - * // }, - * // { - * // x: 493, - * // y: 89 - * // }, - * // { - * // x: 4, - * // y: 89 - * // } - * // ], - * // text: 'T' - * // }, - * // // ... - * // ] - * // }, - * // // ... - * // ] - * // }, - * // // ... - * // ] - * // }, - * // // ... - * // ] - * // } - * // ] - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * vision.readDocument('image.jpg').then(function(data) { - * var pages = data[0]; - * var apiResponse = data[1]; - * }); - */ -Vision.prototype.readDocument = function(images, options, callback) { - if (is.fn(options)) { - callback = options; - options = {}; - } - - options = extend({}, options, { - types: ['document'] - }); - - this.detect(images, options, callback); -}; - -/** - * Determine the type of image the user is asking to be annotated. If a - * {module:storage/file}, convert to its "gs://{bucket}/{file}" URL. If a remote - * URL, format as the API expects. If a file path to a local file, convert to a - * base64 string. - * - * @private - */ -Vision.findImages_ = function(images, callback) { - if (global.GCLOUD_SANDBOX_ENV) { - callback(null, [ - { - content: new Buffer('') - } - ]); - return; - } - - var MAX_PARALLEL_LIMIT = 5; - images = arrify(images); - - function findImage(image, callback) { - if (Buffer.isBuffer(image)) { - callback(null, { - content: image.toString('base64') - }); - return; - } - - if (common.util.isCustomType(image, 'storage/file')) { - callback(null, { - source: { - gcsImageUri: format('gs://{bucketName}/{fileName}', { - bucketName: image.bucket.name, - fileName: image.name - }) - } - }); - return; - } - - // File is a URL. - if (/^http/.test(image)) { - callback(null, { - source: { - imageUri: image - } - }); - return; - } - - // File exists on disk. 
- fs.readFile(image, { encoding: 'base64' }, function(err, contents) { - if (err) { - callback(err); - return; - } - - callback(null, { content: contents }); - }); - } - - async.mapLimit(images, MAX_PARALLEL_LIMIT, findImage, callback); -}; - -/** - * Format a raw crop hint annotation response from the API. - * - * @private - */ -Vision.formatCropHintsAnnotation_ = function(cropHintsAnnotation, options) { - return cropHintsAnnotation.cropHints.map(function(cropHint) { - cropHint = { - bounds: cropHint.boundingPoly.vertices, - confidence: cropHint.confidence - }; - - return options.verbose ? cropHint : cropHint.bounds; - }); -}; - -/** - * Format a raw entity annotation response from the API. - * - * @private - */ -Vision.formatEntityAnnotation_ = function(entityAnnotation, options) { - if (!options.verbose) { - return entityAnnotation.description; - } - - var formattedEntityAnnotation = { - desc: entityAnnotation.description - }; - - if (entityAnnotation.mid) { - formattedEntityAnnotation.mid = entityAnnotation.mid; - } - - if (entityAnnotation.score) { - formattedEntityAnnotation.score = entityAnnotation.score * 100; - } - - if (entityAnnotation.boundingPoly) { - formattedEntityAnnotation.bounds = entityAnnotation.boundingPoly.vertices; - } - - if (is.defined(entityAnnotation.confidence)) { - formattedEntityAnnotation.confidence = entityAnnotation.confidence * 100; - } - - if (entityAnnotation.locations) { - var locations = entityAnnotation.locations; - formattedEntityAnnotation.locations = locations.map(prop('latLng')); - } - - if (entityAnnotation.properties) { - formattedEntityAnnotation.properties = entityAnnotation.properties; - } - - return formattedEntityAnnotation; -}; - -/** - * Format a raw error from the API. - * - * @private - */ -Vision.formatError_ = function(err) { - var httpError = commonGrpc.Service.GRPC_ERROR_CODE_TO_HTTP[err.code]; - - if (httpError) { - err.code = httpError.code; - } - - delete err.details; - - return err; -}; - -/** - * Format a raw face annotation response from the API. 
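To make the verbose path of Vision.formatEntityAnnotation_ above concrete, here is a minimal, illustrative sketch of the mapping it applies; the input values are hypothetical and chosen to echo the label example earlier in this file:

    // A raw EntityAnnotation as the API might return it (hypothetical values).
    var raw = {
      description: 'statue',
      mid: '/m/013_1c',
      score: 0.9066112,
      boundingPoly: { vertices: [{ x: 1, y: 1 }, { x: 295, y: 1 }] }
    };

    // The simplified shape produced when `verbose: true`.
    var formatted = {
      desc: raw.description,
      mid: raw.mid,
      score: raw.score * 100,             // scores are rescaled to 0-100
      bounds: raw.boundingPoly.vertices
    };

    console.log(formatted.desc, formatted.score); // 'statue' and a 0-100 score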
- * - * @private - */ -Vision.formatFaceAnnotation_ = function(faceAnnotation) { - function findLandmark(type) { - var landmarks = faceAnnotation.landmarks; - - return landmarks.filter(function(landmark) { - return landmark.type === type; - })[0].position; - } - - var formattedFaceAnnotation = { - angles: { - pan: faceAnnotation.panAngle, - roll: faceAnnotation.rollAngle, - tilt: faceAnnotation.tiltAngle - }, - - bounds: { - head: faceAnnotation.boundingPoly.vertices, - face: faceAnnotation.fdBoundingPoly.vertices - }, - - features: { - confidence: faceAnnotation.landmarkingConfidence * 100, - chin: { - center: findLandmark('CHIN_GNATHION'), - left: findLandmark('CHIN_LEFT_GONION'), - right: findLandmark('CHIN_RIGHT_GONION') - }, - ears: { - left: findLandmark('LEFT_EAR_TRAGION'), - right: findLandmark('RIGHT_EAR_TRAGION'), - }, - eyebrows: { - left: { - left: findLandmark('LEFT_OF_LEFT_EYEBROW'), - right: findLandmark('RIGHT_OF_LEFT_EYEBROW'), - top: findLandmark('LEFT_EYEBROW_UPPER_MIDPOINT') - }, - right: { - left: findLandmark('LEFT_OF_RIGHT_EYEBROW'), - right: findLandmark('RIGHT_OF_RIGHT_EYEBROW'), - top: findLandmark('RIGHT_EYEBROW_UPPER_MIDPOINT') - } - }, - eyes: { - left: { - bottom: findLandmark('LEFT_EYE_BOTTOM_BOUNDARY'), - center: findLandmark('LEFT_EYE'), - left: findLandmark('LEFT_EYE_LEFT_CORNER'), - pupil: findLandmark('LEFT_EYE_PUPIL'), - right: findLandmark('LEFT_EYE_RIGHT_CORNER'), - top: findLandmark('LEFT_EYE_TOP_BOUNDARY') - }, - right: { - bottom: findLandmark('RIGHT_EYE_BOTTOM_BOUNDARY'), - center: findLandmark('RIGHT_EYE'), - left: findLandmark('RIGHT_EYE_LEFT_CORNER'), - pupil: findLandmark('RIGHT_EYE_PUPIL'), - right: findLandmark('RIGHT_EYE_RIGHT_CORNER'), - top: findLandmark('RIGHT_EYE_TOP_BOUNDARY') - } - }, - forehead: findLandmark('FOREHEAD_GLABELLA'), - lips: { - bottom: findLandmark('LOWER_LIP'), - top: findLandmark('UPPER_LIP') - }, - mouth: { - center: findLandmark('MOUTH_CENTER'), - left: findLandmark('MOUTH_LEFT'), - right: findLandmark('MOUTH_RIGHT') - }, - nose: { - bottom: { - center: findLandmark('NOSE_BOTTOM_CENTER'), - left: findLandmark('NOSE_BOTTOM_LEFT'), - right: findLandmark('NOSE_BOTTOM_RIGHT') - }, - tip: findLandmark('NOSE_TIP'), - top: findLandmark('MIDPOINT_BETWEEN_EYES') - } - }, - - confidence: faceAnnotation.detectionConfidence * 100 - }; - - // Remove the `Likelihood` part from a property name. - // input: "joyLikelihood", output: "joy" - for (var prop in faceAnnotation) { - if (prop.indexOf('Likelihood') > -1) { - var shortenedProp = prop.replace('Likelihood', ''); - - formattedFaceAnnotation[shortenedProp] = - Vision.gteLikelihood_(LIKELY, faceAnnotation[prop]); - - formattedFaceAnnotation[prop] = Vision.likelihood[faceAnnotation[prop]]; - } - } - - return formattedFaceAnnotation; -}; - -/** - * Format a raw full text annotation response from the API. 
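The likelihood-shortening loop at the end of Vision.formatFaceAnnotation_ above is easier to see with a small, self-contained sketch; the numeric ranking below is an assumption standing in for the library's own Vision.likelihood map, and only the relative order matters:

    // Assumed ranking of likelihood strings (the real map is defined
    // elsewhere in this file).
    var likelihood = {
      VERY_UNLIKELY: 0, UNLIKELY: 1, POSSIBLE: 2, LIKELY: 3, VERY_LIKELY: 4
    };
    var LIKELY = likelihood.LIKELY;

    var faceAnnotation = { joyLikelihood: 'VERY_LIKELY', angerLikelihood: 'UNLIKELY' };
    var formatted = {};

    Object.keys(faceAnnotation).forEach(function(prop) {
      if (prop.indexOf('Likelihood') > -1) {
        var shortened = prop.replace('Likelihood', '');
        // Boolean: is this attribute at least LIKELY?
        formatted[shortened] = likelihood[faceAnnotation[prop]] >= LIKELY;
        // Numeric ranking in place of the raw enum string.
        formatted[prop] = likelihood[faceAnnotation[prop]];
      }
    });

    console.log(formatted);
    // => { joy: true, joyLikelihood: 4, anger: false, angerLikelihood: 1 }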
- * - * @private - */ -Vision.formatFullTextAnnotation_ = function(fullTextAnnotation, options) { - if (!options.verbose) { - return fullTextAnnotation.text; - } +var extend = require('extend'); - return fullTextAnnotation.pages - .map(function(page) { - return { - languages: page.property.detectedLanguages.map(prop('languageCode')), - width: page.width, - height: page.height, - blocks: page.blocks.map(function(block) { - return { - type: block.blockType, - bounds: block.boundingBox && block.boundingBox.vertices || [], - paragraphs: arrify(block.paragraphs) - .map(function(paragraph) { - return { - bounds: paragraph.boundingBox.vertices, - words: paragraph.words.map(function(word) { - return { - bounds: word.boundingBox.vertices, - symbols: word.symbols.map(function(symbol) { - return { - bounds: symbol.boundingBox.vertices, - text: symbol.text - }; - }) - }; - }) - }; - }) - }; - }) - }; - }); +var gapic = { + v1: require('./v1'), }; +var gaxGrpc = require('google-gax').grpc(); -/** - * Format a raw image properties annotation response from the API. - * - * @private - */ -Vision.formatImagePropertiesAnnotation_ = function(imageAnnotation, options) { - var formattedImageAnnotation = { - colors: imageAnnotation.dominantColors.colors - .map(function(colorObject) { - var red = colorObject.color.red; - var green = colorObject.color.green; - var blue = colorObject.color.blue; - - var hex = rgbHex(red, green, blue); +var helpers = require('./helpers'); - if (!options.verbose) { - return hex; - } - - colorObject.hex = hex; - - colorObject.red = red; - colorObject.green = green; - colorObject.blue = blue; - delete colorObject.color; - - colorObject.coverage = colorObject.pixelFraction *= 100; - delete colorObject.pixelFraction; - - colorObject.score *= 100; - - return colorObject; - }) - }; - - return formattedImageAnnotation; -}; - -/** - * Format a raw SafeSearch annotation response from the API. - * - * @private - */ -Vision.formatSafeSearchAnnotation_ = function(ssAnnotation, options) { - if (!options.verbose) { - for (var prop in ssAnnotation) { - var value = ssAnnotation[prop]; - ssAnnotation[prop] = Vision.gteLikelihood_(LIKELY, value); - } - return ssAnnotation; - } +const VERSION = require('../package.json').version; - return ssAnnotation; -}; /** - * Format a raw web detection response from the API. + * Create an imageAnnotatorClient with additional helpers for common + * tasks. * - * @private - */ -Vision.formatWebDetection_ = function(webDetection, options) { - function sortByScore(a, b) { - return a.score < b.score ? 1 : a.score > b.score ? -1 : 0; - } - - var formattedWebDetection = { - entities: arrify(webDetection.webEntities).map(prop('description')), - - fullMatches: arrify(webDetection.fullMatchingImages) - .sort(sortByScore) - .map(prop('url')), - - partialMatches: arrify(webDetection.partialMatchingImages) - .sort(sortByScore) - .map(prop('url')), - - pages: arrify(webDetection.pagesWithMatchingImages) - .sort(sortByScore) - .map(prop('url')) - }; - - if (!options.verbose) { - // Combine all matches. - formattedWebDetection = formattedWebDetection.fullMatches - .concat(formattedWebDetection.partialMatches); - } - - return formattedWebDetection; -}; + * @constructor + * @alias module:vision + * @mixes module:vision/helpers + * + * @param {Object=} opts - The optional parameters. + * @param {String=} opts.servicePath + * The domain name of the API remote host. + * @param {number=} opts.port + * The port on which to connect to the remote host. 
+ * @param {grpc.ClientCredentials=} opts.sslCreds + * A ClientCredentials for use with an SSL-enabled channel. + * @param {Object=} opts.clientConfig + * The customized config to build the call settings. See + * {@link gax.constructSettings} for the format. + */ +function visionV1(opts) { + // Define the header options. + opts = opts || {}; + opts.libName = 'gccl'; + opts.libVersion = VERSION; + + // Create the image annotator client with the provided options. + var client = gapic.v1(opts).imageAnnotatorClient(opts); + if (is.undefined(client.annotateImage)) { + Object.assign(client.constructor.prototype, helpers('v1')); + } + return client; +} -/** - * Convert a "likelihood" value to a boolean representation, based on the lowest - * likelihood provided. - * - * @private - * - * @example - * Vision.gteLikelihood_(Vision.likelihood.VERY_LIKELY, 'POSSIBLE'); - * // false - * - * Vision.gteLikelihood_(Vision.likelihood.UNLIKELY, 'POSSIBLE'); - * // true - */ -Vision.gteLikelihood_ = function(baseLikelihood, likelihood) { - return Vision.likelihood[likelihood] >= baseLikelihood; -}; +var v1Protos = {}; -/*! Developer Documentation - * - * All async methods (except for streams) will return a Promise in the event - * that a callback is omitted. - */ -common.util.promisifyAll(Vision); +extend(v1Protos, gaxGrpc.load([{ + root: require('google-proto-files')('..'), + file: 'google/cloud/vision/v1/image_annotator.proto' +}]).google.cloud.vision.v1); -module.exports = Vision; -module.exports.v1 = v1; +module.exports = visionV1; +module.exports.types = v1Protos; +module.exports.v1 = visionV1; +module.exports.v1.types = v1Protos; diff --git a/packages/vision/src/v1/doc/doc_geometry.js b/packages/vision/src/v1/doc/doc_geometry.js new file mode 100644 index 00000000000..cfff72bd26c --- /dev/null +++ b/packages/vision/src/v1/doc/doc_geometry.js @@ -0,0 +1,73 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * A vertex represents a 2D point in the image. + * NOTE: the vertex coordinates are in the same scale as the original image. + * + * @property {number} x + * X coordinate. + * + * @property {number} y + * Y coordinate. + * + * @class + * @see [google.cloud.vision.v1.Vertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/geometry.proto} + */ +var Vertex = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A bounding polygon for the detected image annotation. + * + * @property {Object[]} vertices + * The bounding polygon vertices. 
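A minimal usage sketch of the partial veneer wired up above: the visionV1 factory exported from this file plus the annotateImage helper that the guard on client.annotateImage implies is mixed in from ./helpers. The project ID, key file, bucket, and the exact response shape are assumptions; responses are taken to follow the gax convention of resolving with an array whose first element is the AnnotateImageResponse.

    var vision = require('@google-cloud/vision')({
      projectId: 'my-project',              // hypothetical credentials
      keyFilename: '/path/to/keyfile.json'
    });

    vision.annotateImage({
      image: { source: { imageUri: 'gs://my-bucket/image.jpg' } },
      features: [{ type: 4 }]               // 4 = Feature.Type.LABEL_DETECTION
    }).then(function(responses) {
      var labelAnnotations = responses[0].labelAnnotations || [];
      labelAnnotations.forEach(function(label) {
        console.log(label.description, label.score);
      });
    });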
+ * + * This object should have the same structure as [Vertex]{@link Vertex} + * + * @class + * @see [google.cloud.vision.v1.BoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/geometry.proto} + */ +var BoundingPoly = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A 3D position in the image, used primarily for Face detection landmarks. + * A valid Position must have both x and y coordinates. + * The position coordinates are in the same scale as the original image. + * + * @property {number} x + * X coordinate. + * + * @property {number} y + * Y coordinate. + * + * @property {number} z + * Z coordinate (or depth). + * + * @class + * @see [google.cloud.vision.v1.Position definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/geometry.proto} + */ +var Position = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_google_protobuf_any.js b/packages/vision/src/v1/doc/doc_google_protobuf_any.js new file mode 100644 index 00000000000..0697ec15814 --- /dev/null +++ b/packages/vision/src/v1/doc/doc_google_protobuf_any.js @@ -0,0 +1,121 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * `Any` contains an arbitrary serialized protocol buffer message along with a + * URL that describes the type of the serialized message. + * + * Protobuf library provides support to pack/unpack Any values in the form + * of utility functions or additional generated methods of the Any type. + * + * Example 1: Pack and unpack a message in C++. + * + * Foo foo = ...; + * Any any; + * any.PackFrom(foo); + * ... + * if (any.UnpackTo(&foo)) { + * ... + * } + * + * Example 2: Pack and unpack a message in Java. + * + * Foo foo = ...; + * Any any = Any.pack(foo); + * ... + * if (any.is(Foo.class)) { + * foo = any.unpack(Foo.class); + * } + * + * Example 3: Pack and unpack a message in Python. + * + * foo = Foo(...) + * any = Any() + * any.Pack(foo) + * ... + * if any.Is(Foo.DESCRIPTOR): + * any.Unpack(foo) + * ... + * + * The pack methods provided by protobuf library will by default use + * 'type.googleapis.com/full.type.name' as the type URL and the unpack + * methods only use the fully qualified type name after the last '/' + * in the type URL, for example "foo.bar.com/x/y.z" will yield type + * name "y.z". + * + * + * # JSON + * + * The JSON representation of an `Any` value uses the regular + * representation of the deserialized, embedded message, with an + * additional field `@type` which contains the type URL. 
Example: + * + * package google.profile; + * message Person { + * string first_name = 1; + * string last_name = 2; + * } + * + * { + * "@type": "type.googleapis.com/google.profile.Person", + * "firstName": , + * "lastName": + * } + * + * If the embedded message type is well-known and has a custom JSON + * representation, that representation will be embedded adding a field + * `value` which holds the custom JSON in addition to the `@type` + * field. Example (for message {@link google.protobuf.Duration}): + * + * { + * "@type": "type.googleapis.com/google.protobuf.Duration", + * "value": "1.212s" + * } + * + * @external "google.protobuf.Any" + * @property {string} typeUrl + * A URL/resource name whose content describes the type of the + * serialized protocol buffer message. + * + * For URLs which use the scheme `http`, `https`, or no scheme, the + * following restrictions and interpretations apply: + * + * * If no scheme is provided, `https` is assumed. + * * The last segment of the URL's path must represent the fully + * qualified name of the type (as in `path/google.protobuf.Duration`). + * The name should be in a canonical form (e.g., leading "." is + * not accepted). + * * An HTTP GET on the URL must yield a {@link google.protobuf.Type} + * value in binary format, or produce an error. + * * Applications are allowed to cache lookup results based on the + * URL, or have them precompiled into a binary to avoid any + * lookup. Therefore, binary compatibility needs to be preserved + * on changes to types. (Use versioned type names to manage + * breaking changes.) + * + * Schemes other than `http`, `https` (or the empty scheme) might be + * used with implementation specific semantics. + * + * @property {string} value + * Must be a valid serialized protocol buffer of the above specified type. + * + * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto} + */ \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_google_protobuf_wrappers.js b/packages/vision/src/v1/doc/doc_google_protobuf_wrappers.js new file mode 100644 index 00000000000..46a5e3e2213 --- /dev/null +++ b/packages/vision/src/v1/doc/doc_google_protobuf_wrappers.js @@ -0,0 +1,128 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * Wrapper message for `double`. + * + * The JSON representation for `DoubleValue` is JSON number. + * + * @external "google.protobuf.DoubleValue" + * @property {number} value + * The double value. + * + * @see [google.protobuf.DoubleValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `float`. + * + * The JSON representation for `FloatValue` is JSON number. 
+ * + * @external "google.protobuf.FloatValue" + * @property {number} value + * The float value. + * + * @see [google.protobuf.FloatValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `int64`. + * + * The JSON representation for `Int64Value` is JSON string. + * + * @external "google.protobuf.Int64Value" + * @property {number} value + * The int64 value. + * + * @see [google.protobuf.Int64Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `uint64`. + * + * The JSON representation for `UInt64Value` is JSON string. + * + * @external "google.protobuf.UInt64Value" + * @property {number} value + * The uint64 value. + * + * @see [google.protobuf.UInt64Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `int32`. + * + * The JSON representation for `Int32Value` is JSON number. + * + * @external "google.protobuf.Int32Value" + * @property {number} value + * The int32 value. + * + * @see [google.protobuf.Int32Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `uint32`. + * + * The JSON representation for `UInt32Value` is JSON number. + * + * @external "google.protobuf.UInt32Value" + * @property {number} value + * The uint32 value. + * + * @see [google.protobuf.UInt32Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `bool`. + * + * The JSON representation for `BoolValue` is JSON `true` and `false`. + * + * @external "google.protobuf.BoolValue" + * @property {boolean} value + * The bool value. + * + * @see [google.protobuf.BoolValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `string`. + * + * The JSON representation for `StringValue` is JSON string. + * + * @external "google.protobuf.StringValue" + * @property {string} value + * The string value. + * + * @see [google.protobuf.StringValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ + +/** + * Wrapper message for `bytes`. + * + * The JSON representation for `BytesValue` is JSON string. + * + * @external "google.protobuf.BytesValue" + * @property {string} value + * The bytes value. + * + * @see [google.protobuf.BytesValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_google_rpc_status.js b/packages/vision/src/v1/doc/doc_google_rpc_status.js new file mode 100644 index 00000000000..c85f1befe90 --- /dev/null +++ b/packages/vision/src/v1/doc/doc_google_rpc_status.js @@ -0,0 +1,92 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * The `Status` type defines a logical error model that is suitable for different + * programming environments, including REST APIs and RPC APIs. It is used by + * [gRPC](https://github.com/grpc). The error model is designed to be: + * + * - Simple to use and understand for most users + * - Flexible enough to meet unexpected needs + * + * # Overview + * + * The `Status` message contains three pieces of data: error code, error message, + * and error details. The error code should be an enum value of + * {@link google.rpc.Code}, but it may accept additional error codes if needed. The + * error message should be a developer-facing English message that helps + * developers *understand* and *resolve* the error. If a localized user-facing + * error message is needed, put the localized message in the error details or + * localize it in the client. The optional error details may contain arbitrary + * information about the error. There is a predefined set of error detail types + * in the package `google.rpc` which can be used for common error conditions. + * + * # Language mapping + * + * The `Status` message is the logical representation of the error model, but it + * is not necessarily the actual wire format. When the `Status` message is + * exposed in different client libraries and different wire protocols, it can be + * mapped differently. For example, it will likely be mapped to some exceptions + * in Java, but more likely mapped to some error codes in C. + * + * # Other uses + * + * The error model and the `Status` message can be used in a variety of + * environments, either with or without APIs, to provide a + * consistent developer experience across different environments. + * + * Example uses of this error model include: + * + * - Partial errors. If a service needs to return partial errors to the client, + * it may embed the `Status` in the normal response to indicate the partial + * errors. + * + * - Workflow errors. A typical workflow has multiple steps. Each step may + * have a `Status` message for error reporting purpose. + * + * - Batch operations. If a client uses batch request and batch response, the + * `Status` message should be used directly inside batch response, one for + * each error sub-response. + * + * - Asynchronous operations. If an API call embeds asynchronous operation + * results in its response, the status of those operations should be + * represented directly using the `Status` message. + * + * - Logging. If some API errors are stored in logs, the message `Status` could + * be used directly after any stripping needed for security/privacy reasons. + * + * @external "google.rpc.Status" + * @property {number} code + * The status code, which should be an enum value of {@link google.rpc.Code}. + * + * @property {string} message + * A developer-facing error message, which should be in English. 
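As a small, hypothetical illustration of the error model described above, a failed annotation for one image could surface a Status-shaped object such as:

    // Hypothetical per-image error; 3 is google.rpc.Code.INVALID_ARGUMENT.
    var status = {
      code: 3,
      message: 'Bad image data.',
      details: []
    };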
Any + * user-facing error message should be localized and sent in the + * {@link google.rpc.Status.details} field, or localized by the client. + * + * @property {Object[]} details + * A list of messages that carry the error details. There will be a + * common set of message types for APIs to use. + * + * This object should have the same structure as [google.protobuf.Any]{@link external:"google.protobuf.Any"} + * + * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto} + */ \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_google_type_color.js b/packages/vision/src/v1/doc/doc_google_type_color.js new file mode 100644 index 00000000000..679c7f72339 --- /dev/null +++ b/packages/vision/src/v1/doc/doc_google_type_color.js @@ -0,0 +1,164 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * Represents a color in the RGBA color space. This representation is designed + * for simplicity of conversion to/from color representations in various + * languages over compactness; for example, the fields of this representation + * can be trivially provided to the constructor of "java.awt.Color" in Java; it + * can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha" + * method in iOS; and, with just a little work, it can be easily formatted into + * a CSS "rgba()" string in JavaScript, as well. Here are some examples: + * + * Example (Java): + * + * import com.google.type.Color; + * + * // ... + * public static java.awt.Color fromProto(Color protocolor) { + * float alpha = protocolor.hasAlpha() + * ? protocolor.getAlpha().getValue() + * : 1.0; + * + * return new java.awt.Color( + * protocolor.getRed(), + * protocolor.getGreen(), + * protocolor.getBlue(), + * alpha); + * } + * + * public static Color toProto(java.awt.Color color) { + * float red = (float) color.getRed(); + * float green = (float) color.getGreen(); + * float blue = (float) color.getBlue(); + * float denominator = 255.0; + * Color.Builder resultBuilder = + * Color + * .newBuilder() + * .setRed(red / denominator) + * .setGreen(green / denominator) + * .setBlue(blue / denominator); + * int alpha = color.getAlpha(); + * if (alpha != 255) { + * result.setAlpha( + * FloatValue + * .newBuilder() + * .setValue(((float) alpha) / denominator) + * .build()); + * } + * return resultBuilder.build(); + * } + * // ... + * + * Example (iOS / Obj-C): + * + * // ... 
+ * static UIColor* fromProto(Color* protocolor) { + * float red = [protocolor red]; + * float green = [protocolor green]; + * float blue = [protocolor blue]; + * FloatValue* alpha_wrapper = [protocolor alpha]; + * float alpha = 1.0; + * if (alpha_wrapper != nil) { + * alpha = [alpha_wrapper value]; + * } + * return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; + * } + * + * static Color* toProto(UIColor* color) { + * CGFloat red, green, blue, alpha; + * if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { + * return nil; + * } + * Color* result = [Color alloc] init]; + * [result setRed:red]; + * [result setGreen:green]; + * [result setBlue:blue]; + * if (alpha <= 0.9999) { + * [result setAlpha:floatWrapperWithValue(alpha)]; + * } + * [result autorelease]; + * return result; + * } + * // ... + * + * Example (JavaScript): + * + * // ... + * + * var protoToCssColor = function(rgb_color) { + * var redFrac = rgb_color.red || 0.0; + * var greenFrac = rgb_color.green || 0.0; + * var blueFrac = rgb_color.blue || 0.0; + * var red = Math.floor(redFrac * 255); + * var green = Math.floor(greenFrac * 255); + * var blue = Math.floor(blueFrac * 255); + * + * if (!('alpha' in rgb_color)) { + * return rgbToCssColor_(red, green, blue); + * } + * + * var alphaFrac = rgb_color.alpha.value || 0.0; + * var rgbParams = [red, green, blue].join(','); + * return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); + * }; + * + * var rgbToCssColor_ = function(red, green, blue) { + * var rgbNumber = new Number((red << 16) | (green << 8) | blue); + * var hexString = rgbNumber.toString(16); + * var missingZeros = 6 - hexString.length; + * var resultBuilder = ['#']; + * for (var i = 0; i < missingZeros; i++) { + * resultBuilder.push('0'); + * } + * resultBuilder.push(hexString); + * return resultBuilder.join(''); + * }; + * + * // ... + * + * @external "google.type.Color" + * @property {number} red + * The amount of red in the color as a value in the interval [0, 1]. + * + * @property {number} green + * The amount of green in the color as a value in the interval [0, 1]. + * + * @property {number} blue + * The amount of blue in the color as a value in the interval [0, 1]. + * + * @property {Object} alpha + * The fraction of this color that should be applied to the pixel. That is, + * the final pixel color is defined by the equation: + * + * pixel color = alpha * (this color) + (1.0 - alpha) * (background color) + * + * This means that a value of 1.0 corresponds to a solid color, whereas + * a value of 0.0 corresponds to a completely transparent color. This + * uses a wrapper message rather than a simple float scalar so that it is + * possible to distinguish between a default value and the value being unset. + * If omitted, this color object is to be rendered as a solid color + * (as if the alpha value had been explicitly given with a value of 1.0). + * + * This object should have the same structure as [google.protobuf.FloatValue]{@link external:"google.protobuf.FloatValue"} + * + * @see [google.type.Color definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/color.proto} + */ \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_google_type_latlng.js b/packages/vision/src/v1/doc/doc_google_type_latlng.js new file mode 100644 index 00000000000..82dd2e824f9 --- /dev/null +++ b/packages/vision/src/v1/doc/doc_google_type_latlng.js @@ -0,0 +1,71 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * An object representing a latitude/longitude pair. This is expressed as a pair + * of doubles representing degrees latitude and degrees longitude. Unless + * specified otherwise, this must conform to the + * WGS84 + * standard. Values must be within normalized ranges. + * + * Example of normalization code in Python: + * + * def NormalizeLongitude(longitude): + * """Wraps decimal degrees longitude to [-180.0, 180.0].""" + * q, r = divmod(longitude, 360.0) + * if r > 180.0 or (r == 180.0 and q <= -1.0): + * return r - 360.0 + * return r + * + * def NormalizeLatLng(latitude, longitude): + * """Wraps decimal degrees latitude and longitude to + * [-90.0, 90.0] and [-180.0, 180.0], respectively.""" + * r = latitude % 360.0 + * if r <= 90.0: + * return r, NormalizeLongitude(longitude) + * elif r >= 270.0: + * return r - 360, NormalizeLongitude(longitude) + * else: + * return 180 - r, NormalizeLongitude(longitude + 180.0) + * + * assert 180.0 == NormalizeLongitude(180.0) + * assert -180.0 == NormalizeLongitude(-180.0) + * assert -179.0 == NormalizeLongitude(181.0) + * assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0) + * assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0) + * assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0) + * assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0) + * assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0) + * assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0) + * assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0) + * assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0) + * assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0) + * assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0) + * + * @external "google.type.LatLng" + * @property {number} latitude + * The latitude in degrees. It must be in the range [-90.0, +90.0]. + * + * @property {number} longitude + * The longitude in degrees. It must be in the range [-180.0, +180.0]. + * + * @see [google.type.LatLng definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/latlng.proto} + */ \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_image_annotator.js b/packages/vision/src/v1/doc/doc_image_annotator.js new file mode 100644 index 00000000000..e7845a3df3c --- /dev/null +++ b/packages/vision/src/v1/doc/doc_image_annotator.js @@ -0,0 +1,906 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * Users describe the type of Google Cloud Vision API tasks to perform over + * images by using *Feature*s. Each Feature indicates a type of image + * detection task to perform. Features encode the Cloud Vision API + * vertical to operate on and the number of top-scoring results to return. + * + * @property {number} type + * The feature type. + * + * The number should be among the values of [Type]{@link Type} + * + * @property {number} maxResults + * Maximum number of results of this type. + * + * @class + * @see [google.cloud.vision.v1.Feature definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var Feature = { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Type of image feature. + * + * @enum {number} + */ + Type: { + + /** + * Unspecified feature type. + */ + TYPE_UNSPECIFIED: 0, + + /** + * Run face detection. + */ + FACE_DETECTION: 1, + + /** + * Run landmark detection. + */ + LANDMARK_DETECTION: 2, + + /** + * Run logo detection. + */ + LOGO_DETECTION: 3, + + /** + * Run label detection. + */ + LABEL_DETECTION: 4, + + /** + * Run OCR. + */ + TEXT_DETECTION: 5, + + /** + * Run dense text document OCR. Takes precedence when both + * DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. + */ + DOCUMENT_TEXT_DETECTION: 11, + + /** + * Run computer vision models to compute image safe-search properties. + */ + SAFE_SEARCH_DETECTION: 6, + + /** + * Compute a set of image properties, such as the image's dominant colors. + */ + IMAGE_PROPERTIES: 7, + + /** + * Run crop hints. + */ + CROP_HINTS: 9, + + /** + * Run web detection. + */ + WEB_DETECTION: 10 + } +}; + +/** + * External image source (Google Cloud Storage image location). + * + * @property {string} gcsImageUri + * NOTE: For new code `image_uri` below is preferred. + * Google Cloud Storage image URI, which must be in the following form: + * `gs://bucket_name/object_name` (for details, see + * [Google Cloud Storage Request + * URIs](https://cloud.google.com/storage/docs/reference-uris)). + * NOTE: Cloud Storage object versioning is not supported. + * + * @property {string} imageUri + * Image URI which supports: + * 1) Google Cloud Storage image URI, which must be in the following form: + * `gs://bucket_name/object_name` (for details, see + * [Google Cloud Storage Request + * URIs](https://cloud.google.com/storage/docs/reference-uris)). + * NOTE: Cloud Storage object versioning is not supported. + * 2) Publicly accessible image HTTP/HTTPS URL. + * This is preferred over the legacy `gcs_image_uri` above. When both + * `gcs_image_uri` and `image_uri` are specified, `image_uri` takes + * precedence. + * + * @class + * @see [google.cloud.vision.v1.ImageSource definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var ImageSource = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Client image to perform Google Cloud Vision API tasks over. + * + * @property {string} content + * Image content, represented as a stream of bytes. 
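A brief sketch of the ways an Image message can be populated, following the content and source fields documented above; the file and bucket names are hypothetical:

    var fs = require('fs');

    // 1. Inline content: base64 in the JSON representation.
    var inlineImage = {
      content: fs.readFileSync('image.jpg').toString('base64')
    };

    // 2. A Cloud Storage URI via the preferred image_uri field.
    var gcsImage = { source: { imageUri: 'gs://my-bucket/image.jpg' } };

    // 3. A publicly accessible HTTP/HTTPS URL.
    var webImage = { source: { imageUri: 'https://example.com/image.jpg' } };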
+ * Note: as with all `bytes` fields, protobuffers use a pure binary + * representation, whereas JSON representations use base64. + * + * @property {Object} source + * Google Cloud Storage image location. If both `content` and `source` + * are provided for an image, `content` takes precedence and is + * used to perform the image annotation request. + * + * This object should have the same structure as [ImageSource]{@link ImageSource} + * + * @class + * @see [google.cloud.vision.v1.Image definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var Image = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A face annotation object contains the results of face detection. + * + * @property {Object} boundingPoly + * The bounding polygon around the face. The coordinates of the bounding box + * are in the original image's scale, as returned in `ImageParams`. + * The bounding box is computed to "frame" the face in accordance with human + * expectations. It is based on the landmarker results. + * Note that one or more x and/or y coordinates may not be generated in the + * `BoundingPoly` (the polygon will be unbounded) if only a partial face + * appears in the image to be annotated. + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {Object} fdBoundingPoly + * The `fd_bounding_poly` bounding polygon is tighter than the + * `boundingPoly`, and encloses only the skin part of the face. Typically, it + * is used to eliminate the face from any image analysis that detects the + * "amount of skin" visible in an image. It is not based on the + * landmarker results, only on the initial face detection, hence + * the fd (face detection) prefix. + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {Object[]} landmarks + * Detected face landmarks. + * + * This object should have the same structure as [Landmark]{@link Landmark} + * + * @property {number} rollAngle + * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation + * of the face relative to the image vertical about the axis perpendicular to + * the face. Range [-180,180]. + * + * @property {number} panAngle + * Yaw angle, which indicates the leftward/rightward angle that the face is + * pointing relative to the vertical plane perpendicular to the image. Range + * [-180,180]. + * + * @property {number} tiltAngle + * Pitch angle, which indicates the upwards/downwards angle that the face is + * pointing relative to the image's horizontal plane. Range [-180,180]. + * + * @property {number} detectionConfidence + * Detection confidence. Range [0, 1]. + * + * @property {number} landmarkingConfidence + * Face landmarking confidence. Range [0, 1]. + * + * @property {number} joyLikelihood + * Joy likelihood. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} sorrowLikelihood + * Sorrow likelihood. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} angerLikelihood + * Anger likelihood. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} surpriseLikelihood + * Surprise likelihood. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} underExposedLikelihood + * Under-exposed likelihood. 
+ * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} blurredLikelihood + * Blurred likelihood. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} headwearLikelihood + * Headwear likelihood. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @class + * @see [google.cloud.vision.v1.FaceAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var FaceAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * A face-specific landmark (for example, a face feature). + * Landmark positions may fall outside the bounds of the image + * if the face is near one or more edges of the image. + * Therefore it is NOT guaranteed that `0 <= x < width` or + * `0 <= y < height`. + * + * @property {number} type + * Face landmark type. + * + * The number should be among the values of [Type]{@link Type} + * + * @property {Object} position + * Face landmark position. + * + * This object should have the same structure as [Position]{@link Position} + * + * @class + * @see [google.cloud.vision.v1.FaceAnnotation.Landmark definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ + Landmark: { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Face landmark (feature) type. + * Left and right are defined from the vantage of the viewer of the image + * without considering mirror projections typical of photos. So, `LEFT_EYE`, + * typically, is the person's right eye. + * + * @enum {number} + */ + Type: { + + /** + * Unknown face landmark detected. Should not be filled. + */ + UNKNOWN_LANDMARK: 0, + + /** + * Left eye. + */ + LEFT_EYE: 1, + + /** + * Right eye. + */ + RIGHT_EYE: 2, + + /** + * Left of left eyebrow. + */ + LEFT_OF_LEFT_EYEBROW: 3, + + /** + * Right of left eyebrow. + */ + RIGHT_OF_LEFT_EYEBROW: 4, + + /** + * Left of right eyebrow. + */ + LEFT_OF_RIGHT_EYEBROW: 5, + + /** + * Right of right eyebrow. + */ + RIGHT_OF_RIGHT_EYEBROW: 6, + + /** + * Midpoint between eyes. + */ + MIDPOINT_BETWEEN_EYES: 7, + + /** + * Nose tip. + */ + NOSE_TIP: 8, + + /** + * Upper lip. + */ + UPPER_LIP: 9, + + /** + * Lower lip. + */ + LOWER_LIP: 10, + + /** + * Mouth left. + */ + MOUTH_LEFT: 11, + + /** + * Mouth right. + */ + MOUTH_RIGHT: 12, + + /** + * Mouth center. + */ + MOUTH_CENTER: 13, + + /** + * Nose, bottom right. + */ + NOSE_BOTTOM_RIGHT: 14, + + /** + * Nose, bottom left. + */ + NOSE_BOTTOM_LEFT: 15, + + /** + * Nose, bottom center. + */ + NOSE_BOTTOM_CENTER: 16, + + /** + * Left eye, top boundary. + */ + LEFT_EYE_TOP_BOUNDARY: 17, + + /** + * Left eye, right corner. + */ + LEFT_EYE_RIGHT_CORNER: 18, + + /** + * Left eye, bottom boundary. + */ + LEFT_EYE_BOTTOM_BOUNDARY: 19, + + /** + * Left eye, left corner. + */ + LEFT_EYE_LEFT_CORNER: 20, + + /** + * Right eye, top boundary. + */ + RIGHT_EYE_TOP_BOUNDARY: 21, + + /** + * Right eye, right corner. + */ + RIGHT_EYE_RIGHT_CORNER: 22, + + /** + * Right eye, bottom boundary. + */ + RIGHT_EYE_BOTTOM_BOUNDARY: 23, + + /** + * Right eye, left corner. + */ + RIGHT_EYE_LEFT_CORNER: 24, + + /** + * Left eyebrow, upper midpoint. + */ + LEFT_EYEBROW_UPPER_MIDPOINT: 25, + + /** + * Right eyebrow, upper midpoint. 
+ */ + RIGHT_EYEBROW_UPPER_MIDPOINT: 26, + + /** + * Left ear tragion. + */ + LEFT_EAR_TRAGION: 27, + + /** + * Right ear tragion. + */ + RIGHT_EAR_TRAGION: 28, + + /** + * Left eye pupil. + */ + LEFT_EYE_PUPIL: 29, + + /** + * Right eye pupil. + */ + RIGHT_EYE_PUPIL: 30, + + /** + * Forehead glabella. + */ + FOREHEAD_GLABELLA: 31, + + /** + * Chin gnathion. + */ + CHIN_GNATHION: 32, + + /** + * Chin left gonion. + */ + CHIN_LEFT_GONION: 33, + + /** + * Chin right gonion. + */ + CHIN_RIGHT_GONION: 34 + } + } +}; + +/** + * Detected entity location information. + * + * @property {Object} latLng + * lat/long location coordinates. + * + * This object should have the same structure as [google.type.LatLng]{@link external:"google.type.LatLng"} + * + * @class + * @see [google.cloud.vision.v1.LocationInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var LocationInfo = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A `Property` consists of a user-supplied name/value pair. + * + * @property {string} name + * Name of the property. + * + * @property {string} value + * Value of the property. + * + * @class + * @see [google.cloud.vision.v1.Property definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var Property = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Set of detected entity features. + * + * @property {string} mid + * Opaque entity ID. Some IDs may be available in + * [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/). + * + * @property {string} locale + * The language code for the locale in which the entity textual + * `description` is expressed. + * + * @property {string} description + * Entity textual description, expressed in its `locale` language. + * + * @property {number} score + * Overall score of the result. Range [0, 1]. + * + * @property {number} confidence + * The accuracy of the entity detection in an image. + * For example, for an image in which the "Eiffel Tower" entity is detected, + * this field represents the confidence that there is a tower in the query + * image. Range [0, 1]. + * + * @property {number} topicality + * The relevancy of the ICA (Image Content Annotation) label to the + * image. For example, the relevancy of "tower" is likely higher to an image + * containing the detected "Eiffel Tower" than to an image containing a + * detected distant towering building, even though the confidence that + * there is a tower in each image may be the same. Range [0, 1]. + * + * @property {Object} boundingPoly + * Image region to which this entity belongs. Currently not produced + * for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s + * are produced for the entire text detected in an image region, followed by + * `boundingPoly`s for each word within the detected text. + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {Object[]} locations + * The location information for the detected entity. Multiple + * `LocationInfo` elements can be present because one location may + * indicate the location of the scene in the image, and another location + * may indicate the location of the place where the image was taken. + * Location information is usually present for landmarks. 
+ * + * This object should have the same structure as [LocationInfo]{@link LocationInfo} + * + * @property {Object[]} properties + * Some entities may have optional user-supplied `Property` (name/value) + * fields, such a score or string that qualifies the entity. + * + * This object should have the same structure as [Property]{@link Property} + * + * @class + * @see [google.cloud.vision.v1.EntityAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var EntityAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Set of features pertaining to the image, computed by computer vision + * methods over safe-search verticals (for example, adult, spoof, medical, + * violence). + * + * @property {number} adult + * Represents the adult content likelihood for the image. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} spoof + * Spoof likelihood. The likelihood that an modification + * was made to the image's canonical version to make it appear + * funny or offensive. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} medical + * Likelihood that this is a medical image. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @property {number} violence + * Violence likelihood. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @class + * @see [google.cloud.vision.v1.SafeSearchAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var SafeSearchAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Rectangle determined by min and max `LatLng` pairs. + * + * @property {Object} minLatLng + * Min lat/long pair. + * + * This object should have the same structure as [google.type.LatLng]{@link external:"google.type.LatLng"} + * + * @property {Object} maxLatLng + * Max lat/long pair. + * + * This object should have the same structure as [google.type.LatLng]{@link external:"google.type.LatLng"} + * + * @class + * @see [google.cloud.vision.v1.LatLongRect definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var LatLongRect = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Color information consists of RGB channels, score, and the fraction of + * the image that the color occupies in the image. + * + * @property {Object} color + * RGB components of the color. + * + * This object should have the same structure as [google.type.Color]{@link external:"google.type.Color"} + * + * @property {number} score + * Image-specific score for this color. Value in range [0, 1]. + * + * @property {number} pixelFraction + * The fraction of pixels the color occupies in the image. + * Value in range [0, 1]. + * + * @class + * @see [google.cloud.vision.v1.ColorInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var ColorInfo = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Set of dominant colors and their corresponding scores. 
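+ *
+ * A sketch of reading dominant colors off a response (assuming `response` is
+ * an [AnnotateImageResponse]{@link AnnotateImageResponse} obtained with the
+ * IMAGE_PROPERTIES feature requested):
+ *
+ *     var colors = response.imagePropertiesAnnotation.dominantColors.colors;
+ *     colors.forEach(function(colorInfo) {
+ *       console.log(colorInfo.color, colorInfo.score, colorInfo.pixelFraction);
+ *     });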
+ * + * @property {Object[]} colors + * RGB color values with their score and pixel fraction. + * + * This object should have the same structure as [ColorInfo]{@link ColorInfo} + * + * @class + * @see [google.cloud.vision.v1.DominantColorsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var DominantColorsAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Stores image properties, such as dominant colors. + * + * @property {Object} dominantColors + * If present, dominant colors completed successfully. + * + * This object should have the same structure as [DominantColorsAnnotation]{@link DominantColorsAnnotation} + * + * @class + * @see [google.cloud.vision.v1.ImageProperties definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var ImageProperties = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Single crop hint that is used to generate a new crop when serving an image. + * + * @property {Object} boundingPoly + * The bounding polygon for the crop region. The coordinates of the bounding + * box are in the original image's scale, as returned in `ImageParams`. + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {number} confidence + * Confidence of this being a salient region. Range [0, 1]. + * + * @property {number} importanceFraction + * Fraction of importance of this salient region with respect to the original + * image. + * + * @class + * @see [google.cloud.vision.v1.CropHint definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var CropHint = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Set of crop hints that are used to generate new crops when serving images. + * + * @property {Object[]} cropHints + * This object should have the same structure as [CropHint]{@link CropHint} + * + * @class + * @see [google.cloud.vision.v1.CropHintsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var CropHintsAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Parameters for crop hints annotation request. + * + * @property {number[]} aspectRatios + * Aspect ratios in floats, representing the ratio of the width to the height + * of the image. For example, if the desired aspect ratio is 4/3, the + * corresponding float value should be 1.33333. If not specified, the + * best possible crop is returned. The number of provided aspect ratios is + * limited to a maximum of 16; any aspect ratios provided after the 16th are + * ignored. + * + * @class + * @see [google.cloud.vision.v1.CropHintsParams definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var CropHintsParams = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Image context and/or feature-specific parameters. + * + * @property {Object} latLongRect + * lat/long rectangle that specifies the location of the image. 
+ * + * This object should have the same structure as [LatLongRect]{@link LatLongRect} + * + * @property {string[]} languageHints + * List of languages to use for TEXT_DETECTION. In most cases, an empty value + * yields the best results since it enables automatic language detection. For + * languages based on the Latin alphabet, setting `language_hints` is not + * needed. In rare cases, when the language of the text in the image is known, + * setting a hint will help get better results (although it will be a + * significant hindrance if the hint is wrong). Text detection returns an + * error if one or more of the specified languages is not one of the + * [supported languages](https://cloud.google.com/vision/docs/languages). + * + * @property {Object} cropHintsParams + * Parameters for crop hints annotation request. + * + * This object should have the same structure as [CropHintsParams]{@link CropHintsParams} + * + * @class + * @see [google.cloud.vision.v1.ImageContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var ImageContext = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Request for performing Google Cloud Vision API tasks over a user-provided + * image, with user-requested features. + * + * @property {Object} image + * The image to be processed. + * + * This object should have the same structure as [Image]{@link Image} + * + * @property {Object[]} features + * Requested features. + * + * This object should have the same structure as [Feature]{@link Feature} + * + * @property {Object} imageContext + * Additional context that may accompany the image. + * + * This object should have the same structure as [ImageContext]{@link ImageContext} + * + * @class + * @see [google.cloud.vision.v1.AnnotateImageRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var AnnotateImageRequest = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Response to an image annotation request. + * + * @property {Object[]} faceAnnotations + * If present, face detection has completed successfully. + * + * This object should have the same structure as [FaceAnnotation]{@link FaceAnnotation} + * + * @property {Object[]} landmarkAnnotations + * If present, landmark detection has completed successfully. + * + * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * + * @property {Object[]} logoAnnotations + * If present, logo detection has completed successfully. + * + * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * + * @property {Object[]} labelAnnotations + * If present, label detection has completed successfully. + * + * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * + * @property {Object[]} textAnnotations + * If present, text (OCR) detection or document (OCR) text detection has + * completed successfully. + * + * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * + * @property {Object} fullTextAnnotation + * If present, text (OCR) detection or document (OCR) text detection has + * completed successfully. + * This annotation provides the structural hierarchy for the OCR detected + * text. 
+ * + * This object should have the same structure as [TextAnnotation]{@link TextAnnotation} + * + * @property {Object} safeSearchAnnotation + * If present, safe-search annotation has completed successfully. + * + * This object should have the same structure as [SafeSearchAnnotation]{@link SafeSearchAnnotation} + * + * @property {Object} imagePropertiesAnnotation + * If present, image properties were extracted successfully. + * + * This object should have the same structure as [ImageProperties]{@link ImageProperties} + * + * @property {Object} cropHintsAnnotation + * If present, crop hints have completed successfully. + * + * This object should have the same structure as [CropHintsAnnotation]{@link CropHintsAnnotation} + * + * @property {Object} webDetection + * If present, web detection has completed successfully. + * + * This object should have the same structure as [WebDetection]{@link WebDetection} + * + * @property {Object} error + * If set, represents the error message for the operation. + * Note that filled-in image annotations are guaranteed to be + * correct, even when `error` is set. + * + * This object should have the same structure as [google.rpc.Status]{@link external:"google.rpc.Status"} + * + * @class + * @see [google.cloud.vision.v1.AnnotateImageResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var AnnotateImageResponse = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Multiple image annotation requests are batched into a single service call. + * + * @property {Object[]} requests + * Individual image annotation requests for this batch. + * + * This object should have the same structure as [AnnotateImageRequest]{@link AnnotateImageRequest} + * + * @class + * @see [google.cloud.vision.v1.BatchAnnotateImagesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var BatchAnnotateImagesRequest = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Response to a batch image annotation request. + * + * @property {Object[]} responses + * Individual responses to image annotation requests within the batch. + * + * This object should have the same structure as [AnnotateImageResponse]{@link AnnotateImageResponse} + * + * @class + * @see [google.cloud.vision.v1.BatchAnnotateImagesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} + */ +var BatchAnnotateImagesResponse = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A bucketized representation of likelihood, which is intended to give clients + * highly stable results across model upgrades. + * + * @enum {number} + */ +var Likelihood = { + + /** + * Unknown likelihood. + */ + UNKNOWN: 0, + + /** + * It is very unlikely that the image belongs to the specified vertical. + */ + VERY_UNLIKELY: 1, + + /** + * It is unlikely that the image belongs to the specified vertical. + */ + UNLIKELY: 2, + + /** + * It is possible that the image belongs to the specified vertical. + */ + POSSIBLE: 3, + + /** + * It is likely that the image belongs to the specified vertical. + */ + LIKELY: 4, + + /** + * It is very likely that the image belongs to the specified vertical. 
+ */ + VERY_LIKELY: 5 +}; \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_text_annotation.js b/packages/vision/src/v1/doc/doc_text_annotation.js new file mode 100644 index 00000000000..96e5ccba01a --- /dev/null +++ b/packages/vision/src/v1/doc/doc_text_annotation.js @@ -0,0 +1,362 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * TextAnnotation contains a structured representation of OCR extracted text. + * The hierarchy of an OCR extracted text structure is like this: + * TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol + * Each structural component, starting from Page, may further have their own + * properties. Properties describe detected languages, breaks etc.. Please + * refer to the {@link google.cloud.vision.v1.TextAnnotation.TextProperty} message + * definition below for more detail. + * + * @property {Object[]} pages + * List of pages detected by OCR. + * + * This object should have the same structure as [Page]{@link Page} + * + * @property {string} text + * UTF-8 text detected on the pages. + * + * @class + * @see [google.cloud.vision.v1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ +var TextAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Detected language for a structural component. + * + * @property {string} languageCode + * The BCP-47 language code, such as "en-US" or "sr-Latn". For more + * information, see + * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + * + * @property {number} confidence + * Confidence of detected language. Range [0, 1]. + * + * @class + * @see [google.cloud.vision.v1.TextAnnotation.DetectedLanguage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ + DetectedLanguage: { + // This is for documentation. Actual contents will be loaded by gRPC. + }, + + /** + * Detected start or end of a structural component. + * + * @property {number} type + * The number should be among the values of [BreakType]{@link BreakType} + * + * @property {boolean} isPrefix + * True if break prepends the element. + * + * @class + * @see [google.cloud.vision.v1.TextAnnotation.DetectedBreak definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ + DetectedBreak: { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Enum to denote the type of break found. New line, space etc. + * + * @enum {number} + */ + BreakType: { + + /** + * Unknown break label type. + */ + UNKNOWN: 0, + + /** + * Regular space. + */ + SPACE: 1, + + /** + * Sure space (very wide). 
+ */ + SURE_SPACE: 2, + + /** + * Line-wrapping break. + */ + EOL_SURE_SPACE: 3, + + /** + * End-line hyphen that is not present in text; does + */ + HYPHEN: 4, + + /** + * not co-occur with SPACE, LEADER_SPACE, or + * LINE_BREAK. + * Line break that ends a paragraph. + */ + LINE_BREAK: 5 + } + }, + + /** + * Additional information detected on the structural component. + * + * @property {Object[]} detectedLanguages + * A list of detected languages together with confidence. + * + * This object should have the same structure as [DetectedLanguage]{@link DetectedLanguage} + * + * @property {Object} detectedBreak + * Detected start or end of a text segment. + * + * This object should have the same structure as [DetectedBreak]{@link DetectedBreak} + * + * @class + * @see [google.cloud.vision.v1.TextAnnotation.TextProperty definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ + TextProperty: { + // This is for documentation. Actual contents will be loaded by gRPC. + } +}; + +/** + * Detected page from OCR. + * + * @property {Object} property + * Additional information detected on the page. + * + * This object should have the same structure as [TextProperty]{@link TextProperty} + * + * @property {number} width + * Page width in pixels. + * + * @property {number} height + * Page height in pixels. + * + * @property {Object[]} blocks + * List of blocks of text, images etc on this page. + * + * This object should have the same structure as [Block]{@link Block} + * + * @class + * @see [google.cloud.vision.v1.Page definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ +var Page = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Logical element on the page. + * + * @property {Object} property + * Additional information detected for the block. + * + * This object should have the same structure as [TextProperty]{@link TextProperty} + * + * @property {Object} boundingBox + * The bounding box for the block. + * The vertices are in the order of top-left, top-right, bottom-right, + * bottom-left. When a rotation of the bounding box is detected the rotation + * is represented as around the top-left corner as defined when the text is + * read in the 'natural' orientation. + * For example: + * * when the text is horizontal it might look like: + * 0----1 + * | | + * 3----2 + * * when it's rotated 180 degrees around the top-left corner it becomes: + * 2----3 + * | | + * 1----0 + * and the vertice order will still be (0, 1, 2, 3). + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {Object[]} paragraphs + * List of paragraphs in this block (if this blocks is of type text). + * + * This object should have the same structure as [Paragraph]{@link Paragraph} + * + * @property {number} blockType + * Detected block type (text, image etc) for this block. + * + * The number should be among the values of [BlockType]{@link BlockType} + * + * @class + * @see [google.cloud.vision.v1.Block definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ +var Block = { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Type of a block (text, image etc) as identified by OCR. 
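+   *
+   * As an illustration (assuming `page` is a [Page]{@link Page} taken from a
+   * response, and that `blockType` is returned as its numeric enum value),
+   * the text blocks on a page could be selected like so:
+   *
+   *     var textBlocks = page.blocks.filter(function(block) {
+   *       return block.blockType === 1; // BlockType.TEXT
+   *     });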
+ * + * @enum {number} + */ + BlockType: { + + /** + * Unknown block type. + */ + UNKNOWN: 0, + + /** + * Regular text block. + */ + TEXT: 1, + + /** + * Table block. + */ + TABLE: 2, + + /** + * Image block. + */ + PICTURE: 3, + + /** + * Horizontal/vertical line box. + */ + RULER: 4, + + /** + * Barcode block. + */ + BARCODE: 5 + } +}; + +/** + * Structural unit of text representing a number of words in certain order. + * + * @property {Object} property + * Additional information detected for the paragraph. + * + * This object should have the same structure as [TextProperty]{@link TextProperty} + * + * @property {Object} boundingBox + * The bounding box for the paragraph. + * The vertices are in the order of top-left, top-right, bottom-right, + * bottom-left. When a rotation of the bounding box is detected the rotation + * is represented as around the top-left corner as defined when the text is + * read in the 'natural' orientation. + * For example: + * * when the text is horizontal it might look like: + * 0----1 + * | | + * 3----2 + * * when it's rotated 180 degrees around the top-left corner it becomes: + * 2----3 + * | | + * 1----0 + * and the vertice order will still be (0, 1, 2, 3). + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {Object[]} words + * List of words in this paragraph. + * + * This object should have the same structure as [Word]{@link Word} + * + * @class + * @see [google.cloud.vision.v1.Paragraph definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ +var Paragraph = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A word representation. + * + * @property {Object} property + * Additional information detected for the word. + * + * This object should have the same structure as [TextProperty]{@link TextProperty} + * + * @property {Object} boundingBox + * The bounding box for the word. + * The vertices are in the order of top-left, top-right, bottom-right, + * bottom-left. When a rotation of the bounding box is detected the rotation + * is represented as around the top-left corner as defined when the text is + * read in the 'natural' orientation. + * For example: + * * when the text is horizontal it might look like: + * 0----1 + * | | + * 3----2 + * * when it's rotated 180 degrees around the top-left corner it becomes: + * 2----3 + * | | + * 1----0 + * and the vertice order will still be (0, 1, 2, 3). + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {Object[]} symbols + * List of symbols in the word. + * The order of the symbols follows the natural reading order. + * + * This object should have the same structure as [Symbol]{@link Symbol} + * + * @class + * @see [google.cloud.vision.v1.Word definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ +var Word = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A single symbol representation. + * + * @property {Object} property + * Additional information detected for the symbol. + * + * This object should have the same structure as [TextProperty]{@link TextProperty} + * + * @property {Object} boundingBox + * The bounding box for the symbol. + * The vertices are in the order of top-left, top-right, bottom-right, + * bottom-left. 
When a rotation of the bounding box is detected the rotation + * is represented as around the top-left corner as defined when the text is + * read in the 'natural' orientation. + * For example: + * * when the text is horizontal it might look like: + * 0----1 + * | | + * 3----2 + * * when it's rotated 180 degrees around the top-left corner it becomes: + * 2----3 + * | | + * 1----0 + * and the vertice order will still be (0, 1, 2, 3). + * + * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * + * @property {string} text + * The actual UTF-8 representation of the symbol. + * + * @class + * @see [google.cloud.vision.v1.Symbol definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} + */ +var Symbol = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/vision/src/v1/doc/doc_web_detection.js b/packages/vision/src/v1/doc/doc_web_detection.js new file mode 100644 index 00000000000..12e2da864f6 --- /dev/null +++ b/packages/vision/src/v1/doc/doc_web_detection.js @@ -0,0 +1,108 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * Relevant information for the image from the Internet. + * + * @property {Object[]} webEntities + * Deduced entities from similar images on the Internet. + * + * This object should have the same structure as [WebEntity]{@link WebEntity} + * + * @property {Object[]} fullMatchingImages + * Fully matching images from the Internet. + * They're definite neardups and most often a copy of the query image with + * merely a size change. + * + * This object should have the same structure as [WebImage]{@link WebImage} + * + * @property {Object[]} partialMatchingImages + * Partial matching images from the Internet. + * Those images are similar enough to share some key-point features. For + * example an original image will likely have partial matching for its crops. + * + * This object should have the same structure as [WebImage]{@link WebImage} + * + * @property {Object[]} pagesWithMatchingImages + * Web pages containing the matching images from the Internet. + * + * This object should have the same structure as [WebPage]{@link WebPage} + * + * @class + * @see [google.cloud.vision.v1.WebDetection definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} + */ +var WebDetection = { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Entity deduced from similar images on the Internet. + * + * @property {string} entityId + * Opaque entity ID. + * + * @property {number} score + * Overall relevancy score for the entity. + * Not normalized and not comparable across different image queries. 
+ * + * @property {string} description + * Canonical description of the entity, in English. + * + * @class + * @see [google.cloud.vision.v1.WebDetection.WebEntity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} + */ + WebEntity: { + // This is for documentation. Actual contents will be loaded by gRPC. + }, + + /** + * Metadata for online images. + * + * @property {string} url + * The result image URL. + * + * @property {number} score + * Overall relevancy score for the image. + * Not normalized and not comparable across different image queries. + * + * @class + * @see [google.cloud.vision.v1.WebDetection.WebImage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} + */ + WebImage: { + // This is for documentation. Actual contents will be loaded by gRPC. + }, + + /** + * Metadata for web pages. + * + * @property {string} url + * The result web page URL. + * + * @property {number} score + * Overall relevancy score for the web page. + * Not normalized and not comparable across different image queries. + * + * @class + * @see [google.cloud.vision.v1.WebDetection.WebPage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} + */ + WebPage: { + // This is for documentation. Actual contents will be loaded by gRPC. + } +}; \ No newline at end of file diff --git a/packages/vision/src/v1/image_annotator_client.js b/packages/vision/src/v1/image_annotator_client.js index e2ace8b98f8..bfbad78422d 100644 --- a/packages/vision/src/v1/image_annotator_client.js +++ b/packages/vision/src/v1/image_annotator_client.js @@ -50,15 +50,6 @@ var ALL_SCOPES = [ * images, such as face, landmark, logo, label, and text detection. The * ImageAnnotator service returns detected entities from the images. * - * This will be created through a builder function which can be obtained by the module. - * See the following example of how to initialize the module and how to access to the builder. - * @see {@link imageAnnotatorClient} - * - * @example - * var visionV1 = require('@google-cloud/vision').v1({ - * // optional auth parameters. - * }); - * var client = visionV1.imageAnnotatorClient(); * * @class */ @@ -143,12 +134,18 @@ ImageAnnotatorClient.prototype.getProjectId = function(callback) { * * @example * - * var client = visionV1.imageAnnotatorClient(); + * var vision = require('@google-cloud/vision'); + * + * var client = vision.v1({ + * // optional auth parameters. + * }); + * * var requests = []; * client.batchAnnotateImages({requests: requests}).then(function(responses) { * var response = responses[0]; * // doThingsWith(response) - * }).catch(function(err) { + * }) + * .catch(function(err) { * console.error(err); * }); */ @@ -197,4 +194,4 @@ function ImageAnnotatorClientBuilder(gaxGrpc) { } module.exports = ImageAnnotatorClientBuilder; module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS; -module.exports.ALL_SCOPES = ALL_SCOPES; \ No newline at end of file +module.exports.ALL_SCOPES = ALL_SCOPES; diff --git a/packages/vision/src/v1/index.js b/packages/vision/src/v1/index.js index b12e1e4b9e8..df112c350a4 100644 --- a/packages/vision/src/v1/index.js +++ b/packages/vision/src/v1/index.js @@ -1,11 +1,11 @@ /* - * Copyright 2016 Google Inc. All rights reserved. + * Copyright 2017, Google Inc. All rights reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,4 +31,4 @@ v1.GAPIC_VERSION = '0.7.1'; v1.SERVICE_ADDRESS = imageAnnotatorClient.SERVICE_ADDRESS; v1.ALL_SCOPES = imageAnnotatorClient.ALL_SCOPES; -module.exports = v1; +module.exports = v1; \ No newline at end of file diff --git a/packages/vision/system-test/vision.js b/packages/vision/system-test/vision.js index 3c14dfcda85..5e6e448d1d4 100644 --- a/packages/vision/system-test/vision.js +++ b/packages/vision/system-test/vision.js @@ -19,9 +19,6 @@ var assert = require('assert'); var async = require('async'); var fs = require('fs'); -var is = require('is'); -var multiline = require('multiline'); -var normalizeNewline = require('normalize-newline'); var path = require('path'); var Storage = require('@google-cloud/storage'); var uuid = require('node-uuid'); @@ -44,7 +41,6 @@ describe('Vision', function() { var vision = new Vision(env); var bucket = storage.bucket(generateName()); - var file = bucket.file('logo.jpg'); before(function(done) { bucket.create(function(err) { @@ -81,553 +77,48 @@ describe('Vision', function() { }); }); - it('should detect from a URL', function(done) { + it('should detect from a URL', () => { var url = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png'; - - vision.detect(url, ['logos'], function(err, logos) { - assert.ifError(err); - - assert.deepEqual(logos, ['Google']); - - done(); + return vision.logoDetection({ + source: {imageUri: url}, + }).then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); }); }); - it('should detect from a File', function(done) { - vision.detect(file, ['logos'], function(err, logos) { - assert.ifError(err); - - assert.deepEqual(logos, ['Google']); - - done(); + it('should detect from a filename', () => { + return vision.logoDetection({ + source: {filename: IMAGES.logo}, + }).then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); }); }); - it('should detect from a Buffer', function(done) { + it('should detect from a Buffer', () => { var buffer = fs.readFileSync(IMAGES.logo); - vision.detect(buffer, ['logos'], function(err, logos) { - assert.ifError(err); - - assert.deepEqual(logos, ['Google']); - - done(); - }); - }); - - describe('single image', function() { - var TYPES = ['faces', 'labels', 'safeSearch']; - - it('should perform a single detection', function(done) { - vision.detect(IMAGES.rushmore, TYPES[0], function(err, detections) { - assert.ifError(err); - - assert(is.array(detections)); - - done(); - }); - }); - - it('should perform multiple detections', function(done) { - vision.detect(IMAGES.rushmore, TYPES, function(err, detections) { - assert.ifError(err); - - assert(is.array(detections.faces)); - assert(is.array(detections.labels)); - assert(is.object(detections.safeSearch)); - - done(); - }); - }); - - it('should return errors', function(done) { - vision.detect(IMAGES.malformed, TYPES, function(err, detections) { - assert.strictEqual(err.name, 'PartialFailureError'); - assert(is.array(err.errors)); - assert.strictEqual(err.errors.length, 1); - - 
assert.deepEqual(detections, []); - done(); - }); - }); - }); - - describe('multiple images', function() { - var TYPES = ['faces', 'labels', 'safeSearch']; - - it('should perform a single detection', function(done) { - var images = [IMAGES.logo, IMAGES.rushmore]; - - vision.detect(images, TYPES[0], function(err, detections) { - assert.ifError(err); - - var image1detections = detections[0]; - var image2detections = detections[1]; - - assert(is.array(image1detections)); - assert(is.array(image2detections)); - - done(); - }); - }); - - it('should perform multiple detections', function(done) { - var images = [IMAGES.logo, IMAGES.rushmore]; - - vision.detect(images, TYPES, function(err, detections) { - assert.ifError(err); - - var image1detections = detections[0]; - var image2detections = detections[1]; - - assert(is.array(image1detections.faces)); - assert(is.array(image1detections.labels)); - assert(is.object(image1detections.safeSearch)); - - assert(is.array(image2detections.faces)); - assert(is.array(image2detections.labels)); - assert(is.object(image2detections.safeSearch)); - - done(); - }); - }); - - it('should return errors', function(done) { - var images = [IMAGES.logo, IMAGES.malformed]; - - vision.detect(images, TYPES, function(err, detections) { - assert.strictEqual(err.name, 'PartialFailureError'); - assert(is.array(err.errors)); - assert.strictEqual(err.errors.length, 1); - - var image2errors = err.errors[0]; - assert.deepEqual(image2errors, { - image: IMAGES.malformed, - errors: [ - { - code: 400, - message: 'Bad image data.', - type: 'faces' - }, - { - code: 400, - message: 'Bad image data.', - type: 'labels' - }, - { - code: 400, - message: 'Bad image data.', - type: 'safeSearch' - } - ] - }); - - var image1detections = detections[0]; - assert(is.array(image1detections.faces)); - assert(is.array(image1detections.labels)); - assert(is.object(image1detections.safeSearch)); - - var image2detections = detections[1]; - assert.deepEqual(image2detections, {}); - - done(); - }); - }); - }); - - describe('crops', function() { - it('should detect crops from an image', function(done) { - vision.detectCrops(IMAGES.logo, function(err, crops) { - assert.ifError(err); - assert.strictEqual(crops.length, 1); - assert.strictEqual(crops[0].length, 4); - done(); - }); - }); - - it('should detect crops from multiple images', function(done) { - vision.detectCrops([ - IMAGES.logo, - IMAGES.rushmore - ], function(err, crops) { - assert.ifError(err); - - assert.strictEqual(crops.length, 2); - assert.strictEqual(crops[0][0].length, 4); - assert.strictEqual(crops[1][0].length, 4); - - done(); - }); - }); - }); - - describe('documents', function() { - it('should detect text from a document', function(done) { - vision.readDocument(IMAGES.document, function(err, text) { - assert.ifError(err); - - assert.strictEqual(typeof text, 'string'); - - done(); - }); - }); - - it('should detect pages from multiple documents', function(done) { - vision.readDocument([ - IMAGES.document, - IMAGES.logo - ], function(err, pages) { - assert.ifError(err); - - assert.strictEqual(pages.length, 2); - assert(typeof pages[0], 'object'); - assert(typeof pages[1], 'object'); - - done(); - }); - }); - }); - - describe('faces', function() { - it('should detect faces from an image', function(done) { - vision.detectFaces(IMAGES.rushmore, function(err, faces) { - assert.ifError(err); - - assert.strictEqual(faces.length, 1); - - done(); - }); - }); - - it('should detect faces from multiple images', function(done) { - vision.detectFaces([ 
- IMAGES.logo, - IMAGES.rushmore - ], function(err, faces) { - assert.ifError(err); - - assert.strictEqual(faces.length, 2); - assert.strictEqual(faces[0].length, 0); - assert.strictEqual(faces[1].length, 1); - - done(); - }); - }); - }); - - describe('labels', function() { - it('should detect labels', function(done) { - vision.detectLabels(IMAGES.rushmore, function(err, labels) { - assert.ifError(err); - - assert(labels.length > -1); - - done(); - }); - }); - - it('should detect labels from multiple images', function(done) { - vision.detectLabels([ - IMAGES.logo, - IMAGES.rushmore - ], function(err, labels) { - assert.ifError(err); - - assert.strictEqual(labels.length, 2); - - assert(labels[0].length > -1); - assert(labels[1].length > -1); - - done(); - }); - }); - - it('should support verbose mode', function(done) { - var options = { - verbose: true - }; - - vision.detectLabels(IMAGES.rushmore, options, function(err, labels) { - assert.ifError(err); - - assert(is.defined(labels[0].mid)); - - done(); - }); - }); - }); - - describe('landmarks', function() { - it('should detect landmarks from an image', function(done) { - vision.detectLandmarks(IMAGES.rushmore, function(err, landmarks) { - assert.ifError(err); - - assert.deepEqual(landmarks, ['Mount Rushmore']); - - done(); - }); - }); - - it('should detect landmarks from multiple images', function(done) { - vision.detectLandmarks([ - IMAGES.logo, - IMAGES.rushmore - ], function(err, landmarks) { - assert.ifError(err); - - assert.strictEqual(landmarks.length, 2); - - assert.deepEqual(landmarks[0], []); - assert.deepEqual(landmarks[1], ['Mount Rushmore']); - - done(); - }); - }); - - it('should support verbose mode', function(done) { - var opts = { - verbose: true - }; - - vision.detectLandmarks(IMAGES.rushmore, opts, function(err, landmarks) { - assert.ifError(err); - - assert(is.defined(landmarks[0].mid)); - - done(); - }); - }); - }); - - describe('logos', function() { - it('should detect logos from an image', function(done) { - vision.detectLogos(IMAGES.logo, function(err, logos) { - assert.ifError(err); - - assert.deepEqual(logos, ['Google']); - - done(); - }); - }); - - it('should detect logos from multiple images', function(done) { - vision.detectLogos([ - IMAGES.rushmore, - IMAGES.logo - ], function(err, logos) { - assert.ifError(err); - - assert.strictEqual(logos.length, 2); - - assert.deepEqual(logos[0], []); - assert.deepEqual(logos[1], ['Google']); - - done(); - }); - }); - - it('should support verbose mode', function(done) { - var options = { - verbose: true - }; - - vision.detectLogos(IMAGES.logo, options, function(err, logos) { - assert.ifError(err); - - assert(is.defined(logos[0].mid)); - - done(); - }); + return vision.logoDetection(buffer).then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); }); }); - describe('properties', function() { - it('should detect image properties', function(done) { - vision.detectProperties(IMAGES.rushmore, function(err, properties) { - assert.ifError(err); - - assert.deepEqual(properties.colors, [ - '3b3027', - '727d81', - '3f3022', - '838e92', - '482b17', - '5f4e3d', - '261c14', - 'b29a7f', - '51473e', - '2c1e12' - ]); - - done(); - }); - }); - - it('should detect image properties from multiple images', function(done) { - vision.detectProperties([ - IMAGES.logo, - IMAGES.rushmore - ], function(err, properties) { - assert.ifError(err); - - assert.strictEqual(properties.length, 2); - assert(is.array(properties[0].colors)); - 
assert(is.array(properties[1].colors)); - - done(); - }); - }); - - it('should support verbose mode', function(done) { - var options = { - verbose: true - }; - - vision.detectProperties(IMAGES.rushmore, options, function(err, props) { - assert.ifError(err); - - assert(is.object(props.colors[0])); - - done(); - }); - }); - }); - - describe('SafeSearch', function() { - it('should detect SafeSearch', function(done) { - vision.detectSafeSearch(IMAGES.rushmore, function(err, safesearch) { - assert.ifError(err); - - assert.deepEqual(safesearch, { - adult: false, - medical: false, - spoof: false, - violence: false - }); - - done(); - }); - }); - - it('should detect SafeSearch from multiple images', function(done) { - vision.detectSafeSearch([ - IMAGES.logo, - IMAGES.rushmore - ], function(err, safesearches) { - assert.ifError(err); - - assert.strictEqual(safesearches.length, 2); - assert.deepEqual(safesearches[0], { - adult: false, - medical: false, - spoof: false, - violence: false - }); - assert.deepEqual(safesearches[1], { - adult: false, - medical: false, - spoof: false, - violence: false - }); - - done(); - }); - }); - - it('should support verbose mode', function(done) { - var options = { - verbose: true - }; - - vision.detectSafeSearch(IMAGES.rushmore, options, function(err, ss) { - assert.ifError(err); - - assert(!is.boolean(ss.adult)); - - done(); - }); - }); - }); - - describe('similar', function() { - it('should detect similar images from the internet', function(done) { - vision.detectSimilar(IMAGES.logo, function(err, images) { - assert.ifError(err); - assert(images.length > -1); - done(); - }); - }); - - it('should detect similar images from multiple images', function(done) { - vision.detectSimilar([ - IMAGES.logo, - IMAGES.rushmore - ], function(err, images) { - assert.ifError(err); - - assert.strictEqual(images.length, 2); - - assert(images[0].length > -1); - assert(images[1].length > -1); - - done(); - }); - }); - }); - - describe('text', function() { - var expectedResults = [ - normalizeNewline(multiline.stripIndent(function() {/* - Google Cloud Client Library for Node.js - an idiomatic, intuitive, and - natural way for Node.js developers to integrate with Google Cloud - Platform services, like Cloud Datastore and Cloud Storage. 
- - */})) + describe('single image', () => { + var TYPES = [ + {type: 'FACE_DETECTION'}, + {type: 'LABEL_DETECTION'}, + {type: 'SAFE_SEARCH_DETECTION'}, ]; - - expectedResults = expectedResults.concat( - expectedResults[0] - .replace(/\n/g, ' ') - .trim() - .split(' ') - ); - - it('should detect text', function(done) { - vision.detectText(IMAGES.text, function(err, text) { - assert.ifError(err); - - assert.deepEqual(text, expectedResults); - - done(); - }); - }); - - it('should detect text from multiple images', function(done) { - vision.detectText([ - IMAGES.rushmore, - IMAGES.text - ], function(err, texts) { - assert.ifError(err); - - assert.strictEqual(texts.length, 2); - - assert.deepEqual(texts[0], []); - assert.deepEqual(texts[1], expectedResults); - - done(); - }); - }); - - it('should support verbose mode', function(done) { - var options = { - verbose: true - }; - - vision.detectText(IMAGES.text, options, function(err, text) { - assert.ifError(err); - - assert(is.defined(text[0].bounds)); - - done(); + it('should perform multiple detections', () => { + return vision.annotateImage({ + features: TYPES, + image: {source: {filename: IMAGES.rushmore}}, + }).then(responses => { + var response = responses[0]; + assert(response.faceAnnotations.length >= 1); + assert(response.labelAnnotations.length >= 1); + assert(response.safeSearchAnnotation !== null); }); }); }); diff --git a/packages/vision/test/gapic-v1.js b/packages/vision/test/gapic-v1.js new file mode 100644 index 00000000000..1408a392924 --- /dev/null +++ b/packages/vision/test/gapic-v1.js @@ -0,0 +1,82 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +'use strict'; + +var assert = require('assert'); +var vision = require('../src'); + +var FAKE_STATUS_CODE = 1; +var error = new Error(); +error.code = FAKE_STATUS_CODE; + +describe('ImageAnnotatorClient', function() { + describe('batchAnnotateImages', function() { + it('invokes batchAnnotateImages without error', function(done) { + var client = vision.v1(); + + // Mock request + var requests = []; + var request = { + requests : requests + }; + + // Mock response + var expectedResponse = {}; + + // Mock Grpc layer + client._batchAnnotateImages = mockSimpleGrpcMethod(request, expectedResponse); + + client.batchAnnotateImages(request, function(err, response) { + assert.ifError(err); + assert.deepStrictEqual(response, expectedResponse); + done(); + }); + }); + + it('invokes batchAnnotateImages with error', function(done) { + var client = vision.v1(); + + // Mock request + var requests = []; + var request = { + requests : requests + }; + + // Mock Grpc layer + client._batchAnnotateImages = mockSimpleGrpcMethod(request, null, error); + + client.batchAnnotateImages(request, function(err, response) { + assert(err instanceof Error); + assert.equal(err.code, FAKE_STATUS_CODE); + done(); + }); + }); + }); + +}); + +function mockSimpleGrpcMethod(expectedRequest, response, error) { + return function(actualRequest, options, callback) { + assert.deepStrictEqual(actualRequest, expectedRequest); + if (error) { + callback(error); + } else if (response) { + callback(null, response); + } else { + callback(null); + } + }; +} diff --git a/packages/vision/test/helpers.test.js b/packages/vision/test/helpers.test.js new file mode 100644 index 00000000000..c740193bfe7 --- /dev/null +++ b/packages/vision/test/helpers.test.js @@ -0,0 +1,283 @@ +/*! + * Copyright 2017 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +var assert = require('assert'); +var fs = require('fs'); +var is = require('is'); +var sinon = require('sinon'); + +var Vision = require('../'); + + +describe('Vision helper methods', () => { + var sandbox = sinon.sandbox.create(); + + afterEach(() => { + sandbox.restore(); + }); + + describe('annotateImage', () => { + it('calls batchAnnotateImages correctly', () => { + var vision = Vision.v1(); + var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, {responses: [{ + logoAnnotations: [{description: 'Google'}], + }]}); + + // Ensure that the annotateImage method arrifies the request and + // passes it through to the batch annotation method. + var request = { + image: {content: new Buffer('bogus==')}, + features: {type: ['LOGO_DETECTION']}, + }; + return vision.annotateImage(request).then(r => { + var response = r[0]; + + // Ensure that we got the slice of the response that we expected. + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to batchAnnotateImages and ensure they matched + // the expected signature. 
+ assert(batchAnnotate.callCount === 1); + assert(batchAnnotate.calledWith([request])); + }); + }); + + it('understands buffers', () => { + var vision = Vision.v1(); + + // Stub out the batch annotation method. + var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, {responses: [{ + logoAnnotations: [{description: 'Google'}], + }]}); + + // Ensure that the annotateImage method arrifies the request and + // passes it through to the batch annotation method. + var request = { + image: new Buffer('fakeImage'), + features: {type: ['LOGO_DETECTION']}, + }; + return vision.annotateImage(request).then(r => { + var response = r[0]; + + // Ensure that we got the slice of the response that we expected. + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to batchAnnotateImages and ensure they matched + // the expected signature. + assert(batchAnnotate.callCount === 1); + assert.deepEqual(request, { + image: {content: 'ZmFrZUltYWdl'}, + features: {type: ['LOGO_DETECTION']}, + }); + assert(batchAnnotate.calledWith([request])); + }); + }); + + it('understands filenames', () => { + var vision = Vision.v1(); + + // Stub out `fs.readFile` and return a bogus image object. + // This allows us to test filename detection. + var readFile = sandbox.stub(fs, 'readFile'); + readFile.withArgs('image.jpg').callsArgWith(2, null, + new Buffer('fakeImage') + ); + readFile.callThrough(); + + // Stub out the batch annotation method as before. + var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, {responses: [{ + logoAnnotations: [{description: 'Google'}], + }]}); + + // Ensure that the annotateImage method arrifies the request and + // passes it through to the batch annotation method. + var request = { + image: {source: {filename: 'image.jpg'}}, + features: {type: ['LOGO_DETECTION']}, + }; + return vision.annotateImage(request).then(r => { + var response = r[0]; + + // Ensure that we got the slice of the response that we expected. + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to `readFile` to ensure that they matched + // the expected signature. + assert(readFile.callCount === 1); + assert(readFile.calledWith('image.jpg')); + + // Inspect the calls to batchAnnotateImages and ensure they matched + // the expected signature. + assert(batchAnnotate.callCount === 1); + assert.deepEqual(request, { + image: {content: 'ZmFrZUltYWdl'}, + features: {type: ['LOGO_DETECTION']}, + }); + assert(batchAnnotate.calledWith([request])); + }); + }); + + it('propagates the error if a file is not found', () => { + var vision = Vision.v1(); + + // Stub out `fs.readFile` and return a bogus image object. + // This allows us to test filename detection. + var readFile = sandbox.stub(fs, 'readFile'); + readFile.withArgs('image.jpg').callsArgWith(2, {error: 404}); + readFile.callThrough(); + + // Ensure that the annotateImage method arrifies the request and + // passes it through to the batch annotation method. 
+ var request = { + image: {source: {filename: 'image.jpg'}}, + features: {type: ['LOGO_DETECTION']}, + }; + return vision.annotateImage(request).then(assert.fail).catch(err => { + assert.deepEqual(err, {error: 404}); + }); + }); + + it('retains call options sent', () => { + var vision = Vision.v1(); + var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, {responses: [{ + logoAnnotations: [{description: 'Google'}], + }]}); + + // Ensure that the annotateImage method arrifies the request and + // passes it through to the batch annotation method. + var request = { + image: {content: new Buffer('bogus==')}, + features: {type: ['LOGO_DETECTION']}, + }; + return vision.annotateImage(request, {foo: 'bar'}).then(r => { + var response = r[0]; + + // Ensure that we got the slice of the response that we expected. + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to batchAnnotateImages and ensure they matched + // the expected signature. + assert(batchAnnotate.callCount === 1); + assert(batchAnnotate.calledWith([request], {foo: 'bar'})); + }); + }); + + it('fires a callback if provided', done => { + var vision = Vision.v1(); + var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, {responses: [{ + logoAnnotations: [{description: 'Google'}], + }]}); + + // Ensure that the annotateImage method does *not* pass the callback + // on to batchAnnotateImages, but rather handles it itself. + var request = { + image: {content: new Buffer('bogus==')}, + features: {type: ['LOGO_DETECTION']}, + }; + vision.annotateImage(request, function(err, response) { + // Establish that we got the expected response. + assert(is.undefined(err)); + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to batchAnnotate and ensure that they match + // what we expected. + assert(batchAnnotate.callCount === 1); + assert(batchAnnotate.calledWith([request], undefined)); + done(); + }); + }); + + it('fires the callback on error', () => { + var vision = Vision.v1(); + var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, {message: 'Bad things!'}); + + // Ensure that the annotateImage method does *not* pass the callback + // on to batchAnnotateImages, but rather handles it itself. + var request = { + image: {content: new Buffer('bogus==')}, + features: {type: ['LOGO_DETECTION']}, + }; + return vision.annotateImage(request).catch(err => { + // Establish that we got the expected response. + assert.deepEqual(err, {message: 'Bad things!'}); + + // Inspect the calls to batchAnnotate and ensure that they match + // what we expected. 
+ assert(batchAnnotate.callCount === 1); + assert(batchAnnotate.calledWith([request], undefined)); + }); + }); + + it('requires an image and throws without one', () => { + var vision = Vision.v1(); + var request = {}; + return vision.annotateImage(request).then(assert.fail).catch(err => { + var expected = 'Attempted to call `annotateImage` with no image.'; + assert(err.message === expected); + }); + }); + }); + + describe('single-feature methods', () => { + it('calls annotateImage with the correct feature', () => { + var vision = Vision.v1(); + var annotate = sandbox.spy(vision, 'annotateImage'); + var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, {responses: [{ + logoAnnotations: [{description: 'Google'}], + }]}); + + // Ensure that the annotateImage method does *not* pass the callback + // on to batchAnnotateImages, but rather handles it itself. + var image = {content: new Buffer('bogus==')}; + return vision.logoDetection(image).then(r => { + var response = r[0]; + + // Ensure that we got the slice of the response that we expected. + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to annotateImage and batchAnnotateImages and + // ensure they matched the expected signature. + assert(annotate.callCount === 1); + assert(annotate.calledWith({image: image, features: [{type: 3}]})); + assert(batchAnnotate.callCount === 1); + assert(batchAnnotate.calledWith( + [{image: image, features: [{type: 3}]}] + )); + }); + }); + }); +}); diff --git a/packages/vision/test/index.js b/packages/vision/test/index.js deleted file mode 100644 index 74838291b7c..00000000000 --- a/packages/vision/test/index.js +++ /dev/null @@ -1,1868 +0,0 @@ -/** - * Copyright 2015 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -'use strict'; - -var assert = require('assert'); -var async = require('async'); -var deepStrictEqual = require('deep-strict-equal'); -var extend = require('extend'); -var fs = require('fs'); -var GrpcService = require('@google-cloud/common-grpc').Service; -var prop = require('propprop'); -var proxyquire = require('proxyquire'); -var tmp = require('tmp'); -var util = require('@google-cloud/common').util; - -var promisified = false; -var fakeUtil = extend({}, util, { - promisifyAll: function(Class) { - if (Class.name === 'Vision') { - promisified = true; - } - } -}); - -var fakeV1Override; -function fakeV1() { - if (fakeV1Override) { - return fakeV1Override.apply(null, arguments); - } - - return { - imageAnnotatorClient: util.noop - }; -} - -describe('Vision', function() { - var IMAGE = './image.jpg'; - var PROJECT_ID = 'project-id'; - - var Vision; - var VisionCached; - var vision; - - var OPTIONS = { - projectId: PROJECT_ID - }; - - before(function() { - Vision = proxyquire('../', { - '@google-cloud/common': { - util: fakeUtil - }, - './v1': fakeV1 - }); - - VisionCached = extend({}, Vision); - }); - - beforeEach(function() { - fakeV1Override = null; - - vision = new Vision(OPTIONS); - - extend(Vision, VisionCached); - }); - - describe('instantiation', function() { - it('should promisify all the things', function() { - assert(promisified); - }); - - it('should normalize the arguments', function() { - var normalizeArguments = fakeUtil.normalizeArguments; - var normalizeArgumentsCalled = false; - var fakeOptions = { projectId: PROJECT_ID }; - var fakeContext = {}; - - fakeUtil.normalizeArguments = function(context, options) { - normalizeArgumentsCalled = true; - assert.strictEqual(context, fakeContext); - assert.strictEqual(options, fakeOptions); - return options; - }; - - Vision.call(fakeContext, fakeOptions); - assert(normalizeArgumentsCalled); - - fakeUtil.normalizeArguments = normalizeArguments; - }); - - it('should create a gax api client', function() { - var expectedVisionClient = {}; - - fakeV1Override = function(options) { - var expected = extend({}, OPTIONS, { - libName: 'gccl', - libVersion: require('../package.json').version - }); - assert.deepStrictEqual(options, expected); - - return { - imageAnnotatorClient: function(options) { - assert.deepStrictEqual(options, expected); - return expectedVisionClient; - } - }; - }; - - var vision = new Vision(OPTIONS); - - assert.deepEqual(vision.api, { - Vision: expectedVisionClient - }); - }); - }); - - describe('constants', function() { - it('should define constants', function() { - assert.strictEqual(Vision.likelihood.VERY_UNLIKELY, 0); - assert.strictEqual(Vision.likelihood.UNLIKELY, 1); - assert.strictEqual(Vision.likelihood.POSSIBLE, 2); - assert.strictEqual(Vision.likelihood.LIKELY, 3); - assert.strictEqual(Vision.likelihood.VERY_LIKELY, 4); - }); - }); - - describe('annotate', function() { - var REQ = {}; - - it('should arrify request objects', function(done) { - vision.api.Vision = { - batchAnnotateImages: function(reqOpts) { - assert.deepEqual(reqOpts, { - requests: [REQ] - }); - done(); - } - }; - - vision.annotate(REQ, assert.ifError); - }); - - describe('error', function() { - var error = new Error('Error.'); - var apiResponse = {}; - - beforeEach(function() { - vision.api.Vision = { - batchAnnotateImages: function(reqOpts, callback) { - callback(error, apiResponse); - } - }; - }); - - it('should execute callback with error & API response', function(done) { - vision.annotate(REQ, function(err, annotations, resp) { - 
assert.strictEqual(err, error); - assert.strictEqual(annotations, null); - assert.strictEqual(resp, apiResponse); - done(); - }); - }); - }); - - describe('success', function() { - var apiResponse = { - responses: [] - }; - - beforeEach(function() { - vision.api.Vision = { - batchAnnotateImages: function(reqOpts, callback) { - callback(null, apiResponse); - } - }; - }); - - it('should execute callback with annotations & API resp', function(done) { - vision.annotate(REQ, function(err, annotations, resp) { - assert.ifError(err); - - assert.strictEqual(annotations, apiResponse.responses); - assert.strictEqual(resp, apiResponse); - - done(); - }); - }); - }); - }); - - describe('detect', function() { - var TYPES = [ - 'face', - 'label' - ]; - - var IMAGES = [ - { - content: 'aGk=' - } - ]; - - var MULTIPLE_IMAGES = [ - IMAGES[0], - IMAGES[0] - ]; - - - beforeEach(function() { - Vision.findImages_ = function(images, callback) { - callback(null, IMAGES); - }; - }); - - it('should find the images', function(done) { - Vision.findImages_ = function(images) { - assert.strictEqual(images, IMAGE); - done(); - }; - - vision.detect(IMAGE, TYPES, assert.ifError); - }); - - it('should return an error from findImages_', function(done) { - var error = new Error('Error.'); - - Vision.findImages_ = function(images, callback) { - assert.strictEqual(images, IMAGE); - callback(error); - }; - - vision.detect(IMAGE, TYPES, function(err) { - assert.strictEqual(err, error); - done(); - }); - }); - - it('should throw an error if a type does not exist', function() { - var type = 'not-real-type'; - - assert.throws(function() { - vision.detect(IMAGE, type, assert.ifError); - }, /Requested detection feature not found: not-real-type/); - }); - - it('should format the correct config', function(done) { - var typeShortNameToFullName = { - crop: 'CROP_HINTS', - crops: 'CROP_HINTS', - - doc: 'DOCUMENT_TEXT_DETECTION', - document: 'DOCUMENT_TEXT_DETECTION', - - face: 'FACE_DETECTION', - faces: 'FACE_DETECTION', - - label: 'LABEL_DETECTION', - labels: 'LABEL_DETECTION', - - landmark: 'LANDMARK_DETECTION', - landmarks: 'LANDMARK_DETECTION', - - logo: 'LOGO_DETECTION', - logos: 'LOGO_DETECTION', - - properties: 'IMAGE_PROPERTIES', - - safeSearch: 'SAFE_SEARCH_DETECTION', - - similar: 'WEB_DETECTION', - - text: 'TEXT_DETECTION' - }; - - var shortNames = Object.keys(typeShortNameToFullName); - - function checkConfig(shortName, callback) { - vision.annotate = function(config) { - assert.deepEqual(config, [ - { - image: IMAGES[0], - features: [ - { - type: typeShortNameToFullName[shortName] - } - ] - } - ]); - - callback(); - }; - - vision.detect(IMAGE, shortName, assert.ifError); - } - - async.each(shortNames, checkConfig, done); - }); - - it('should allow setting imageContext', function(done) { - var imageContext = { - latLongRect: { - minLatLng: { - latitude: 37.420901, - longitude: -122.081293 - }, - maxLatLng: { - latitude: 37.423228, - longitude: -122.086347 - } - } - }; - - vision.annotate = function(config) { - assert.deepEqual(config, [ - { - image: IMAGES[0], - features: [ - { - type: 'LABEL_DETECTION' - } - ], - imageContext: imageContext - } - ]); - - done(); - }; - - vision.detect(IMAGE, { - types: ['label'], - imageContext: imageContext - }, assert.ifError); - }); - - it('should allow setting maxResults', function(done) { - var maxResults = 10; - - vision.annotate = function(config) { - assert.deepEqual(config, [ - { - image: IMAGES[0], - features: [ - { - type: 'FACE_DETECTION', - maxResults: 10 - } - ] - } - ]); - - 
done(); - }; - - vision.detect(IMAGE, { - types: ['face'], - maxResults: maxResults - }, assert.ifError); - }); - - it('should return empty detections when none were found', function(done) { - vision.annotate = function(config, callback) { - callback(null, [ - {}, - {} - ]); - }; - - vision.detect(IMAGE, TYPES, function(err, detections) { - assert.ifError(err); - assert.deepEqual(detections, { - faces: [], - labels: [] - }); - done(); - }); - }); - - it('should return the correct detections', function(done) { - var annotations = [ - { - cropHintsAnnotation: { anno: true } - }, - { - faceAnnotations: { anno: true } - }, - { - fullTextAnnotation: { anno: true } - }, - { - imagePropertiesAnnotation: { anno: true } - }, - { - labelAnnotations: { anno: true } - }, - { - landmarkAnnotations: { anno: true } - }, - { - logoAnnotations: { anno: true } - }, - { - safeSearchAnnotation: { anno: true } - }, - { - textAnnotations: { anno: true } - }, - { - webDetection: { anno: true } - } - ]; - - var cropHintsAnnotation = {}; - var faceAnnotation = {}; - var fullTextAnnotation = {}; - var imagePropertiesAnnotation = {}; - var entityAnnotation = {}; - var safeSearchAnnotation = {}; - var webDetection = {}; - - Vision.formatCropHintsAnnotation_ = function() { - return cropHintsAnnotation; - }; - - Vision.formatFaceAnnotation_ = function() { - return faceAnnotation; - }; - - Vision.formatFullTextAnnotation_ = function() { - return fullTextAnnotation; - }; - - Vision.formatImagePropertiesAnnotation_ = function() { - return imagePropertiesAnnotation; - }; - - Vision.formatEntityAnnotation_ = function() { - return entityAnnotation; - }; - - Vision.formatSafeSearchAnnotation_ = function() { - return safeSearchAnnotation; - }; - - Vision.formatWebDetection_ = function() { - return webDetection; - }; - - vision.annotate = function(config, callback) { - callback(null, annotations); - }; - - var expected = { - crops: cropHintsAnnotation, - faces: faceAnnotation, - document: fullTextAnnotation, - properties: imagePropertiesAnnotation, - labels: entityAnnotation, - landmarks: entityAnnotation, - logos: entityAnnotation, - safeSearch: safeSearchAnnotation, - text: entityAnnotation, - similar: webDetection - }; - - var types = Object.keys(expected); - - vision.detect(IMAGE, types, function(err, detections) { - assert.ifError(err); - assert(deepStrictEqual(detections, expected)); - done(); - }); - }); - - it('should return an empty array for empty responses', function(done) { - var annotations = [ - {}, // empty `faceAnnotations` - { - imagePropertiesAnnotation: {} - } - ]; - - vision.annotate = function(config, callback) { - callback(null, annotations); - }; - - var expected = { - faces: [], - properties: {} - }; - - var types = Object.keys(expected); - - vision.detect(IMAGE, types, function(err, detections) { - assert.ifError(err); - - assert(deepStrictEqual(detections, expected)); - - done(); - }); - }); - - it('should return partial failure errors', function(done) { - var error1 = { error: true }; - var error2 = { error: true }; - - var annotations = [ - { error: error1 }, - { error: error2 } - ]; - - var types = ['faces', 'properties']; - - Vision.formatError_ = function(err) { - err.formatted = true; - return err; - }; - - vision.annotate = function(config, callback) { - callback(null, annotations); - }; - - vision.detect(IMAGE, types, function(err, detections) { - assert.strictEqual(err.name, 'PartialFailureError'); - - assert.deepEqual(err.errors, [ - { - image: IMAGE, - errors: [ - extend(error1, { - type: 
types[0], - formatted: true - }), - extend(error2, { - type: types[1], - formatted: true - }) - ] - } - ]); - - assert.deepEqual(detections, {}); - - done(); - }); - }); - - it('should return partial failure errors for multi images', function(done) { - var error1 = { error: true }; - var error2 = { error: true }; - var error3 = { error: true }; - var error4 = { error: true }; - - var annotations = [ - { error: error1 }, - { error: error2 }, - { error: error3 }, - { error: error4 } - ]; - - var images = ['./image.jpg', './image-2.jpg']; - var types = ['faces', 'properties']; - - Vision.findImages_ = function(images, callback) { - callback(null, MULTIPLE_IMAGES); - }; - - Vision.formatError_ = function(err) { - err.formatted = true; - return err; - }; - - vision.annotate = function(config, callback) { - callback(null, annotations); - }; - - vision.detect(images, types, function(err, detections) { - assert.strictEqual(err.name, 'PartialFailureError'); - - assert.deepEqual(err.errors, [ - { - image: images[0], - errors: [ - extend(error1, { - type: types[0], - formatted: true - }), - extend(error2, { - type: types[1], - formatted: true - }) - ] - }, - { - image: images[1], - errors: [ - extend(error3, { - type: types[0], - formatted: true - }), - extend(error4, { - type: types[1], - formatted: true - }) - ] - } - ]); - - assert.deepEqual(detections, [{}, {}]); - - done(); - }); - }); - - it('should return only the detection wanted', function(done) { - vision.annotate = function(config, callback) { - callback(null, [{}]); - }; - - vision.detect(IMAGE, ['face'], function(err, detection) { - assert.ifError(err); - - assert.deepEqual(detection, []); - - done(); - }); - }); - - it('should return the correct detections for multiple img', function(done) { - var anno = { a: 'b', c: 'd' }; - - var annotations = [ - // Image 1 annotations: - { - faceAnnotations: anno - }, - { - imagePropertiesAnnotation: anno - }, - { - labelAnnotations: anno - }, - { - landmarkAnnotations: anno - }, - { - logoAnnotations: anno - }, - { - safeSearchAnnotation: anno - }, - { - textAnnotations: anno - }, - - // Image 2 annotations: - { - faceAnnotations: anno - }, - { - imagePropertiesAnnotation: anno - }, - { - labelAnnotations: anno - }, - { - landmarkAnnotations: anno - }, - { - logoAnnotations: anno - }, - { - safeSearchAnnotation: anno - }, - { - textAnnotations: anno - } - ]; - - var faceAnnotation = {}; - var imagePropertiesAnnotation = {}; - var entityAnnotation = {}; - var safeSearchAnnotation = {}; - - Vision.formatFaceAnnotation_ = function(anno_) { - assert.strictEqual(anno_, anno); - return faceAnnotation; - }; - - Vision.formatImagePropertiesAnnotation_ = function(anno_) { - assert.strictEqual(anno_, anno); - return imagePropertiesAnnotation; - }; - - Vision.formatEntityAnnotation_ = function(anno_) { - assert.strictEqual(anno_, anno); - return entityAnnotation; - }; - - Vision.formatSafeSearchAnnotation_ = function(anno_) { - assert.strictEqual(anno_, anno); - return safeSearchAnnotation; - }; - - Vision.findImages_ = function(images, callback) { - callback(null, IMAGES.concat(IMAGES)); - }; - - vision.annotate = function(config, callback) { - callback(null, annotations); - }; - - var expected = [ - { - faces: faceAnnotation, - properties: imagePropertiesAnnotation, - labels: entityAnnotation, - landmarks: entityAnnotation, - logos: entityAnnotation, - safeSearch: safeSearchAnnotation, - text: entityAnnotation - }, - { - faces: faceAnnotation, - properties: imagePropertiesAnnotation, - labels: 
entityAnnotation, - landmarks: entityAnnotation, - logos: entityAnnotation, - safeSearch: safeSearchAnnotation, - text: entityAnnotation - } - ]; - - var types = Object.keys(expected[0]); - - vision.detect([IMAGE, IMAGE], types, function(err, detections) { - assert.ifError(err); - assert(deepStrictEqual(detections, expected)); - done(); - }); - }); - - it('should return the raw annotation for unknown types', function(done) { - var anno = { a: 'b', c: 'd' }; - - var annotations = [ - { - faceAnnotations: anno - } - ]; - - Vision.formatFaceAnnotation_ = null; - - vision.annotate = function(config, callback) { - callback(null, annotations); - }; - - vision.detect(IMAGE, 'faces', function(err, detections) { - assert.ifError(err); - assert.strictEqual(detections, anno); - done(); - }); - }); - - it('should return an error from annotate()', function(done) { - var error = new Error('Error.'); - var apiResponse = {}; - - vision.annotate = function(config, callback) { - callback(error, null, apiResponse); - }; - - vision.detect(IMAGE, TYPES, function(err, annotations, resp) { - assert.strictEqual(err, error); - assert.strictEqual(annotations, null); - assert.strictEqual(resp, apiResponse); - done(); - }); - }); - - it('should return the apiResponse from annotate()', function(done) { - var apiResponse = { - responses: [ - { - faceAnnotations: {} - } - ] - }; - - var originalApiResponse = extend(true, {}, apiResponse); - - Vision.formatFaceAnnotation_ = function() { - return {}; - }; - - vision.annotate = function(config, callback) { - callback(null, apiResponse.responses, apiResponse); - }; - - vision.detect(IMAGE, TYPES, function(err, annotations, resp) { - assert.ifError(err); - - // assert.strictEqual(resp, apiResponse); - assert.deepEqual(resp, originalApiResponse); - - done(); - }); - }); - }); - - describe('detectCrops', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('crops'); - - vision.detectCrops(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('crops', options); - - vision.detectCrops(IMAGE, options, done); - }); - }); - - describe('detectFaces', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('faces'); - - vision.detectFaces(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('faces', options); - - vision.detectFaces(IMAGE, options, done); - }); - }); - - describe('detectLabels', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('labels'); - - vision.detectLabels(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('labels', options); - - vision.detectLabels(IMAGE, options, done); - }); - }); - - describe('detectLandmarks', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('landmarks'); - - vision.detectLandmarks(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('landmarks', options); - - vision.detectLandmarks(IMAGE, options, done); - }); - }); - - describe('detectLogos', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('logos'); - - 
vision.detectLogos(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('logos', options); - - vision.detectLogos(IMAGE, options, done); - }); - }); - - describe('detectProperties', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('properties'); - - vision.detectProperties(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('properties', options); - - vision.detectProperties(IMAGE, options, done); - }); - }); - - describe('detectSafeSearch', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('safeSearch'); - - vision.detectSafeSearch(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('safeSearch', options); - - vision.detectSafeSearch(IMAGE, options, done); - }); - }); - - describe('detectSimilar', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('similar'); - - vision.detectSimilar(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('similar', options); - - vision.detectSimilar(IMAGE, options, done); - }); - }); - - describe('detectText', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('text'); - - vision.detectText(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('text', options); - - vision.detectText(IMAGE, options, done); - }); - }); - - describe('readDocument', function() { - it('should accept a callback only', function(done) { - vision.detect = testWithoutOptions('document'); - - vision.readDocument(IMAGE, done); - }); - - it('should accept options', function(done) { - var options = { - a: 'b', - c: 'd' - }; - - vision.detect = testWithOptions('document', options); - - vision.readDocument(IMAGE, options, done); - }); - }); - - describe('findImages_', function() { - it('should return buffer for snippet sandbox', function(done) { - global.GCLOUD_SANDBOX_ENV = true; - - Vision.findImages_({}, function(err, images) { - delete global.GCLOUD_SANDBOX_ENV; - assert.ifError(err); - - assert.deepEqual(images, [ - { - content: new Buffer('') - } - ]); - - done(); - }); - }); - - it('should convert a File object', function(done) { - var file = { - name: 'file-name', - bucket: { - name: 'bucket-name' - } - }; - - var isCustomType = util.isCustomType; - - fakeUtil.isCustomType = function(unknown, type) { - fakeUtil.isCustomType = isCustomType; - assert.strictEqual(unknown, file); - assert.strictEqual(type, 'storage/file'); - return true; - }; - - Vision.findImages_(file, function(err, images) { - assert.ifError(err); - - assert.deepEqual(images, [ - { - source: { - gcsImageUri: 'gs://' + file.bucket.name + '/' + file.name - } - } - ]); - - done(); - }); - }); - - it('should properly format a URL', function(done) { - var imageUri = 'http://www.google.com/logo.png'; - - Vision.findImages_(imageUri, function(err, images) { - assert.ifError(err); - assert.deepEqual(images, [ - { - source: { - imageUri: imageUri - } - } - ]); - done(); - }); - }); - - it('should read from a file path', function(done) { - 
tmp.setGracefulCleanup(); - - tmp.file(function tempFileCreated_(err, tmpFilePath) { - assert.ifError(err); - - var contents = 'abcdef'; - - function writeFile(callback) { - fs.writeFile(tmpFilePath, contents, callback); - } - - function convertFile(callback) { - Vision.findImages_(tmpFilePath, callback); - } - - async.waterfall([writeFile, convertFile], function(err, images) { - assert.ifError(err); - - assert.deepEqual(images, [ - { - content: new Buffer(contents).toString('base64') - } - ]); - - done(); - }); - }); - }); - - - it('should get content from a buffer', function(done) { - var base64String = 'aGVsbG8gd29ybGQ='; - var buffer = new Buffer(base64String, 'base64'); - - Vision.findImages_(buffer, function(err, images) { - assert.ifError(err); - assert.deepEqual(images, [ - { - content: base64String - } - ]); - done(); - }); - }); - - it('should return an error when file cannot be found', function(done) { - Vision.findImages_('./not-real-file.png', function(err) { - assert.strictEqual(err.code, 'ENOENT'); - done(); - }); - }); - }); - - describe('formatCropHintsAnnotation_', function() { - var VERTICES = [ - { x: 0, y: 0 }, - { x: 0, y: 0 } - ]; - - var CONFIDENCE = 0.3; - - var cropHintsAnnotation = { - cropHints: [ - { - boundingPoly: { - vertices: VERTICES - }, - confidence: CONFIDENCE - } - ] - }; - - describe('verbose: false', function() { - var opts = {}; - - it('should format the annotation', function() { - var fmtd = Vision.formatCropHintsAnnotation_(cropHintsAnnotation, opts); - - assert.deepEqual(fmtd, [ - VERTICES - ]); - }); - }); - - describe('verbose: true', function() { - var opts = { verbose: true }; - - it('should format the annotation', function() { - var fmtd = Vision.formatCropHintsAnnotation_(cropHintsAnnotation, opts); - - assert.deepEqual(fmtd, [ - { - bounds: VERTICES, - confidence: CONFIDENCE - } - ]); - }); - }); - }); - - describe('formatEntityAnnotation_', function() { - var entityAnnotation = { - description: 'description', - mid: 'mid', - score: 0.4, - boundingPoly: { - vertices: {} - }, - confidence: 0.2, - locations: [ - { - latLng: [] - } - ], - properties: {} - }; - - describe('verbose: false', function() { - it('should just return the description', function() { - var formatted = Vision.formatEntityAnnotation_(entityAnnotation, {}); - - assert.strictEqual(formatted, entityAnnotation.description); - }); - }); - - describe('verbose: true', function() { - var opts = { - verbose: true - }; - - it('should format the entity annotation', function() { - var formatted = Vision.formatEntityAnnotation_(entityAnnotation, opts); - - assert.deepEqual(formatted, { - desc: entityAnnotation.description, - mid: entityAnnotation.mid, - score: entityAnnotation.score * 100, - bounds: entityAnnotation.boundingPoly.vertices, - confidence: entityAnnotation.confidence * 100, - locations: entityAnnotation.locations.map(prop('latLng')), - properties: entityAnnotation.properties - }); - }); - }); - }); - - describe('formatError_', function() { - var error = { - code: 1, - message: 'Oh no!', - details: [ - 'these should be clipped' - ] - }; - - it('should format an error', function() { - var err = Vision.formatError_(error); - - assert.deepEqual(err, { - code: GrpcService.GRPC_ERROR_CODE_TO_HTTP[1].code, - message: error.message - }); - }); - }); - - describe('formatFaceAnnotation_', function() { - var faceAnnotation = { - panAngle: {}, - rollAngle: {}, - tiltAngle: {}, - - boundingPoly: { - vertices: {} - }, - fdBoundingPoly: { - vertices: {} - }, - - landmarkingConfidence: 
0.2, - - landmarks: [ - { - type: 'CHIN_GNATHION', - position: {} - }, - { - type: 'CHIN_LEFT_GONION', - position: {} - }, - { - type: 'CHIN_RIGHT_GONION', - position: {} - }, - { - type: 'LEFT_EAR_TRAGION', - position: {} - }, - { - type: 'RIGHT_EAR_TRAGION', - position: {} - }, - { - type: 'LEFT_OF_LEFT_EYEBROW', - position: {} - }, - { - type: 'RIGHT_OF_LEFT_EYEBROW', - position: {} - }, - { - type: 'LEFT_EYEBROW_UPPER_MIDPOINT', - position: {} - }, - { - type: 'LEFT_OF_RIGHT_EYEBROW', - position: {} - }, - { - type: 'RIGHT_OF_RIGHT_EYEBROW', - position: {} - }, - { - type: 'RIGHT_EYEBROW_UPPER_MIDPOINT', - position: {} - }, - { - type: 'LEFT_EYE_BOTTOM_BOUNDARY', - position: {} - }, - { - type: 'LEFT_EYE', - position: {} - }, - { - type: 'LEFT_EYE_LEFT_CORNER', - position: {} - }, - { - type: 'LEFT_EYE_PUPIL', - position: {} - }, - { - type: 'LEFT_EYE_RIGHT_CORNER', - position: {} - }, - { - type: 'LEFT_EYE_TOP_BOUNDARY', - position: {} - }, - { - type: 'RIGHT_EYE_BOTTOM_BOUNDARY', - position: {} - }, - { - type: 'RIGHT_EYE', - position: {} - }, - { - type: 'RIGHT_EYE_LEFT_CORNER', - position: {} - }, - { - type: 'RIGHT_EYE_PUPIL', - position: {} - }, - { - type: 'RIGHT_EYE_RIGHT_CORNER', - position: {} - }, - { - type: 'RIGHT_EYE_TOP_BOUNDARY', - position: {} - }, - { - type: 'FOREHEAD_GLABELLA', - position: {} - }, - { - type: 'LOWER_LIP', - position: {} - }, - { - type: 'UPPER_LIP', - position: {} - }, - { - type: 'MOUTH_CENTER', - position: {} - }, - { - type: 'MOUTH_LEFT', - position: {} - }, - { - type: 'MOUTH_RIGHT', - position: {} - }, - { - type: 'NOSE_BOTTOM_CENTER', - position: {} - }, - { - type: 'NOSE_BOTTOM_LEFT', - position: {} - }, - { - type: 'NOSE_BOTTOM_RIGHT', - position: {} - }, - { - type: 'NOSE_TIP', - position: {} - }, - { - type: 'MIDPOINT_BETWEEN_EYES', - position: {} - } - ], - - detectionConfidence: 0.2, - blurredLikelihood: 'LIKELY', - underExposedLikelihood: 'LIKELY', - joyLikelihood: 'LIKELY', - headwearLikelihood: 'LIKELY', - angerLikelihood: 'LIKELY', - sorrowLikelihood: 'LIKELY', - surpriseLikelihood: 'LIKELY', - - nonExistentLikelihood: 'LIKELY' - }; - - function findLandmark(type) { - var landmarks = faceAnnotation.landmarks; - - return landmarks.filter(function(landmark) { - return landmark.type === type; - })[0].position; - } - - it('should format the annotation', function() { - var expected = { - angles: { - pan: faceAnnotation.panAngle, - roll: faceAnnotation.rollAngle, - tilt: faceAnnotation.tiltAngle - }, - - bounds: { - head: faceAnnotation.boundingPoly.vertices, - face: faceAnnotation.fdBoundingPoly.vertices - }, - - features: { - confidence: faceAnnotation.landmarkingConfidence * 100, - chin: { - center: findLandmark('CHIN_GNATHION'), - left: findLandmark('CHIN_LEFT_GONION'), - right: findLandmark('CHIN_RIGHT_GONION') - }, - ears: { - left: findLandmark('LEFT_EAR_TRAGION'), - right: findLandmark('RIGHT_EAR_TRAGION'), - }, - eyebrows: { - left: { - left: findLandmark('LEFT_OF_LEFT_EYEBROW'), - right: findLandmark('RIGHT_OF_LEFT_EYEBROW'), - top: findLandmark('LEFT_EYEBROW_UPPER_MIDPOINT') - }, - right: { - left: findLandmark('LEFT_OF_RIGHT_EYEBROW'), - right: findLandmark('RIGHT_OF_RIGHT_EYEBROW'), - top: findLandmark('RIGHT_EYEBROW_UPPER_MIDPOINT') - } - }, - eyes: { - left: { - bottom: findLandmark('LEFT_EYE_BOTTOM_BOUNDARY'), - center: findLandmark('LEFT_EYE'), - left: findLandmark('LEFT_EYE_LEFT_CORNER'), - pupil: findLandmark('LEFT_EYE_PUPIL'), - right: findLandmark('LEFT_EYE_RIGHT_CORNER'), - top: findLandmark('LEFT_EYE_TOP_BOUNDARY') - 
}, - right: { - bottom: findLandmark('RIGHT_EYE_BOTTOM_BOUNDARY'), - center: findLandmark('RIGHT_EYE'), - left: findLandmark('RIGHT_EYE_LEFT_CORNER'), - pupil: findLandmark('RIGHT_EYE_PUPIL'), - right: findLandmark('RIGHT_EYE_RIGHT_CORNER'), - top: findLandmark('RIGHT_EYE_TOP_BOUNDARY') - } - }, - forehead: findLandmark('FOREHEAD_GLABELLA'), - lips: { - bottom: findLandmark('LOWER_LIP'), - top: findLandmark('UPPER_LIP') - }, - mouth: { - center: findLandmark('MOUTH_CENTER'), - left: findLandmark('MOUTH_LEFT'), - right: findLandmark('MOUTH_RIGHT') - }, - nose: { - bottom: { - center: findLandmark('NOSE_BOTTOM_CENTER'), - left: findLandmark('NOSE_BOTTOM_LEFT'), - right: findLandmark('NOSE_BOTTOM_RIGHT') - }, - tip: findLandmark('NOSE_TIP'), - top: findLandmark('MIDPOINT_BETWEEN_EYES') - } - }, - - confidence: faceAnnotation.detectionConfidence * 100, - - anger: true, - angerLikelihood: 3, - blurred: true, - blurredLikelihood: 3, - headwear: true, - headwearLikelihood: 3, - joy: true, - joyLikelihood: 3, - sorrow: true, - sorrowLikelihood: 3, - surprise: true, - surpriseLikelihood: 3, - underExposed: true, - underExposedLikelihood: 3, - - // Checks that *any* property that ends in `Likelihood` is shortened. - nonExistent: true, - nonExistentLikelihood: 3 - }; - - var formatted = Vision.formatFaceAnnotation_(faceAnnotation); - - assert(deepStrictEqual(formatted, expected)); - }); - }); - - describe('formatFullTextAnnotation_', function() { - var BLOCK_TYPE = 'block type'; - - var LANGUAGE_CODE = 'language code'; - - var TEXT = 'F'; - - var VERTICES = [ - { x: 0, y: 0 }, - { x: 0, y: 0 }, - { x: 0, y: 0 }, - { x: 0, y: 0 } - ]; - - var fullTextAnnotation = { - text: 'Full text', - pages: [ - { - property: { - detectedLanguages: [ - { - languageCode: LANGUAGE_CODE - } - ] - }, - width: 50, - height: 100, - blocks: [ - { - blockType: BLOCK_TYPE, - boundingBox: { - vertices: VERTICES - }, - paragraphs: [ - { - boundingBox: { - vertices: VERTICES - }, - words: [ - { - boundingBox: { - vertices: VERTICES - }, - symbols: [ - { - boundingBox: { - vertices: VERTICES - }, - text: TEXT - } - ] - } - ] - } - ] - } - ] - } - ] - }; - - describe('verbose: false', function() { - var opts = {}; - - it('should return text property', function() { - var fmtd = Vision.formatFullTextAnnotation_(fullTextAnnotation, opts); - - assert.strictEqual(fmtd, fullTextAnnotation.text); - }); - }); - - describe('verbose: true', function() { - var opts = { verbose: true }; - - it('should return formatted annotation', function() { - var fmtd = Vision.formatFullTextAnnotation_(fullTextAnnotation, opts); - - assert.deepEqual(fmtd, [ - { - languages: [ - LANGUAGE_CODE - ], - width: 50, - height: 100, - blocks: [ - { - type: BLOCK_TYPE, - bounds: VERTICES, - paragraphs: [ - { - bounds: VERTICES, - words: [ - { - bounds: VERTICES, - symbols: [ - { - bounds: VERTICES, - text: TEXT - } - ] - } - ] - } - ] - } - ] - } - ]); - }); - - it('should not require a bounding block box', function() { - var annoWithoutBounding = extend(true, {}, fullTextAnnotation); - delete annoWithoutBounding.pages[0].blocks[0].boundingBox; - - var fmtd = Vision.formatFullTextAnnotation_(annoWithoutBounding, opts); - - assert.deepEqual(fmtd[0].blocks[0].bounds, []); - }); - }); - }); - - describe('formatImagePropertiesAnnotation_', function() { - var imgAnnotation = { - dominantColors: { - colors: [ - { - color: { - red: 255, - green: 255, - blue: 255 - }, - pixelFraction: 0.8, - score: 0.2 - } - ] - } - }; - - describe('verbose: false', function() { - var 
opts = {}; - - it('should format the annotation', function() { - var fmtd = Vision.formatImagePropertiesAnnotation_(imgAnnotation, opts); - - assert.deepEqual(fmtd, { - colors: ['ffffff'] - }); - }); - }); - - describe('verbose: true', function() { - var opts = { - verbose: true - }; - - it('should format the annotation', function() { - var fmtd = Vision.formatImagePropertiesAnnotation_(imgAnnotation, opts); - - assert.deepEqual(fmtd, { - colors: [ - { - red: 255, - green: 255, - blue: 255, - hex: 'ffffff', - coverage: 80, - score: 20 - } - ] - }); - }); - }); - }); - - describe('formatSafeSearchAnnotation_', function() { - var safeSearchAnno = { - adult: 'LIKELY', - medical: 'LIKELY', - spoof: 'LIKELY', - violence: 'LIKELY' - }; - - describe('verbose: false', function() { - var opts = {}; - - it('should convert values to a boolean', function() { - var fmtd = Vision.formatSafeSearchAnnotation_(safeSearchAnno, opts); - - assert.deepEqual(fmtd, { - adult: true, - medical: true, - spoof: true, - violence: true - }); - }); - }); - - describe('verbose: true', function() { - var opts = { - verbose: true - }; - - it('should return raw response', function() { - var fmtd = Vision.formatSafeSearchAnnotation_(safeSearchAnno, opts); - - assert.strictEqual(fmtd, safeSearchAnno); - }); - }); - }); - - describe('formatWebDetection_', function() { - var webDetection = { - webEntities: [ - { - description: 'description' - }, - ], - - fullMatchingImages: [ - { - score: 0, - url: 'http://full-0' - }, - { - score: 1, - url: 'http://full-1' - } - ], - - partialMatchingImages: [ - { - score: 0, - url: 'http://partial-0' - }, - { - score: 1, - url: 'http://partial-1' - } - ], - - pagesWithMatchingImages: [ - { - score: 0, - url: 'http://page-0' - }, - { - score: 1, - url: 'http://page-1' - } - ] - }; - - describe('verbose: false', function() { - var opts = {}; - - it('should return sorted & combined image urls', function() { - var fmtd = Vision.formatWebDetection_(webDetection, opts); - - assert.deepEqual(fmtd, [ - 'http://full-1', - 'http://full-0', - 'http://partial-1', - 'http://partial-0' - ]); - }); - }); - - describe('verbose: true', function() { - var opts = { - verbose: true - }; - - it('should return entities, pages & individual, sorted urls', function() { - var fmtd = Vision.formatWebDetection_(webDetection, opts); - - assert.deepEqual(fmtd, { - entities: webDetection.webEntities.map(prop('description')), - fullMatches: [ - 'http://full-1', - 'http://full-0' - ], - partialMatches: [ - 'http://partial-1', - 'http://partial-0' - ], - pages: [ - 'http://page-1', - 'http://page-0' - ] - }); - }); - }); - }); - - describe('gteLikelihood_', function() { - it('should return booleans', function() { - var baseLikelihood = Vision.likelihood.LIKELY; - - assert.strictEqual( - Vision.gteLikelihood_(baseLikelihood, 'VERY_UNLIKELY'), - false - ); - - assert.strictEqual( - Vision.gteLikelihood_(baseLikelihood, 'UNLIKELY'), - false - ); - - assert.strictEqual( - Vision.gteLikelihood_(baseLikelihood, 'POSSIBLE'), - false - ); - - assert.strictEqual( - Vision.gteLikelihood_(baseLikelihood, 'LIKELY'), - true - ); - - assert.strictEqual( - Vision.gteLikelihood_(baseLikelihood, 'VERY_LIKELY'), - true - ); - }); - }); - - function testWithoutOptions(type) { - return function(images, options, callback) { - assert.strictEqual(images, IMAGE); - assert.deepEqual(options, { - types: [type] - }); - callback(); // done() - }; - } - - function testWithOptions(type, options) { - return function(images, options_, callback) { - 
assert.strictEqual(images, IMAGE); - assert.notStrictEqual(options_, options); - assert.deepEqual(options_, extend({}, options, { - types: [type] - })); - callback(); // done() - }; - } -}); diff --git a/packages/vision/test/index.test.js b/packages/vision/test/index.test.js new file mode 100644 index 00000000000..197c058fe95 --- /dev/null +++ b/packages/vision/test/index.test.js @@ -0,0 +1,50 @@ +/** + * Copyright 2017 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +var assert = require('assert'); + +var Vision = require('../'); + + +describe('Vision', () => { + describe('v1', () => { + it('returns a v1 GAPIC augmented with helpers', () => { + var vision = Vision.v1(); + + // Assert that the GAPIC v1 methods are present on the object. + assert(vision.batchAnnotateImages instanceof Function); + + // Assert that the manual single-image helper method is present + // on the object. + assert(vision.annotateImage instanceof Function); + + // Assert that some of the expected single-feature helper methods + // are present on the object. + assert(vision.faceDetection instanceof Function); + assert(vision.landmarkDetection instanceof Function); + assert(vision.logoDetection instanceof Function); + assert(vision.labelDetection instanceof Function); + assert(vision.textDetection instanceof Function); + assert(vision.documentTextDetection instanceof Function); + assert(vision.safeSearchDetection instanceof Function); + assert(vision.imageProperties instanceof Function); + assert(vision.cropHints instanceof Function); + assert(vision.webDetection instanceof Function); + }); + }); +}); diff --git a/scripts/docs/parser.js b/scripts/docs/parser.js index 9d26f69cb68..e0b8b41b0a9 100644 --- a/scripts/docs/parser.js +++ b/scripts/docs/parser.js @@ -99,8 +99,13 @@ function detectCustomType(str) { // @TODO link-ability .replace(rProtoType, function(match, protoType) { return ` - ${protoType} + + ${protoType} `.trim(); diff --git a/test/docs.js b/test/docs.js index 295e8ca3aec..2f2428dfe10 100644 --- a/test/docs.js +++ b/test/docs.js @@ -221,6 +221,9 @@ modules.forEach(function(mod) { it('should run ' + name + ' examples without errors', function() { jshint(snippet, { + // Allow ES6 syntax + esversion: 6, + // in several snippets we give an example as to how to access // a property (like metadata) without doing anything with it // e.g. `list[0].metadata`