diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/geometry.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/geometry.proto
new file mode 100644
index 00000000000..60b71fd1ac6
--- /dev/null
+++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/geometry.proto
@@ -0,0 +1,72 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "GeometryProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+
+// A vertex represents a 2D point in the image.
+// NOTE: the vertex coordinates are in the same scale as the original image.
+message Vertex {
+ // X coordinate.
+ int32 x = 1;
+
+ // Y coordinate.
+ int32 y = 2;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
+
+// A bounding polygon for the detected image annotation.
+message BoundingPoly {
+ // The bounding polygon vertices.
+ repeated Vertex vertices = 1;
+
+ // The bounding polygon normalized vertices.
+ repeated NormalizedVertex normalized_vertices = 2;
+}
+
+// A 3D position in the image, used primarily for Face detection landmarks.
+// A valid Position must have both x and y coordinates.
+// The position coordinates are in the same scale as the original image.
+message Position {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+
+ // Z coordinate (or depth).
+ float z = 3;
+}
diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/image_annotator.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/image_annotator.proto
new file mode 100644
index 00000000000..3e4e8b614f9
--- /dev/null
+++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/image_annotator.proto
@@ -0,0 +1,902 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+import "google/cloud/vision/v1p4beta1/product_search.proto";
+import "google/cloud/vision/v1p4beta1/text_annotation.proto";
+import "google/cloud/vision/v1p4beta1/web_detection.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/type/color.proto";
+import "google/type/latlng.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ImageAnnotatorProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+
+// Service that performs Google Cloud Vision API detection tasks over client
+// images, such as face, landmark, logo, label, and text detection. The
+// ImageAnnotator service returns detected entities from the images.
+service ImageAnnotator {
+ // Run image detection and annotation for a batch of images.
+ rpc BatchAnnotateImages(BatchAnnotateImagesRequest) returns (BatchAnnotateImagesResponse) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/images:annotate"
+ body: "*"
+ };
+ }
+
+ // Service that performs image detection and annotation for a batch of files.
+ // Now only "application/pdf", "image/tiff" and "image/gif" are supported.
+ //
+ // This service will extract at most the first 10 frames (gif) or pages
+ // (pdf or tiff) from each file provided and perform detection and annotation
+ // for each image extracted.
+ rpc BatchAnnotateFiles(BatchAnnotateFilesRequest) returns (BatchAnnotateFilesResponse) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/files:annotate"
+ body: "*"
+ };
+ }
+
+ // Run asynchronous image detection and annotation for a list of images.
+ //
+ // Progress and results can be retrieved through the
+ // `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `OperationMetadata` (metadata).
+ // `Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).
+ //
+ // This service will write image annotation outputs to json files in customer
+ // GCS bucket, each json file containing BatchAnnotateImagesResponse proto.
+ rpc AsyncBatchAnnotateImages(AsyncBatchAnnotateImagesRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/images:asyncBatchAnnotate"
+ body: "*"
+ };
+ }
+
+ // Run asynchronous image detection and annotation for a list of generic
+ // files, such as PDF files, which may contain multiple pages and multiple
+ // images per page. Progress and results can be retrieved through the
+ // `google.longrunning.Operations` interface.
+ // `Operation.metadata` contains `OperationMetadata` (metadata).
+ // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
+ rpc AsyncBatchAnnotateFiles(AsyncBatchAnnotateFilesRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/files:asyncBatchAnnotate"
+ body: "*"
+ };
+ }
+}
+
+// The type of Google Cloud Vision API detection to perform, and the maximum
+// number of results to return for that type. Multiple `Feature` objects can
+// be specified in the `features` list.
+message Feature {
+ // Type of Google Cloud Vision API feature to be extracted.
+ enum Type {
+ // Unspecified feature type.
+ TYPE_UNSPECIFIED = 0;
+
+ // Run face detection.
+ FACE_DETECTION = 1;
+
+ // Run landmark detection.
+ LANDMARK_DETECTION = 2;
+
+ // Run logo detection.
+ LOGO_DETECTION = 3;
+
+ // Run label detection.
+ LABEL_DETECTION = 4;
+
+ // Run text detection / optical character recognition (OCR). Text detection
+ // is optimized for areas of text within a larger image; if the image is
+ // a document, use `DOCUMENT_TEXT_DETECTION` instead.
+ TEXT_DETECTION = 5;
+
+ // Run dense text document OCR. Takes precedence when both
+ // `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
+ DOCUMENT_TEXT_DETECTION = 11;
+
+ // Run Safe Search to detect potentially unsafe
+ // or undesirable content.
+ SAFE_SEARCH_DETECTION = 6;
+
+ // Compute a set of image properties, such as the
+ // image's dominant colors.
+ IMAGE_PROPERTIES = 7;
+
+ // Run crop hints.
+ CROP_HINTS = 9;
+
+ // Run web detection.
+ WEB_DETECTION = 10;
+
+ // Run Product Search.
+ PRODUCT_SEARCH = 12;
+
+ // Run localizer for object detection.
+ OBJECT_LOCALIZATION = 19;
+ }
+
+ // The feature type.
+ Type type = 1;
+
+ // Maximum number of results of this type. Does not apply to
+ // `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ int32 max_results = 2;
+
+ // Model to use for the feature.
+ // Supported values: "builtin/stable" (the default if unset) and
+ // "builtin/latest".
+ string model = 3;
+}
+
+// External image source (Google Cloud Storage or web URL image location).
+message ImageSource {
+ // **Use `image_uri` instead.**
+ //
+ // The Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
+ string gcs_image_uri = 1;
+
+ // The URI of the source image. Can be either:
+ //
+ // 1. A Google Cloud Storage URI of the form
+ // `gs://bucket_name/object_name`. Object versioning is not supported. See
+ // [Google Cloud Storage Request
+ // URIs](https://cloud.google.com/storage/docs/reference-uris) for more
+ // info.
+ //
+ // 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
+ // HTTP/HTTPS URLs, Google cannot guarantee that the request will be
+ // completed. Your request may fail if the specified host denies the
+ // request (e.g. due to request throttling or DOS prevention), or if Google
+ // throttles requests to the site for abuse prevention. You should not
+ // depend on externally-hosted images for production applications.
+ //
+ // When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ // precedence.
+ string image_uri = 2;
+}
+
+// Client image to perform Google Cloud Vision API tasks over.
+message Image {
+ // Image content, represented as a stream of bytes.
+ // Note: As with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ bytes content = 1;
+
+ // Google Cloud Storage image location, or publicly-accessible image
+ // URL. If both `content` and `source` are provided for an image, `content`
+ // takes precedence and is used to perform the image annotation request.
+ ImageSource source = 2;
+}
+
+// A bucketized representation of likelihood, which is intended to give clients
+// highly stable results across model upgrades.
+enum Likelihood {
+ // Unknown likelihood.
+ UNKNOWN = 0;
+
+ // It is very unlikely that the image belongs to the specified vertical.
+ VERY_UNLIKELY = 1;
+
+ // It is unlikely that the image belongs to the specified vertical.
+ UNLIKELY = 2;
+
+ // It is possible that the image belongs to the specified vertical.
+ POSSIBLE = 3;
+
+ // It is likely that the image belongs to the specified vertical.
+ LIKELY = 4;
+
+ // It is very likely that the image belongs to the specified vertical.
+ VERY_LIKELY = 5;
+}
+
+// A face annotation object contains the results of face detection.
+message FaceAnnotation {
+ // A face-specific landmark (for example, a face feature).
+ message Landmark {
+ // Face landmark (feature) type.
+ // Left and right are defined from the vantage of the viewer of the image
+ // without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ // typically, is the person's right eye.
+ enum Type {
+ // Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0;
+
+ // Left eye.
+ LEFT_EYE = 1;
+
+ // Right eye.
+ RIGHT_EYE = 2;
+
+ // Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3;
+
+ // Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4;
+
+ // Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5;
+
+ // Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6;
+
+ // Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7;
+
+ // Nose tip.
+ NOSE_TIP = 8;
+
+ // Upper lip.
+ UPPER_LIP = 9;
+
+ // Lower lip.
+ LOWER_LIP = 10;
+
+ // Mouth left.
+ MOUTH_LEFT = 11;
+
+ // Mouth right.
+ MOUTH_RIGHT = 12;
+
+ // Mouth center.
+ MOUTH_CENTER = 13;
+
+ // Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14;
+
+ // Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15;
+
+ // Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16;
+
+ // Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17;
+
+ // Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18;
+
+ // Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19;
+
+ // Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20;
+
+ // Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21;
+
+ // Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22;
+
+ // Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23;
+
+ // Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24;
+
+ // Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25;
+
+ // Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26;
+
+ // Left ear tragion.
+ LEFT_EAR_TRAGION = 27;
+
+ // Right ear tragion.
+ RIGHT_EAR_TRAGION = 28;
+
+ // Left eye pupil.
+ LEFT_EYE_PUPIL = 29;
+
+ // Right eye pupil.
+ RIGHT_EYE_PUPIL = 30;
+
+ // Forehead glabella.
+ FOREHEAD_GLABELLA = 31;
+
+ // Chin gnathion.
+ CHIN_GNATHION = 32;
+
+ // Chin left gonion.
+ CHIN_LEFT_GONION = 33;
+
+ // Chin right gonion.
+ CHIN_RIGHT_GONION = 34;
+ }
+
+ // Face landmark type.
+ Type type = 3;
+
+ // Face landmark position.
+ Position position = 4;
+ }
+
+ // The bounding polygon around the face. The coordinates of the bounding box
+ // are in the original image's scale.
+ // The bounding box is computed to "frame" the face in accordance with human
+ // expectations. It is based on the landmarker results.
+ // Note that one or more x and/or y coordinates may not be generated in the
+ // `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ // appears in the image to be annotated.
+ BoundingPoly bounding_poly = 1;
+
+ // The `fd_bounding_poly` bounding polygon is tighter than the
+ // `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ // is used to eliminate the face from any image analysis that detects the
+ // "amount of skin" visible in an image. It is not based on the
+ // landmarker results, only on the initial face detection, hence
+ // the fd (face detection) prefix.
+ BoundingPoly fd_bounding_poly = 2;
+
+ // Detected face landmarks.
+ repeated Landmark landmarks = 3;
+
+ // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ // of the face relative to the image vertical about the axis perpendicular to
+ // the face. Range [-180,180].
+ float roll_angle = 4;
+
+ // Yaw angle, which indicates the leftward/rightward angle that the face is
+ // pointing relative to the vertical plane perpendicular to the image. Range
+ // [-180,180].
+ float pan_angle = 5;
+
+ // Pitch angle, which indicates the upwards/downwards angle that the face is
+ // pointing relative to the image's horizontal plane. Range [-180,180].
+ float tilt_angle = 6;
+
+ // Detection confidence. Range [0, 1].
+ float detection_confidence = 7;
+
+ // Face landmarking confidence. Range [0, 1].
+ float landmarking_confidence = 8;
+
+ // Joy likelihood.
+ Likelihood joy_likelihood = 9;
+
+ // Sorrow likelihood.
+ Likelihood sorrow_likelihood = 10;
+
+ // Anger likelihood.
+ Likelihood anger_likelihood = 11;
+
+ // Surprise likelihood.
+ Likelihood surprise_likelihood = 12;
+
+ // Under-exposed likelihood.
+ Likelihood under_exposed_likelihood = 13;
+
+ // Blurred likelihood.
+ Likelihood blurred_likelihood = 14;
+
+ // Headwear likelihood.
+ Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+ // lat/long location coordinates.
+ google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+ // Name of the property.
+ string name = 1;
+
+ // Value of the property.
+ string value = 2;
+
+ // Value of numeric properties.
+ uint64 uint64_value = 3;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ string mid = 1;
+
+ // The language code for the locale in which the entity textual
+ // `description` is expressed.
+ string locale = 2;
+
+ // Entity textual description, expressed in its `locale` language.
+ string description = 3;
+
+ // Overall score of the result. Range [0, 1].
+ float score = 4;
+
+ // **Deprecated. Use `score` instead.**
+ // The accuracy of the entity detection in an image.
+ // For example, for an image in which the "Eiffel Tower" entity is detected,
+ // this field represents the confidence that there is a tower in the query
+ // image. Range [0, 1].
+ float confidence = 5 [deprecated = true];
+
+ // The relevancy of the ICA (Image Content Annotation) label to the
+ // image. For example, the relevancy of "tower" is likely higher to an image
+ // containing the detected "Eiffel Tower" than to an image containing a
+ // detected distant towering building, even though the confidence that
+ // there is a tower in each image may be the same. Range [0, 1].
+ float topicality = 6;
+
+ // Image region to which this entity belongs. Not produced
+ // for `LABEL_DETECTION` features.
+ BoundingPoly bounding_poly = 7;
+
+ // The location information for the detected entity. Multiple
+ // `LocationInfo` elements can be present because one location may
+ // indicate the location of the scene in the image, and another location
+ // may indicate the location of the place where the image was taken.
+ // Location information is usually present for landmarks.
+ repeated LocationInfo locations = 8;
+
+ // Some entities may have optional user-supplied `Property` (name/value)
+ // fields, such as a score or string that qualifies the entity.
+ repeated Property properties = 9;
+}
+
+// Set of detected objects with bounding boxes.
+message LocalizedObjectAnnotation {
+ // Object ID that should align with EntityAnnotation mid.
+ string mid = 1;
+
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+
+ // Object name, expressed in its `language_code` language.
+ string name = 3;
+
+ // Score of the result. Range [0, 1].
+ float score = 4;
+
+ // Image region to which this object belongs. This must be populated.
+ BoundingPoly bounding_poly = 5;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+ // Represents the adult content likelihood for the image. Adult content may
+ // contain elements such as nudity, pornographic images or cartoons, or
+ // sexual activities.
+ Likelihood adult = 1;
+
+ // Spoof likelihood. The likelihood that a modification
+ // was made to the image's canonical version to make it appear
+ // funny or offensive.
+ Likelihood spoof = 2;
+
+ // Likelihood that this is a medical image.
+ Likelihood medical = 3;
+
+ // Likelihood that this image contains violent content.
+ Likelihood violence = 4;
+
+ // Likelihood that the request image contains racy content. Racy content may
+ // include (but is not limited to) skimpy or sheer clothing, strategically
+ // covered nudity, lewd or provocative poses, or close-ups of sensitive
+ // body areas.
+ Likelihood racy = 9;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+ // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1;
+
+ // Max lat/long pair.
+ google.type.LatLng max_lat_lng = 2;
+}
+
+// Color information consists of RGB channels, score, and the fraction of
+// the image that the color occupies in the image.
+message ColorInfo {
+ // RGB components of the color.
+ google.type.Color color = 1;
+
+ // Image-specific score for this color. Value in range [0, 1].
+ float score = 2;
+
+ // The fraction of pixels the color occupies in the image.
+ // Value in range [0, 1].
+ float pixel_fraction = 3;
+}
+
+// Set of dominant colors and their corresponding scores.
+message DominantColorsAnnotation {
+ // RGB color values with their score and pixel fraction.
+ repeated ColorInfo colors = 1;
+}
+
+// Stores image properties, such as dominant colors.
+message ImageProperties {
+ // If present, dominant colors completed successfully.
+ DominantColorsAnnotation dominant_colors = 1;
+}
+
+// Single crop hint that is used to generate a new crop when serving an image.
+message CropHint {
+ // The bounding polygon for the crop region. The coordinates of the bounding
+ // box are in the original image's scale.
+ BoundingPoly bounding_poly = 1;
+
+ // Confidence of this being a salient region. Range [0, 1].
+ float confidence = 2;
+
+ // Fraction of importance of this salient region with respect to the original
+ // image.
+ float importance_fraction = 3;
+}
+
+// Set of crop hints that are used to generate new crops when serving images.
+message CropHintsAnnotation {
+ // Crop hint results.
+ repeated CropHint crop_hints = 1;
+}
+
+// Parameters for crop hints annotation request.
+message CropHintsParams {
+ // Aspect ratios in floats, representing the ratio of the width to the height
+ // of the image. For example, if the desired aspect ratio is 4/3, the
+ // corresponding float value should be 1.33333. If not specified, the
+ // best possible crop is returned. The number of provided aspect ratios is
+ // limited to a maximum of 16; any aspect ratios provided after the 16th are
+ // ignored.
+ repeated float aspect_ratios = 1;
+}
+
+// Parameters for web detection request.
+message WebDetectionParams {
+ // Whether to include results derived from the geo information in the image.
+ bool include_geo_results = 2;
+}
+
+// Image context and/or feature-specific parameters.
+message ImageContext {
+ // Not used.
+ LatLongRect lat_long_rect = 1;
+
+ // List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ // yields the best results since it enables automatic language detection. For
+ // languages based on the Latin alphabet, setting `language_hints` is not
+ // needed. In rare cases, when the language of the text in the image is known,
+ // setting a hint will help get better results (although it will be a
+ // significant hindrance if the hint is wrong). Text detection returns an
+ // error if one or more of the specified languages is not one of the
+ // [supported languages](/vision/docs/languages).
+ repeated string language_hints = 2;
+
+ // Parameters for crop hints annotation request.
+ CropHintsParams crop_hints_params = 4;
+
+ // Parameters for product search.
+ ProductSearchParams product_search_params = 5;
+
+ // Parameters for web detection.
+ WebDetectionParams web_detection_params = 6;
+}
+
+// Request for performing Google Cloud Vision API tasks over a user-provided
+// image, with user-requested features, and with context information.
+message AnnotateImageRequest {
+ // The image to be processed.
+ Image image = 1;
+
+ // Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image.
+ ImageContext image_context = 3;
+}
+
+// If an image was produced from a file (e.g. a PDF), this message gives
+// information about the source of that image.
+message ImageAnnotationContext {
+ // The URI of the file used to produce the image.
+ string uri = 1;
+
+ // If the file was a PDF or TIFF, this field gives the page number within
+ // the file used to produce the image.
+ int32 page_number = 2;
+}
+
+// Response to an image annotation request.
+message AnnotateImageResponse {
+ // If present, face detection has completed successfully.
+ repeated FaceAnnotation face_annotations = 1;
+
+ // If present, landmark detection has completed successfully.
+ repeated EntityAnnotation landmark_annotations = 2;
+
+ // If present, logo detection has completed successfully.
+ repeated EntityAnnotation logo_annotations = 3;
+
+ // If present, label detection has completed successfully.
+ repeated EntityAnnotation label_annotations = 4;
+
+ // If present, localized object detection has completed successfully.
+ // This will be sorted descending by confidence score.
+ repeated LocalizedObjectAnnotation localized_object_annotations = 22;
+
+ // If present, text (OCR) detection has completed successfully.
+ repeated EntityAnnotation text_annotations = 5;
+
+ // If present, text (OCR) detection or document (OCR) text detection has
+ // completed successfully.
+ // This annotation provides the structural hierarchy for the OCR detected
+ // text.
+ TextAnnotation full_text_annotation = 12;
+
+ // If present, safe-search annotation has completed successfully.
+ SafeSearchAnnotation safe_search_annotation = 6;
+
+ // If present, image properties were extracted successfully.
+ ImageProperties image_properties_annotation = 8;
+
+ // If present, crop hints have completed successfully.
+ CropHintsAnnotation crop_hints_annotation = 11;
+
+ // If present, web detection has completed successfully.
+ WebDetection web_detection = 13;
+
+ // If present, product search has completed successfully.
+ ProductSearchResults product_search_results = 14;
+
+ // If set, represents the error message for the operation.
+ // Note that filled-in image annotations are guaranteed to be
+ // correct, even when `error` is set.
+ google.rpc.Status error = 9;
+
+ // If present, contextual information is needed to understand where this image
+ // comes from.
+ ImageAnnotationContext context = 21;
+}
+
+// Response to a single file annotation request. A file may contain one or more
+// images, which individually have their own responses.
+message AnnotateFileResponse {
+ // Information about the file for which this response is generated.
+ InputConfig input_config = 1;
+
+ // Individual responses to images found within the file.
+ repeated AnnotateImageResponse responses = 2;
+
+ // This field gives the total number of pages in the file.
+ int32 total_pages = 3;
+}
+
+// Multiple image annotation requests are batched into a single service call.
+message BatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+}
+
+// Response to a batch image annotation request.
+message BatchAnnotateImagesResponse {
+ // Individual responses to image annotation requests within the batch.
+ repeated AnnotateImageResponse responses = 1;
+}
+
+// A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
+message AnnotateFileRequest {
+ // Required. Information about the input file.
+ InputConfig input_config = 1;
+
+ // Required. Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image(s) in the file.
+ ImageContext image_context = 3;
+
+ // Pages of the file to perform image annotation.
+ //
+ // Pages starts from 1, we assume the first page of the file is page 1.
+ // At most 5 pages are supported per request. Pages can be negative.
+ //
+ // Page 1 means the first page.
+ // Page 2 means the second page.
+ // Page -1 means the last page.
+ // Page -2 means the second to the last page.
+ //
+ // If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
+ //
+ // If this field is empty, by default the service performs image annotation
+ // for the first 5 pages of the file.
+ repeated int32 pages = 4;
+}
+
+// A list of requests to annotate files using the BatchAnnotateFiles API.
+message BatchAnnotateFilesRequest {
+ // The list of file annotation requests. Right now we support only one
+ // AnnotateFileRequest in BatchAnnotateFilesRequest.
+ repeated AnnotateFileRequest requests = 1;
+}
+
+// A list of file annotation responses.
+message BatchAnnotateFilesResponse {
+ // The list of file annotation responses, each response corresponding to each
+ // AnnotateFileRequest in BatchAnnotateFilesRequest.
+ repeated AnnotateFileResponse responses = 1;
+}
+
+// An offline file annotation request.
+message AsyncAnnotateFileRequest {
+ // Required. Information about the input file.
+ InputConfig input_config = 1;
+
+ // Required. Requested features.
+ repeated Feature features = 2;
+
+ // Additional context that may accompany the image(s) in the file.
+ ImageContext image_context = 3;
+
+ // Required. The desired output location and metadata (e.g. format).
+ OutputConfig output_config = 4;
+}
+
+// The response for a single offline file annotation request.
+message AsyncAnnotateFileResponse {
+ // The output location and metadata from AsyncAnnotateFileRequest.
+ OutputConfig output_config = 1;
+}
+
+// Request for async image annotation for a list of images.
+message AsyncBatchAnnotateImagesRequest {
+ // Individual image annotation requests for this batch.
+ repeated AnnotateImageRequest requests = 1;
+
+ // Required. The desired output location and metadata (e.g. format).
+ OutputConfig output_config = 2;
+}
+
+// Response to an async batch image annotation request.
+message AsyncBatchAnnotateImagesResponse {
+ // The output location and metadata from AsyncBatchAnnotateImagesRequest.
+ OutputConfig output_config = 1;
+}
+
+// Multiple async file annotation requests are batched into a single service
+// call.
+message AsyncBatchAnnotateFilesRequest {
+ // Individual async file annotation requests for this batch.
+ repeated AsyncAnnotateFileRequest requests = 1;
+}
+
+// Response to an async batch file annotation request.
+message AsyncBatchAnnotateFilesResponse {
+ // The list of file annotation responses, one for each request in
+ // AsyncBatchAnnotateFilesRequest.
+ repeated AsyncAnnotateFileResponse responses = 1;
+}
+
+// The desired input location and metadata.
+message InputConfig {
+ // The Google Cloud Storage location to read the input from.
+ GcsSource gcs_source = 1;
+
+ // File content, represented as a stream of bytes.
+ // Note: As with all `bytes` fields, protobuffers use a pure binary
+ // representation, whereas JSON representations use base64.
+ //
+ // Currently, this field only works for BatchAnnotateFiles requests. It does
+ // not work for AsyncBatchAnnotateFiles requests.
+ bytes content = 3;
+
+ // The type of the file. Currently only "application/pdf" and "image/tiff"
+ // are supported. Wildcards are not supported.
+ string mime_type = 2;
+}
+
+// The desired output location and metadata.
+message OutputConfig {
+ // The Google Cloud Storage location to write the output(s) to.
+ GcsDestination gcs_destination = 1;
+
+ // The max number of response protos to put into each output JSON file on
+ // Google Cloud Storage.
+ // The valid range is [1, 100]. If not specified, the default value is 20.
+ //
+ // For example, for one pdf file with 100 pages, 100 response protos will
+ // be generated. If `batch_size` = 20, then 5 json files each
+ // containing 20 response protos will be written under the prefix
+ // `gcs_destination`.`uri`.
+ //
+ // Currently, batch_size only applies to GcsDestination, with potential future
+ // support for other output configurations.
+ int32 batch_size = 2;
+}
+
+// The Google Cloud Storage location where the input will be read from.
+message GcsSource {
+ // Google Cloud Storage URI for the input file. This must only be a
+ // Google Cloud Storage object. Wildcards are not currently supported.
+ string uri = 1;
+}
+
+// The Google Cloud Storage location where the output will be written to.
+message GcsDestination {
+ // Google Cloud Storage URI where the results will be stored. Results will
+ // be in JSON format and preceded by its corresponding input URI. This field
+ // can either represent a single file, or a prefix for multiple outputs.
+ // Prefixes must end in a `/`.
+ //
+ // Examples:
+ //
+ // * File: gs://bucket-name/filename.json
+ // * Prefix: gs://bucket-name/prefix/here/
+ // * File: gs://bucket-name/prefix/here
+ //
+ // If multiple outputs, each response is still AnnotateFileResponse, each of
+ // which contains some subset of the full list of AnnotateImageResponse.
+ // Multiple outputs can happen if, for example, the output JSON is too large
+ // and overflows into multiple sharded files.
+ string uri = 1;
+}
+
+// Contains metadata for the BatchAnnotateImages operation.
+message OperationMetadata {
+ // Batch operation states.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is received.
+ CREATED = 1;
+
+ // Request is actively being processed.
+ RUNNING = 2;
+
+ // The batch processing is done.
+ DONE = 3;
+
+ // The batch processing was cancelled.
+ CANCELLED = 4;
+ }
+
+ // Current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was received.
+ google.protobuf.Timestamp create_time = 5;
+
+ // The time when the operation result was last updated.
+ google.protobuf.Timestamp update_time = 6;
+}
diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/product_search.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/product_search.proto
new file mode 100644
index 00000000000..3ec6767b5b5
--- /dev/null
+++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/product_search.proto
@@ -0,0 +1,97 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+import "google/cloud/vision/v1p4beta1/product_search_service.proto";
+import "google/protobuf/timestamp.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+
+// Parameters for a product search request.
+message ProductSearchParams {
+ // The bounding polygon around the area of interest in the image.
+ // Optional. If it is not specified, system discretion will be applied.
+ BoundingPoly bounding_poly = 9;
+
+ // The resource name of a [ProductSet][google.cloud.vision.v1p4beta1.ProductSet] to be searched for similar images.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ string product_set = 6;
+
+ // The list of product categories to search in. Currently, we only consider
+ // the first category, and either "homegoods", "apparel", or "toys" should be
+ // specified.
+ repeated string product_categories = 7;
+
+ // The filtering expression. This can be used to restrict search results based
+ // on Product labels. We currently support an AND of OR of key-value
+ // expressions, where each expression within an OR must have the same key.
+ //
+ // For example, "(color = red OR color = blue) AND brand = Google" is
+ // acceptable, but not "(color = red OR brand = Google)" or "color: red".
+ string filter = 8;
+}
+
+// Results for a product search request.
+message ProductSearchResults {
+ // Information about a product.
+ message Result {
+ // The Product.
+ Product product = 1;
+
+ // A confidence level on the match, ranging from 0 (no confidence) to
+ // 1 (full confidence).
+ float score = 2;
+
+ // The resource name of the image from the product that is the closest match
+ // to the query.
+ string image = 3;
+ }
+
+ // Information about the products similar to a single product in a query
+ // image.
+ message GroupedResult {
+ // The bounding polygon around the product detected in the query image.
+ BoundingPoly bounding_poly = 1;
+
+ // List of results, one for each product match.
+ repeated Result results = 2;
+ }
+
+ // Timestamp of the index which provided these results. Changes made after
+ // this time are not reflected in the current results.
+ google.protobuf.Timestamp index_time = 2;
+
+ // List of results, one for each product match.
+ repeated Result results = 5;
+
+ // List of results grouped by products detected in the query image. Each entry
+ // corresponds to one bounding polygon in the query image, and contains the
+ // matching products specific to that region. There may be duplicate product
+ // matches in the union of all the per-product results.
+ repeated GroupedResult product_grouped_results = 6;
+}
diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/product_search_service.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/product_search_service.proto
new file mode 100644
index 00000000000..1a20351b63f
--- /dev/null
+++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/product_search_service.proto
@@ -0,0 +1,835 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "ProductSearchServiceProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+
+// Manages Products and ProductSets of reference images for use in product
+// search. It uses the following resource model:
+//
+// - The API has a collection of [ProductSet][google.cloud.vision.v1p4beta1.ProductSet] resources, named
+// `projects/*/locations/*/productSets/*`, which acts as a way to put different
+// products into groups to limit identification.
+//
+// In parallel,
+//
+// - The API has a collection of [Product][google.cloud.vision.v1p4beta1.Product] resources, named
+// `projects/*/locations/*/products/*`
+//
+// - Each [Product][google.cloud.vision.v1p4beta1.Product] has a collection of [ReferenceImage][google.cloud.vision.v1p4beta1.ReferenceImage] resources, named
+// `projects/*/locations/*/products/*/referenceImages/*`
+service ProductSearch {
+ // Creates and returns a new ProductSet resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing, or is longer than
+ // 4096 characters.
+ rpc CreateProductSet(CreateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*}/productSets"
+ body: "product_set"
+ };
+ }
+
+ // Lists ProductSets in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100, or less
+ // than 1.
+ rpc ListProductSets(ListProductSetsRequest) returns (ListProductSetsResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{parent=projects/*/locations/*}/productSets"
+ };
+ }
+
+ // Gets information associated with a ProductSet.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc GetProductSet(GetProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Makes changes to a ProductSet resource.
+ // Only display_name can be updated currently.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but
+ // missing from the request or longer than 4096 characters.
+ rpc UpdateProductSet(UpdateProductSetRequest) returns (ProductSet) {
+ option (google.api.http) = {
+ patch: "/v1p4beta1/{product_set.name=projects/*/locations/*/productSets/*}"
+ body: "product_set"
+ };
+ }
+
+ // Permanently deletes a ProductSet. Products and ReferenceImages in the
+ // ProductSet are not deleted.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the ProductSet does not exist.
+ rpc DeleteProductSet(DeleteProductSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}"
+ };
+ }
+
+ // Creates and returns a new product resource.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if display_name is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if description is longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is missing or invalid.
+ rpc CreateProduct(CreateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*}/products"
+ body: "product"
+ };
+ }
+
+ // Lists products in an unspecified order.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProducts(ListProductsRequest) returns (ListProductsResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{parent=projects/*/locations/*}/products"
+ };
+ }
+
+ // Gets information associated with a Product.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ rpc GetProduct(GetProductRequest) returns (Product) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Makes changes to a Product resource.
+ // Only the `display_name`, `description`, and `labels` fields can be updated
+ // right now.
+ //
+ // If labels are updated, the change will not be reflected in queries until
+ // the next index time.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product does not exist.
+ // * Returns INVALID_ARGUMENT if display_name is present in update_mask but is
+ // missing from the request or longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if description is present in update_mask but is
+ // longer than 4096 characters.
+ // * Returns INVALID_ARGUMENT if product_category is present in update_mask.
+ rpc UpdateProduct(UpdateProductRequest) returns (Product) {
+ option (google.api.http) = {
+ patch: "/v1p4beta1/{product.name=projects/*/locations/*/products/*}"
+ body: "product"
+ };
+ }
+
+ // Permanently deletes a product and its reference images.
+ //
+ // Metadata of the product and all its images will be deleted right away, but
+ // search queries against ProductSets containing the product may still work
+ // until all related caches are refreshed.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the product does not exist.
+ rpc DeleteProduct(DeleteProductRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p4beta1/{name=projects/*/locations/*/products/*}"
+ };
+ }
+
+ // Creates and returns a new ReferenceImage resource.
+ //
+ // The `bounding_poly` field is optional. If `bounding_poly` is not specified,
+ // the system will try to detect regions of interest in the image that are
+ // compatible with the product_category on the parent product. If it is
+ // specified, detection is ALWAYS skipped. The system converts polygons into
+ // non-rotated rectangles.
+ //
+ // Note that the pipeline will resize the image if the image resolution is too
+ // large to process (above 50MP).
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if the image_uri is missing or longer than 4096
+ // characters.
+ // * Returns INVALID_ARGUMENT if the product does not exist.
+ // * Returns INVALID_ARGUMENT if bounding_poly is not provided, and nothing
+ // compatible with the parent product's product_category is detected.
+ // * Returns INVALID_ARGUMENT if bounding_poly contains more than 10 polygons.
+ rpc CreateReferenceImage(CreateReferenceImageRequest) returns (ReferenceImage) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ body: "reference_image"
+ };
+ }
+
+ // Permanently deletes a reference image.
+ //
+ // The image metadata will be deleted right away, but search queries
+ // against ProductSets containing the image may still work until all related
+ // caches are refreshed.
+ //
+ // The actual image files are not deleted from Google Cloud Storage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the reference image does not exist.
+ rpc DeleteReferenceImage(DeleteReferenceImageRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1p4beta1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Lists reference images.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the parent product does not exist.
+ // * Returns INVALID_ARGUMENT if the page_size is greater than 100, or less
+ // than 1.
+ rpc ListReferenceImages(ListReferenceImagesRequest) returns (ListReferenceImagesResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{parent=projects/*/locations/*/products/*}/referenceImages"
+ };
+ }
+
+ // Gets information associated with a ReferenceImage.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the specified image does not exist.
+ rpc GetReferenceImage(GetReferenceImageRequest) returns (ReferenceImage) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/products/*/referenceImages/*}"
+ };
+ }
+
+ // Adds a Product to the specified ProductSet. If the Product is already
+ // present, no change is made.
+ //
+ // One Product can be added to at most 100 ProductSets.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product or the ProductSet doesn't exist.
+ rpc AddProductToProductSet(AddProductToProductSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}:addProduct"
+ body: "*"
+ };
+ }
+
+ // Removes a Product from the specified ProductSet.
+ //
+ // Possible errors:
+ //
+ // * Returns NOT_FOUND if the Product is not found under the ProductSet.
+ rpc RemoveProductFromProductSet(RemoveProductFromProductSetRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}:removeProduct"
+ body: "*"
+ };
+ }
+
+ // Lists the Products in a ProductSet, in an unspecified order. If the
+ // ProductSet does not exist, the products field of the response will be
+ // empty.
+ //
+ // Possible errors:
+ //
+ // * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ rpc ListProductsInProductSet(ListProductsInProductSetRequest) returns (ListProductsInProductSetResponse) {
+ option (google.api.http) = {
+ get: "/v1p4beta1/{name=projects/*/locations/*/productSets/*}/products"
+ };
+ }
+
+ // Asynchronous API that imports a list of reference images to specified
+ // product sets based on a list of image information.
+ //
+ // The [google.longrunning.Operation][google.longrunning.Operation] API can be used to keep track of the
+ // progress and results of the request.
+ // `Operation.metadata` contains `BatchOperationMetadata`. (progress)
+ // `Operation.response` contains `ImportProductSetsResponse`. (results)
+ //
+ // The input source of this method is a csv file on Google Cloud Storage.
+ // For the format of the csv file please see
+ // [ImportProductSetsGcsSource.csv_file_uri][google.cloud.vision.v1p4beta1.ImportProductSetsGcsSource.csv_file_uri].
+ rpc ImportProductSets(ImportProductSetsRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1p4beta1/{parent=projects/*/locations/*}/productSets:import"
+ body: "*"
+ };
+ }
+}
+
+// A Product contains ReferenceImages.
+message Product {
+ // A product label represented as a key-value pair.
+ message KeyValue {
+ // The key of the label attached to the product. Cannot be empty and cannot
+ // exceed 128 bytes.
+ string key = 1;
+
+ // The value of the label attached to the product. Cannot be empty and
+ // cannot exceed 128 bytes.
+ string value = 2;
+ }
+
+ // The resource name of the product.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ //
+ // This field is ignored when creating a product.
+ string name = 1;
+
+ // The user-provided name for this Product. Must not be empty. Must be at most
+ // 4096 characters long.
+ string display_name = 2;
+
+ // User-provided metadata to be stored with this product. Must be at most 4096
+ // characters long.
+ string description = 3;
+
+ // The category for the product identified by the reference image. This should
+ // be either "homegoods", "apparel", or "toys".
+ //
+ // This field is immutable.
+ string product_category = 4;
+
+ // Key-value pairs that can be attached to a product. At query time,
+ // constraints can be specified based on the product_labels.
+ //
+ // Note that integer values can be provided as strings, e.g. "1199". Only
+ // strings with integer values can match a range-based restriction which is
+ // to be supported soon.
+ //
+ // Multiple values can be assigned to the same key. One product may have up to
+ // 100 product_labels.
+ repeated KeyValue product_labels = 5;
+}
+
+// A ProductSet contains Products. A ProductSet can contain a maximum of 1
+// million reference images. If the limit is exceeded, periodic indexing will
+// fail.
+message ProductSet {
+ // The resource name of the ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ //
+ // This field is ignored when creating a ProductSet.
+ string name = 1;
+
+ // The user-provided name for this ProductSet. Must not be empty. Must be at
+ // most 4096 characters long.
+ string display_name = 2;
+
+ // Output only. The time at which this ProductSet was last indexed. Query
+ // results will reflect all updates before this time. If this ProductSet has
+ // never been indexed, this timestamp is the default value
+ // "1970-01-01T00:00:00Z".
+ //
+ // This field is ignored when creating a ProductSet.
+ google.protobuf.Timestamp index_time = 3;
+
+ // Output only. If there was an error with indexing the product set, the field
+ // is populated.
+ //
+ // This field is ignored when creating a ProductSet.
+ google.rpc.Status index_error = 4;
+}
+
+// A `ReferenceImage` represents a product image and its associated metadata,
+// such as bounding boxes.
+message ReferenceImage {
+ // The resource name of the reference image.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ //
+ // This field is ignored when creating a reference image.
+ string name = 1;
+
+ // The Google Cloud Storage URI of the reference image.
+ //
+ // The URI must start with `gs://`.
+ //
+ // Required.
+ string uri = 2;
+
+ // Bounding polygons around the areas of interest in the reference image.
+ // Optional. If this field is empty, the system will try to detect regions of
+ // interest. At most 10 bounding polygons will be used.
+ //
+ // The provided shape is converted into a non-rotated rectangle. Once
+ // converted, the small edge of the rectangle must be greater than or equal
+ // to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; 1:5
+ // is not).
+ repeated BoundingPoly bounding_polys = 3;
+}
+
+// Request message for the `CreateProduct` method.
+message CreateProductRequest {
+ // The project in which the Product should be created.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The product to create.
+ Product product = 2;
+
+ // A user-supplied resource id for this Product. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_id = 3;
+}
+
+// Request message for the `ListProducts` method.
+message ListProductsRequest {
+ // The project OR ProductSet from which Products should be listed.
+ //
+ // Format:
+ // `projects/PROJECT_ID/locations/LOC_ID`
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProducts` method.
+message ListProductsResponse {
+ // List of products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProduct` method.
+message GetProductRequest {
+ // Resource name of the Product to get.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProduct` method.
+message UpdateProductRequest {
+ // The Product resource which replaces the one on the server.
+ // product.name is immutable.
+ Product product = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields
+ // to update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask paths include `product_labels`, `display_name`, and
+ // `description`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProduct` method.
+message DeleteProductRequest {
+ // Resource name of product to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateProductSet` method.
+message CreateProductSetRequest {
+ // The project in which the ProductSet should be created.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The ProductSet to create.
+ ProductSet product_set = 2;
+
+ // A user-supplied resource id for this ProductSet. If set, the server will
+ // attempt to use this value as the resource id. If it is already in use, an
+ // error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ // long. It cannot contain the character `/`.
+ string product_set_id = 3;
+}
+
+// Request message for the `ListProductSets` method.
+message ListProductSetsRequest {
+ // The project from which ProductSets should be listed.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductSets` method.
+message ListProductSetsResponse {
+ // List of ProductSets.
+ repeated ProductSet product_sets = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// Request message for the `GetProductSet` method.
+message GetProductSetRequest {
+ // Resource name of the ProductSet to get.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `UpdateProductSet` method.
+message UpdateProductSetRequest {
+ // The ProductSet resource which replaces the one on the server.
+ ProductSet product_set = 1;
+
+ // The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
+ // update.
+ // If update_mask isn't specified, all mutable fields are to be updated.
+ // Valid mask path is `display_name`.
+ google.protobuf.FieldMask update_mask = 2;
+}
+
+// Request message for the `DeleteProductSet` method.
+message DeleteProductSetRequest {
+ // Resource name of the ProductSet to delete.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+}
+
+// Request message for the `CreateReferenceImage` method.
+message CreateReferenceImageRequest {
+ // Resource name of the product in which to create the reference image.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The reference image to create.
+ // If an image ID is specified, it is ignored.
+ ReferenceImage reference_image = 2;
+
+ // A user-supplied resource id for the ReferenceImage to be added. If set,
+ // the server will attempt to use this value as the resource id. If it is
+ // already in use, an error is returned with code ALREADY_EXISTS. Must be at
+ // most 128 characters long. It cannot contain the character `/`.
+ string reference_image_id = 3;
+}
+
+// Request message for the `ListReferenceImages` method.
+message ListReferenceImagesRequest {
+ // Resource name of the product containing the reference images.
+ //
+ // Format is
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ string parent = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // A token identifying a page of results to be returned. This is the value
+ // of `nextPageToken` returned in a previous reference image list request.
+ //
+ // Defaults to the first page if not specified.
+ string page_token = 3;
+}
+
+// Response message for the `ListReferenceImages` method.
+message ListReferenceImagesResponse {
+ // The list of reference images.
+ repeated ReferenceImage reference_images = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string next_page_token = 3;
+}
+
+// Request message for the `GetReferenceImage` method.
+message GetReferenceImageRequest {
+ // The resource name of the ReferenceImage to get.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ string name = 1;
+}
+
+// Request message for the `DeleteReferenceImage` method.
+message DeleteReferenceImageRequest {
+ // The resource name of the reference image to delete.
+ //
+ // Format is:
+ //
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`
+ string name = 1;
+}
+
+// Request message for the `AddProductToProductSet` method.
+message AddProductToProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be added to this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `RemoveProductFromProductSet` method.
+message RemoveProductFromProductSetRequest {
+ // The resource name for the ProductSet to modify.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The resource name for the Product to be removed from this ProductSet.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ string product = 2;
+}
+
+// Request message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetRequest {
+ // The ProductSet resource for which to retrieve Products.
+ //
+ // Format is:
+ // `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ string name = 1;
+
+ // The maximum number of items to return. Default 10, maximum 100.
+ int32 page_size = 2;
+
+ // The next_page_token returned from a previous List request, if any.
+ string page_token = 3;
+}
+
+// Response message for the `ListProductsInProductSet` method.
+message ListProductsInProductSetResponse {
+ // The list of Products.
+ repeated Product products = 1;
+
+ // Token to retrieve the next page of results, or empty if there are no more
+ // results in the list.
+ string next_page_token = 2;
+}
+
+// The Google Cloud Storage location for a csv file which preserves a list of
+// ImportProductSetRequests in each line.
+message ImportProductSetsGcsSource {
+ // The Google Cloud Storage URI of the input csv file.
+ //
+ // The URI must start with `gs://`.
+ //
+ // The format of the input csv file should be one image per line.
+ // In each line, there are 8 columns.
+ //
+ // 1. image-uri
+ // 2. image-id
+ // 3. product-set-id
+ // 4. product-id
+ // 5. product-category
+ // 6. product-display-name
+ // 7. labels
+ // 8. bounding-poly
+ //
+ // The `image-uri`, `product-set-id`, `product-id`, and `product-category`
+ // columns are required. All other columns are optional.
+ //
+ // If the `ProductSet` or `Product` specified by the `product-set-id` and
+ // `product-id` values does not exist, then the system will create a new
+ // `ProductSet` or `Product` for the image. In this case, the
+ // `product-display-name` column refers to
+ // [display_name][google.cloud.vision.v1p4beta1.Product.display_name], the
+ // `product-category` column refers to
+ // [product_category][google.cloud.vision.v1p4beta1.Product.product_category], and the
+ // `labels` column refers to [product_labels][google.cloud.vision.v1p4beta1.Product.product_labels].
+ //
+ // The `image-id` column is optional but must be unique if provided. If it is
+ // empty, the system will automatically assign a unique id to the image.
+ //
+ // The `product-display-name` column is optional. If it is empty, the system
+ // sets the [display_name][google.cloud.vision.v1p4beta1.Product.display_name] field for the product to a
+ // space (" "). You can update the `display_name` later by using the API.
+ //
+ // If a `Product` with the specified `product-id` already exists, then the
+ // system ignores the `product-display-name`, `product-category`, and `labels`
+ // columns.
+ //
+ // The `labels` column (optional) is a line containing a list of
+ // comma-separated key-value pairs, in the following format:
+ //
+ // "key_1=value_1,key_2=value_2,...,key_n=value_n"
+ //
+ // The `bounding-poly` column (optional) identifies one region of
+ // interest from the image in the same manner as `CreateReferenceImage`. If
+ // you do not specify the `bounding-poly` column, then the system will try to
+ // detect regions of interest automatically.
+ //
+ // At most one `bounding-poly` column is allowed per line. If the image
+ // contains multiple regions of interest, add a line to the CSV file that
+ // includes the same product information, and the `bounding-poly` values for
+ // each region of interest.
+ //
+ // The `bounding-poly` column must contain an even number of comma-separated
+ // numbers, in the format "p1_x,p1_y,p2_x,p2_y,...,pn_x,pn_y". Use
+ // non-negative integers for absolute bounding polygons, and float values
+ // in [0, 1] for normalized bounding polygons.
+ //
+ // The system will resize the image if the image resolution is too
+ // large to process (larger than 20MP).
+ string csv_file_uri = 1;
+}
+
+// The input content for the `ImportProductSets` method.
+message ImportProductSetsInputConfig {
+ // The source of the input.
+ oneof source {
+ // The Google Cloud Storage location for a csv file which preserves a list
+ // of ImportProductSetRequests in each line.
+ ImportProductSetsGcsSource gcs_source = 1;
+ }
+}
+
+// Request message for the `ImportProductSets` method.
+message ImportProductSetsRequest {
+ // The project in which the ProductSets should be imported.
+ //
+ // Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ string parent = 1;
+
+ // The input content for the list of requests.
+ ImportProductSetsInputConfig input_config = 2;
+}
+
+// Response message for the `ImportProductSets` method.
+//
+// This message is returned by the
+// [google.longrunning.Operations.GetOperation][google.longrunning.Operations.GetOperation] method in the returned
+// [google.longrunning.Operation.response][google.longrunning.Operation.response] field.
+message ImportProductSetsResponse {
+ // The list of reference_images that are imported successfully.
+ repeated ReferenceImage reference_images = 1;
+
+ // The rpc status for each ImportProductSet request, including both successes
+ // and errors.
+ //
+ // The number of statuses here matches the number of lines in the csv file,
+ // and statuses[i] stores the success or failure status of processing the i-th
+ // line of the csv, starting from line 0.
+ repeated google.rpc.Status statuses = 2;
+}
+
+// Metadata for the batch operations such as the current state.
+//
+// This is included in the `metadata` field of the `Operation` returned by the
+// `GetOperation` call of the `google::longrunning::Operations` service.
+message BatchOperationMetadata {
+ // Enumerates the possible states that the batch request can be in.
+ enum State {
+ // Invalid.
+ STATE_UNSPECIFIED = 0;
+
+ // Request is actively being processed.
+ PROCESSING = 1;
+
+ // The request is done and at least one item has been successfully
+ // processed.
+ SUCCESSFUL = 2;
+
+ // The request is done and no item has been successfully processed.
+ FAILED = 3;
+
+ // The request is done after the longrunning.Operations.CancelOperation has
+ // been called by the user. Any records that were processed before the
+ // cancel command are output as specified in the request.
+ CANCELLED = 4;
+ }
+
+ // The current state of the batch operation.
+ State state = 1;
+
+ // The time when the batch request was submitted to the server.
+ google.protobuf.Timestamp submit_time = 2;
+
+ // The time when the batch request is finished and
+ // [google.longrunning.Operation.done][google.longrunning.Operation.done] is set to true.
+ google.protobuf.Timestamp end_time = 3;
+}
diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/text_annotation.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/text_annotation.proto
new file mode 100644
index 00000000000..095d96fa91e
--- /dev/null
+++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/text_annotation.proto
@@ -0,0 +1,261 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p4beta1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have their own
+// properties. Properties describe detected languages, breaks etc.. Please refer
+// to the [TextAnnotation.TextProperty][google.cloud.vision.v1p4beta1.TextAnnotation.TextProperty] message definition below for more
+// detail.
+message TextAnnotation {
+ // Detected language for a structural component.
+ message DetectedLanguage {
+ // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ // information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 1;
+
+ // Confidence of detected language. Range [0, 1].
+ float confidence = 2;
+ }
+
+ // Detected start or end of a structural component.
+ message DetectedBreak {
+ // Enum to denote the type of break found. New line, space etc.
+ enum BreakType {
+ // Unknown break label type.
+ UNKNOWN = 0;
+
+ // Regular space.
+ SPACE = 1;
+
+ // Sure space (very wide).
+ SURE_SPACE = 2;
+
+ // Line-wrapping break.
+ EOL_SURE_SPACE = 3;
+
+ // End-line hyphen that is not present in text; does not co-occur with
+ // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ HYPHEN = 4;
+
+ // Line break that ends a paragraph.
+ LINE_BREAK = 5;
+ }
+
+ // Detected break type.
+ BreakType type = 1;
+
+ // True if break prepends the element.
+ bool is_prefix = 2;
+ }
+
+ // Additional information detected on the structural component.
+ message TextProperty {
+ // A list of detected languages together with confidence.
+ repeated DetectedLanguage detected_languages = 1;
+
+ // Detected start or end of a text segment.
+ DetectedBreak detected_break = 2;
+ }
+
+ // List of pages detected by OCR.
+ repeated Page pages = 1;
+
+ // UTF-8 text detected on the pages.
+ string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+ // Additional information detected on the page.
+ TextAnnotation.TextProperty property = 1;
+
+ // Page width. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 width = 2;
+
+ // Page height. For PDFs the unit is points. For images (including
+ // TIFFs) the unit is pixels.
+ int32 height = 3;
+
+ // List of blocks of text, images etc on this page.
+ repeated Block blocks = 4;
+
+ // Confidence of the OCR results on the page. Range [0, 1].
+ float confidence = 5;
+}
+
+// Logical element on the page.
+message Block {
+ // Type of a block (text, image etc) as identified by OCR.
+ enum BlockType {
+ // Unknown block type.
+ UNKNOWN = 0;
+
+ // Regular text block.
+ TEXT = 1;
+
+ // Table block.
+ TABLE = 2;
+
+ // Image block.
+ PICTURE = 3;
+
+ // Horizontal/vertical line box.
+ RULER = 4;
+
+ // Barcode block.
+ BARCODE = 5;
+ }
+
+ // Additional information detected for the block.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the block.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ //
+ // * when the text is horizontal it might look like:
+ //
+ // 0----1
+ // | |
+ // 3----2
+ //
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ //
+ // 2----3
+ // | |
+ // 1----0
+ //
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of paragraphs in this block (if this block is of type text).
+ repeated Paragraph paragraphs = 3;
+
+ // Detected block type (text, image etc) for this block.
+ BlockType block_type = 4;
+
+ // Confidence of the OCR results on the block. Range [0, 1].
+ float confidence = 5;
+}
+
+// Structural unit of text representing a number of words in certain order.
+message Paragraph {
+ // Additional information detected for the paragraph.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the paragraph.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of words in this paragraph.
+ repeated Word words = 3;
+
+ // Confidence of the OCR results for the paragraph. Range [0, 1].
+ float confidence = 4;
+}
+
+// A word representation.
+message Word {
+ // Additional information detected for the word.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the word.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // List of symbols in the word.
+ // The order of the symbols follows the natural reading order.
+ repeated Symbol symbols = 3;
+
+ // Confidence of the OCR results for the word. Range [0, 1].
+ float confidence = 4;
+}
+
+// A single symbol representation.
+message Symbol {
+ // Additional information detected for the symbol.
+ TextAnnotation.TextProperty property = 1;
+
+ // The bounding box for the symbol.
+ // The vertices are in the order of top-left, top-right, bottom-right,
+ // bottom-left. When a rotation of the bounding box is detected the rotation
+ // is represented as around the top-left corner as defined when the text is
+ // read in the 'natural' orientation.
+ // For example:
+ // * when the text is horizontal it might look like:
+ // 0----1
+ // | |
+ // 3----2
+ // * when it's rotated 180 degrees around the top-left corner it becomes:
+ // 2----3
+ // | |
+ // 1----0
+ // and the vertex order will still be (0, 1, 2, 3).
+ BoundingPoly bounding_box = 2;
+
+ // The actual UTF-8 representation of the symbol.
+ string text = 3;
+
+ // Confidence of the OCR results for the symbol. Range [0, 1].
+ float confidence = 4;
+}
diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/web_detection.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/web_detection.proto
new file mode 100644
index 00000000000..e3f24f52c91
--- /dev/null
+++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p4beta1/web_detection.proto
@@ -0,0 +1,108 @@
+// Copyright 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p4beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1p4beta1";
+option objc_class_prefix = "GCVN";
+
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+ // Entity deduced from similar images on the Internet.
+ message WebEntity {
+ // Opaque entity ID.
+ string entity_id = 1;
+
+ // Overall relevancy score for the entity.
+ // Not normalized and not comparable across different image queries.
+ float score = 2;
+
+ // Canonical description of the entity, in English.
+ string description = 3;
+ }
+
+ // Metadata for online images.
+ message WebImage {
+ // The result image URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the image.
+ float score = 2;
+ }
+
+ // Label to provide extra metadata for the web detection.
+ message WebLabel {
+ // Label for extra metadata.
+ string label = 1;
+
+ // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ // For more information, see
+ // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ string language_code = 2;
+ }
+
+ // Metadata for web pages.
+ message WebPage {
+ // The result web page URL.
+ string url = 1;
+
+ // (Deprecated) Overall relevancy score for the web page.
+ float score = 2;
+
+ // Title for the web page, may contain HTML markups.
+ string page_title = 3;
+
+ // Fully matching images on the page.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 4;
+
+ // Partial matching images on the page.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its
+ // crops.
+ repeated WebImage partial_matching_images = 5;
+ }
+
+ // Deduced entities from similar images on the Internet.
+ repeated WebEntity web_entities = 1;
+
+ // Fully matching images from the Internet.
+ // Can include resized copies of the query image.
+ repeated WebImage full_matching_images = 2;
+
+ // Partial matching images from the Internet.
+ // Those images are similar enough to share some key-point features. For
+ // example an original image will likely have partial matching for its crops.
+ repeated WebImage partial_matching_images = 3;
+
+ // Web pages containing the matching images from the Internet.
+ repeated WebPage pages_with_matching_images = 4;
+
+ // The visually similar image results.
+ repeated WebImage visually_similar_images = 6;
+
+ // The service's best guess as to the topic of the request image.
+ // Inferred from similar images on the open web.
+ repeated WebLabel best_guess_labels = 8;
+}
diff --git a/packages/google-cloud-vision/src/index.js b/packages/google-cloud-vision/src/index.js
index 1b07bbbfdc8..f70be4fdca4 100644
--- a/packages/google-cloud-vision/src/index.js
+++ b/packages/google-cloud-vision/src/index.js
@@ -24,6 +24,9 @@
/**
* @namespace google.cloud.vision.v1p3beta1
*/
+/**
+ * @namespace google.cloud.vision.v1p4beta1
+ */
/**
* @namespace google.longrunning
*/
@@ -47,6 +50,7 @@ const gapic = Object.freeze({
v1p1beta1: require('./v1p1beta1'),
v1p2beta1: require('./v1p2beta1'),
v1p3beta1: require('./v1p3beta1'),
+ v1p4beta1: require('./v1p4beta1'),
});
// Augment the SpeechClient objects with the helpers.
@@ -111,5 +115,12 @@ module.exports.v1p2beta1 = gapic.v1p2beta1;
*/
module.exports.v1p3beta1 = gapic.v1p3beta1;
+/**
+ * @type {object}
+ * @property {constructor} ImageAnnotatorClient
+ * Reference to {@link v1p4beta1.ImageAnnotatorClient}
+ */
+module.exports.v1p4beta1 = gapic.v1p4beta1;
+
// Alias `module.exports` as `module.exports.default`, for future-proofing.
module.exports.default = Object.assign({}, module.exports);
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_geometry.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_geometry.js
new file mode 100644
index 00000000000..eebf862e9cf
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_geometry.js
@@ -0,0 +1,96 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the vertex coordinates are in the same scale as the original image.
+ *
+ * @property {number} x
+ * X coordinate.
+ *
+ * @property {number} y
+ * Y coordinate.
+ *
+ * @typedef Vertex
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Vertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/geometry.proto}
+ */
+const Vertex = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the normalized vertex coordinates are relative to the original image
+ * and range from 0 to 1.
+ *
+ * @property {number} x
+ * X coordinate.
+ *
+ * @property {number} y
+ * Y coordinate.
+ *
+ * @typedef NormalizedVertex
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.NormalizedVertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/geometry.proto}
+ */
+const NormalizedVertex = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A bounding polygon for the detected image annotation.
+ *
+ * @property {Object[]} vertices
+ * The bounding polygon vertices.
+ *
+ * This object should have the same structure as [Vertex]{@link google.cloud.vision.v1p4beta1.Vertex}
+ *
+ * @property {Object[]} normalizedVertices
+ * The bounding polygon normalized vertices.
+ *
+ * This object should have the same structure as [NormalizedVertex]{@link google.cloud.vision.v1p4beta1.NormalizedVertex}
+ *
+ * @typedef BoundingPoly
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.BoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/geometry.proto}
+ */
+const BoundingPoly = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A 3D position in the image, used primarily for Face detection landmarks.
+ * A valid Position must have both x and y coordinates.
+ * The position coordinates are in the same scale as the original image.
+ *
+ * @property {number} x
+ * X coordinate.
+ *
+ * @property {number} y
+ * Y coordinate.
+ *
+ * @property {number} z
+ * Z coordinate (or depth).
+ *
+ * @typedef Position
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Position definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/geometry.proto}
+ */
+const Position = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_image_annotator.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_image_annotator.js
new file mode 100644
index 00000000000..7faebb7bff4
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_image_annotator.js
@@ -0,0 +1,1431 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * The type of Google Cloud Vision API detection to perform, and the maximum
+ * number of results to return for that type. Multiple `Feature` objects can
+ * be specified in the `features` list.
+ *
+ * @property {number} type
+ * The feature type.
+ *
+ * The number should be among the values of [Type]{@link google.cloud.vision.v1p4beta1.Type}
+ *
+ * @property {number} maxResults
+ * Maximum number of results of this type. Does not apply to
+ * `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ *
+ * @property {string} model
+ * Model to use for the feature.
+ * Supported values: "builtin/stable" (the default if unset) and
+ * "builtin/latest".
+ *
+ * @typedef Feature
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Feature definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const Feature = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Type of Google Cloud Vision API feature to be extracted.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p4beta1
+ */
+ Type: {
+
+ /**
+ * Unspecified feature type.
+ */
+ TYPE_UNSPECIFIED: 0,
+
+ /**
+ * Run face detection.
+ */
+ FACE_DETECTION: 1,
+
+ /**
+ * Run landmark detection.
+ */
+ LANDMARK_DETECTION: 2,
+
+ /**
+ * Run logo detection.
+ */
+ LOGO_DETECTION: 3,
+
+ /**
+ * Run label detection.
+ */
+ LABEL_DETECTION: 4,
+
+ /**
+ * Run text detection / optical character recognition (OCR). Text detection
+ * is optimized for areas of text within a larger image; if the image is
+ * a document, use `DOCUMENT_TEXT_DETECTION` instead.
+ */
+ TEXT_DETECTION: 5,
+
+ /**
+ * Run dense text document OCR. Takes precedence when both
+ * `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
+ */
+ DOCUMENT_TEXT_DETECTION: 11,
+
+ /**
+ * Run Safe Search to detect potentially unsafe
+ * or undesirable content.
+ */
+ SAFE_SEARCH_DETECTION: 6,
+
+ /**
+ * Compute a set of image properties, such as the
+ * image's dominant colors.
+ */
+ IMAGE_PROPERTIES: 7,
+
+ /**
+ * Run crop hints.
+ */
+ CROP_HINTS: 9,
+
+ /**
+ * Run web detection.
+ */
+ WEB_DETECTION: 10,
+
+ /**
+ * Run Product Search.
+ */
+ PRODUCT_SEARCH: 12,
+
+ /**
+ * Run localizer for object detection.
+ */
+ OBJECT_LOCALIZATION: 19
+ }
+};
+
+/**
+ * External image source (Google Cloud Storage or web URL image location).
+ *
+ * @property {string} gcsImageUri
+ * **Use `image_uri` instead.**
+ *
+ * The Google Cloud Storage URI of the form
+ * `gs://bucket_name/object_name`. Object versioning is not supported. See
+ * [Google Cloud Storage Request
+ * URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
+ *
+ * @property {string} imageUri
+ * The URI of the source image. Can be either:
+ *
+ * 1. A Google Cloud Storage URI of the form
+ * `gs://bucket_name/object_name`. Object versioning is not supported. See
+ * [Google Cloud Storage Request
+ * URIs](https://cloud.google.com/storage/docs/reference-uris) for more
+ * info.
+ *
+ * 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
+ * HTTP/HTTPS URLs, Google cannot guarantee that the request will be
+ * completed. Your request may fail if the specified host denies the
+ * request (e.g. due to request throttling or DOS prevention), or if Google
+ * throttles requests to the site for abuse prevention. You should not
+ * depend on externally-hosted images for production applications.
+ *
+ * When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+ * precedence.
+ *
+ * @typedef ImageSource
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImageSource definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const ImageSource = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Client image to perform Google Cloud Vision API tasks over.
+ *
+ * @property {string} content
+ * Image content, represented as a stream of bytes.
+ * Note: As with all `bytes` fields, protobuffers use a pure binary
+ * representation, whereas JSON representations use base64.
+ *
+ * @property {Object} source
+ * Google Cloud Storage image location, or publicly-accessible image
+ * URL. If both `content` and `source` are provided for an image, `content`
+ * takes precedence and is used to perform the image annotation request.
+ *
+ * This object should have the same structure as [ImageSource]{@link google.cloud.vision.v1p4beta1.ImageSource}
+ *
+ * @typedef Image
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Image definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const Image = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A face annotation object contains the results of face detection.
+ *
+ * @property {Object} boundingPoly
+ * The bounding polygon around the face. The coordinates of the bounding box
+ * are in the original image's scale.
+ * The bounding box is computed to "frame" the face in accordance with human
+ * expectations. It is based on the landmarker results.
+ * Note that one or more x and/or y coordinates may not be generated in the
+ * `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ * appears in the image to be annotated.
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {Object} fdBoundingPoly
+ * The `fd_bounding_poly` bounding polygon is tighter than the
+ * `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ * is used to eliminate the face from any image analysis that detects the
+ * "amount of skin" visible in an image. It is not based on the
+ * landmarker results, only on the initial face detection, hence
+ * the fd (face detection) prefix.
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {Object[]} landmarks
+ * Detected face landmarks.
+ *
+ * This object should have the same structure as [Landmark]{@link google.cloud.vision.v1p4beta1.Landmark}
+ *
+ * @property {number} rollAngle
+ * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ * of the face relative to the image vertical about the axis perpendicular to
+ * the face. Range [-180,180].
+ *
+ * @property {number} panAngle
+ * Yaw angle, which indicates the leftward/rightward angle that the face is
+ * pointing relative to the vertical plane perpendicular to the image. Range
+ * [-180,180].
+ *
+ * @property {number} tiltAngle
+ * Pitch angle, which indicates the upwards/downwards angle that the face is
+ * pointing relative to the image's horizontal plane. Range [-180,180].
+ *
+ * @property {number} detectionConfidence
+ * Detection confidence. Range [0, 1].
+ *
+ * @property {number} landmarkingConfidence
+ * Face landmarking confidence. Range [0, 1].
+ *
+ * @property {number} joyLikelihood
+ * Joy likelihood.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} sorrowLikelihood
+ * Sorrow likelihood.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} angerLikelihood
+ * Anger likelihood.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} surpriseLikelihood
+ * Surprise likelihood.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} underExposedLikelihood
+ * Under-exposed likelihood.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} blurredLikelihood
+ * Blurred likelihood.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} headwearLikelihood
+ * Headwear likelihood.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @typedef FaceAnnotation
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.FaceAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const FaceAnnotation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * A face-specific landmark (for example, a face feature).
+ *
+ * @property {number} type
+ * Face landmark type.
+ *
+ * The number should be among the values of [Type]{@link google.cloud.vision.v1p4beta1.Type}
+ *
+ * @property {Object} position
+ * Face landmark position.
+ *
+ * This object should have the same structure as [Position]{@link google.cloud.vision.v1p4beta1.Position}
+ *
+ * @typedef Landmark
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.FaceAnnotation.Landmark definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+ Landmark: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Face landmark (feature) type.
+ * Left and right are defined from the vantage of the viewer of the image
+ * without considering mirror projections typical of photos. So, `LEFT_EYE`,
+ * typically, is the person's right eye.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p4beta1
+ */
+ Type: {
+
+ /**
+ * Unknown face landmark detected. Should not be filled.
+ */
+ UNKNOWN_LANDMARK: 0,
+
+ /**
+ * Left eye.
+ */
+ LEFT_EYE: 1,
+
+ /**
+ * Right eye.
+ */
+ RIGHT_EYE: 2,
+
+ /**
+ * Left of left eyebrow.
+ */
+ LEFT_OF_LEFT_EYEBROW: 3,
+
+ /**
+ * Right of left eyebrow.
+ */
+ RIGHT_OF_LEFT_EYEBROW: 4,
+
+ /**
+ * Left of right eyebrow.
+ */
+ LEFT_OF_RIGHT_EYEBROW: 5,
+
+ /**
+ * Right of right eyebrow.
+ */
+ RIGHT_OF_RIGHT_EYEBROW: 6,
+
+ /**
+ * Midpoint between eyes.
+ */
+ MIDPOINT_BETWEEN_EYES: 7,
+
+ /**
+ * Nose tip.
+ */
+ NOSE_TIP: 8,
+
+ /**
+ * Upper lip.
+ */
+ UPPER_LIP: 9,
+
+ /**
+ * Lower lip.
+ */
+ LOWER_LIP: 10,
+
+ /**
+ * Mouth left.
+ */
+ MOUTH_LEFT: 11,
+
+ /**
+ * Mouth right.
+ */
+ MOUTH_RIGHT: 12,
+
+ /**
+ * Mouth center.
+ */
+ MOUTH_CENTER: 13,
+
+ /**
+ * Nose, bottom right.
+ */
+ NOSE_BOTTOM_RIGHT: 14,
+
+ /**
+ * Nose, bottom left.
+ */
+ NOSE_BOTTOM_LEFT: 15,
+
+ /**
+ * Nose, bottom center.
+ */
+ NOSE_BOTTOM_CENTER: 16,
+
+ /**
+ * Left eye, top boundary.
+ */
+ LEFT_EYE_TOP_BOUNDARY: 17,
+
+ /**
+ * Left eye, right corner.
+ */
+ LEFT_EYE_RIGHT_CORNER: 18,
+
+ /**
+ * Left eye, bottom boundary.
+ */
+ LEFT_EYE_BOTTOM_BOUNDARY: 19,
+
+ /**
+ * Left eye, left corner.
+ */
+ LEFT_EYE_LEFT_CORNER: 20,
+
+ /**
+ * Right eye, top boundary.
+ */
+ RIGHT_EYE_TOP_BOUNDARY: 21,
+
+ /**
+ * Right eye, right corner.
+ */
+ RIGHT_EYE_RIGHT_CORNER: 22,
+
+ /**
+ * Right eye, bottom boundary.
+ */
+ RIGHT_EYE_BOTTOM_BOUNDARY: 23,
+
+ /**
+ * Right eye, left corner.
+ */
+ RIGHT_EYE_LEFT_CORNER: 24,
+
+ /**
+ * Left eyebrow, upper midpoint.
+ */
+ LEFT_EYEBROW_UPPER_MIDPOINT: 25,
+
+ /**
+ * Right eyebrow, upper midpoint.
+ */
+ RIGHT_EYEBROW_UPPER_MIDPOINT: 26,
+
+ /**
+ * Left ear tragion.
+ */
+ LEFT_EAR_TRAGION: 27,
+
+ /**
+ * Right ear tragion.
+ */
+ RIGHT_EAR_TRAGION: 28,
+
+ /**
+ * Left eye pupil.
+ */
+ LEFT_EYE_PUPIL: 29,
+
+ /**
+ * Right eye pupil.
+ */
+ RIGHT_EYE_PUPIL: 30,
+
+ /**
+ * Forehead glabella.
+ */
+ FOREHEAD_GLABELLA: 31,
+
+ /**
+ * Chin gnathion.
+ */
+ CHIN_GNATHION: 32,
+
+ /**
+ * Chin left gonion.
+ */
+ CHIN_LEFT_GONION: 33,
+
+ /**
+ * Chin right gonion.
+ */
+ CHIN_RIGHT_GONION: 34
+ }
+ }
+};
+
+/**
+ * Detected entity location information.
+ *
+ * @property {Object} latLng
+ * lat/long location coordinates.
+ *
+ * This object should have the same structure as [LatLng]{@link google.type.LatLng}
+ *
+ * @typedef LocationInfo
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.LocationInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const LocationInfo = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A `Property` consists of a user-supplied name/value pair.
+ *
+ * @property {string} name
+ * Name of the property.
+ *
+ * @property {string} value
+ * Value of the property.
+ *
+ * @property {number} uint64Value
+ * Value of numeric properties.
+ *
+ * @typedef Property
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Property definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const Property = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Set of detected entity features.
+ *
+ * @property {string} mid
+ * Opaque entity ID. Some IDs may be available in
+ * [Google Knowledge Graph Search
+ * API](https://developers.google.com/knowledge-graph/).
+ *
+ * @property {string} locale
+ * The language code for the locale in which the entity textual
+ * `description` is expressed.
+ *
+ * @property {string} description
+ * Entity textual description, expressed in its `locale` language.
+ *
+ * @property {number} score
+ * Overall score of the result. Range [0, 1].
+ *
+ * @property {number} confidence
+ * **Deprecated. Use `score` instead.**
+ * The accuracy of the entity detection in an image.
+ * For example, for an image in which the "Eiffel Tower" entity is detected,
+ * this field represents the confidence that there is a tower in the query
+ * image. Range [0, 1].
+ *
+ * @property {number} topicality
+ * The relevancy of the ICA (Image Content Annotation) label to the
+ * image. For example, the relevancy of "tower" is likely higher to an image
+ * containing the detected "Eiffel Tower" than to an image containing a
+ * detected distant towering building, even though the confidence that
+ * there is a tower in each image may be the same. Range [0, 1].
+ *
+ * @property {Object} boundingPoly
+ * Image region to which this entity belongs. Not produced
+ * for `LABEL_DETECTION` features.
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {Object[]} locations
+ * The location information for the detected entity. Multiple
+ * `LocationInfo` elements can be present because one location may
+ * indicate the location of the scene in the image, and another location
+ * may indicate the location of the place where the image was taken.
+ * Location information is usually present for landmarks.
+ *
+ * This object should have the same structure as [LocationInfo]{@link google.cloud.vision.v1p4beta1.LocationInfo}
+ *
+ * @property {Object[]} properties
+ * Some entities may have optional user-supplied `Property` (name/value)
+ * fields, such as a score or string that qualifies the entity.
+ *
+ * This object should have the same structure as [Property]{@link google.cloud.vision.v1p4beta1.Property}
+ *
+ * @typedef EntityAnnotation
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.EntityAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const EntityAnnotation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Set of detected objects with bounding boxes.
+ *
+ * @property {string} mid
+ * Object ID that should align with EntityAnnotation mid.
+ *
+ * @property {string} languageCode
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ * information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ *
+ * @property {string} name
+ * Object name, expressed in its `language_code` language.
+ *
+ * @property {number} score
+ * Score of the result. Range [0, 1].
+ *
+ * @property {Object} boundingPoly
+ * Image region to which this object belongs. This must be populated.
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @typedef LocalizedObjectAnnotation
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.LocalizedObjectAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const LocalizedObjectAnnotation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Set of features pertaining to the image, computed by computer vision
+ * methods over safe-search verticals (for example, adult, spoof, medical,
+ * violence).
+ *
+ * @property {number} adult
+ * Represents the adult content likelihood for the image. Adult content may
+ * contain elements such as nudity, pornographic images or cartoons, or
+ * sexual activities.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} spoof
+ * Spoof likelihood. The likelihood that a modification
+ * was made to the image's canonical version to make it appear
+ * funny or offensive.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} medical
+ * Likelihood that this is a medical image.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} violence
+ * Likelihood that this image contains violent content.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @property {number} racy
+ * Likelihood that the request image contains racy content. Racy content may
+ * include (but is not limited to) skimpy or sheer clothing, strategically
+ * covered nudity, lewd or provocative poses, or close-ups of sensitive
+ * body areas.
+ *
+ * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p4beta1.Likelihood}
+ *
+ * @typedef SafeSearchAnnotation
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.SafeSearchAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const SafeSearchAnnotation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Rectangle determined by min and max `LatLng` pairs.
+ *
+ * @property {Object} minLatLng
+ * Min lat/long pair.
+ *
+ * This object should have the same structure as [LatLng]{@link google.type.LatLng}
+ *
+ * @property {Object} maxLatLng
+ * Max lat/long pair.
+ *
+ * This object should have the same structure as [LatLng]{@link google.type.LatLng}
+ *
+ * @typedef LatLongRect
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.LatLongRect definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const LatLongRect = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Color information consists of RGB channels, score, and the fraction of
+ * the image that the color occupies in the image.
+ *
+ * @property {Object} color
+ * RGB components of the color.
+ *
+ * This object should have the same structure as [Color]{@link google.type.Color}
+ *
+ * @property {number} score
+ * Image-specific score for this color. Value in range [0, 1].
+ *
+ * @property {number} pixelFraction
+ * The fraction of pixels the color occupies in the image.
+ * Value in range [0, 1].
+ *
+ * @typedef ColorInfo
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ColorInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const ColorInfo = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Set of dominant colors and their corresponding scores.
+ *
+ * @property {Object[]} colors
+ * RGB color values with their score and pixel fraction.
+ *
+ * This object should have the same structure as [ColorInfo]{@link google.cloud.vision.v1p4beta1.ColorInfo}
+ *
+ * @typedef DominantColorsAnnotation
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.DominantColorsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const DominantColorsAnnotation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Stores image properties, such as dominant colors.
+ *
+ * @property {Object} dominantColors
+ * If present, dominant colors completed successfully.
+ *
+ * This object should have the same structure as [DominantColorsAnnotation]{@link google.cloud.vision.v1p4beta1.DominantColorsAnnotation}
+ *
+ * @typedef ImageProperties
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImageProperties definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const ImageProperties = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Single crop hint that is used to generate a new crop when serving an image.
+ *
+ * @property {Object} boundingPoly
+ * The bounding polygon for the crop region. The coordinates of the bounding
+ * box are in the original image's scale.
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {number} confidence
+ * Confidence of this being a salient region. Range [0, 1].
+ *
+ * @property {number} importanceFraction
+ * Fraction of importance of this salient region with respect to the original
+ * image.
+ *
+ * @typedef CropHint
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.CropHint definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const CropHint = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Set of crop hints that are used to generate new crops when serving images.
+ *
+ * @property {Object[]} cropHints
+ * Crop hint results.
+ *
+ * This object should have the same structure as [CropHint]{@link google.cloud.vision.v1p4beta1.CropHint}
+ *
+ * @typedef CropHintsAnnotation
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.CropHintsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const CropHintsAnnotation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Parameters for crop hints annotation request.
+ *
+ * @property {number[]} aspectRatios
+ * Aspect ratios in floats, representing the ratio of the width to the height
+ * of the image. For example, if the desired aspect ratio is 4/3, the
+ * corresponding float value should be 1.33333. If not specified, the
+ * best possible crop is returned. The number of provided aspect ratios is
+ * limited to a maximum of 16; any aspect ratios provided after the 16th are
+ * ignored.
+ *
+ * @typedef CropHintsParams
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.CropHintsParams definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const CropHintsParams = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Parameters for web detection request.
+ *
+ * @property {boolean} includeGeoResults
+ * Whether to include results derived from the geo information in the image.
+ *
+ * @typedef WebDetectionParams
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.WebDetectionParams definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const WebDetectionParams = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Image context and/or feature-specific parameters.
+ *
+ * @property {Object} latLongRect
+ * Not used.
+ *
+ * This object should have the same structure as [LatLongRect]{@link google.cloud.vision.v1p4beta1.LatLongRect}
+ *
+ * @property {string[]} languageHints
+ * List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ * yields the best results since it enables automatic language detection. For
+ * languages based on the Latin alphabet, setting `language_hints` is not
+ * needed. In rare cases, when the language of the text in the image is known,
+ * setting a hint will help get better results (although it will be a
+ * significant hindrance if the hint is wrong). Text detection returns an
+ * error if one or more of the specified languages is not one of the
+ * [supported languages](https://cloud.google.com/vision/docs/languages).
+ *
+ * @property {Object} cropHintsParams
+ * Parameters for crop hints annotation request.
+ *
+ * This object should have the same structure as [CropHintsParams]{@link google.cloud.vision.v1p4beta1.CropHintsParams}
+ *
+ * @property {Object} productSearchParams
+ * Parameters for product search.
+ *
+ * This object should have the same structure as [ProductSearchParams]{@link google.cloud.vision.v1p4beta1.ProductSearchParams}
+ *
+ * @property {Object} webDetectionParams
+ * Parameters for web detection.
+ *
+ * This object should have the same structure as [WebDetectionParams]{@link google.cloud.vision.v1p4beta1.WebDetectionParams}
+ *
+ * @typedef ImageContext
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImageContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const ImageContext = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request for performing Google Cloud Vision API tasks over a user-provided
+ * image, with user-requested features, and with context information.
+ *
+ * @property {Object} image
+ * The image to be processed.
+ *
+ * This object should have the same structure as [Image]{@link google.cloud.vision.v1p4beta1.Image}
+ *
+ * @property {Object[]} features
+ * Requested features.
+ *
+ * This object should have the same structure as [Feature]{@link google.cloud.vision.v1p4beta1.Feature}
+ *
+ * @property {Object} imageContext
+ * Additional context that may accompany the image.
+ *
+ * This object should have the same structure as [ImageContext]{@link google.cloud.vision.v1p4beta1.ImageContext}
+ *
+ * @typedef AnnotateImageRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AnnotateImageRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AnnotateImageRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * If an image was produced from a file (e.g. a PDF), this message gives
+ * information about the source of that image.
+ *
+ * @property {string} uri
+ * The URI of the file used to produce the image.
+ *
+ * @property {number} pageNumber
+ * If the file was a PDF or TIFF, this field gives the page number within
+ * the file used to produce the image.
+ *
+ * @typedef ImageAnnotationContext
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImageAnnotationContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const ImageAnnotationContext = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response to an image annotation request.
+ *
+ * @property {Object[]} faceAnnotations
+ * If present, face detection has completed successfully.
+ *
+ * This object should have the same structure as [FaceAnnotation]{@link google.cloud.vision.v1p4beta1.FaceAnnotation}
+ *
+ * @property {Object[]} landmarkAnnotations
+ * If present, landmark detection has completed successfully.
+ *
+ * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p4beta1.EntityAnnotation}
+ *
+ * @property {Object[]} logoAnnotations
+ * If present, logo detection has completed successfully.
+ *
+ * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p4beta1.EntityAnnotation}
+ *
+ * @property {Object[]} labelAnnotations
+ * If present, label detection has completed successfully.
+ *
+ * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p4beta1.EntityAnnotation}
+ *
+ * @property {Object[]} localizedObjectAnnotations
+ * If present, localized object detection has completed successfully.
+ * This will be sorted descending by confidence score.
+ *
+ * This object should have the same structure as [LocalizedObjectAnnotation]{@link google.cloud.vision.v1p4beta1.LocalizedObjectAnnotation}
+ *
+ * @property {Object[]} textAnnotations
+ * If present, text (OCR) detection has completed successfully.
+ *
+ * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p4beta1.EntityAnnotation}
+ *
+ * @property {Object} fullTextAnnotation
+ * If present, text (OCR) detection or document (OCR) text detection has
+ * completed successfully.
+ * This annotation provides the structural hierarchy for the OCR detected
+ * text.
+ *
+ * This object should have the same structure as [TextAnnotation]{@link google.cloud.vision.v1p4beta1.TextAnnotation}
+ *
+ * @property {Object} safeSearchAnnotation
+ * If present, safe-search annotation has completed successfully.
+ *
+ * This object should have the same structure as [SafeSearchAnnotation]{@link google.cloud.vision.v1p4beta1.SafeSearchAnnotation}
+ *
+ * @property {Object} imagePropertiesAnnotation
+ * If present, image properties were extracted successfully.
+ *
+ * This object should have the same structure as [ImageProperties]{@link google.cloud.vision.v1p4beta1.ImageProperties}
+ *
+ * @property {Object} cropHintsAnnotation
+ * If present, crop hints have completed successfully.
+ *
+ * This object should have the same structure as [CropHintsAnnotation]{@link google.cloud.vision.v1p4beta1.CropHintsAnnotation}
+ *
+ * @property {Object} webDetection
+ * If present, web detection has completed successfully.
+ *
+ * This object should have the same structure as [WebDetection]{@link google.cloud.vision.v1p4beta1.WebDetection}
+ *
+ * @property {Object} productSearchResults
+ * If present, product search has completed successfully.
+ *
+ * This object should have the same structure as [ProductSearchResults]{@link google.cloud.vision.v1p4beta1.ProductSearchResults}
+ *
+ * @property {Object} error
+ * If set, represents the error message for the operation.
+ * Note that filled-in image annotations are guaranteed to be
+ * correct, even when `error` is set.
+ *
+ * This object should have the same structure as [Status]{@link google.rpc.Status}
+ *
+ * @property {Object} context
+ * If present, contextual information is needed to understand where this image
+ * comes from.
+ *
+ * This object should have the same structure as [ImageAnnotationContext]{@link google.cloud.vision.v1p4beta1.ImageAnnotationContext}
+ *
+ * @typedef AnnotateImageResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AnnotateImageResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AnnotateImageResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response to a single file annotation request. A file may contain one or more
+ * images, which individually have their own responses.
+ *
+ * @property {Object} inputConfig
+ * Information about the file for which this response is generated.
+ *
+ * This object should have the same structure as [InputConfig]{@link google.cloud.vision.v1p4beta1.InputConfig}
+ *
+ * @property {Object[]} responses
+ * Individual responses to images found within the file.
+ *
+ * This object should have the same structure as [AnnotateImageResponse]{@link google.cloud.vision.v1p4beta1.AnnotateImageResponse}
+ *
+ * @property {number} totalPages
+ * This field gives the total number of pages in the file.
+ *
+ * @typedef AnnotateFileResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AnnotateFileResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AnnotateFileResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Multiple image annotation requests are batched into a single service call.
+ *
+ * @property {Object[]} requests
+ * Individual image annotation requests for this batch.
+ *
+ * This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1p4beta1.AnnotateImageRequest}
+ *
+ * @typedef BatchAnnotateImagesRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.BatchAnnotateImagesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const BatchAnnotateImagesRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response to a batch image annotation request.
+ *
+ * @property {Object[]} responses
+ * Individual responses to image annotation requests within the batch.
+ *
+ * This object should have the same structure as [AnnotateImageResponse]{@link google.cloud.vision.v1p4beta1.AnnotateImageResponse}
+ *
+ * @typedef BatchAnnotateImagesResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const BatchAnnotateImagesResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
+ *
+ * @property {Object} inputConfig
+ * Required. Information about the input file.
+ *
+ * This object should have the same structure as [InputConfig]{@link google.cloud.vision.v1p4beta1.InputConfig}
+ *
+ * @property {Object[]} features
+ * Required. Requested features.
+ *
+ * This object should have the same structure as [Feature]{@link google.cloud.vision.v1p4beta1.Feature}
+ *
+ * @property {Object} imageContext
+ * Additional context that may accompany the image(s) in the file.
+ *
+ * This object should have the same structure as [ImageContext]{@link google.cloud.vision.v1p4beta1.ImageContext}
+ *
+ * @property {number[]} pages
+ * Pages of the file to perform image annotation.
+ *
+ * Pages start from 1; we assume the first page of the file is page 1.
+ * At most 5 pages are supported per request. Pages can be negative.
+ *
+ * Page 1 means the first page.
+ * Page 2 means the second page.
+ * Page -1 means the last page.
+ * Page -2 means the second to the last page.
+ *
+ * If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
+ *
+ * If this field is empty, by default the service performs image annotation
+ * for the first 5 pages of the file.
+ *
+ * @typedef AnnotateFileRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AnnotateFileRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AnnotateFileRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A list of requests to annotate files using the BatchAnnotateFiles API.
+ *
+ * @property {Object[]} requests
+ * The list of file annotation requests. Right now we support only one
+ * AnnotateFileRequest in BatchAnnotateFilesRequest.
+ *
+ * This object should have the same structure as [AnnotateFileRequest]{@link google.cloud.vision.v1p4beta1.AnnotateFileRequest}
+ *
+ * @typedef BatchAnnotateFilesRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.BatchAnnotateFilesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const BatchAnnotateFilesRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A list of file annotation responses.
+ *
+ * @property {Object[]} responses
+ * The list of file annotation responses, each response corresponding to each
+ * AnnotateFileRequest in BatchAnnotateFilesRequest.
+ *
+ * This object should have the same structure as [AnnotateFileResponse]{@link google.cloud.vision.v1p4beta1.AnnotateFileResponse}
+ *
+ * @typedef BatchAnnotateFilesResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.BatchAnnotateFilesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const BatchAnnotateFilesResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * An offline file annotation request.
+ *
+ * @property {Object} inputConfig
+ * Required. Information about the input file.
+ *
+ * This object should have the same structure as [InputConfig]{@link google.cloud.vision.v1p4beta1.InputConfig}
+ *
+ * @property {Object[]} features
+ * Required. Requested features.
+ *
+ * This object should have the same structure as [Feature]{@link google.cloud.vision.v1p4beta1.Feature}
+ *
+ * @property {Object} imageContext
+ * Additional context that may accompany the image(s) in the file.
+ *
+ * This object should have the same structure as [ImageContext]{@link google.cloud.vision.v1p4beta1.ImageContext}
+ *
+ * @property {Object} outputConfig
+ * Required. The desired output location and metadata (e.g. format).
+ *
+ * This object should have the same structure as [OutputConfig]{@link google.cloud.vision.v1p4beta1.OutputConfig}
+ *
+ * @typedef AsyncAnnotateFileRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AsyncAnnotateFileRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AsyncAnnotateFileRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The response for a single offline file annotation request.
+ *
+ * @property {Object} outputConfig
+ * The output location and metadata from AsyncAnnotateFileRequest.
+ *
+ * This object should have the same structure as [OutputConfig]{@link google.cloud.vision.v1p4beta1.OutputConfig}
+ *
+ * @typedef AsyncAnnotateFileResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AsyncAnnotateFileResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AsyncAnnotateFileResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request for async image annotation for a list of images.
+ *
+ * @property {Object[]} requests
+ * Individual image annotation requests for this batch.
+ *
+ * This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1p4beta1.AnnotateImageRequest}
+ *
+ * @property {Object} outputConfig
+ * Required. The desired output location and metadata (e.g. format).
+ *
+ * This object should have the same structure as [OutputConfig]{@link google.cloud.vision.v1p4beta1.OutputConfig}
+ *
+ * @typedef AsyncBatchAnnotateImagesRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AsyncBatchAnnotateImagesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AsyncBatchAnnotateImagesRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response to an async batch image annotation request.
+ *
+ * @property {Object} outputConfig
+ * The output location and metadata from AsyncBatchAnnotateImagesRequest.
+ *
+ * This object should have the same structure as [OutputConfig]{@link google.cloud.vision.v1p4beta1.OutputConfig}
+ *
+ * @typedef AsyncBatchAnnotateImagesResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AsyncBatchAnnotateImagesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AsyncBatchAnnotateImagesResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Multiple async file annotation requests are batched into a single service
+ * call.
+ *
+ * @property {Object[]} requests
+ * Individual async file annotation requests for this batch.
+ *
+ * This object should have the same structure as [AsyncAnnotateFileRequest]{@link google.cloud.vision.v1p4beta1.AsyncAnnotateFileRequest}
+ *
+ * @typedef AsyncBatchAnnotateFilesRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AsyncBatchAnnotateFilesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AsyncBatchAnnotateFilesRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response to an async batch file annotation request.
+ *
+ * @property {Object[]} responses
+ * The list of file annotation responses, one for each request in
+ * AsyncBatchAnnotateFilesRequest.
+ *
+ * This object should have the same structure as [AsyncAnnotateFileResponse]{@link google.cloud.vision.v1p4beta1.AsyncAnnotateFileResponse}
+ *
+ * @typedef AsyncBatchAnnotateFilesResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AsyncBatchAnnotateFilesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const AsyncBatchAnnotateFilesResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The desired input location and metadata.
+ *
+ * @property {Object} gcsSource
+ * The Google Cloud Storage location to read the input from.
+ *
+ * This object should have the same structure as [GcsSource]{@link google.cloud.vision.v1p4beta1.GcsSource}
+ *
+ * @property {string} content
+ * File content, represented as a stream of bytes.
+ * Note: As with all `bytes` fields, protobuffers use a pure binary
+ * representation, whereas JSON representations use base64.
+ *
+ * Currently, this field only works for BatchAnnotateFiles requests. It does
+ * not work for AsyncBatchAnnotateFiles requests.
+ *
+ * @property {string} mimeType
+ * The type of the file. Currently only "application/pdf" and "image/tiff"
+ * are supported. Wildcards are not supported.
+ *
+ * @typedef InputConfig
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.InputConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const InputConfig = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The desired output location and metadata.
+ *
+ * @property {Object} gcsDestination
+ * The Google Cloud Storage location to write the output(s) to.
+ *
+ * This object should have the same structure as [GcsDestination]{@link google.cloud.vision.v1p4beta1.GcsDestination}
+ *
+ * @property {number} batchSize
+ * The max number of response protos to put into each output JSON file on
+ * Google Cloud Storage.
+ * The valid range is [1, 100]. If not specified, the default value is 20.
+ *
+ * For example, for one pdf file with 100 pages, 100 response protos will
+ * be generated. If `batch_size` = 20, then 5 json files each
+ * containing 20 response protos will be written under the prefix
+ * `gcs_destination`.`uri`.
+ *
+ * Currently, batch_size only applies to GcsDestination, with potential future
+ * support for other output configurations.
+ *
+ * @typedef OutputConfig
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.OutputConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const OutputConfig = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The Google Cloud Storage location where the input will be read from.
+ *
+ * @property {string} uri
+ * Google Cloud Storage URI for the input file. This must only be a
+ * Google Cloud Storage object. Wildcards are not currently supported.
+ *
+ * @typedef GcsSource
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.GcsSource definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const GcsSource = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The Google Cloud Storage location where the output will be written to.
+ *
+ * @property {string} uri
+ * Google Cloud Storage URI where the results will be stored. Results will
+ * be in JSON format and preceded by its corresponding input URI. This field
+ * can either represent a single file, or a prefix for multiple outputs.
+ * Prefixes must end in a `/`.
+ *
+ * Examples:
+ *
+ * * File: gs://bucket-name/filename.json
+ * * Prefix: gs://bucket-name/prefix/here/
+ * * File: gs://bucket-name/prefix/here
+ *
+ * If multiple outputs, each response is still AnnotateFileResponse, each of
+ * which contains some subset of the full list of AnnotateImageResponse.
+ * Multiple outputs can happen if, for example, the output JSON is too large
+ * and overflows into multiple sharded files.
+ *
+ * @typedef GcsDestination
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.GcsDestination definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const GcsDestination = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Contains metadata for the BatchAnnotateImages operation.
+ *
+ * @property {number} state
+ * Current state of the batch operation.
+ *
+ * The number should be among the values of [State]{@link google.cloud.vision.v1p4beta1.State}
+ *
+ * @property {Object} createTime
+ * The time when the batch request was received.
+ *
+ * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @property {Object} updateTime
+ * The time when the operation result was last updated.
+ *
+ * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @typedef OperationMetadata
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.OperationMetadata definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/image_annotator.proto}
+ */
+const OperationMetadata = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Batch operation states.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p4beta1
+ */
+ State: {
+
+ /**
+ * Invalid.
+ */
+ STATE_UNSPECIFIED: 0,
+
+ /**
+ * Request is received.
+ */
+ CREATED: 1,
+
+ /**
+ * Request is actively being processed.
+ */
+ RUNNING: 2,
+
+ /**
+ * The batch processing is done.
+ */
+ DONE: 3,
+
+ /**
+ * The batch processing was cancelled.
+ */
+ CANCELLED: 4
+ }
+};
+
+/**
+ * A bucketized representation of likelihood, which is intended to give clients
+ * highly stable results across model upgrades.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p4beta1
+ */
+const Likelihood = {
+
+ /**
+ * Unknown likelihood.
+ */
+ UNKNOWN: 0,
+
+ /**
+ * It is very unlikely that the image belongs to the specified vertical.
+ */
+ VERY_UNLIKELY: 1,
+
+ /**
+ * It is unlikely that the image belongs to the specified vertical.
+ */
+ UNLIKELY: 2,
+
+ /**
+ * It is possible that the image belongs to the specified vertical.
+ */
+ POSSIBLE: 3,
+
+ /**
+ * It is likely that the image belongs to the specified vertical.
+ */
+ LIKELY: 4,
+
+ /**
+ * It is very likely that the image belongs to the specified vertical.
+ */
+ VERY_LIKELY: 5
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_product_search.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_product_search.js
new file mode 100644
index 00000000000..05f05474e0d
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_product_search.js
@@ -0,0 +1,128 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * Parameters for a product search request.
+ *
+ * @property {Object} boundingPoly
+ * The bounding polygon around the area of interest in the image.
+ * Optional. If it is not specified, system discretion will be applied.
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {string} productSet
+ * The resource name of a ProductSet to be searched for similar images.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ *
+ * @property {string[]} productCategories
+ * The list of product categories to search in. Currently, we only consider
+ * the first category, and either "homegoods", "apparel", or "toys" should be
+ * specified.
+ *
+ * @property {string} filter
+ * The filtering expression. This can be used to restrict search results based
+ * on Product labels. We currently support an AND of OR of key-value
+ * expressions, where each expression within an OR must have the same key.
+ *
+ * For example, "(color = red OR color = blue) AND brand = Google" is
+ * acceptable, but not "(color = red OR brand = Google)" or "color: red".
+ *
+ * @typedef ProductSearchParams
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ProductSearchParams definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search.proto}
+ */
+const ProductSearchParams = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Results for a product search request.
+ *
+ * @property {Object} indexTime
+ * Timestamp of the index which provided these results. Changes made after
+ * this time are not reflected in the current results.
+ *
+ * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @property {Object[]} results
+ * List of results, one for each product match.
+ *
+ * This object should have the same structure as [Result]{@link google.cloud.vision.v1p4beta1.Result}
+ *
+ * @property {Object[]} productGroupedResults
+ * List of results grouped by products detected in the query image. Each entry
+ * corresponds to one bounding polygon in the query image, and contains the
+ * matching products specific to that region. There may be duplicate product
+ * matches in the union of all the per-product results.
+ *
+ * This object should have the same structure as [GroupedResult]{@link google.cloud.vision.v1p4beta1.GroupedResult}
+ *
+ * @typedef ProductSearchResults
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ProductSearchResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search.proto}
+ */
+const ProductSearchResults = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Information about a product.
+ *
+ * @property {Object} product
+ * The Product.
+ *
+ * This object should have the same structure as [Product]{@link google.cloud.vision.v1p4beta1.Product}
+ *
+ * @property {number} score
+ * A confidence level on the match, ranging from 0 (no confidence) to
+ * 1 (full confidence).
+ *
+ * @property {string} image
+ * The resource name of the image from the product that is the closest match
+ * to the query.
+ *
+ * @typedef Result
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ProductSearchResults.Result definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search.proto}
+ */
+ Result: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ },
+
+ /**
+ * Information about the products similar to a single product in a query
+ * image.
+ *
+ * @property {Object} boundingPoly
+ * The bounding polygon around the product detected in the query image.
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {Object[]} results
+ * List of results, one for each product match.
+ *
+ * This object should have the same structure as [Result]{@link google.cloud.vision.v1p4beta1.Result}
+ *
+ * @typedef GroupedResult
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ProductSearchResults.GroupedResult definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search.proto}
+ */
+ GroupedResult: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ }
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_product_search_service.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_product_search_service.js
new file mode 100644
index 00000000000..71881ce3edd
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_product_search_service.js
@@ -0,0 +1,834 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * A Product contains ReferenceImages.
+ *
+ * @property {string} name
+ * The resource name of the product.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ *
+ * This field is ignored when creating a product.
+ *
+ * @property {string} displayName
+ * The user-provided name for this Product. Must not be empty. Must be at most
+ * 4096 characters long.
+ *
+ * @property {string} description
+ * User-provided metadata to be stored with this product. Must be at most 4096
+ * characters long.
+ *
+ * @property {string} productCategory
+ * The category for the product identified by the reference image. This should
+ * be either "homegoods", "apparel", or "toys".
+ *
+ * This field is immutable.
+ *
+ * @property {Object[]} productLabels
+ * Key-value pairs that can be attached to a product. At query time,
+ * constraints can be specified based on the product_labels.
+ *
+ * Note that integer values can be provided as strings, e.g. "1199". Only
+ * strings with integer values can match a range-based restriction which is
+ * to be supported soon.
+ *
+ * Multiple values can be assigned to the same key. One product may have up to
+ * 100 product_labels.
+ *
+ * This object should have the same structure as [KeyValue]{@link google.cloud.vision.v1p4beta1.KeyValue}
+ *
+ * @typedef Product
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Product definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const Product = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * A product label represented as a key-value pair.
+ *
+ * @property {string} key
+ * The key of the label attached to the product. Cannot be empty and cannot
+ * exceed 128 bytes.
+ *
+ * @property {string} value
+ * The value of the label attached to the product. Cannot be empty and
+ * cannot exceed 128 bytes.
+ *
+ * @typedef KeyValue
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Product.KeyValue definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+ KeyValue: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ }
+};
+
+/**
+ * A ProductSet contains Products. A ProductSet can contain a maximum of 1
+ * million reference images. If the limit is exceeded, periodic indexing will
+ * fail.
+ *
+ * @property {string} name
+ * The resource name of the ProductSet.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+ *
+ * This field is ignored when creating a ProductSet.
+ *
+ * @property {string} displayName
+ * The user-provided name for this ProductSet. Must not be empty. Must be at
+ * most 4096 characters long.
+ *
+ * @property {Object} indexTime
+ * Output only. The time at which this ProductSet was last indexed. Query
+ * results will reflect all updates before this time. If this ProductSet has
+ * never been indexed, this timestamp is the default value
+ * "1970-01-01T00:00:00Z".
+ *
+ * This field is ignored when creating a ProductSet.
+ *
+ * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @property {Object} indexError
+ * Output only. If there was an error with indexing the product set, the field
+ * is populated.
+ *
+ * This field is ignored when creating a ProductSet.
+ *
+ * This object should have the same structure as [Status]{@link google.rpc.Status}
+ *
+ * @typedef ProductSet
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ProductSet definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ProductSet = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A `ReferenceImage` represents a product image and its associated metadata,
+ * such as bounding boxes.
+ *
+ * @property {string} name
+ * The resource name of the reference image.
+ *
+ * Format is:
+ *
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ *
+ * This field is ignored when creating a reference image.
+ *
+ * @property {string} uri
+ * The Google Cloud Storage URI of the reference image.
+ *
+ * The URI must start with `gs://`.
+ *
+ * Required.
+ *
+ * @property {Object[]} boundingPolys
+ * Bounding polygons around the areas of interest in the reference image.
+ * Optional. If this field is empty, the system will try to detect regions of
+ * interest. At most 10 bounding polygons will be used.
+ *
+ * The provided shape is converted into a non-rotated rectangle. Once
+ * converted, the small edge of the rectangle must be greater than or equal
+ * to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; 1:5
+ * is not).
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @typedef ReferenceImage
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ReferenceImage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ReferenceImage = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `CreateProduct` method.
+ *
+ * @property {string} parent
+ * The project in which the Product should be created.
+ *
+ * Format is
+ * `projects/PROJECT_ID/locations/LOC_ID`.
+ *
+ * @property {Object} product
+ * The product to create.
+ *
+ * This object should have the same structure as [Product]{@link google.cloud.vision.v1p4beta1.Product}
+ *
+ * @property {string} productId
+ * A user-supplied resource id for this Product. If set, the server will
+ * attempt to use this value as the resource id. If it is already in use, an
+ * error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ * long. It cannot contain the character `/`.
+ *
+ * @typedef CreateProductRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.CreateProductRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const CreateProductRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `ListProducts` method.
+ *
+ * @property {string} parent
+ * The project OR ProductSet from which Products should be listed.
+ *
+ * Format:
+ * `projects/PROJECT_ID/locations/LOC_ID`
+ *
+ * @property {number} pageSize
+ * The maximum number of items to return. Default 10, maximum 100.
+ *
+ * @property {string} pageToken
+ * The next_page_token returned from a previous List request, if any.
+ *
+ * @typedef ListProductsRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListProductsRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListProductsRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response message for the `ListProducts` method.
+ *
+ * @property {Object[]} products
+ * List of products.
+ *
+ * This object should have the same structure as [Product]{@link google.cloud.vision.v1p4beta1.Product}
+ *
+ * @property {string} nextPageToken
+ * Token to retrieve the next page of results, or empty if there are no more
+ * results in the list.
+ *
+ * @typedef ListProductsResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListProductsResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListProductsResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `GetProduct` method.
+ *
+ * @property {string} name
+ * Resource name of the Product to get.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ *
+ * @typedef GetProductRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.GetProductRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const GetProductRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `UpdateProduct` method.
+ *
+ * @property {Object} product
+ * The Product resource which replaces the one on the server.
+ * product.name is immutable.
+ *
+ * This object should have the same structure as [Product]{@link google.cloud.vision.v1p4beta1.Product}
+ *
+ * @property {Object} updateMask
+ * The FieldMask that specifies which fields
+ * to update.
+ * If update_mask isn't specified, all mutable fields are to be updated.
+ * Valid mask paths include `product_labels`, `display_name`, and
+ * `description`.
+ *
+ * This object should have the same structure as [FieldMask]{@link google.protobuf.FieldMask}
+ *
+ * @typedef UpdateProductRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.UpdateProductRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const UpdateProductRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `DeleteProduct` method.
+ *
+ * @property {string} name
+ * Resource name of product to delete.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ *
+ * @typedef DeleteProductRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.DeleteProductRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const DeleteProductRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `CreateProductSet` method.
+ *
+ * @property {string} parent
+ * The project in which the ProductSet should be created.
+ *
+ * Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ *
+ * @property {Object} productSet
+ * The ProductSet to create.
+ *
+ * This object should have the same structure as [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}
+ *
+ * @property {string} productSetId
+ * A user-supplied resource id for this ProductSet. If set, the server will
+ * attempt to use this value as the resource id. If it is already in use, an
+ * error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ * long. It cannot contain the character `/`.
+ *
+ * @typedef CreateProductSetRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.CreateProductSetRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const CreateProductSetRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `ListProductSets` method.
+ *
+ * @property {string} parent
+ * The project from which ProductSets should be listed.
+ *
+ * Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ *
+ * @property {number} pageSize
+ * The maximum number of items to return. Default 10, maximum 100.
+ *
+ * @property {string} pageToken
+ * The next_page_token returned from a previous List request, if any.
+ *
+ * @typedef ListProductSetsRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListProductSetsRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListProductSetsRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response message for the `ListProductSets` method.
+ *
+ * @property {Object[]} productSets
+ * List of ProductSets.
+ *
+ * This object should have the same structure as [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}
+ *
+ * @property {string} nextPageToken
+ * Token to retrieve the next page of results, or empty if there are no more
+ * results in the list.
+ *
+ * @typedef ListProductSetsResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListProductSetsResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListProductSetsResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `GetProductSet` method.
+ *
+ * @property {string} name
+ * Resource name of the ProductSet to get.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ *
+ * @typedef GetProductSetRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.GetProductSetRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const GetProductSetRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `UpdateProductSet` method.
+ *
+ * @property {Object} productSet
+ * The ProductSet resource which replaces the one on the server.
+ *
+ * This object should have the same structure as [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}
+ *
+ * @property {Object} updateMask
+ * The FieldMask that specifies which fields to
+ * update.
+ * If update_mask isn't specified, all mutable fields are to be updated.
+ * Valid mask path is `display_name`.
+ *
+ * This object should have the same structure as [FieldMask]{@link google.protobuf.FieldMask}
+ *
+ * @typedef UpdateProductSetRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.UpdateProductSetRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const UpdateProductSetRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `DeleteProductSet` method.
+ *
+ * @property {string} name
+ * Resource name of the ProductSet to delete.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ *
+ * @typedef DeleteProductSetRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.DeleteProductSetRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const DeleteProductSetRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `CreateReferenceImage` method.
+ *
+ * @property {string} parent
+ * Resource name of the product in which to create the reference image.
+ *
+ * Format is
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ *
+ * @property {Object} referenceImage
+ * The reference image to create.
+ * If an image ID is specified, it is ignored.
+ *
+ * This object should have the same structure as [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}
+ *
+ * @property {string} referenceImageId
+ * A user-supplied resource id for the ReferenceImage to be added. If set,
+ * the server will attempt to use this value as the resource id. If it is
+ * already in use, an error is returned with code ALREADY_EXISTS. Must be at
+ * most 128 characters long. It cannot contain the character `/`.
+ *
+ * @typedef CreateReferenceImageRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.CreateReferenceImageRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const CreateReferenceImageRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `ListReferenceImages` method.
+ *
+ * @property {string} parent
+ * Resource name of the product containing the reference images.
+ *
+ * Format is
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ *
+ * @property {number} pageSize
+ * The maximum number of items to return. Default 10, maximum 100.
+ *
+ * @property {string} pageToken
+ * A token identifying a page of results to be returned. This is the value
+ * of `nextPageToken` returned in a previous reference image list request.
+ *
+ * Defaults to the first page if not specified.
+ *
+ * @typedef ListReferenceImagesRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListReferenceImagesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListReferenceImagesRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response message for the `ListReferenceImages` method.
+ *
+ * @property {Object[]} referenceImages
+ * The list of reference images.
+ *
+ * This object should have the same structure as [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}
+ *
+ * @property {number} pageSize
+ * The maximum number of items to return. Default 10, maximum 100.
+ *
+ * @property {string} nextPageToken
+ * The next_page_token returned from a previous List request, if any.
+ *
+ * @typedef ListReferenceImagesResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListReferenceImagesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListReferenceImagesResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `GetReferenceImage` method.
+ *
+ * @property {string} name
+ * The resource name of the ReferenceImage to get.
+ *
+ * Format is:
+ *
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ *
+ * @typedef GetReferenceImageRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.GetReferenceImageRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const GetReferenceImageRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `DeleteReferenceImage` method.
+ *
+ * @property {string} name
+ * The resource name of the reference image to delete.
+ *
+ * Format is:
+ *
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`
+ *
+ * @typedef DeleteReferenceImageRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.DeleteReferenceImageRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const DeleteReferenceImageRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `AddProductToProductSet` method.
+ *
+ * @property {string} name
+ * The resource name for the ProductSet to modify.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ *
+ * @property {string} product
+ * The resource name for the Product to be added to this ProductSet.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ *
+ * @typedef AddProductToProductSetRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.AddProductToProductSetRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const AddProductToProductSetRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `RemoveProductFromProductSet` method.
+ *
+ * @property {string} name
+ * The resource name for the ProductSet to modify.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ *
+ * @property {string} product
+ * The resource name for the Product to be removed from this ProductSet.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ *
+ * @typedef RemoveProductFromProductSetRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.RemoveProductFromProductSetRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const RemoveProductFromProductSetRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `ListProductsInProductSet` method.
+ *
+ * @property {string} name
+ * The ProductSet resource for which to retrieve Products.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ *
+ * @property {number} pageSize
+ * The maximum number of items to return. Default 10, maximum 100.
+ *
+ * @property {string} pageToken
+ * The next_page_token returned from a previous List request, if any.
+ *
+ * @typedef ListProductsInProductSetRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListProductsInProductSetRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListProductsInProductSetRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response message for the `ListProductsInProductSet` method.
+ *
+ * @property {Object[]} products
+ * The list of Products.
+ *
+ * This object should have the same structure as [Product]{@link google.cloud.vision.v1p4beta1.Product}
+ *
+ * @property {string} nextPageToken
+ * Token to retrieve the next page of results, or empty if there are no more
+ * results in the list.
+ *
+ * @typedef ListProductsInProductSetResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ListProductsInProductSetResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ListProductsInProductSetResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The Google Cloud Storage location for a csv file which preserves a list of
+ * ImportProductSetRequests in each line.
+ *
+ * @property {string} csvFileUri
+ * The Google Cloud Storage URI of the input csv file.
+ *
+ * The URI must start with `gs://`.
+ *
+ * The format of the input csv file should be one image per line.
+ * In each line, there are 8 columns.
+ *
+ * 1. image-uri
+ * 2. image-id
+ * 3. product-set-id
+ * 4. product-id
+ * 5. product-category
+ * 6. product-display-name
+ * 7. labels
+ * 8. bounding-poly
+ *
+ * The `image-uri`, `product-set-id`, `product-id`, and `product-category`
+ * columns are required. All other columns are optional.
+ *
+ * If the `ProductSet` or `Product` specified by the `product-set-id` and
+ * `product-id` values does not exist, then the system will create a new
+ * `ProductSet` or `Product` for the image. In this case, the
+ * `product-display-name` column refers to
+ * display_name, the
+ * `product-category` column refers to
+ * product_category, and the
+ * `labels` column refers to product_labels.
+ *
+ * The `image-id` column is optional but must be unique if provided. If it is
+ * empty, the system will automatically assign a unique id to the image.
+ *
+ * The `product-display-name` column is optional. If it is empty, the system
+ * sets the display_name field for the product to a
+ * space (" "). You can update the `display_name` later by using the API.
+ *
+ * If a `Product` with the specified `product-id` already exists, then the
+ * system ignores the `product-display-name`, `product-category`, and `labels`
+ * columns.
+ *
+ * The `labels` column (optional) is a line containing a list of
+ * comma-separated key-value pairs, in the following format:
+ *
+ * "key_1=value_1,key_2=value_2,...,key_n=value_n"
+ *
+ * The `bounding-poly` column (optional) identifies one region of
+ * interest from the image in the same manner as `CreateReferenceImage`. If
+ * you do not specify the `bounding-poly` column, then the system will try to
+ * detect regions of interest automatically.
+ *
+ * At most one `bounding-poly` column is allowed per line. If the image
+ * contains multiple regions of interest, add a line to the CSV file that
+ * includes the same product information, and the `bounding-poly` values for
+ * each region of interest.
+ *
+ * The `bounding-poly` column must contain an even number of comma-separated
+ * numbers, in the format "p1_x,p1_y,p2_x,p2_y,...,pn_x,pn_y". Use
+ * non-negative integers for absolute bounding polygons, and float values
+ * in [0, 1] for normalized bounding polygons.
+ *
+ * The system will resize the image if the image resolution is too
+ * large to process (larger than 20MP).
+ *
+ * @typedef ImportProductSetsGcsSource
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImportProductSetsGcsSource definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ImportProductSetsGcsSource = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The input content for the `ImportProductSets` method.
+ *
+ * @property {Object} gcsSource
+ * The Google Cloud Storage location for a csv file which preserves a list
+ * of ImportProductSetRequests in each line.
+ *
+ * This object should have the same structure as [ImportProductSetsGcsSource]{@link google.cloud.vision.v1p4beta1.ImportProductSetsGcsSource}
+ *
+ * @typedef ImportProductSetsInputConfig
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImportProductSetsInputConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ImportProductSetsInputConfig = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Request message for the `ImportProductSets` method.
+ *
+ * @property {string} parent
+ * The project in which the ProductSets should be imported.
+ *
+ * Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ *
+ * @property {Object} inputConfig
+ * The input content for the list of requests.
+ *
+ * This object should have the same structure as [ImportProductSetsInputConfig]{@link google.cloud.vision.v1p4beta1.ImportProductSetsInputConfig}
+ *
+ * @typedef ImportProductSetsRequest
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImportProductSetsRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ImportProductSetsRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Response message for the `ImportProductSets` method.
+ *
+ * This message is returned by the
+ * google.longrunning.Operations.GetOperation method in the returned
+ * google.longrunning.Operation.response field.
+ *
+ * @property {Object[]} referenceImages
+ * The list of reference_images that are imported successfully.
+ *
+ * This object should have the same structure as [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}
+ *
+ * @property {Object[]} statuses
+ * The rpc status for each ImportProductSet request, including both successes
+ * and errors.
+ *
+ * The number of statuses here matches the number of lines in the csv file,
+ * and statuses[i] stores the success or failure status of processing the i-th
+ * line of the csv, starting from line 0.
+ *
+ * This object should have the same structure as [Status]{@link google.rpc.Status}
+ *
+ * @typedef ImportProductSetsResponse
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.ImportProductSetsResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const ImportProductSetsResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Metadata for the batch operations such as the current state.
+ *
+ * This is included in the `metadata` field of the `Operation` returned by the
+ * `GetOperation` call of the `google::longrunning::Operations` service.
+ *
+ * @property {number} state
+ * The current state of the batch operation.
+ *
+ * The number should be among the values of [State]{@link google.cloud.vision.v1p4beta1.State}
+ *
+ * @property {Object} submitTime
+ * The time when the batch request was submitted to the server.
+ *
+ * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @property {Object} endTime
+ * The time when the batch request is finished and
+ * google.longrunning.Operation.done is set to true.
+ *
+ * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @typedef BatchOperationMetadata
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.BatchOperationMetadata definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/product_search_service.proto}
+ */
+const BatchOperationMetadata = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Enumerates the possible states that the batch request can be in.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p4beta1
+ */
+ State: {
+
+ /**
+ * Invalid.
+ */
+ STATE_UNSPECIFIED: 0,
+
+ /**
+ * Request is actively being processed.
+ */
+ PROCESSING: 1,
+
+ /**
+ * The request is done and at least one item has been successfully
+ * processed.
+ */
+ SUCCESSFUL: 2,
+
+ /**
+ * The request is done and no item has been successfully processed.
+ */
+ FAILED: 3,
+
+ /**
+ * The request is done after the longrunning.Operations.CancelOperation has
+ * been called by the user. Any records that were processed before the
+ * cancel command are output as specified in the request.
+ */
+ CANCELLED: 4
+ }
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_text_annotation.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_text_annotation.js
new file mode 100644
index 00000000000..3024b97f63f
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_text_annotation.js
@@ -0,0 +1,392 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * TextAnnotation contains a structured representation of OCR extracted text.
+ * The hierarchy of an OCR extracted text structure is like this:
+ * TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+ * Each structural component, starting from Page, may further have their own
+ * properties. Properties describe detected languages, breaks, etc. Please refer
+ * to the TextAnnotation.TextProperty message definition below for more
+ * detail.
+ *
+ * @property {Object[]} pages
+ * List of pages detected by OCR.
+ *
+ * This object should have the same structure as [Page]{@link google.cloud.vision.v1p4beta1.Page}
+ *
+ * @property {string} text
+ * UTF-8 text detected on the pages.
+ *
+ * @typedef TextAnnotation
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+const TextAnnotation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Detected language for a structural component.
+ *
+ * @property {string} languageCode
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ * information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ *
+ * @property {number} confidence
+ * Confidence of detected language. Range [0, 1].
+ *
+ * @typedef DetectedLanguage
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.TextAnnotation.DetectedLanguage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+ DetectedLanguage: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ },
+
+ /**
+ * Detected start or end of a structural component.
+ *
+ * @property {number} type
+ * Detected break type.
+ *
+ * The number should be among the values of [BreakType]{@link google.cloud.vision.v1p4beta1.BreakType}
+ *
+ * @property {boolean} isPrefix
+ * True if break prepends the element.
+ *
+ * @typedef DetectedBreak
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.TextAnnotation.DetectedBreak definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+ DetectedBreak: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Enum to denote the type of break found. New line, space etc.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p4beta1
+ */
+ BreakType: {
+
+ /**
+ * Unknown break label type.
+ */
+ UNKNOWN: 0,
+
+ /**
+ * Regular space.
+ */
+ SPACE: 1,
+
+ /**
+ * Sure space (very wide).
+ */
+ SURE_SPACE: 2,
+
+ /**
+ * Line-wrapping break.
+ */
+ EOL_SURE_SPACE: 3,
+
+ /**
+ * End-line hyphen that is not present in text; does not co-occur with
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ */
+ HYPHEN: 4,
+
+ /**
+ * Line break that ends a paragraph.
+ */
+ LINE_BREAK: 5
+ }
+ },
+
+ /**
+ * Additional information detected on the structural component.
+ *
+ * @property {Object[]} detectedLanguages
+ * A list of detected languages together with confidence.
+ *
+ * This object should have the same structure as [DetectedLanguage]{@link google.cloud.vision.v1p4beta1.DetectedLanguage}
+ *
+ * @property {Object} detectedBreak
+ * Detected start or end of a text segment.
+ *
+ * This object should have the same structure as [DetectedBreak]{@link google.cloud.vision.v1p4beta1.DetectedBreak}
+ *
+ * @typedef TextProperty
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.TextAnnotation.TextProperty definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+ TextProperty: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ }
+};
+
+/**
+ * Detected page from OCR.
+ *
+ * @property {Object} property
+ * Additional information detected on the page.
+ *
+ * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p4beta1.TextProperty}
+ *
+ * @property {number} width
+ * Page width. For PDFs the unit is points. For images (including
+ * TIFFs) the unit is pixels.
+ *
+ * @property {number} height
+ * Page height. For PDFs the unit is points. For images (including
+ * TIFFs) the unit is pixels.
+ *
+ * @property {Object[]} blocks
+ * List of blocks of text, images etc on this page.
+ *
+ * This object should have the same structure as [Block]{@link google.cloud.vision.v1p4beta1.Block}
+ *
+ * @property {number} confidence
+ * Confidence of the OCR results on the page. Range [0, 1].
+ *
+ * @typedef Page
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Page definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+const Page = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Logical element on the page.
+ *
+ * @property {Object} property
+ * Additional information detected for the block.
+ *
+ * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p4beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ * The bounding box for the block.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ *
+ * * when the text is horizontal it might look like:
+ *
+ * 0----1
+ * | |
+ * 3----2
+ *
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ *
+ * 2----3
+ * | |
+ * 1----0
+ *
+ * and the vertex order will still be (0, 1, 2, 3).
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {Object[]} paragraphs
+ * List of paragraphs in this block (if this block is of type text).
+ *
+ * This object should have the same structure as [Paragraph]{@link google.cloud.vision.v1p4beta1.Paragraph}
+ *
+ * @property {number} blockType
+ * Detected block type (text, image etc) for this block.
+ *
+ * The number should be among the values of [BlockType]{@link google.cloud.vision.v1p4beta1.BlockType}
+ *
+ * @property {number} confidence
+ * Confidence of the OCR results on the block. Range [0, 1].
+ *
+ * @typedef Block
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Block definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+const Block = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Type of a block (text, image etc) as identified by OCR.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p4beta1
+ */
+ BlockType: {
+
+ /**
+ * Unknown block type.
+ */
+ UNKNOWN: 0,
+
+ /**
+ * Regular text block.
+ */
+ TEXT: 1,
+
+ /**
+ * Table block.
+ */
+ TABLE: 2,
+
+ /**
+ * Image block.
+ */
+ PICTURE: 3,
+
+ /**
+ * Horizontal/vertical line box.
+ */
+ RULER: 4,
+
+ /**
+ * Barcode block.
+ */
+ BARCODE: 5
+ }
+};
+
+/**
+ * Structural unit of text representing a number of words in certain order.
+ *
+ * @property {Object} property
+ * Additional information detected for the paragraph.
+ *
+ * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p4beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ * The bounding box for the paragraph.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ * * when the text is horizontal it might look like:
+ * 0----1
+ * | |
+ * 3----2
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ * 2----3
+ * | |
+ * 1----0
+ * and the vertex order will still be (0, 1, 2, 3).
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {Object[]} words
+ * List of words in this paragraph.
+ *
+ * This object should have the same structure as [Word]{@link google.cloud.vision.v1p4beta1.Word}
+ *
+ * @property {number} confidence
+ * Confidence of the OCR results for the paragraph. Range [0, 1].
+ *
+ * @typedef Paragraph
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Paragraph definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+const Paragraph = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A word representation.
+ *
+ * @property {Object} property
+ * Additional information detected for the word.
+ *
+ * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p4beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ * The bounding box for the word.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ * * when the text is horizontal it might look like:
+ * 0----1
+ * | |
+ * 3----2
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ * 2----3
+ * | |
+ * 1----0
+ * and the vertex order will still be (0, 1, 2, 3).
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {Object[]} symbols
+ * List of symbols in the word.
+ * The order of the symbols follows the natural reading order.
+ *
+ * This object should have the same structure as [Symbol]{@link google.cloud.vision.v1p4beta1.Symbol}
+ *
+ * @property {number} confidence
+ * Confidence of the OCR results for the word. Range [0, 1].
+ *
+ * @typedef Word
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Word definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+const Word = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A single symbol representation.
+ *
+ * @property {Object} property
+ * Additional information detected for the symbol.
+ *
+ * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p4beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ * The bounding box for the symbol.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ * * when the text is horizontal it might look like:
+ * 0----1
+ * | |
+ * 3----2
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ * 2----3
+ * | |
+ * 1----0
+ * and the vertex order will still be (0, 1, 2, 3).
+ *
+ * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p4beta1.BoundingPoly}
+ *
+ * @property {string} text
+ * The actual UTF-8 representation of the symbol.
+ *
+ * @property {number} confidence
+ * Confidence of the OCR results for the symbol. Range [0, 1].
+ *
+ * @typedef Symbol
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.Symbol definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/text_annotation.proto}
+ */
+const Symbol = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_web_detection.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_web_detection.js
new file mode 100644
index 00000000000..4678c875fe6
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/cloud/vision/v1p4beta1/doc_web_detection.js
@@ -0,0 +1,152 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * Relevant information for the image from the Internet.
+ *
+ * @property {Object[]} webEntities
+ * Deduced entities from similar images on the Internet.
+ *
+ * This object should have the same structure as [WebEntity]{@link google.cloud.vision.v1p4beta1.WebEntity}
+ *
+ * @property {Object[]} fullMatchingImages
+ * Fully matching images from the Internet.
+ * Can include resized copies of the query image.
+ *
+ * This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p4beta1.WebImage}
+ *
+ * @property {Object[]} partialMatchingImages
+ * Partial matching images from the Internet.
+ * Those images are similar enough to share some key-point features. For
+ * example, an original image will likely have partial matching for its crops.
+ *
+ * This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p4beta1.WebImage}
+ *
+ * @property {Object[]} pagesWithMatchingImages
+ * Web pages containing the matching images from the Internet.
+ *
+ * This object should have the same structure as [WebPage]{@link google.cloud.vision.v1p4beta1.WebPage}
+ *
+ * @property {Object[]} visuallySimilarImages
+ * The visually similar image results.
+ *
+ * This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p4beta1.WebImage}
+ *
+ * @property {Object[]} bestGuessLabels
+ * The service's best guess as to the topic of the request image.
+ * Inferred from similar images on the open web.
+ *
+ * This object should have the same structure as [WebLabel]{@link google.cloud.vision.v1p4beta1.WebLabel}
+ *
+ * @typedef WebDetection
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.WebDetection definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/web_detection.proto}
+ */
+const WebDetection = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Entity deduced from similar images on the Internet.
+ *
+ * @property {string} entityId
+ * Opaque entity ID.
+ *
+ * @property {number} score
+ * Overall relevancy score for the entity.
+ * Not normalized and not comparable across different image queries.
+ *
+ * @property {string} description
+ * Canonical description of the entity, in English.
+ *
+ * @typedef WebEntity
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.WebDetection.WebEntity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/web_detection.proto}
+ */
+ WebEntity: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ },
+
+ /**
+ * Metadata for online images.
+ *
+ * @property {string} url
+ * The result image URL.
+ *
+ * @property {number} score
+ * (Deprecated) Overall relevancy score for the image.
+ *
+ * @typedef WebImage
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.WebDetection.WebImage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/web_detection.proto}
+ */
+ WebImage: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ },
+
+ /**
+ * Label to provide extra metadata for the web detection.
+ *
+ * @property {string} label
+ * Label for extra metadata.
+ *
+ * @property {string} languageCode
+ * The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ * For more information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ *
+ * @typedef WebLabel
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.WebDetection.WebLabel definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/web_detection.proto}
+ */
+ WebLabel: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ },
+
+ /**
+ * Metadata for web pages.
+ *
+ * @property {string} url
+ * The result web page URL.
+ *
+ * @property {number} score
+ * (Deprecated) Overall relevancy score for the web page.
+ *
+ * @property {string} pageTitle
+ * Title for the web page, may contain HTML markups.
+ *
+ * @property {Object[]} fullMatchingImages
+ * Fully matching images on the page.
+ * Can include resized copies of the query image.
+ *
+ * This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p4beta1.WebImage}
+ *
+ * @property {Object[]} partialMatchingImages
+ * Partial matching images on the page.
+ * Those images are similar enough to share some key-point features. For
+ * example, an original image will likely have partial matching for its
+ * crops.
+ *
+ * This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p4beta1.WebImage}
+ *
+ * @typedef WebPage
+ * @memberof google.cloud.vision.v1p4beta1
+ * @see [google.cloud.vision.v1p4beta1.WebDetection.WebPage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p4beta1/web_detection.proto}
+ */
+ WebPage: {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+ }
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/longrunning/doc_operations.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/longrunning/doc_operations.js
new file mode 100644
index 00000000000..bd03cc3da0e
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/longrunning/doc_operations.js
@@ -0,0 +1,63 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * This resource represents a long-running operation that is the result of a
+ * network API call.
+ *
+ * @property {string} name
+ * The server-assigned name, which is only unique within the same service that
+ * originally returns it. If you use the default HTTP mapping, the
+ * `name` should have the format of `operations/some/unique/name`.
+ *
+ * @property {Object} metadata
+ * Service-specific metadata associated with the operation. It typically
+ * contains progress information and common metadata such as create time.
+ * Some services might not provide such metadata. Any method that returns a
+ * long-running operation should document the metadata type, if any.
+ *
+ * This object should have the same structure as [Any]{@link google.protobuf.Any}
+ *
+ * @property {boolean} done
+ * If the value is `false`, it means the operation is still in progress.
+ * If true, the operation is completed, and either `error` or `response` is
+ * available.
+ *
+ * @property {Object} error
+ * The error result of the operation in case of failure or cancellation.
+ *
+ * This object should have the same structure as [Status]{@link google.rpc.Status}
+ *
+ * @property {Object} response
+ * The normal response of the operation in case of success. If the original
+ * method returns no data on success, such as `Delete`, the response is
+ * `google.protobuf.Empty`. If the original method is standard
+ * `Get`/`Create`/`Update`, the response should be the resource. For other
+ * methods, the response should have the type `XxxResponse`, where `Xxx`
+ * is the original method name. For example, if the original method name
+ * is `TakeSnapshot()`, the inferred response type is
+ * `TakeSnapshotResponse`.
+ *
+ * This object should have the same structure as [Any]{@link google.protobuf.Any}
+ *
+ * @typedef Operation
+ * @memberof google.longrunning
+ * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
+ */
+const Operation = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_any.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_any.js
new file mode 100644
index 00000000000..f3278b34e66
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_any.js
@@ -0,0 +1,136 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * `Any` contains an arbitrary serialized protocol buffer message along with a
+ * URL that describes the type of the serialized message.
+ *
+ * Protobuf library provides support to pack/unpack Any values in the form
+ * of utility functions or additional generated methods of the Any type.
+ *
+ * Example 1: Pack and unpack a message in C++.
+ *
+ * Foo foo = ...;
+ * Any any;
+ * any.PackFrom(foo);
+ * ...
+ * if (any.UnpackTo(&foo)) {
+ * ...
+ * }
+ *
+ * Example 2: Pack and unpack a message in Java.
+ *
+ * Foo foo = ...;
+ * Any any = Any.pack(foo);
+ * ...
+ * if (any.is(Foo.class)) {
+ * foo = any.unpack(Foo.class);
+ * }
+ *
+ * Example 3: Pack and unpack a message in Python.
+ *
+ * foo = Foo(...)
+ * any = Any()
+ * any.Pack(foo)
+ * ...
+ * if any.Is(Foo.DESCRIPTOR):
+ * any.Unpack(foo)
+ * ...
+ *
+ * Example 4: Pack and unpack a message in Go
+ *
+ * foo := &pb.Foo{...}
+ * any, err := ptypes.MarshalAny(foo)
+ * ...
+ * foo := &pb.Foo{}
+ * if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ * ...
+ * }
+ *
+ * The pack methods provided by protobuf library will by default use
+ * 'type.googleapis.com/full.type.name' as the type URL and the unpack
+ * methods only use the fully qualified type name after the last '/'
+ * in the type URL, for example "foo.bar.com/x/y.z" will yield type
+ * name "y.z".
+ *
+ *
+ * # JSON
+ *
+ * The JSON representation of an `Any` value uses the regular
+ * representation of the deserialized, embedded message, with an
+ * additional field `@type` which contains the type URL. Example:
+ *
+ * package google.profile;
+ * message Person {
+ * string first_name = 1;
+ * string last_name = 2;
+ * }
+ *
+ * {
+ * "@type": "type.googleapis.com/google.profile.Person",
+ *       "firstName": <string>,
+ *       "lastName": <string>
+ * }
+ *
+ * If the embedded message type is well-known and has a custom JSON
+ * representation, that representation will be embedded adding a field
+ * `value` which holds the custom JSON in addition to the `@type`
+ * field. Example (for message google.protobuf.Duration):
+ *
+ * {
+ * "@type": "type.googleapis.com/google.protobuf.Duration",
+ * "value": "1.212s"
+ * }
+ *
+ * @property {string} typeUrl
+ * A URL/resource name that uniquely identifies the type of the serialized
+ * protocol buffer message. The last segment of the URL's path must represent
+ * the fully qualified name of the type (as in
+ * `path/google.protobuf.Duration`). The name should be in a canonical form
+ * (e.g., leading "." is not accepted).
+ *
+ * In practice, teams usually precompile into the binary all types that they
+ * expect it to use in the context of Any. However, for URLs which use the
+ * scheme `http`, `https`, or no scheme, one can optionally set up a type
+ * server that maps type URLs to message definitions as follows:
+ *
+ * * If no scheme is provided, `https` is assumed.
+ * * An HTTP GET on the URL must yield a google.protobuf.Type
+ * value in binary format, or produce an error.
+ * * Applications are allowed to cache lookup results based on the
+ * URL, or have them precompiled into a binary to avoid any
+ * lookup. Therefore, binary compatibility needs to be preserved
+ * on changes to types. (Use versioned type names to manage
+ * breaking changes.)
+ *
+ * Note: this functionality is not currently available in the official
+ * protobuf release, and it is not used for type URLs beginning with
+ * type.googleapis.com.
+ *
+ * Schemes other than `http`, `https` (or the empty scheme) might be
+ * used with implementation specific semantics.
+ *
+ * @property {string} value
+ * Must be a valid serialized protocol buffer of the above specified type.
+ *
+ * @typedef Any
+ * @memberof google.protobuf
+ * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
+ */
+const Any = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_empty.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_empty.js
new file mode 100644
index 00000000000..0b446dd9ce4
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_empty.js
@@ -0,0 +1,34 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * A generic empty message that you can re-use to avoid defining duplicated
+ * empty messages in your APIs. A typical example is to use it as the request
+ * or the response type of an API method. For instance:
+ *
+ * service Foo {
+ * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ * }
+ *
+ * The JSON representation for `Empty` is empty JSON object `{}`.
+ * @typedef Empty
+ * @memberof google.protobuf
+ * @see [google.protobuf.Empty definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/empty.proto}
+ */
+const Empty = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_field_mask.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_field_mask.js
new file mode 100644
index 00000000000..d55d97e6e38
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_field_mask.js
@@ -0,0 +1,236 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * `FieldMask` represents a set of symbolic field paths, for example:
+ *
+ * paths: "f.a"
+ * paths: "f.b.d"
+ *
+ * Here `f` represents a field in some root message, `a` and `b`
+ * fields in the message found in `f`, and `d` a field found in the
+ * message in `f.b`.
+ *
+ * Field masks are used to specify a subset of fields that should be
+ * returned by a get operation or modified by an update operation.
+ * Field masks also have a custom JSON encoding (see below).
+ *
+ * # Field Masks in Projections
+ *
+ * When used in the context of a projection, a response message or
+ * sub-message is filtered by the API to only contain those fields as
+ * specified in the mask. For example, if the mask in the previous
+ * example is applied to a response message as follows:
+ *
+ * f {
+ * a : 22
+ * b {
+ * d : 1
+ * x : 2
+ * }
+ * y : 13
+ * }
+ * z: 8
+ *
+ * The result will not contain specific values for fields x,y and z
+ * (their value will be set to the default, and omitted in proto text
+ * output):
+ *
+ *
+ * f {
+ * a : 22
+ * b {
+ * d : 1
+ * }
+ * }
+ *
+ * A repeated field is not allowed except at the last position of a
+ * paths string.
+ *
+ * If a FieldMask object is not present in a get operation, the
+ * operation applies to all fields (as if a FieldMask of all fields
+ * had been specified).
+ *
+ * Note that a field mask does not necessarily apply to the
+ * top-level response message. In case of a REST get operation, the
+ * field mask applies directly to the response, but in case of a REST
+ * list operation, the mask instead applies to each individual message
+ * in the returned resource list. In case of a REST custom method,
+ * other definitions may be used. Where the mask applies will be
+ * clearly documented together with its declaration in the API. In
+ * any case, the effect on the returned resource/resources is required
+ * behavior for APIs.
+ *
+ * # Field Masks in Update Operations
+ *
+ * A field mask in update operations specifies which fields of the
+ * targeted resource are going to be updated. The API is required
+ * to only change the values of the fields as specified in the mask
+ * and leave the others untouched. If a resource is passed in to
+ * describe the updated values, the API ignores the values of all
+ * fields not covered by the mask.
+ *
+ * If a repeated field is specified for an update operation, the existing
+ * repeated values in the target resource will be overwritten by the new values.
+ * Note that a repeated field is only allowed in the last position of a `paths`
+ * string.
+ *
+ * If a sub-message is specified in the last position of the field mask for an
+ * update operation, then the existing sub-message in the target resource is
+ * overwritten. Given the target message:
+ *
+ * f {
+ * b {
+ * d : 1
+ * x : 2
+ * }
+ * c : 1
+ * }
+ *
+ * And an update message:
+ *
+ * f {
+ * b {
+ * d : 10
+ * }
+ * }
+ *
+ * then if the field mask is:
+ *
+ * paths: "f.b"
+ *
+ * then the result will be:
+ *
+ * f {
+ * b {
+ * d : 10
+ * }
+ * c : 1
+ * }
+ *
+ * However, if the update mask was:
+ *
+ * paths: "f.b.d"
+ *
+ * then the result would be:
+ *
+ * f {
+ * b {
+ * d : 10
+ * x : 2
+ * }
+ * c : 1
+ * }
+ *
+ * In order to reset a field's value to the default, the field must
+ * be in the mask and set to the default value in the provided resource.
+ * Hence, in order to reset all fields of a resource, provide a default
+ * instance of the resource and set all fields in the mask, or do
+ * not provide a mask as described below.
+ *
+ * If a field mask is not present on update, the operation applies to
+ * all fields (as if a field mask of all fields has been specified).
+ * Note that in the presence of schema evolution, this may mean that
+ * fields the client does not know and has therefore not filled into
+ * the request will be reset to their default. If this is unwanted
+ * behavior, a specific service may require a client to always specify
+ * a field mask, producing an error if not.
+ *
+ * As with get operations, the location of the resource which
+ * describes the updated values in the request message depends on the
+ * operation kind. In any case, the effect of the field mask is
+ * required to be honored by the API.
+ *
+ * ## Considerations for HTTP REST
+ *
+ * The HTTP kind of an update operation which uses a field mask must
+ * be set to PATCH instead of PUT in order to satisfy HTTP semantics
+ * (PUT must only be used for full updates).
+ *
+ * # JSON Encoding of Field Masks
+ *
+ * In JSON, a field mask is encoded as a single string where paths are
+ * separated by a comma. Field names in each path are converted
+ * to/from lower-camel naming conventions.
+ *
+ * As an example, consider the following message declarations:
+ *
+ * message Profile {
+ * User user = 1;
+ * Photo photo = 2;
+ * }
+ * message User {
+ * string display_name = 1;
+ * string address = 2;
+ * }
+ *
+ * In proto a field mask for `Profile` may look as such:
+ *
+ * mask {
+ * paths: "user.display_name"
+ * paths: "photo"
+ * }
+ *
+ * In JSON, the same mask is represented as below:
+ *
+ * {
+ * mask: "user.displayName,photo"
+ * }
+ *
+ * # Field Masks and Oneof Fields
+ *
+ * Field masks treat fields in oneofs just as regular fields. Consider the
+ * following message:
+ *
+ * message SampleMessage {
+ * oneof test_oneof {
+ * string name = 4;
+ * SubMessage sub_message = 9;
+ * }
+ * }
+ *
+ * The field mask can be:
+ *
+ * mask {
+ * paths: "name"
+ * }
+ *
+ * Or:
+ *
+ * mask {
+ * paths: "sub_message"
+ * }
+ *
+ * Note that oneof type names ("test_oneof" in this case) cannot be used in
+ * paths.
+ *
+ * ## Field Mask Verification
+ *
+ * The implementation of any API method which has a FieldMask type field in the
+ * request should verify the included field paths, and return an
+ * `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
+ *
+ * @property {string[]} paths
+ * The set of field mask paths.
+ *
+ * @typedef FieldMask
+ * @memberof google.protobuf
+ * @see [google.protobuf.FieldMask definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/field_mask.proto}
+ */
+const FieldMask = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_timestamp.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_timestamp.js
new file mode 100644
index 00000000000..b47f41c2b30
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_timestamp.js
@@ -0,0 +1,113 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * A Timestamp represents a point in time independent of any time zone
+ * or calendar, represented as seconds and fractions of seconds at
+ * nanosecond resolution in UTC Epoch time. It is encoded using the
+ * Proleptic Gregorian Calendar which extends the Gregorian calendar
+ * backwards to year one. It is encoded assuming all minutes are 60
+ * seconds long, i.e. leap seconds are "smeared" so that no leap second
+ * table is needed for interpretation. Range is from
+ * 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+ * By restricting to that range, we ensure that we can convert to
+ * and from RFC 3339 date strings.
+ * See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+ *
+ * # Examples
+ *
+ * Example 1: Compute Timestamp from POSIX `time()`.
+ *
+ * Timestamp timestamp;
+ * timestamp.set_seconds(time(NULL));
+ * timestamp.set_nanos(0);
+ *
+ * Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+ *
+ * struct timeval tv;
+ * gettimeofday(&tv, NULL);
+ *
+ * Timestamp timestamp;
+ * timestamp.set_seconds(tv.tv_sec);
+ * timestamp.set_nanos(tv.tv_usec * 1000);
+ *
+ * Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+ *
+ * FILETIME ft;
+ * GetSystemTimeAsFileTime(&ft);
+ * UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+ *
+ * // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+ * // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+ * Timestamp timestamp;
+ * timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+ * timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+ *
+ * Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+ *
+ * long millis = System.currentTimeMillis();
+ *
+ * Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+ * .setNanos((int) ((millis % 1000) * 1000000)).build();
+ *
+ *
+ * Example 5: Compute Timestamp from current time in Python.
+ *
+ * timestamp = Timestamp()
+ * timestamp.GetCurrentTime()
+ *
+ * # JSON Mapping
+ *
+ * In JSON format, the Timestamp type is encoded as a string in the
+ * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+ * format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+ * where {year} is always expressed using four digits while {month}, {day},
+ * {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+ * seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+ * are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+ * is required. A proto3 JSON serializer should always use UTC (as indicated by
+ * "Z") when printing the Timestamp type and a proto3 JSON parser should be
+ * able to accept both UTC and other timezones (as indicated by an offset).
+ *
+ * For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+ * 01:30 UTC on January 15, 2017.
+ *
+ * In JavaScript, one can convert a Date object to this format using the
+ * standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+ * method. In Python, a standard `datetime.datetime` object can be converted
+ * to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+ * with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+ * can use the Joda Time's [`ISODateTimeFormat.dateTime()`](https://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) to obtain a formatter capable of generating timestamps in this format.
+ *
+ * @property {number} seconds
+ * Represents seconds of UTC time since Unix epoch
+ * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ * 9999-12-31T23:59:59Z inclusive.
+ *
+ * @property {number} nanos
+ * Non-negative fractions of a second at nanosecond resolution. Negative
+ * second values with fractions must still have non-negative nanos values
+ * that count forward in time. Must be from 0 to 999,999,999
+ * inclusive.
+ *
+ * @typedef Timestamp
+ * @memberof google.protobuf
+ * @see [google.protobuf.Timestamp definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/timestamp.proto}
+ */
+const Timestamp = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_wrappers.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_wrappers.js
new file mode 100644
index 00000000000..71de58e0d6e
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/protobuf/doc_wrappers.js
@@ -0,0 +1,32 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * Wrapper message for `float`.
+ *
+ * The JSON representation for `FloatValue` is JSON number.
+ *
+ * @property {number} value
+ * The float value.
+ *
+ * @typedef FloatValue
+ * @memberof google.protobuf
+ * @see [google.protobuf.FloatValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto}
+ */
+const FloatValue = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/rpc/doc_status.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/rpc/doc_status.js
new file mode 100644
index 00000000000..fc4b5be93f0
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/rpc/doc_status.js
@@ -0,0 +1,92 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * The `Status` type defines a logical error model that is suitable for different
+ * programming environments, including REST APIs and RPC APIs. It is used by
+ * [gRPC](https://github.com/grpc). The error model is designed to be:
+ *
+ * - Simple to use and understand for most users
+ * - Flexible enough to meet unexpected needs
+ *
+ * # Overview
+ *
+ * The `Status` message contains three pieces of data: error code, error message,
+ * and error details. The error code should be an enum value of
+ * google.rpc.Code, but it may accept additional error codes if needed. The
+ * error message should be a developer-facing English message that helps
+ * developers *understand* and *resolve* the error. If a localized user-facing
+ * error message is needed, put the localized message in the error details or
+ * localize it in the client. The optional error details may contain arbitrary
+ * information about the error. There is a predefined set of error detail types
+ * in the package `google.rpc` that can be used for common error conditions.
+ *
+ * # Language mapping
+ *
+ * The `Status` message is the logical representation of the error model, but it
+ * is not necessarily the actual wire format. When the `Status` message is
+ * exposed in different client libraries and different wire protocols, it can be
+ * mapped differently. For example, it will likely be mapped to some exceptions
+ * in Java, but more likely mapped to some error codes in C.
+ *
+ * # Other uses
+ *
+ * The error model and the `Status` message can be used in a variety of
+ * environments, either with or without APIs, to provide a
+ * consistent developer experience across different environments.
+ *
+ * Example uses of this error model include:
+ *
+ * - Partial errors. If a service needs to return partial errors to the client,
+ * it may embed the `Status` in the normal response to indicate the partial
+ * errors.
+ *
+ * - Workflow errors. A typical workflow has multiple steps. Each step may
+ * have a `Status` message for error reporting.
+ *
+ * - Batch operations. If a client uses batch request and batch response, the
+ * `Status` message should be used directly inside batch response, one for
+ * each error sub-response.
+ *
+ * - Asynchronous operations. If an API call embeds asynchronous operation
+ * results in its response, the status of those operations should be
+ * represented directly using the `Status` message.
+ *
+ * - Logging. If some API errors are stored in logs, the message `Status` could
+ * be used directly after any stripping needed for security/privacy reasons.
+ *
+ * @property {number} code
+ * The status code, which should be an enum value of google.rpc.Code.
+ *
+ * @property {string} message
+ * A developer-facing error message, which should be in English. Any
+ * user-facing error message should be localized and sent in the
+ * google.rpc.Status.details field, or localized by the client.
+ *
+ * @property {Object[]} details
+ * A list of messages that carry the error details. There is a common set of
+ * message types for APIs to use.
+ *
+ * This object should have the same structure as [Any]{@link google.protobuf.Any}
+ *
+ * @typedef Status
+ * @memberof google.rpc
+ * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
+ */
+const Status = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/type/doc_color.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/type/doc_color.js
new file mode 100644
index 00000000000..0b8c30004ce
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/type/doc_color.js
@@ -0,0 +1,164 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * Represents a color in the RGBA color space. This representation is designed
+ * for simplicity of conversion to/from color representations in various
+ * languages over compactness; for example, the fields of this representation
+ * can be trivially provided to the constructor of "java.awt.Color" in Java; it
+ * can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
+ * method in iOS; and, with just a little work, it can be easily formatted into
+ * a CSS "rgba()" string in JavaScript, as well. Here are some examples:
+ *
+ * Example (Java):
+ *
+ * import com.google.type.Color;
+ *
+ * // ...
+ * public static java.awt.Color fromProto(Color protocolor) {
+ * float alpha = protocolor.hasAlpha()
+ * ? protocolor.getAlpha().getValue()
+ *         : 1.0f;
+ *
+ * return new java.awt.Color(
+ * protocolor.getRed(),
+ * protocolor.getGreen(),
+ * protocolor.getBlue(),
+ * alpha);
+ * }
+ *
+ * public static Color toProto(java.awt.Color color) {
+ * float red = (float) color.getRed();
+ * float green = (float) color.getGreen();
+ * float blue = (float) color.getBlue();
+ *     float denominator = 255.0f;
+ * Color.Builder resultBuilder =
+ * Color
+ * .newBuilder()
+ * .setRed(red / denominator)
+ * .setGreen(green / denominator)
+ * .setBlue(blue / denominator);
+ * int alpha = color.getAlpha();
+ * if (alpha != 255) {
+ *       resultBuilder.setAlpha(
+ * FloatValue
+ * .newBuilder()
+ * .setValue(((float) alpha) / denominator)
+ * .build());
+ * }
+ * return resultBuilder.build();
+ * }
+ * // ...
+ *
+ * Example (iOS / Obj-C):
+ *
+ * // ...
+ * static UIColor* fromProto(Color* protocolor) {
+ * float red = [protocolor red];
+ * float green = [protocolor green];
+ * float blue = [protocolor blue];
+ * FloatValue* alpha_wrapper = [protocolor alpha];
+ * float alpha = 1.0;
+ * if (alpha_wrapper != nil) {
+ * alpha = [alpha_wrapper value];
+ * }
+ * return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
+ * }
+ *
+ * static Color* toProto(UIColor* color) {
+ * CGFloat red, green, blue, alpha;
+ * if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
+ * return nil;
+ * }
+ *     Color* result = [[Color alloc] init];
+ * [result setRed:red];
+ * [result setGreen:green];
+ * [result setBlue:blue];
+ * if (alpha <= 0.9999) {
+ * [result setAlpha:floatWrapperWithValue(alpha)];
+ * }
+ * [result autorelease];
+ * return result;
+ * }
+ * // ...
+ *
+ * Example (JavaScript):
+ *
+ * // ...
+ *
+ * var protoToCssColor = function(rgb_color) {
+ * var redFrac = rgb_color.red || 0.0;
+ * var greenFrac = rgb_color.green || 0.0;
+ * var blueFrac = rgb_color.blue || 0.0;
+ * var red = Math.floor(redFrac * 255);
+ * var green = Math.floor(greenFrac * 255);
+ * var blue = Math.floor(blueFrac * 255);
+ *
+ * if (!('alpha' in rgb_color)) {
+ * return rgbToCssColor_(red, green, blue);
+ * }
+ *
+ * var alphaFrac = rgb_color.alpha.value || 0.0;
+ * var rgbParams = [red, green, blue].join(',');
+ * return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
+ * };
+ *
+ * var rgbToCssColor_ = function(red, green, blue) {
+ * var rgbNumber = new Number((red << 16) | (green << 8) | blue);
+ * var hexString = rgbNumber.toString(16);
+ * var missingZeros = 6 - hexString.length;
+ * var resultBuilder = ['#'];
+ * for (var i = 0; i < missingZeros; i++) {
+ * resultBuilder.push('0');
+ * }
+ * resultBuilder.push(hexString);
+ * return resultBuilder.join('');
+ * };
+ *
+ * // ...
+ *
+ * @property {number} red
+ * The amount of red in the color as a value in the interval [0, 1].
+ *
+ * @property {number} green
+ * The amount of green in the color as a value in the interval [0, 1].
+ *
+ * @property {number} blue
+ * The amount of blue in the color as a value in the interval [0, 1].
+ *
+ * @property {Object} alpha
+ * The fraction of this color that should be applied to the pixel. That is,
+ * the final pixel color is defined by the equation:
+ *
+ * pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
+ *
+ * This means that a value of 1.0 corresponds to a solid color, whereas
+ * a value of 0.0 corresponds to a completely transparent color. This
+ * uses a wrapper message rather than a simple float scalar so that it is
+ * possible to distinguish between a default value and the value being unset.
+ * If omitted, this color object is to be rendered as a solid color
+ * (as if the alpha value had been explicitly given with a value of 1.0).
+ *
+ * This object should have the same structure as [FloatValue]{@link google.protobuf.FloatValue}
+ *
+ * @typedef Color
+ * @memberof google.type
+ * @see [google.type.Color definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/color.proto}
+ */
+const Color = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/doc/google/type/doc_latlng.js b/packages/google-cloud-vision/src/v1p4beta1/doc/google/type/doc_latlng.js
new file mode 100644
index 00000000000..37dca6bcf66
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/doc/google/type/doc_latlng.js
@@ -0,0 +1,71 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * An object representing a latitude/longitude pair. This is expressed as a pair
+ * of doubles representing degrees latitude and degrees longitude. Unless
+ * specified otherwise, this must conform to the
+ * WGS84
+ * standard. Values must be within normalized ranges.
+ *
+ * Example of normalization code in Python:
+ *
+ * def NormalizeLongitude(longitude):
+ * """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+ * q, r = divmod(longitude, 360.0)
+ * if r > 180.0 or (r == 180.0 and q <= -1.0):
+ * return r - 360.0
+ * return r
+ *
+ * def NormalizeLatLng(latitude, longitude):
+ * """Wraps decimal degrees latitude and longitude to
+ * [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+ * r = latitude % 360.0
+ * if r <= 90.0:
+ * return r, NormalizeLongitude(longitude)
+ * elif r >= 270.0:
+ * return r - 360, NormalizeLongitude(longitude)
+ * else:
+ * return 180 - r, NormalizeLongitude(longitude + 180.0)
+ *
+ * assert 180.0 == NormalizeLongitude(180.0)
+ * assert -180.0 == NormalizeLongitude(-180.0)
+ * assert -179.0 == NormalizeLongitude(181.0)
+ * assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+ * assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+ * assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+ * assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+ * assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+ * assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+ * assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+ * assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+ * assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+ * assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+ *
+ * @property {number} latitude
+ * The latitude in degrees. It must be in the range [-90.0, +90.0].
+ *
+ * @property {number} longitude
+ * The longitude in degrees. It must be in the range [-180.0, +180.0].
+ *
+ * @typedef LatLng
+ * @memberof google.type
+ * @see [google.type.LatLng definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/latlng.proto}
+ */
+const LatLng = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p4beta1/image_annotator_client.js b/packages/google-cloud-vision/src/v1p4beta1/image_annotator_client.js
new file mode 100644
index 00000000000..ce225754ba8
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/image_annotator_client.js
@@ -0,0 +1,544 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const gapicConfig = require('./image_annotator_client_config');
+const gax = require('google-gax');
+const merge = require('lodash.merge');
+const path = require('path');
+const protobuf = require('protobufjs');
+
+const VERSION = require('../../package.json').version;
+
+/**
+ * Service that performs Google Cloud Vision API detection tasks over client
+ * images, such as face, landmark, logo, label, and text detection. The
+ * ImageAnnotator service returns detected entities from the images.
+ *
+ * @class
+ * @memberof v1p4beta1
+ */
+class ImageAnnotatorClient {
+ /**
+ * Construct an instance of ImageAnnotatorClient.
+ *
+ * @param {object} [options] - The configuration object. See the subsequent
+ * parameters for more details.
+ * @param {object} [options.credentials] - Credentials object.
+ * @param {string} [options.credentials.client_email]
+ * @param {string} [options.credentials.private_key]
+ * @param {string} [options.email] - Account email address. Required when
+ * using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+ * .p12 key downloaded from the Google Developers Console. If you provide
+ * a path to a JSON file, the projectId option below is not necessary.
+ * NOTE: .pem and .p12 require you to specify options.email as well.
+ * @param {number} [options.port] - The port on which to connect to
+ * the remote host.
+ * @param {string} [options.projectId] - The project ID from the Google
+ * Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+ * the environment variable GCLOUD_PROJECT for your project ID. If your
+ * app is running in an environment which supports
+ * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+ * your project ID will be detected automatically.
+ * @param {function} [options.promise] - Custom promise module to use instead
+ * of native Promises.
+ * @param {string} [options.servicePath] - The domain name of the
+ * API remote host.
+ */
+ constructor(opts) {
+ this._descriptors = {};
+
+ // Ensure that options include the service address and port.
+ opts = Object.assign(
+ {
+ clientConfig: {},
+ port: this.constructor.port,
+ servicePath: this.constructor.servicePath,
+ },
+ opts
+ );
+
+ // Create a `gaxGrpc` object, with any grpc-specific options
+ // sent to the client.
+ opts.scopes = this.constructor.scopes;
+ const gaxGrpc = new gax.GrpcClient(opts);
+
+ // Save the auth object to the client, for use by other methods.
+ this.auth = gaxGrpc.auth;
+
+ // Determine the client header string.
+ const clientHeader = [
+ `gl-node/${process.version}`,
+ `grpc/${gaxGrpc.grpcVersion}`,
+ `gax/${gax.version}`,
+ `gapic/${VERSION}`,
+ ];
+ if (opts.libName && opts.libVersion) {
+ clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+ }
+
+ // Load the applicable protos.
+ const protos = merge(
+ {},
+ gaxGrpc.loadProto(
+ path.join(__dirname, '..', '..', 'protos'),
+ 'google/cloud/vision/v1p4beta1/image_annotator.proto'
+ )
+ );
+ let protoFilesRoot = new gax.GoogleProtoFilesRoot();
+ protoFilesRoot = protobuf.loadSync(
+ path.join(
+ __dirname,
+ '..',
+ '..',
+ 'protos',
+ 'google/cloud/vision/v1p4beta1/image_annotator.proto'
+ ),
+ protoFilesRoot
+ );
+
+    // This API contains "long-running operations", which return
+    // an Operation object that allows for tracking of the operation,
+ // rather than holding a request open.
+ this.operationsClient = new gax.lro({
+ auth: gaxGrpc.auth,
+ grpc: gaxGrpc.grpc,
+ }).operationsClient(opts);
+
+ const asyncBatchAnnotateImagesResponse = protoFilesRoot.lookup(
+ 'google.cloud.vision.v1p4beta1.AsyncBatchAnnotateImagesResponse'
+ );
+ const asyncBatchAnnotateImagesMetadata = protoFilesRoot.lookup(
+ 'google.cloud.vision.v1p4beta1.OperationMetadata'
+ );
+ const asyncBatchAnnotateFilesResponse = protoFilesRoot.lookup(
+ 'google.cloud.vision.v1p4beta1.AsyncBatchAnnotateFilesResponse'
+ );
+ const asyncBatchAnnotateFilesMetadata = protoFilesRoot.lookup(
+ 'google.cloud.vision.v1p4beta1.OperationMetadata'
+ );
+
+ this._descriptors.longrunning = {
+ asyncBatchAnnotateImages: new gax.LongrunningDescriptor(
+ this.operationsClient,
+ asyncBatchAnnotateImagesResponse.decode.bind(
+ asyncBatchAnnotateImagesResponse
+ ),
+ asyncBatchAnnotateImagesMetadata.decode.bind(
+ asyncBatchAnnotateImagesMetadata
+ )
+ ),
+ asyncBatchAnnotateFiles: new gax.LongrunningDescriptor(
+ this.operationsClient,
+ asyncBatchAnnotateFilesResponse.decode.bind(
+ asyncBatchAnnotateFilesResponse
+ ),
+ asyncBatchAnnotateFilesMetadata.decode.bind(
+ asyncBatchAnnotateFilesMetadata
+ )
+ ),
+ };
+
+ // Put together the default options sent with requests.
+ const defaults = gaxGrpc.constructSettings(
+ 'google.cloud.vision.v1p4beta1.ImageAnnotator',
+ gapicConfig,
+ opts.clientConfig,
+ {'x-goog-api-client': clientHeader.join(' ')}
+ );
+
+ // Set up a dictionary of "inner API calls"; the core implementation
+ // of calling the API is handled in `google-gax`, with this code
+ // merely providing the destination and request information.
+ this._innerApiCalls = {};
+
+ // Put together the "service stub" for
+ // google.cloud.vision.v1p4beta1.ImageAnnotator.
+ const imageAnnotatorStub = gaxGrpc.createStub(
+ protos.google.cloud.vision.v1p4beta1.ImageAnnotator,
+ opts
+ );
+
+ // Iterate over each of the methods that the service provides
+ // and create an API call method for each.
+ const imageAnnotatorStubMethods = [
+ 'batchAnnotateImages',
+ 'batchAnnotateFiles',
+ 'asyncBatchAnnotateImages',
+ 'asyncBatchAnnotateFiles',
+ ];
+ for (const methodName of imageAnnotatorStubMethods) {
+ this._innerApiCalls[methodName] = gax.createApiCall(
+ imageAnnotatorStub.then(
+ stub =>
+ function() {
+ const args = Array.prototype.slice.call(arguments, 0);
+ return stub[methodName].apply(stub, args);
+ }
+ ),
+ defaults[methodName],
+ this._descriptors.longrunning[methodName]
+ );
+ }
+ }
+
+ /**
+ * The DNS address for this API service.
+ */
+ static get servicePath() {
+ return 'vision.googleapis.com';
+ }
+
+ /**
+ * The port for this API service.
+ */
+ static get port() {
+ return 443;
+ }
+
+ /**
+ * The scopes needed to make gRPC calls for every method defined
+ * in this service.
+ */
+ static get scopes() {
+ return [
+ 'https://www.googleapis.com/auth/cloud-platform',
+ 'https://www.googleapis.com/auth/cloud-vision',
+ ];
+ }
+
+ /**
+ * Return the project ID used by this class.
+ * @param {function(Error, string)} callback - the callback to
+   *   be called with the current project ID.
+ */
+ getProjectId(callback) {
+ return this.auth.getProjectId(callback);
+ }
+
+ // -------------------
+ // -- Service calls --
+ // -------------------
+
+ /**
+ * Run image detection and annotation for a batch of images.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object[]} request.requests
+ * Individual image annotation requests for this batch.
+ *
+ * This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1p4beta1.AnnotateImageRequest}
+ * @param {Object} [options]
+   *   Optional parameters. You can override the default settings for this call, e.g., timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [BatchAnnotateImagesResponse]{@link google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [BatchAnnotateImagesResponse]{@link google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ImageAnnotatorClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const requests = [];
+ * client.batchAnnotateImages({requests: requests})
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ batchAnnotateImages(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.batchAnnotateImages(request, options, callback);
+ }
+
+ /**
+ * Service that performs image detection and annotation for a batch of files.
+ * Now only "application/pdf", "image/tiff" and "image/gif" are supported.
+ *
+ * This service will extract at most the first 10 frames (gif) or pages
+ * (pdf or tiff) from each file provided and perform detection and annotation
+ * for each image extracted.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object[]} request.requests
+ * The list of file annotation requests. Right now we support only one
+ * AnnotateFileRequest in BatchAnnotateFilesRequest.
+ *
+ * This object should have the same structure as [AnnotateFileRequest]{@link google.cloud.vision.v1p4beta1.AnnotateFileRequest}
+ * @param {Object} [options]
+   *   Optional parameters. You can override the default settings for this call, e.g., timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [BatchAnnotateFilesResponse]{@link google.cloud.vision.v1p4beta1.BatchAnnotateFilesResponse}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [BatchAnnotateFilesResponse]{@link google.cloud.vision.v1p4beta1.BatchAnnotateFilesResponse}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ImageAnnotatorClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const requests = [];
+ * client.batchAnnotateFiles({requests: requests})
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ batchAnnotateFiles(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.batchAnnotateFiles(request, options, callback);
+ }
+
+ /**
+ * Run asynchronous image detection and annotation for a list of images.
+ *
+ * Progress and results can be retrieved through the
+ * `google.longrunning.Operations` interface.
+ * `Operation.metadata` contains `OperationMetadata` (metadata).
+ * `Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).
+ *
+   * This service will write image annotation outputs to JSON files in the
+   * customer's GCS bucket, each JSON file containing BatchAnnotateImagesResponse proto.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object[]} request.requests
+ * Individual image annotation requests for this batch.
+ *
+ * This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1p4beta1.AnnotateImageRequest}
+ * @param {Object} request.outputConfig
+ * Required. The desired output location and metadata (e.g. format).
+ *
+ * This object should have the same structure as [OutputConfig]{@link google.cloud.vision.v1p4beta1.OutputConfig}
+ * @param {Object} [options]
+   *   Optional parameters. You can override the default settings for this call, e.g., timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ImageAnnotatorClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const requests = [];
+ * const outputConfig = {};
+ * const request = {
+ * requests: requests,
+ * outputConfig: outputConfig,
+ * };
+ *
+ * // Handle the operation using the promise pattern.
+ * client.asyncBatchAnnotateImages(request)
+ * .then(responses => {
+ * const [operation, initialApiResponse] = responses;
+ *
+ * // Operation#promise starts polling for the completion of the LRO.
+ * return operation.promise();
+ * })
+ * .then(responses => {
+ * const result = responses[0];
+ * const metadata = responses[1];
+ * const finalApiResponse = responses[2];
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ *
+ * const requests = [];
+ * const outputConfig = {};
+ * const request = {
+ * requests: requests,
+ * outputConfig: outputConfig,
+ * };
+ *
+ * // Handle the operation using the event emitter pattern.
+ * client.asyncBatchAnnotateImages(request)
+ * .then(responses => {
+ * const [operation, initialApiResponse] = responses;
+ *
+ * // Adding a listener for the "complete" event starts polling for the
+ * // completion of the operation.
+ * operation.on('complete', (result, metadata, finalApiResponse) => {
+ * // doSomethingWith(result);
+ * });
+ *
+ * // Adding a listener for the "progress" event causes the callback to be
+ * // called on any change in metadata when the operation is polled.
+ * operation.on('progress', (metadata, apiResponse) => {
+ * // doSomethingWith(metadata)
+ * });
+ *
+ * // Adding a listener for the "error" event handles any errors found during polling.
+ * operation.on('error', err => {
+ * // throw(err);
+ * });
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ asyncBatchAnnotateImages(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.asyncBatchAnnotateImages(
+ request,
+ options,
+ callback
+ );
+ }
+
+ /**
+ * Run asynchronous image detection and annotation for a list of generic
+ * files, such as PDF files, which may contain multiple pages and multiple
+ * images per page. Progress and results can be retrieved through the
+ * `google.longrunning.Operations` interface.
+ * `Operation.metadata` contains `OperationMetadata` (metadata).
+ * `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object[]} request.requests
+ * Individual async file annotation requests for this batch.
+ *
+ * This object should have the same structure as [AsyncAnnotateFileRequest]{@link google.cloud.vision.v1p4beta1.AsyncAnnotateFileRequest}
+ * @param {Object} [options]
+   *   Optional parameters. You can override the default settings for this call, e.g., timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ImageAnnotatorClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const requests = [];
+ *
+ * // Handle the operation using the promise pattern.
+ * client.asyncBatchAnnotateFiles({requests: requests})
+ * .then(responses => {
+ * const [operation, initialApiResponse] = responses;
+ *
+ * // Operation#promise starts polling for the completion of the LRO.
+ * return operation.promise();
+ * })
+ * .then(responses => {
+ * const result = responses[0];
+ * const metadata = responses[1];
+ * const finalApiResponse = responses[2];
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ *
+ * const requests = [];
+ *
+ * // Handle the operation using the event emitter pattern.
+ * client.asyncBatchAnnotateFiles({requests: requests})
+ * .then(responses => {
+ * const [operation, initialApiResponse] = responses;
+ *
+ * // Adding a listener for the "complete" event starts polling for the
+ * // completion of the operation.
+ * operation.on('complete', (result, metadata, finalApiResponse) => {
+ * // doSomethingWith(result);
+ * });
+ *
+ * // Adding a listener for the "progress" event causes the callback to be
+ * // called on any change in metadata when the operation is polled.
+ * operation.on('progress', (metadata, apiResponse) => {
+ * // doSomethingWith(metadata)
+ * });
+ *
+ * // Adding a listener for the "error" event handles any errors found during polling.
+ * operation.on('error', err => {
+ * // throw(err);
+ * });
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ asyncBatchAnnotateFiles(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.asyncBatchAnnotateFiles(
+ request,
+ options,
+ callback
+ );
+ }
+}
+
+module.exports = ImageAnnotatorClient;
diff --git a/packages/google-cloud-vision/src/v1p4beta1/image_annotator_client_config.json b/packages/google-cloud-vision/src/v1p4beta1/image_annotator_client_config.json
new file mode 100644
index 00000000000..9666bd1f923
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/image_annotator_client_config.json
@@ -0,0 +1,46 @@
+{
+ "interfaces": {
+ "google.cloud.vision.v1p4beta1.ImageAnnotator": {
+ "retry_codes": {
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ],
+ "non_idempotent": []
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "BatchAnnotateImages": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "BatchAnnotateFiles": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "AsyncBatchAnnotateImages": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "AsyncBatchAnnotateFiles": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git a/packages/google-cloud-vision/src/v1p4beta1/index.js b/packages/google-cloud-vision/src/v1p4beta1/index.js
new file mode 100644
index 00000000000..8a46227b562
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/index.js
@@ -0,0 +1,21 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const ProductSearchClient = require('./product_search_client');
+const ImageAnnotatorClient = require('./image_annotator_client');
+
+module.exports.ProductSearchClient = ProductSearchClient;
+module.exports.ImageAnnotatorClient = ImageAnnotatorClient;
diff --git a/packages/google-cloud-vision/src/v1p4beta1/product_search_client.js b/packages/google-cloud-vision/src/v1p4beta1/product_search_client.js
new file mode 100644
index 00000000000..c57fdc86a5b
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/product_search_client.js
@@ -0,0 +1,2050 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const gapicConfig = require('./product_search_client_config');
+const gax = require('google-gax');
+const merge = require('lodash.merge');
+const path = require('path');
+const protobuf = require('protobufjs');
+
+const VERSION = require('../../package.json').version;
+
+/**
+ * Manages Products and ProductSets of reference images for use in product
+ * search. It uses the following resource model:
+ *
+ * - The API has a collection of ProductSet resources, named
+ * `projects/* /locations/* /productSets/*`, which acts as a way to put different
+ * products into groups to limit identification.
+ *
+ * In parallel,
+ *
+ * - The API has a collection of Product resources, named
+ * `projects/* /locations/* /products/*`
+ *
+ * - Each Product has a collection of ReferenceImage resources, named
+ * `projects/* /locations/* /products/* /referenceImages/*`
+ *
+ * @class
+ * @memberof v1p4beta1
+ */
+class ProductSearchClient {
+ /**
+ * Construct an instance of ProductSearchClient.
+ *
+ * @param {object} [options] - The configuration object. See the subsequent
+ * parameters for more details.
+ * @param {object} [options.credentials] - Credentials object.
+ * @param {string} [options.credentials.client_email]
+ * @param {string} [options.credentials.private_key]
+ * @param {string} [options.email] - Account email address. Required when
+ * using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+ * .p12 key downloaded from the Google Developers Console. If you provide
+ * a path to a JSON file, the projectId option below is not necessary.
+ * NOTE: .pem and .p12 require you to specify options.email as well.
+ * @param {number} [options.port] - The port on which to connect to
+ * the remote host.
+ * @param {string} [options.projectId] - The project ID from the Google
+ * Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+ * the environment variable GCLOUD_PROJECT for your project ID. If your
+ * app is running in an environment which supports
+ * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+ * your project ID will be detected automatically.
+ * @param {function} [options.promise] - Custom promise module to use instead
+ * of native Promises.
+ * @param {string} [options.servicePath] - The domain name of the
+ * API remote host.
+ */
+ constructor(opts) {
+ this._descriptors = {};
+
+ // Ensure that options include the service address and port.
+ opts = Object.assign(
+ {
+ clientConfig: {},
+ port: this.constructor.port,
+ servicePath: this.constructor.servicePath,
+ },
+ opts
+ );
+
+ // Create a `gaxGrpc` object, with any grpc-specific options
+ // sent to the client.
+ opts.scopes = this.constructor.scopes;
+ const gaxGrpc = new gax.GrpcClient(opts);
+
+ // Save the auth object to the client, for use by other methods.
+ this.auth = gaxGrpc.auth;
+
+ // Determine the client header string.
+ const clientHeader = [
+ `gl-node/${process.version}`,
+ `grpc/${gaxGrpc.grpcVersion}`,
+ `gax/${gax.version}`,
+ `gapic/${VERSION}`,
+ ];
+ if (opts.libName && opts.libVersion) {
+ clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+ }
+
+ // Load the applicable protos.
+ const protos = merge(
+ {},
+ gaxGrpc.loadProto(
+ path.join(__dirname, '..', '..', 'protos'),
+ 'google/cloud/vision/v1p4beta1/product_search_service.proto'
+ )
+ );
+
+ // This API contains "path templates"; forward-slash-separated
+ // identifiers to uniquely identify resources within the API.
+ // Create useful helper objects for these.
+ this._pathTemplates = {
+ locationPathTemplate: new gax.PathTemplate(
+ 'projects/{project}/locations/{location}'
+ ),
+ productSetPathTemplate: new gax.PathTemplate(
+ 'projects/{project}/locations/{location}/productSets/{product_set}'
+ ),
+ productPathTemplate: new gax.PathTemplate(
+ 'projects/{project}/locations/{location}/products/{product}'
+ ),
+ referenceImagePathTemplate: new gax.PathTemplate(
+ 'projects/{project}/locations/{location}/products/{product}/referenceImages/{reference_image}'
+ ),
+ };
+
+ // Some of the methods on this service return "paged" results,
+ // (e.g. 50 results at a time, with tokens to get subsequent
+ // pages). Denote the keys used for pagination and results.
+ this._descriptors.page = {
+ listProductSets: new gax.PageDescriptor(
+ 'pageToken',
+ 'nextPageToken',
+ 'productSets'
+ ),
+ listProducts: new gax.PageDescriptor(
+ 'pageToken',
+ 'nextPageToken',
+ 'products'
+ ),
+ listReferenceImages: new gax.PageDescriptor(
+ 'pageToken',
+ 'nextPageToken',
+ 'referenceImages'
+ ),
+ listProductsInProductSet: new gax.PageDescriptor(
+ 'pageToken',
+ 'nextPageToken',
+ 'products'
+ ),
+ };
+ let protoFilesRoot = new gax.GoogleProtoFilesRoot();
+ protoFilesRoot = protobuf.loadSync(
+ path.join(
+ __dirname,
+ '..',
+ '..',
+ 'protos',
+ 'google/cloud/vision/v1p4beta1/product_search_service.proto'
+ ),
+ protoFilesRoot
+ );
+
+    // This API contains "long-running operations", which return
+    // an Operation object that allows for tracking of the operation,
+ // rather than holding a request open.
+ this.operationsClient = new gax.lro({
+ auth: gaxGrpc.auth,
+ grpc: gaxGrpc.grpc,
+ }).operationsClient(opts);
+
+ const importProductSetsResponse = protoFilesRoot.lookup(
+ 'google.cloud.vision.v1p4beta1.ImportProductSetsResponse'
+ );
+ const importProductSetsMetadata = protoFilesRoot.lookup(
+ 'google.cloud.vision.v1p4beta1.BatchOperationMetadata'
+ );
+
+ this._descriptors.longrunning = {
+ importProductSets: new gax.LongrunningDescriptor(
+ this.operationsClient,
+ importProductSetsResponse.decode.bind(importProductSetsResponse),
+ importProductSetsMetadata.decode.bind(importProductSetsMetadata)
+ ),
+ };
+
+ // Put together the default options sent with requests.
+ const defaults = gaxGrpc.constructSettings(
+ 'google.cloud.vision.v1p4beta1.ProductSearch',
+ gapicConfig,
+ opts.clientConfig,
+ {'x-goog-api-client': clientHeader.join(' ')}
+ );
+
+ // Set up a dictionary of "inner API calls"; the core implementation
+ // of calling the API is handled in `google-gax`, with this code
+ // merely providing the destination and request information.
+ this._innerApiCalls = {};
+
+ // Put together the "service stub" for
+ // google.cloud.vision.v1p4beta1.ProductSearch.
+ const productSearchStub = gaxGrpc.createStub(
+ protos.google.cloud.vision.v1p4beta1.ProductSearch,
+ opts
+ );
+
+ // Iterate over each of the methods that the service provides
+ // and create an API call method for each.
+ const productSearchStubMethods = [
+ 'createProductSet',
+ 'listProductSets',
+ 'getProductSet',
+ 'updateProductSet',
+ 'deleteProductSet',
+ 'createProduct',
+ 'listProducts',
+ 'getProduct',
+ 'updateProduct',
+ 'deleteProduct',
+ 'createReferenceImage',
+ 'deleteReferenceImage',
+ 'listReferenceImages',
+ 'getReferenceImage',
+ 'addProductToProductSet',
+ 'removeProductFromProductSet',
+ 'listProductsInProductSet',
+ 'importProductSets',
+ ];
+ for (const methodName of productSearchStubMethods) {
+ this._innerApiCalls[methodName] = gax.createApiCall(
+ productSearchStub.then(
+ stub =>
+ function() {
+ const args = Array.prototype.slice.call(arguments, 0);
+ return stub[methodName].apply(stub, args);
+ }
+ ),
+ defaults[methodName],
+ this._descriptors.page[methodName] ||
+ this._descriptors.longrunning[methodName]
+ );
+ }
+ }
+
+ /**
+ * The DNS address for this API service.
+ */
+ static get servicePath() {
+ return 'vision.googleapis.com';
+ }
+
+ /**
+ * The port for this API service.
+ */
+ static get port() {
+ return 443;
+ }
+
+ /**
+ * The scopes needed to make gRPC calls for every method defined
+ * in this service.
+ */
+ static get scopes() {
+ return [
+ 'https://www.googleapis.com/auth/cloud-platform',
+ 'https://www.googleapis.com/auth/cloud-vision',
+ ];
+ }
+
+ /**
+ * Return the project ID used by this class.
+ * @param {function(Error, string)} callback - the callback to
+ * be called with the current project Id.
+ */
+ getProjectId(callback) {
+ return this.auth.getProjectId(callback);
+ }
+
+ // -------------------
+ // -- Service calls --
+ // -------------------
+
+ /**
+ * Creates and returns a new ProductSet resource.
+ *
+ * Possible errors:
+ *
+ * * Returns INVALID_ARGUMENT if display_name is missing, or is longer than
+ * 4096 characters.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * The project in which the ProductSet should be created.
+ *
+ * Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ * @param {Object} request.productSet
+ * The ProductSet to create.
+ *
+ * This object should have the same structure as [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}
+ * @param {string} request.productSetId
+ * A user-supplied resource id for this ProductSet. If set, the server will
+ * attempt to use this value as the resource id. If it is already in use, an
+ * error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ * long. It cannot contain the character `/`.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ * const productSet = {};
+ * const productSetId = '';
+ * const request = {
+ * parent: formattedParent,
+ * productSet: productSet,
+ * productSetId: productSetId,
+ * };
+ * client.createProductSet(request)
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ createProductSet(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.createProductSet(request, options, callback);
+ }
+
+ /**
+ * Lists ProductSets in an unspecified order.
+ *
+ * Possible errors:
+ *
+ * * Returns INVALID_ARGUMENT if page_size is greater than 100, or less
+ * than 1.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * The project from which ProductSets should be listed.
+ *
+ * Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Array, ?Object, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is Array of [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ *
+ * When autoPaginate: false is specified through options, it contains the result
+ * in a single response. If the response indicates the next page exists, the third
+ * parameter is set to be used for the next request object. The fourth parameter keeps
+ * the raw response object of an object representing [ListProductSetsResponse]{@link google.cloud.vision.v1p4beta1.ListProductSetsResponse}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is Array of [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ *
+ * When autoPaginate: false is specified through options, the array has three elements.
+ * The first element is Array of [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet} in a single response.
+ * The second element is the next request object if the response
+ * indicates the next page exists, or null. The third element is
+ * an object representing [ListProductSetsResponse]{@link google.cloud.vision.v1p4beta1.ListProductSetsResponse}.
+ *
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * // Iterate over all elements.
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ *
+ * client.listProductSets({parent: formattedParent})
+ * .then(responses => {
+ * const resources = responses[0];
+ * for (const resource of resources) {
+ * // doThingsWith(resource)
+ * }
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ *
+ * // Or obtain the paged response.
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ *
+ *
+ * const options = {autoPaginate: false};
+ * const callback = responses => {
+ * // The actual resources in a response.
+ * const resources = responses[0];
+ * // The next request if the response shows that there are more responses.
+ * const nextRequest = responses[1];
+ * // The actual response object, if necessary.
+ * // const rawResponse = responses[2];
+ * for (const resource of resources) {
+ * // doThingsWith(resource);
+ * }
+ * if (nextRequest) {
+ * // Fetch the next page.
+ * return client.listProductSets(nextRequest, options).then(callback);
+ * }
+ * }
+ * client.listProductSets({parent: formattedParent}, options)
+ * .then(callback)
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ listProductSets(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.listProductSets(request, options, callback);
+ }
+
+ /**
+ * Equivalent to {@link listProductSets}, but returns a NodeJS Stream object.
+ *
+ * This fetches the paged responses for {@link listProductSets} continuously
+ * and invokes the callback registered for 'data' event for each element in the
+ * responses.
+ *
+ * The returned object has 'end' method when no more elements are required.
+ *
+ * autoPaginate option will be ignored.
+ *
+ * @see {@link https://nodejs.org/api/stream.html}
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * The project from which ProductSets should be listed.
+ *
+ * Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @returns {Stream}
+ * An object stream which emits an object representing [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet} on 'data' event.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ * client.listProductSetsStream({parent: formattedParent})
+ * .on('data', element => {
+ * // doThingsWith(element)
+ * }).on('error', err => {
+ * console.log(err);
+ * });
+ */
+ listProductSetsStream(request, options) {
+ options = options || {};
+
+ return this._descriptors.page.listProductSets.createStream(
+ this._innerApiCalls.listProductSets,
+ request,
+ options
+ );
+ }
+
+ /**
+ * Gets information associated with a ProductSet.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the ProductSet does not exist.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * Resource name of the ProductSet to get.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOG_ID/productSets/PRODUCT_SET_ID`
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.productSetPath('[PROJECT]', '[LOCATION]', '[PRODUCT_SET]');
+ * client.getProductSet({name: formattedName})
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ getProductSet(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.getProductSet(request, options, callback);
+ }
+
+ /**
+ * Makes changes to a ProductSet resource.
+ * Only display_name can be updated currently.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the ProductSet does not exist.
+ * * Returns INVALID_ARGUMENT if display_name is present in update_mask but
+ * missing from the request or longer than 4096 characters.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object} request.productSet
+ * The ProductSet resource which replaces the one on the server.
+ *
+ * This object should have the same structure as [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}
+ * @param {Object} request.updateMask
+ * The FieldMask that specifies which fields to
+ * update.
+ * If update_mask isn't specified, all mutable fields are to be updated.
+ * Valid mask path is `display_name`.
+ *
+ * This object should have the same structure as [FieldMask]{@link google.protobuf.FieldMask}
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [ProductSet]{@link google.cloud.vision.v1p4beta1.ProductSet}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const productSet = {};
+ * const updateMask = {};
+ * const request = {
+ * productSet: productSet,
+ * updateMask: updateMask,
+ * };
+ * client.updateProductSet(request)
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ updateProductSet(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.updateProductSet(request, options, callback);
+ }
+
+ /**
+ * Permanently deletes a ProductSet. Products and ReferenceImages in the
+ * ProductSet are not deleted.
+ *
+ * The actual image files are not deleted from Google Cloud Storage.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the ProductSet does not exist.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * Resource name of the ProductSet to delete.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error)} [callback]
+ * The function which will be called with the result of the API call.
+ * @returns {Promise} - The promise which resolves when API call finishes.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.productSetPath('[PROJECT]', '[LOCATION]', '[PRODUCT_SET]');
+ * client.deleteProductSet({name: formattedName}).catch(err => {
+ * console.error(err);
+ * });
+ */
+ deleteProductSet(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.deleteProductSet(request, options, callback);
+ }
+
+ /**
+ * Creates and returns a new product resource.
+ *
+ * Possible errors:
+ *
+ * * Returns INVALID_ARGUMENT if display_name is missing or longer than 4096
+ * characters.
+ * * Returns INVALID_ARGUMENT if description is longer than 4096 characters.
+ * * Returns INVALID_ARGUMENT if product_category is missing or invalid.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * The project in which the Product should be created.
+ *
+ * Format is
+ * `projects/PROJECT_ID/locations/LOC_ID`.
+ * @param {Object} request.product
+ * The product to create.
+ *
+ * This object should have the same structure as [Product]{@link google.cloud.vision.v1p4beta1.Product}
+ * @param {string} request.productId
+ * A user-supplied resource id for this Product. If set, the server will
+ * attempt to use this value as the resource id. If it is already in use, an
+ * error is returned with code ALREADY_EXISTS. Must be at most 128 characters
+ * long. It cannot contain the character `/`.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ * const product = {};
+ * const productId = '';
+ * const request = {
+ * parent: formattedParent,
+ * product: product,
+ * productId: productId,
+ * };
+ * client.createProduct(request)
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ createProduct(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.createProduct(request, options, callback);
+ }
+
+ /**
+ * Lists products in an unspecified order.
+ *
+ * Possible errors:
+ *
+ * * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * The project OR ProductSet from which Products should be listed.
+ *
+ * Format:
+ * `projects/PROJECT_ID/locations/LOC_ID`
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Array, ?Object, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is Array of [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ *
+ * When autoPaginate: false is specified through options, it contains the result
+ * in a single response. If the response indicates the next page exists, the third
+ * parameter is set to be used for the next request object. The fourth parameter keeps
+ * the raw response object of an object representing [ListProductsResponse]{@link google.cloud.vision.v1p4beta1.ListProductsResponse}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is Array of [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ *
+ * When autoPaginate: false is specified through options, the array has three elements.
+ * The first element is Array of [Product]{@link google.cloud.vision.v1p4beta1.Product} in a single response.
+ * The second element is the next request object if the response
+ * indicates the next page exists, or null. The third element is
+ * an object representing [ListProductsResponse]{@link google.cloud.vision.v1p4beta1.ListProductsResponse}.
+ *
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * // Iterate over all elements.
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ *
+ * client.listProducts({parent: formattedParent})
+ * .then(responses => {
+ * const resources = responses[0];
+ * for (const resource of resources) {
+ * // doThingsWith(resource)
+ * }
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ *
+ * // Or obtain the paged response.
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ *
+ *
+ * const options = {autoPaginate: false};
+ * const callback = responses => {
+ * // The actual resources in a response.
+ * const resources = responses[0];
+ * // The next request if the response shows that there are more responses.
+ * const nextRequest = responses[1];
+ * // The actual response object, if necessary.
+ * // const rawResponse = responses[2];
+ * for (const resource of resources) {
+ * // doThingsWith(resource);
+ * }
+ * if (nextRequest) {
+ * // Fetch the next page.
+ * return client.listProducts(nextRequest, options).then(callback);
+ * }
+ * }
+ * client.listProducts({parent: formattedParent}, options)
+ * .then(callback)
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ listProducts(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.listProducts(request, options, callback);
+ }
+
+ /**
+ * Equivalent to {@link listProducts}, but returns a NodeJS Stream object.
+ *
+ * This fetches the paged responses for {@link listProducts} continuously
+ * and invokes the callback registered for 'data' event for each element in the
+ * responses.
+ *
+ * The returned object has 'end' method when no more elements are required.
+ *
+ * autoPaginate option will be ignored.
+ *
+ * @see {@link https://nodejs.org/api/stream.html}
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * The project OR ProductSet from which Products should be listed.
+ *
+ * Format:
+ * `projects/PROJECT_ID/locations/LOC_ID`
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @returns {Stream}
+ * An object stream which emits an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product} on 'data' event.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ * client.listProductsStream({parent: formattedParent})
+ * .on('data', element => {
+ * // doThingsWith(element)
+ * }).on('error', err => {
+ * console.log(err);
+ * });
+ */
+ listProductsStream(request, options) {
+ options = options || {};
+
+ return this._descriptors.page.listProducts.createStream(
+ this._innerApiCalls.listProducts,
+ request,
+ options
+ );
+ }
+
+ /**
+ * Gets information associated with a Product.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the Product does not exist.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * Resource name of the Product to get.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.productPath('[PROJECT]', '[LOCATION]', '[PRODUCT]');
+ * client.getProduct({name: formattedName})
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ getProduct(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.getProduct(request, options, callback);
+ }
+
+ /**
+ * Makes changes to a Product resource.
+ * Only the `display_name`, `description`, and `labels` fields can be updated
+ * right now.
+ *
+ * If labels are updated, the change will not be reflected in queries until
+ * the next index time.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the Product does not exist.
+ * * Returns INVALID_ARGUMENT if display_name is present in update_mask but is
+ * missing from the request or longer than 4096 characters.
+ * * Returns INVALID_ARGUMENT if description is present in update_mask but is
+ * longer than 4096 characters.
+ * * Returns INVALID_ARGUMENT if product_category is present in update_mask.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object} request.product
+ * The Product resource which replaces the one on the server.
+ * product.name is immutable.
+ *
+ * This object should have the same structure as [Product]{@link google.cloud.vision.v1p4beta1.Product}
+ * @param {Object} request.updateMask
+ * The FieldMask that specifies which fields
+ * to update.
+ * If update_mask isn't specified, all mutable fields are to be updated.
+ * Valid mask paths include `product_labels`, `display_name`, and
+ * `description`.
+ *
+ * This object should have the same structure as [FieldMask]{@link google.protobuf.FieldMask}
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const product = {};
+ * const updateMask = {};
+ * const request = {
+ * product: product,
+ * updateMask: updateMask,
+ * };
+ * client.updateProduct(request)
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ updateProduct(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.updateProduct(request, options, callback);
+ }
+
+ /**
+ * Permanently deletes a product and its reference images.
+ *
+ * Metadata of the product and all its images will be deleted right away, but
+ * search queries against ProductSets containing the product may still work
+ * until all related caches are refreshed.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the product does not exist.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * Resource name of product to delete.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error)} [callback]
+ * The function which will be called with the result of the API call.
+ * @returns {Promise} - The promise which resolves when API call finishes.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.productPath('[PROJECT]', '[LOCATION]', '[PRODUCT]');
+ * client.deleteProduct({name: formattedName}).catch(err => {
+ * console.error(err);
+ * });
+ */
+ deleteProduct(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.deleteProduct(request, options, callback);
+ }
+
+ /**
+ * Creates and returns a new ReferenceImage resource.
+ *
+ * The `bounding_poly` field is optional. If `bounding_poly` is not specified,
+ * the system will try to detect regions of interest in the image that are
+ * compatible with the product_category on the parent product. If it is
+ * specified, detection is ALWAYS skipped. The system converts polygons into
+ * non-rotated rectangles.
+ *
+ * Note that the pipeline will resize the image if the image resolution is too
+ * large to process (above 50MP).
+ *
+ * Possible errors:
+ *
+ * * Returns INVALID_ARGUMENT if the image_uri is missing or longer than 4096
+ * characters.
+ * * Returns INVALID_ARGUMENT if the product does not exist.
+ * * Returns INVALID_ARGUMENT if bounding_poly is not provided, and nothing
+ * compatible with the parent product's product_category is detected.
+ * * Returns INVALID_ARGUMENT if bounding_poly contains more than 10 polygons.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * Resource name of the product in which to create the reference image.
+ *
+ * Format is
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ * @param {Object} request.referenceImage
+ * The reference image to create.
+ * If an image ID is specified, it is ignored.
+ *
+ * This object should have the same structure as [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}
+ * @param {string} request.referenceImageId
+ * A user-supplied resource id for the ReferenceImage to be added. If set,
+ * the server will attempt to use this value as the resource id. If it is
+ * already in use, an error is returned with code ALREADY_EXISTS. Must be at
+ * most 128 characters long. It cannot contain the character `/`.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedParent = client.productPath('[PROJECT]', '[LOCATION]', '[PRODUCT]');
+ * const referenceImage = {};
+ * const referenceImageId = '';
+ * const request = {
+ * parent: formattedParent,
+ * referenceImage: referenceImage,
+ * referenceImageId: referenceImageId,
+ * };
+ * client.createReferenceImage(request)
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ createReferenceImage(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.createReferenceImage(request, options, callback);
+ }
+
+ /**
+ * Permanently deletes a reference image.
+ *
+ * The image metadata will be deleted right away, but search queries
+ * against ProductSets containing the image may still work until all related
+ * caches are refreshed.
+ *
+ * The actual image files are not deleted from Google Cloud Storage.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the reference image does not exist.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * The resource name of the reference image to delete.
+ *
+ * Format is:
+ *
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error)} [callback]
+ * The function which will be called with the result of the API call.
+ * @returns {Promise} - The promise which resolves when API call finishes.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.referenceImagePath('[PROJECT]', '[LOCATION]', '[PRODUCT]', '[REFERENCE_IMAGE]');
+ * client.deleteReferenceImage({name: formattedName}).catch(err => {
+ * console.error(err);
+ * });
+ */
+ deleteReferenceImage(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.deleteReferenceImage(request, options, callback);
+ }
+
+ /**
+ * Lists reference images.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the parent product does not exist.
+ * * Returns INVALID_ARGUMENT if the page_size is greater than 100, or less
+ * than 1.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * Resource name of the product containing the reference images.
+ *
+ * Format is
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Array, ?Object, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is Array of [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}.
+ *
+ * When autoPaginate: false is specified through options, it contains the result
+ * in a single response. If the response indicates the next page exists, the third
+ * parameter is set to be used for the next request object. The fourth parameter keeps
+ * the raw response object of an object representing [ListReferenceImagesResponse]{@link google.cloud.vision.v1p4beta1.ListReferenceImagesResponse}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is Array of [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}.
+ *
+ * When autoPaginate: false is specified through options, the array has three elements.
+ * The first element is Array of [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage} in a single response.
+ * The second element is the next request object if the response
+ * indicates the next page exists, or null. The third element is
+ * an object representing [ListReferenceImagesResponse]{@link google.cloud.vision.v1p4beta1.ListReferenceImagesResponse}.
+ *
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * // Iterate over all elements.
+ * const formattedParent = client.productPath('[PROJECT]', '[LOCATION]', '[PRODUCT]');
+ *
+ * client.listReferenceImages({parent: formattedParent})
+ * .then(responses => {
+ * const resources = responses[0];
+ * for (const resource of resources) {
+ * // doThingsWith(resource)
+ * }
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ *
+ * // Or obtain the paged response.
+ * const formattedParent = client.productPath('[PROJECT]', '[LOCATION]', '[PRODUCT]');
+ *
+ *
+ * const options = {autoPaginate: false};
+ * const callback = responses => {
+ * // The actual resources in a response.
+ * const resources = responses[0];
+ * // The next request if the response shows that there are more responses.
+ * const nextRequest = responses[1];
+ * // The actual response object, if necessary.
+ * // const rawResponse = responses[2];
+ * for (const resource of resources) {
+ * // doThingsWith(resource);
+ * }
+ * if (nextRequest) {
+ * // Fetch the next page.
+ * return client.listReferenceImages(nextRequest, options).then(callback);
+ * }
+ * }
+ * client.listReferenceImages({parent: formattedParent}, options)
+ * .then(callback)
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ listReferenceImages(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.listReferenceImages(request, options, callback);
+ }
+
+ /**
+ * Equivalent to {@link listReferenceImages}, but returns a NodeJS Stream object.
+ *
+ * This fetches the paged responses for {@link listReferenceImages} continuously
+ * and invokes the callback registered for 'data' event for each element in the
+ * responses.
+ *
+ * The returned object has 'end' method when no more elements are required.
+ *
+ * autoPaginate option will be ignored.
+ *
+ * @see {@link https://nodejs.org/api/stream.html}
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * Resource name of the product containing the reference images.
+ *
+ * Format is
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @returns {Stream}
+ * An object stream which emits an object representing [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage} on 'data' event.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedParent = client.productPath('[PROJECT]', '[LOCATION]', '[PRODUCT]');
+ * client.listReferenceImagesStream({parent: formattedParent})
+ * .on('data', element => {
+ * // doThingsWith(element)
+ * }).on('error', err => {
+ * console.log(err);
+ * });
+ */
+ listReferenceImagesStream(request, options) {
+ options = options || {};
+
+ return this._descriptors.page.listReferenceImages.createStream(
+ this._innerApiCalls.listReferenceImages,
+ request,
+ options
+ );
+ }
+
+ /**
+ * Gets information associated with a ReferenceImage.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the specified image does not exist.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * The resource name of the ReferenceImage to get.
+ *
+ * Format is:
+ *
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [ReferenceImage]{@link google.cloud.vision.v1p4beta1.ReferenceImage}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.referenceImagePath('[PROJECT]', '[LOCATION]', '[PRODUCT]', '[REFERENCE_IMAGE]');
+ * client.getReferenceImage({name: formattedName})
+ * .then(responses => {
+ * const response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ getReferenceImage(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.getReferenceImage(request, options, callback);
+ }
+
+ /**
+ * Adds a Product to the specified ProductSet. If the Product is already
+ * present, no change is made.
+ *
+ * One Product can be added to at most 100 ProductSets.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND if the Product or the ProductSet doesn't exist.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * The resource name for the ProductSet to modify.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ * @param {string} request.product
+ * The resource name for the Product to be added to this ProductSet.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error)} [callback]
+ * The function which will be called with the result of the API call.
+ * @returns {Promise} - The promise which resolves when API call finishes.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.productSetPath('[PROJECT]', '[LOCATION]', '[PRODUCT_SET]');
+ * const product = '';
+ * const request = {
+ * name: formattedName,
+ * product: product,
+ * };
+ * client.addProductToProductSet(request).catch(err => {
+ * console.error(err);
+ * });
+ */
+ addProductToProductSet(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.addProductToProductSet(
+ request,
+ options,
+ callback
+ );
+ }
+
+ /**
+ * Removes a Product from the specified ProductSet.
+ *
+ * Possible errors:
+ *
+ * * Returns NOT_FOUND If the Product is not found under the ProductSet.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * The resource name for the ProductSet to modify.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ * @param {string} request.product
+ * The resource name for the Product to be removed from this ProductSet.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error)} [callback]
+ * The function which will be called with the result of the API call.
+ * @returns {Promise} - The promise which resolves when API call finishes.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.productSetPath('[PROJECT]', '[LOCATION]', '[PRODUCT_SET]');
+ * const product = '';
+ * const request = {
+ * name: formattedName,
+ * product: product,
+ * };
+ * client.removeProductFromProductSet(request).catch(err => {
+ * console.error(err);
+ * });
+ */
+ removeProductFromProductSet(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.removeProductFromProductSet(
+ request,
+ options,
+ callback
+ );
+ }
+
+ /**
+ * Lists the Products in a ProductSet, in an unspecified order. If the
+ * ProductSet does not exist, the products field of the response will be
+ * empty.
+ *
+ * Possible errors:
+ *
+ * * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * The ProductSet resource for which to retrieve Products.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Array, ?Object, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is Array of [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ *
+ * When autoPaginate: false is specified through options, it contains the result
+ * in a single response. If the response indicates the next page exists, the third
+ * parameter is set to be used for the next request object. The fourth parameter keeps
+ * the raw response object of an object representing [ListProductsInProductSetResponse]{@link google.cloud.vision.v1p4beta1.ListProductsInProductSetResponse}.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is Array of [Product]{@link google.cloud.vision.v1p4beta1.Product}.
+ *
+ * When autoPaginate: false is specified through options, the array has three elements.
+ * The first element is Array of [Product]{@link google.cloud.vision.v1p4beta1.Product} in a single response.
+ * The second element is the next request object if the response
+ * indicates the next page exists, or null. The third element is
+ * an object representing [ListProductsInProductSetResponse]{@link google.cloud.vision.v1p4beta1.ListProductsInProductSetResponse}.
+ *
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * // Iterate over all elements.
+ * const formattedName = client.productSetPath('[PROJECT]', '[LOCATION]', '[PRODUCT_SET]');
+ *
+ * client.listProductsInProductSet({name: formattedName})
+ * .then(responses => {
+ * const resources = responses[0];
+ * for (const resource of resources) {
+ * // doThingsWith(resource)
+ * }
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ *
+ * // Or obtain the paged response.
+ * const formattedName = client.productSetPath('[PROJECT]', '[LOCATION]', '[PRODUCT_SET]');
+ *
+ *
+ * const options = {autoPaginate: false};
+ * const callback = responses => {
+ * // The actual resources in a response.
+ * const resources = responses[0];
+ * // The next request if the response shows that there are more responses.
+ * const nextRequest = responses[1];
+ * // The actual response object, if necessary.
+ * // const rawResponse = responses[2];
+ * for (const resource of resources) {
+ * // doThingsWith(resource);
+ * }
+ * if (nextRequest) {
+ * // Fetch the next page.
+ * return client.listProductsInProductSet(nextRequest, options).then(callback);
+ * }
+ * }
+ * client.listProductsInProductSet({name: formattedName}, options)
+ * .then(callback)
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ listProductsInProductSet(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.listProductsInProductSet(
+ request,
+ options,
+ callback
+ );
+ }
+
+ /**
+ * Equivalent to {@link listProductsInProductSet}, but returns a NodeJS Stream object.
+ *
+ * This fetches the paged responses for {@link listProductsInProductSet} continuously
+ * and invokes the callback registered for 'data' event for each element in the
+ * responses.
+ *
+ * The returned object has 'end' method when no more elements are required.
+ *
+ * autoPaginate option will be ignored.
+ *
+ * @see {@link https://nodejs.org/api/stream.html}
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.name
+ * The ProductSet resource for which to retrieve Products.
+ *
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`
+ * @param {number} [request.pageSize]
+ * The maximum number of resources contained in the underlying API
+ * response. If page streaming is performed per-resource, this
+ * parameter does not affect the return value. If page streaming is
+ * performed per-page, this determines the maximum number of
+ * resources in a page.
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @returns {Stream}
+ * An object stream which emits an object representing [Product]{@link google.cloud.vision.v1p4beta1.Product} on 'data' event.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedName = client.productSetPath('[PROJECT]', '[LOCATION]', '[PRODUCT_SET]');
+ * client.listProductsInProductSetStream({name: formattedName})
+ * .on('data', element => {
+ * // doThingsWith(element)
+ * }).on('error', err => {
+ * console.log(err);
+ * });
+ */
+ listProductsInProductSetStream(request, options) {
+ options = options || {};
+
+ return this._descriptors.page.listProductsInProductSet.createStream(
+ this._innerApiCalls.listProductsInProductSet,
+ request,
+ options
+ );
+ }
+
+ /**
+ * Asynchronous API that imports a list of reference images to specified
+ * product sets based on a list of image information.
+ *
+ * The google.longrunning.Operation API can be used to keep track of the
+ * progress and results of the request.
+ * `Operation.metadata` contains `BatchOperationMetadata`. (progress)
+ * `Operation.response` contains `ImportProductSetsResponse`. (results)
+ *
+ * The input source of this method is a csv file on Google Cloud Storage.
+ * For the format of the csv file please see
+ * ImportProductSetsGcsSource.csv_file_uri.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.parent
+ * The project in which the ProductSets should be imported.
+ *
+ * Format is `projects/PROJECT_ID/locations/LOC_ID`.
+ * @param {Object} request.inputConfig
+ * The input content for the list of requests.
+ *
+ * This object should have the same structure as [ImportProductSetsInputConfig]{@link google.cloud.vision.v1p4beta1.ImportProductSetsInputConfig}
+ * @param {Object} [options]
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)} [callback]
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * const vision = require('vision.v1p4beta1');
+ *
+ * const client = new vision.v1p4beta1.ProductSearchClient({
+ * // optional auth parameters.
+ * });
+ *
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ * const inputConfig = {};
+ * const request = {
+ * parent: formattedParent,
+ * inputConfig: inputConfig,
+ * };
+ *
+ * // Handle the operation using the promise pattern.
+ * client.importProductSets(request)
+ * .then(responses => {
+ * const [operation, initialApiResponse] = responses;
+ *
+ * // Operation#promise starts polling for the completion of the LRO.
+ * return operation.promise();
+ * })
+ * .then(responses => {
+ * const result = responses[0];
+ * const metadata = responses[1];
+ * const finalApiResponse = responses[2];
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ *
+ * const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ * const inputConfig = {};
+ * const request = {
+ * parent: formattedParent,
+ * inputConfig: inputConfig,
+ * };
+ *
+ * // Handle the operation using the event emitter pattern.
+ * client.importProductSets(request)
+ * .then(responses => {
+ * const [operation, initialApiResponse] = responses;
+ *
+ * // Adding a listener for the "complete" event starts polling for the
+ * // completion of the operation.
+ * operation.on('complete', (result, metadata, finalApiResponse) => {
+ * // doSomethingWith(result);
+ * });
+ *
+ * // Adding a listener for the "progress" event causes the callback to be
+ * // called on any change in metadata when the operation is polled.
+ * operation.on('progress', (metadata, apiResponse) => {
+ * // doSomethingWith(metadata)
+ * });
+ *
+ * // Adding a listener for the "error" event handles any errors found during polling.
+ * operation.on('error', err => {
+ * // throw(err);
+ * });
+ * })
+ * .catch(err => {
+ * console.error(err);
+ * });
+ */
+ importProductSets(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ options = options || {};
+
+ return this._innerApiCalls.importProductSets(request, options, callback);
+ }
+
+ // --------------------
+ // -- Path templates --
+ // --------------------
+
+ /**
+ * Return a fully-qualified location resource name string.
+ *
+ * @param {String} project
+ * @param {String} location
+ * @returns {String}
+ */
+ locationPath(project, location) {
+ return this._pathTemplates.locationPathTemplate.render({
+ project: project,
+ location: location,
+ });
+ }
+
+ /**
+ * Return a fully-qualified product_set resource name string.
+ *
+ * @param {String} project
+ * @param {String} location
+ * @param {String} productSet
+ * @returns {String}
+ */
+ productSetPath(project, location, productSet) {
+ return this._pathTemplates.productSetPathTemplate.render({
+ project: project,
+ location: location,
+ product_set: productSet,
+ });
+ }
+
+ /**
+ * Return a fully-qualified product resource name string.
+ *
+ * @param {String} project
+ * @param {String} location
+ * @param {String} product
+ * @returns {String}
+ */
+ productPath(project, location, product) {
+ return this._pathTemplates.productPathTemplate.render({
+ project: project,
+ location: location,
+ product: product,
+ });
+ }
+
+ /**
+ * Return a fully-qualified reference_image resource name string.
+ *
+ * @param {String} project
+ * @param {String} location
+ * @param {String} product
+ * @param {String} referenceImage
+ * @returns {String}
+ */
+ referenceImagePath(project, location, product, referenceImage) {
+ return this._pathTemplates.referenceImagePathTemplate.render({
+ project: project,
+ location: location,
+ product: product,
+ reference_image: referenceImage,
+ });
+ }
+
+ /**
+ * Parse the locationName from a location resource.
+ *
+ * @param {String} locationName
+ * A fully-qualified path representing a location resources.
+ * @returns {String} - A string representing the project.
+ */
+ matchProjectFromLocationName(locationName) {
+ return this._pathTemplates.locationPathTemplate.match(locationName).project;
+ }
+
+ /**
+ * Parse the locationName from a location resource.
+ *
+ * @param {String} locationName
+ * A fully-qualified path representing a location resources.
+ * @returns {String} - A string representing the location.
+ */
+ matchLocationFromLocationName(locationName) {
+ return this._pathTemplates.locationPathTemplate.match(locationName)
+ .location;
+ }
+
+ /**
+ * Parse the productSetName from a product_set resource.
+ *
+ * @param {String} productSetName
+ * A fully-qualified path representing a product_set resources.
+ * @returns {String} - A string representing the project.
+ */
+ matchProjectFromProductSetName(productSetName) {
+ return this._pathTemplates.productSetPathTemplate.match(productSetName)
+ .project;
+ }
+
+ /**
+ * Parse the productSetName from a product_set resource.
+ *
+ * @param {String} productSetName
+ * A fully-qualified path representing a product_set resources.
+ * @returns {String} - A string representing the location.
+ */
+ matchLocationFromProductSetName(productSetName) {
+ return this._pathTemplates.productSetPathTemplate.match(productSetName)
+ .location;
+ }
+
+ /**
+ * Parse the productSetName from a product_set resource.
+ *
+ * @param {String} productSetName
+ * A fully-qualified path representing a product_set resources.
+ * @returns {String} - A string representing the product_set.
+ */
+ matchProductSetFromProductSetName(productSetName) {
+ return this._pathTemplates.productSetPathTemplate.match(productSetName)
+ .product_set;
+ }
+
+ /**
+ * Parse the productName from a product resource.
+ *
+ * @param {String} productName
+ * A fully-qualified path representing a product resources.
+ * @returns {String} - A string representing the project.
+ */
+ matchProjectFromProductName(productName) {
+ return this._pathTemplates.productPathTemplate.match(productName).project;
+ }
+
+ /**
+ * Parse the productName from a product resource.
+ *
+ * @param {String} productName
+ * A fully-qualified path representing a product resources.
+ * @returns {String} - A string representing the location.
+ */
+ matchLocationFromProductName(productName) {
+ return this._pathTemplates.productPathTemplate.match(productName).location;
+ }
+
+ /**
+ * Parse the productName from a product resource.
+ *
+ * @param {String} productName
+ * A fully-qualified path representing a product resources.
+ * @returns {String} - A string representing the product.
+ */
+ matchProductFromProductName(productName) {
+ return this._pathTemplates.productPathTemplate.match(productName).product;
+ }
+
+ /**
+ * Parse the referenceImageName from a reference_image resource.
+ *
+ * @param {String} referenceImageName
+ * A fully-qualified path representing a reference_image resources.
+ * @returns {String} - A string representing the project.
+ */
+ matchProjectFromReferenceImageName(referenceImageName) {
+ return this._pathTemplates.referenceImagePathTemplate.match(
+ referenceImageName
+ ).project;
+ }
+
+ /**
+ * Parse the referenceImageName from a reference_image resource.
+ *
+ * @param {String} referenceImageName
+ * A fully-qualified path representing a reference_image resources.
+ * @returns {String} - A string representing the location.
+ */
+ matchLocationFromReferenceImageName(referenceImageName) {
+ return this._pathTemplates.referenceImagePathTemplate.match(
+ referenceImageName
+ ).location;
+ }
+
+ /**
+ * Parse the referenceImageName from a reference_image resource.
+ *
+ * @param {String} referenceImageName
+ * A fully-qualified path representing a reference_image resources.
+ * @returns {String} - A string representing the product.
+ */
+ matchProductFromReferenceImageName(referenceImageName) {
+ return this._pathTemplates.referenceImagePathTemplate.match(
+ referenceImageName
+ ).product;
+ }
+
+ /**
+ * Parse the referenceImageName from a reference_image resource.
+ *
+ * @param {String} referenceImageName
+ * A fully-qualified path representing a reference_image resources.
+ * @returns {String} - A string representing the reference_image.
+ */
+ matchReferenceImageFromReferenceImageName(referenceImageName) {
+ return this._pathTemplates.referenceImagePathTemplate.match(
+ referenceImageName
+ ).reference_image;
+ }
+}
+
+module.exports = ProductSearchClient;
diff --git a/packages/google-cloud-vision/src/v1p4beta1/product_search_client_config.json b/packages/google-cloud-vision/src/v1p4beta1/product_search_client_config.json
new file mode 100644
index 00000000000..37cb6d2a79a
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p4beta1/product_search_client_config.json
@@ -0,0 +1,116 @@
+{
+ "interfaces": {
+ "google.cloud.vision.v1p4beta1.ProductSearch": {
+ "retry_codes": {
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ],
+ "non_idempotent": []
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 20000,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 20000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "CreateProductSet": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "ListProductSets": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "GetProductSet": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "UpdateProductSet": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "DeleteProductSet": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "CreateProduct": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "ListProducts": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "GetProduct": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "UpdateProduct": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "DeleteProduct": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "CreateReferenceImage": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "DeleteReferenceImage": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "ListReferenceImages": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "GetReferenceImage": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "AddProductToProductSet": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "RemoveProductFromProductSet": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "ListProductsInProductSet": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
+ "ImportProductSets": {
+ "timeout_millis": 60000,
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git a/packages/google-cloud-vision/synth.py b/packages/google-cloud-vision/synth.py
index e30cd8ac326..2039a148c40 100644
--- a/packages/google-cloud-vision/synth.py
+++ b/packages/google-cloud-vision/synth.py
@@ -23,7 +23,7 @@
# Run the gapic generator
gapic = gcp.GAPICGenerator()
-versions = ['v1', 'v1p1beta1', 'v1p2beta1', 'v1p3beta1']
+versions = ['v1', 'v1p1beta1', 'v1p2beta1', 'v1p3beta1', 'v1p4beta1']
for version in versions:
library = gapic.node_library('vision', version)
s.copy(library, excludes=['src/index.js', 'README.md', 'package.json'])
diff --git a/packages/google-cloud-vision/test/gapic-v1p4beta1.js b/packages/google-cloud-vision/test/gapic-v1p4beta1.js
new file mode 100644
index 00000000000..566eb306fcb
--- /dev/null
+++ b/packages/google-cloud-vision/test/gapic-v1p4beta1.js
@@ -0,0 +1,1604 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const assert = require('assert');
+
+const visionModule = require('../src');
+
+const FAKE_STATUS_CODE = 1;
+const error = new Error();
+error.code = FAKE_STATUS_CODE;
+
+describe('ProductSearchClient', () => {
+ describe('createProductSet', () => {
+ it('invokes createProductSet without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const productSet = {};
+ const productSetId = 'productSetId4216680';
+ const request = {
+ parent: formattedParent,
+ productSet: productSet,
+ productSetId: productSetId,
+ };
+
+ // Mock response
+ const name = 'name3373707';
+ const displayName = 'displayName1615086568';
+ const expectedResponse = {
+ name: name,
+ displayName: displayName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.createProductSet = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.createProductSet(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes createProductSet with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const productSet = {};
+ const productSetId = 'productSetId4216680';
+ const request = {
+ parent: formattedParent,
+ productSet: productSet,
+ productSetId: productSetId,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.createProductSet = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.createProductSet(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('listProductSets', () => {
+ it('invokes listProductSets without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const request = {
+ parent: formattedParent,
+ };
+
+ // Mock response
+ const nextPageToken = '';
+ const productSetsElement = {};
+ const productSets = [productSetsElement];
+ const expectedResponse = {
+ nextPageToken: nextPageToken,
+ productSets: productSets,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listProductSets = (
+ actualRequest,
+ options,
+ callback
+ ) => {
+ assert.deepStrictEqual(actualRequest, request);
+ callback(null, expectedResponse.productSets);
+ };
+
+ client.listProductSets(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse.productSets);
+ done();
+ });
+ });
+
+ it('invokes listProductSets with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const request = {
+ parent: formattedParent,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listProductSets = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.listProductSets(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('getProductSet', () => {
+ it('invokes getProductSet without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock response
+ const name2 = 'name2-1052831874';
+ const displayName = 'displayName1615086568';
+ const expectedResponse = {
+ name: name2,
+ displayName: displayName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.getProductSet = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.getProductSet(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes getProductSet with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.getProductSet = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.getProductSet(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('updateProductSet', () => {
+ it('invokes updateProductSet without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const productSet = {};
+ const updateMask = {};
+ const request = {
+ productSet: productSet,
+ updateMask: updateMask,
+ };
+
+ // Mock response
+ const name = 'name3373707';
+ const displayName = 'displayName1615086568';
+ const expectedResponse = {
+ name: name,
+ displayName: displayName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.updateProductSet = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.updateProductSet(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes updateProductSet with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const productSet = {};
+ const updateMask = {};
+ const request = {
+ productSet: productSet,
+ updateMask: updateMask,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.updateProductSet = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.updateProductSet(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('deleteProductSet', () => {
+ it('invokes deleteProductSet without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.deleteProductSet = mockSimpleGrpcMethod(request);
+
+ client.deleteProductSet(request, err => {
+ assert.ifError(err);
+ done();
+ });
+ });
+
+ it('invokes deleteProductSet with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.deleteProductSet = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.deleteProductSet(request, err => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
+ describe('createProduct', () => {
+ it('invokes createProduct without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const product = {};
+ const productId = 'productId1753008747';
+ const request = {
+ parent: formattedParent,
+ product: product,
+ productId: productId,
+ };
+
+ // Mock response
+ const name = 'name3373707';
+ const displayName = 'displayName1615086568';
+ const description = 'description-1724546052';
+ const productCategory = 'productCategory-1607451058';
+ const expectedResponse = {
+ name: name,
+ displayName: displayName,
+ description: description,
+ productCategory: productCategory,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.createProduct = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.createProduct(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes createProduct with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const product = {};
+ const productId = 'productId1753008747';
+ const request = {
+ parent: formattedParent,
+ product: product,
+ productId: productId,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.createProduct = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.createProduct(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('listProducts', () => {
+ it('invokes listProducts without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const request = {
+ parent: formattedParent,
+ };
+
+ // Mock response
+ const nextPageToken = '';
+ const productsElement = {};
+ const products = [productsElement];
+ const expectedResponse = {
+ nextPageToken: nextPageToken,
+ products: products,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listProducts = (
+ actualRequest,
+ options,
+ callback
+ ) => {
+ assert.deepStrictEqual(actualRequest, request);
+ callback(null, expectedResponse.products);
+ };
+
+ client.listProducts(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse.products);
+ done();
+ });
+ });
+
+ it('invokes listProducts with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const request = {
+ parent: formattedParent,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listProducts = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.listProducts(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('getProduct', () => {
+ it('invokes getProduct without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock response
+ const name2 = 'name2-1052831874';
+ const displayName = 'displayName1615086568';
+ const description = 'description-1724546052';
+ const productCategory = 'productCategory-1607451058';
+ const expectedResponse = {
+ name: name2,
+ displayName: displayName,
+ description: description,
+ productCategory: productCategory,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.getProduct = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.getProduct(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes getProduct with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.getProduct = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.getProduct(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('updateProduct', () => {
+ it('invokes updateProduct without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const product = {};
+ const updateMask = {};
+ const request = {
+ product: product,
+ updateMask: updateMask,
+ };
+
+ // Mock response
+ const name = 'name3373707';
+ const displayName = 'displayName1615086568';
+ const description = 'description-1724546052';
+ const productCategory = 'productCategory-1607451058';
+ const expectedResponse = {
+ name: name,
+ displayName: displayName,
+ description: description,
+ productCategory: productCategory,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.updateProduct = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.updateProduct(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes updateProduct with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const product = {};
+ const updateMask = {};
+ const request = {
+ product: product,
+ updateMask: updateMask,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.updateProduct = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.updateProduct(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('deleteProduct', () => {
+ it('invokes deleteProduct without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.deleteProduct = mockSimpleGrpcMethod(request);
+
+ client.deleteProduct(request, err => {
+ assert.ifError(err);
+ done();
+ });
+ });
+
+ it('invokes deleteProduct with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.deleteProduct = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.deleteProduct(request, err => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
+ describe('createReferenceImage', () => {
+ it('invokes createReferenceImage without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const referenceImage = {};
+ const referenceImageId = 'referenceImageId1946713331';
+ const request = {
+ parent: formattedParent,
+ referenceImage: referenceImage,
+ referenceImageId: referenceImageId,
+ };
+
+ // Mock response
+ const name = 'name3373707';
+ const uri = 'uri116076';
+ const expectedResponse = {
+ name: name,
+ uri: uri,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.createReferenceImage = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.createReferenceImage(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes createReferenceImage with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const referenceImage = {};
+ const referenceImageId = 'referenceImageId1946713331';
+ const request = {
+ parent: formattedParent,
+ referenceImage: referenceImage,
+ referenceImageId: referenceImageId,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.createReferenceImage = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.createReferenceImage(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('deleteReferenceImage', () => {
+ it('invokes deleteReferenceImage without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.referenceImagePath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]',
+ '[REFERENCE_IMAGE]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.deleteReferenceImage = mockSimpleGrpcMethod(
+ request
+ );
+
+ client.deleteReferenceImage(request, err => {
+ assert.ifError(err);
+ done();
+ });
+ });
+
+ it('invokes deleteReferenceImage with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.referenceImagePath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]',
+ '[REFERENCE_IMAGE]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.deleteReferenceImage = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.deleteReferenceImage(request, err => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
+ describe('listReferenceImages', () => {
+ it('invokes listReferenceImages without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const request = {
+ parent: formattedParent,
+ };
+
+ // Mock response
+ const pageSize = 883849137;
+ const nextPageToken = '';
+ const referenceImagesElement = {};
+ const referenceImages = [referenceImagesElement];
+ const expectedResponse = {
+ pageSize: pageSize,
+ nextPageToken: nextPageToken,
+ referenceImages: referenceImages,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listReferenceImages = (
+ actualRequest,
+ options,
+ callback
+ ) => {
+ assert.deepStrictEqual(actualRequest, request);
+ callback(null, expectedResponse.referenceImages);
+ };
+
+ client.listReferenceImages(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse.referenceImages);
+ done();
+ });
+ });
+
+ it('invokes listReferenceImages with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.productPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]'
+ );
+ const request = {
+ parent: formattedParent,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listReferenceImages = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.listReferenceImages(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('getReferenceImage', () => {
+ it('invokes getReferenceImage without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.referenceImagePath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]',
+ '[REFERENCE_IMAGE]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock response
+ const name2 = 'name2-1052831874';
+ const uri = 'uri116076';
+ const expectedResponse = {
+ name: name2,
+ uri: uri,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.getReferenceImage = mockSimpleGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client.getReferenceImage(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes getReferenceImage with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.referenceImagePath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT]',
+ '[REFERENCE_IMAGE]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.getReferenceImage = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.getReferenceImage(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('addProductToProductSet', () => {
+ it('invokes addProductToProductSet without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const product = 'product-309474065';
+ const request = {
+ name: formattedName,
+ product: product,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.addProductToProductSet = mockSimpleGrpcMethod(
+ request
+ );
+
+ client.addProductToProductSet(request, err => {
+ assert.ifError(err);
+ done();
+ });
+ });
+
+ it('invokes addProductToProductSet with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const product = 'product-309474065';
+ const request = {
+ name: formattedName,
+ product: product,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.addProductToProductSet = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.addProductToProductSet(request, err => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
+ describe('removeProductFromProductSet', () => {
+ it('invokes removeProductFromProductSet without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const product = 'product-309474065';
+ const request = {
+ name: formattedName,
+ product: product,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.removeProductFromProductSet = mockSimpleGrpcMethod(
+ request
+ );
+
+ client.removeProductFromProductSet(request, err => {
+ assert.ifError(err);
+ done();
+ });
+ });
+
+ it('invokes removeProductFromProductSet with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const product = 'product-309474065';
+ const request = {
+ name: formattedName,
+ product: product,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.removeProductFromProductSet = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.removeProductFromProductSet(request, err => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
+ describe('listProductsInProductSet', () => {
+ it('invokes listProductsInProductSet without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock response
+ const nextPageToken = '';
+ const productsElement = {};
+ const products = [productsElement];
+ const expectedResponse = {
+ nextPageToken: nextPageToken,
+ products: products,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listProductsInProductSet = (
+ actualRequest,
+ options,
+ callback
+ ) => {
+ assert.deepStrictEqual(actualRequest, request);
+ callback(null, expectedResponse.products);
+ };
+
+ client.listProductsInProductSet(request, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse.products);
+ done();
+ });
+ });
+
+ it('invokes listProductsInProductSet with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedName = client.productSetPath(
+ '[PROJECT]',
+ '[LOCATION]',
+ '[PRODUCT_SET]'
+ );
+ const request = {
+ name: formattedName,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.listProductsInProductSet = mockSimpleGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client.listProductsInProductSet(request, (err, response) => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ assert(typeof response === 'undefined');
+ done();
+ });
+ });
+ });
+
+ describe('importProductSets', function() {
+ it('invokes importProductSets without error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const inputConfig = {};
+ const request = {
+ parent: formattedParent,
+ inputConfig: inputConfig,
+ };
+
+ // Mock response
+ const expectedResponse = {};
+
+ // Mock Grpc layer
+ client._innerApiCalls.importProductSets = mockLongRunningGrpcMethod(
+ request,
+ expectedResponse
+ );
+
+ client
+ .importProductSets(request)
+ .then(responses => {
+ const operation = responses[0];
+ return operation.promise();
+ })
+ .then(responses => {
+ assert.deepStrictEqual(responses[0], expectedResponse);
+ done();
+ })
+ .catch(err => {
+ done(err);
+ });
+ });
+
+ it('invokes importProductSets with error', done => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
+ // Mock request
+ const formattedParent = client.locationPath('[PROJECT]', '[LOCATION]');
+ const inputConfig = {};
+ const request = {
+ parent: formattedParent,
+ inputConfig: inputConfig,
+ };
+
+ // Mock Grpc layer
+ client._innerApiCalls.importProductSets = mockLongRunningGrpcMethod(
+ request,
+ null,
+ error
+ );
+
+ client
+ .importProductSets(request)
+ .then(responses => {
+ const operation = responses[0];
+ return operation.promise();
+ })
+ .then(() => {
+ assert.fail();
+ })
+ .catch(err => {
+ assert(err instanceof Error);
+ assert.strictEqual(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+
+ it('has longrunning decoder functions', () => {
+ const client = new visionModule.v1p4beta1.ProductSearchClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+ assert(
+ client._descriptors.longrunning.importProductSets
+ .responseDecoder instanceof Function
+ );
+ assert(
+ client._descriptors.longrunning.importProductSets
+ .metadataDecoder instanceof Function
+ );
+ });
+ });
+});
// Tests for the v1p4beta1 ImageAnnotatorClient surface.
// NOTE: all describe callbacks are arrow functions for consistency with the
// rest of this file; no suite uses Mocha's `this` context.
describe('ImageAnnotatorClient', () => {
  describe('batchAnnotateImages', () => {
    it('invokes batchAnnotateImages without error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const request = {
        requests: requests,
      };

      // Mock response
      const expectedResponse = {};

      // Mock Grpc layer
      client._innerApiCalls.batchAnnotateImages = mockSimpleGrpcMethod(
        request,
        expectedResponse
      );

      client.batchAnnotateImages(request, (err, response) => {
        assert.ifError(err);
        assert.deepStrictEqual(response, expectedResponse);
        done();
      });
    });

    it('invokes batchAnnotateImages with error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const request = {
        requests: requests,
      };

      // Mock Grpc layer
      client._innerApiCalls.batchAnnotateImages = mockSimpleGrpcMethod(
        request,
        null,
        error
      );

      client.batchAnnotateImages(request, (err, response) => {
        assert(err instanceof Error);
        assert.strictEqual(err.code, FAKE_STATUS_CODE);
        assert(typeof response === 'undefined');
        done();
      });
    });
  });

  describe('batchAnnotateFiles', () => {
    it('invokes batchAnnotateFiles without error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const request = {
        requests: requests,
      };

      // Mock response
      const expectedResponse = {};

      // Mock Grpc layer
      client._innerApiCalls.batchAnnotateFiles = mockSimpleGrpcMethod(
        request,
        expectedResponse
      );

      client.batchAnnotateFiles(request, (err, response) => {
        assert.ifError(err);
        assert.deepStrictEqual(response, expectedResponse);
        done();
      });
    });

    it('invokes batchAnnotateFiles with error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const request = {
        requests: requests,
      };

      // Mock Grpc layer
      client._innerApiCalls.batchAnnotateFiles = mockSimpleGrpcMethod(
        request,
        null,
        error
      );

      client.batchAnnotateFiles(request, (err, response) => {
        assert(err instanceof Error);
        assert.strictEqual(err.code, FAKE_STATUS_CODE);
        assert(typeof response === 'undefined');
        done();
      });
    });
  });

  describe('asyncBatchAnnotateImages', () => {
    it('invokes asyncBatchAnnotateImages without error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const outputConfig = {};
      const request = {
        requests: requests,
        outputConfig: outputConfig,
      };

      // Mock response
      const expectedResponse = {};

      // Mock Grpc layer
      client._innerApiCalls.asyncBatchAnnotateImages = mockLongRunningGrpcMethod(
        request,
        expectedResponse
      );

      client
        .asyncBatchAnnotateImages(request)
        .then(responses => {
          const operation = responses[0];
          return operation.promise();
        })
        .then(responses => {
          assert.deepStrictEqual(responses[0], expectedResponse);
          done();
        })
        .catch(err => {
          done(err);
        });
    });

    it('invokes asyncBatchAnnotateImages with error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const outputConfig = {};
      const request = {
        requests: requests,
        outputConfig: outputConfig,
      };

      // Mock Grpc layer: the operation's promise rejects with the mock error.
      client._innerApiCalls.asyncBatchAnnotateImages = mockLongRunningGrpcMethod(
        request,
        null,
        error
      );

      client
        .asyncBatchAnnotateImages(request)
        .then(responses => {
          const operation = responses[0];
          return operation.promise();
        })
        .then(() => {
          assert.fail();
        })
        .catch(err => {
          assert(err instanceof Error);
          assert.strictEqual(err.code, FAKE_STATUS_CODE);
          done();
        });
    });

    it('has longrunning decoder functions', () => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });
      assert(
        client._descriptors.longrunning.asyncBatchAnnotateImages
          .responseDecoder instanceof Function
      );
      assert(
        client._descriptors.longrunning.asyncBatchAnnotateImages
          .metadataDecoder instanceof Function
      );
    });
  });

  describe('asyncBatchAnnotateFiles', () => {
    it('invokes asyncBatchAnnotateFiles without error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const request = {
        requests: requests,
      };

      // Mock response
      const expectedResponse = {};

      // Mock Grpc layer
      client._innerApiCalls.asyncBatchAnnotateFiles = mockLongRunningGrpcMethod(
        request,
        expectedResponse
      );

      client
        .asyncBatchAnnotateFiles(request)
        .then(responses => {
          const operation = responses[0];
          return operation.promise();
        })
        .then(responses => {
          assert.deepStrictEqual(responses[0], expectedResponse);
          done();
        })
        .catch(err => {
          done(err);
        });
    });

    it('invokes asyncBatchAnnotateFiles with error', done => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });

      // Mock request
      const requests = [];
      const request = {
        requests: requests,
      };

      // Mock Grpc layer: the operation's promise rejects with the mock error.
      client._innerApiCalls.asyncBatchAnnotateFiles = mockLongRunningGrpcMethod(
        request,
        null,
        error
      );

      client
        .asyncBatchAnnotateFiles(request)
        .then(responses => {
          const operation = responses[0];
          return operation.promise();
        })
        .then(() => {
          assert.fail();
        })
        .catch(err => {
          assert(err instanceof Error);
          assert.strictEqual(err.code, FAKE_STATUS_CODE);
          done();
        });
    });

    it('has longrunning decoder functions', () => {
      const client = new visionModule.v1p4beta1.ImageAnnotatorClient({
        credentials: {client_email: 'bogus', private_key: 'bogus'},
        projectId: 'bogus',
      });
      assert(
        client._descriptors.longrunning.asyncBatchAnnotateFiles
          .responseDecoder instanceof Function
      );
      assert(
        client._descriptors.longrunning.asyncBatchAnnotateFiles
          .metadataDecoder instanceof Function
      );
    });
  });
});
+
+function mockSimpleGrpcMethod(expectedRequest, response, error) {
+ return function(actualRequest, options, callback) {
+ assert.deepStrictEqual(actualRequest, expectedRequest);
+ if (error) {
+ callback(error);
+ } else if (response) {
+ callback(null, response);
+ } else {
+ callback(null);
+ }
+ };
+}
+
+function mockLongRunningGrpcMethod(expectedRequest, response, error) {
+ return request => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise: function() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}