diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_payload.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_payload.proto
index 980c0e368c5..a81feaf104c 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_payload.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_payload.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_spec.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_spec.proto
index b88a495b45f..9eb61579e0b 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_spec.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/annotation_spec.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,12 +11,12 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
+import "google/api/resource.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
@@ -28,16 +28,19 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// A definition of an annotation spec.
message AnnotationSpec {
+ option (google.api.resource) = {
+ type: "automl.googleapis.com/AnnotationSpec"
+ pattern: "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}"
+ };
+
// Output only. Resource name of the annotation spec.
// Form:
//
// 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}'
string name = 1;
- // Required.
- // The name of the annotation spec to show in the interface. The name can be
+ // Required. The name of the annotation spec to show in the interface. The name can be
// up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`.
- // (_), and ASCII digits 0-9.
string display_name = 2;
// Output only. The number of examples in the parent dataset
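The `google.api.resource` option added to `AnnotationSpec` above surfaces in generated clients as a path helper. A minimal sketch, assuming the Node.js library (`@google-cloud/automl`) regenerated from these protos; the method name follows the generator's convention and is not itself part of this diff:

    import {AutoMlClient} from '@google-cloud/automl';

    const client = new AutoMlClient();
    // Expands the pattern declared above:
    // "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}"
    const name = client.annotationSpecPath('my-project', 'us-central1', 'DS123', 'AS456');
    console.log(name);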
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/classification.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/classification.proto
index b42370b3fe1..9213bfde891 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/classification.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/classification.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -50,6 +49,8 @@ message ClassificationAnnotation {
}
// Model evaluation metrics for classification problems.
+// Note: For Video Classification these metrics only describe the quality of
+// the Video Classification predictions of "segment_classification" type.
message ClassificationEvaluationMetrics {
// Metrics for a single confidence threshold.
message ConfidenceMetricsEntry {
@@ -90,10 +91,7 @@ message ClassificationEvaluationMetrics {
// for each example.
float false_positive_rate_at1 = 9;
- // Output only. The harmonic mean of
- // [recall_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1]
- // and
- // [precision_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
+ // Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
float f1_score_at1 = 7;
// Output only. The number of model created labels that match a ground truth
@@ -120,17 +118,24 @@ message ClassificationEvaluationMetrics {
// Output only. Value of the specific cell in the confusion matrix.
// The number of values each row has (i.e. the length of the row) is equal
// to the length of the `annotation_spec_id` field or, if that one is not
- // populated, length of the
- // [display_name][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name]
- // field.
+ // populated, length of the [display_name][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] field.
repeated int32 example_count = 1;
}
// Output only. IDs of the annotation specs used in the confusion matrix.
+ // For Tables CLASSIFICATION
+ //
+ // [prediction_type][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]
+  // only the list of [annotation_spec_display_name-s][] is populated.
repeated string annotation_spec_id = 1;
// Output only. Display name of the annotation specs used in the confusion
- // matrix, as they were at the moment of the evaluation.
+ // matrix, as they were at the moment of the evaluation. For Tables
+ // CLASSIFICATION
+ //
+ // [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type],
+ // distinct values of the target column at the moment of the model
+ // evaluation are populated here.
repeated string display_name = 3;
// Output only. Rows in the confusion matrix. The number of rows is equal to
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/data_items.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/data_items.proto
index d995bba7688..63896ba922f 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/data_items.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/data_items.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,19 +11,18 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/cloud/automl/v1/geometry.proto";
import "google/cloud/automl/v1/io.proto";
import "google/cloud/automl/v1/text_segment.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -36,6 +35,7 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// Only images up to 30MB in size are supported.
message Image {
// Input only. The data representing the image.
+  // For Predict calls [image_bytes][google.cloud.automl.v1.Image.image_bytes] must be set.
oneof data {
// Image content represented as a stream of bytes.
// Note: As with all `bytes` fields, protobuffers use a pure binary
@@ -53,11 +53,9 @@ message TextSnippet {
// characters long.
string content = 1;
- // Optional. The format of
- // [content][google.cloud.automl.v1.TextSnippet.content]. Currently the only
- // two allowed values are "text/html" and "text/plain". If left blank, the
- // format is automatically determined from the type of the uploaded
- // [content][google.cloud.automl.v1.TextSnippet.content].
+ // Optional. The format of [content][google.cloud.automl.v1.TextSnippet.content]. Currently the only two allowed
+ // values are "text/html" and "text/plain". If left blank, the format is
+ // automatically determined from the type of the uploaded [content][google.cloud.automl.v1.TextSnippet.content].
string mime_type = 2;
// Output only. HTTP URI where you can download the content.
@@ -93,9 +91,7 @@ message DocumentDimensions {
// A structured text document e.g. a PDF.
message Document {
- // Describes the layout information of a
- // [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the
- // document.
+ // Describes the layout information of a [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the document.
message Layout {
// The type of TextSegment in the context of the original document.
enum TextSegmentType {
@@ -141,29 +137,25 @@ message Document {
}
// Text Segment that represents a segment in
- // [document_text][google.cloud.automl.v1.Document.document_text].
+ // [document_text][google.cloud.automl.v1p1beta.Document.document_text].
TextSegment text_segment = 1;
- // Page number of the
- // [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in
- // the original document, starts from 1.
+ // Page number of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the original document, starts
+ // from 1.
int32 page_number = 2;
- // The position of the
- // [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in
- // the page. Contains exactly 4
+ // The position of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the page.
+ // Contains exactly 4
//
- // [normalized_vertices][google.cloud.automl.v1.BoundingPoly.normalized_vertices]
+ // [normalized_vertices][google.cloud.automl.v1p1beta.BoundingPoly.normalized_vertices]
// and they are connected by edges in the order provided, which will
// represent a rectangle parallel to the frame. The
- // [NormalizedVertex-s][google.cloud.automl.v1.NormalizedVertex] are
+ // [NormalizedVertex-s][google.cloud.automl.v1p1beta.NormalizedVertex] are
// relative to the page.
// Coordinates are based on top-left as point (0,0).
BoundingPoly bounding_poly = 3;
- // The type of the
- // [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in
- // document.
+ // The type of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in document.
TextSegmentType text_segment_type = 4;
}
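The note added to `Image` says `image_bytes` must be set for Predict calls. A hedged sketch of such a call through the Node.js client (`PredictionServiceClient` from `@google-cloud/automl`; the project, model ID, and file path are placeholders):

    import {promises as fs} from 'fs';
    import {PredictionServiceClient} from '@google-cloud/automl';

    async function predictImage(): Promise<void> {
      const client = new PredictionServiceClient();
      const imageBytes = await fs.readFile('./image.jpeg');
      const [response] = await client.predict({
        name: client.modelPath('my-project', 'us-central1', 'MODEL_ID'),
        // Per the comment above, image_bytes must be set for Predict calls.
        payload: {image: {imageBytes}},
      });
      console.log(response.payload);
    }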
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/dataset.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/dataset.proto
index 830ac214dc4..2040ec96a32 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/dataset.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/dataset.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -34,6 +33,11 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// A workspace for solving a single, particular machine learning (ML) problem.
// A workspace contains examples that may be annotated.
message Dataset {
+ option (google.api.resource) = {
+ type: "automl.googleapis.com/Dataset"
+ pattern: "projects/{project}/locations/{location}/datasets/{dataset}"
+ };
+
// Required.
// The dataset metadata that is specific to the problem type.
oneof dataset_metadata {
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/detection.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/detection.proto
index 496ffa90b0b..13fe5935fd1 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/detection.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/detection.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,15 +11,14 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/cloud/automl/v1/geometry.proto";
import "google/protobuf/duration.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -33,8 +32,8 @@ message ImageObjectDetectionAnnotation {
// Output only. The rectangle representing the object location.
BoundingPoly bounding_box = 1;
- // Output only. The confidence that this annotation is positive for the parent
- // example, value in [0, 1], higher means higher positivity confidence.
+ // Output only. The confidence that this annotation is positive for the parent example,
+ // value in [0, 1], higher means higher positivity confidence.
float score = 2;
}
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/geometry.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/geometry.proto
index 699e75001ca..a6d97e805da 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/geometry.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/geometry.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/image.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/image.proto
index c4e05552665..5269d22c0d4 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/image.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/image.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,17 +11,16 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/api/resource.proto";
import "google/cloud/automl/v1/annotation_spec.proto";
import "google/cloud/automl/v1/classification.proto";
import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -38,7 +37,9 @@ message ImageClassificationDatasetMetadata {
}
// Dataset metadata specific to image object detection.
-message ImageObjectDetectionDatasetMetadata {}
+message ImageObjectDetectionDatasetMetadata {
+
+}
// Model metadata for image classification.
message ImageClassificationModelMetadata {
@@ -79,38 +80,34 @@ message ImageClassificationModelMetadata {
// This is the default value.
// * `mobile-low-latency-1` - A model that, in addition to providing
// prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
- // and used on a mobile or edge device with TensorFlow
- // afterwards. Expected to have low latency, but may have lower
- // prediction quality than other models.
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
+ // with TensorFlow afterwards. Expected to have low latency, but
+ // may have lower prediction quality than other models.
// * `mobile-versatile-1` - A model that, in addition to providing
// prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
- // and used on a mobile or edge device with TensorFlow
- // afterwards.
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
+ // with TensorFlow afterwards.
// * `mobile-high-accuracy-1` - A model that, in addition to providing
// prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
- // and used on a mobile or edge device with TensorFlow
- // afterwards. Expected to have a higher latency, but should
- // also have a higher prediction quality than other models.
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
+ // with TensorFlow afterwards. Expected to have a higher
+ // latency, but should also have a higher prediction quality
+ // than other models.
// * `mobile-core-ml-low-latency-1` - A model that, in addition to providing
// prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
- // and used on a mobile device with Core ML afterwards. Expected
- // to have low latency, but may have lower prediction quality
- // than other models.
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
+ // ML afterwards. Expected to have low latency, but may have
+ // lower prediction quality than other models.
// * `mobile-core-ml-versatile-1` - A model that, in addition to providing
// prediction via AutoML API, can also be exported (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
- // and used on a mobile device with Core ML afterwards.
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
+ // ML afterwards.
// * `mobile-core-ml-high-accuracy-1` - A model that, in addition to
// providing prediction via AutoML API, can also be exported
- // (see
- // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
- // and used on a mobile device with Core ML afterwards. Expected
- // to have a higher latency, but should also have a higher
- // prediction quality than other models.
+ // (see [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with
+ // Core ML afterwards. Expected to have a higher latency, but
+ // should also have a higher prediction quality than other
+ // models.
string model_type = 7;
// Output only. An approximate number of online prediction QPS that can
@@ -133,6 +130,21 @@ message ImageObjectDetectionModelMetadata {
// * `cloud-low-latency-1` - A model to be used via prediction
// calls to AutoML API. Expected to have low latency, but may
// have lower prediction quality than other models.
+ // * `mobile-low-latency-1` - A model that, in addition to providing
+ // prediction via AutoML API, can also be exported (see
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
+ // with TensorFlow afterwards. Expected to have low latency, but
+ // may have lower prediction quality than other models.
+ // * `mobile-versatile-1` - A model that, in addition to providing
+ // prediction via AutoML API, can also be exported (see
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
+ // with TensorFlow afterwards.
+ // * `mobile-high-accuracy-1` - A model that, in addition to providing
+ // prediction via AutoML API, can also be exported (see
+ // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
+ // with TensorFlow afterwards. Expected to have a higher
+ // latency, but should also have a higher prediction quality
+ // than other models.
string model_type = 1;
// Output only. The number of nodes this model is deployed on. A node is an
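The `model_type` strings documented above (including the three mobile variants this hunk adds for object detection) are supplied verbatim when creating a model. A sketch under the same client assumptions as the earlier snippet (`AutoMlClient` from `@google-cloud/automl`; IDs are placeholders):

    import {AutoMlClient} from '@google-cloud/automl';

    async function createDetector(): Promise<void> {
      const client = new AutoMlClient();
      const [operation] = await client.createModel({
        parent: client.locationPath('my-project', 'us-central1'),
        model: {
          displayName: 'my_detector',
          datasetId: 'DS123',
          // One of the model_type values listed in the comment above.
          imageObjectDetectionModelMetadata: {modelType: 'mobile-low-latency-1'},
        },
      });
      const [model] = await operation.promise(); // long-running operation
      console.log(model.name);
    }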
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/io.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/io.proto
index 84548fc9159..c6ac8a35bc7 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/io.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/io.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,14 +11,13 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/api/field_behavior.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -27,8 +26,7 @@ option java_package = "com.google.cloud.automl.v1";
option php_namespace = "Google\\Cloud\\AutoMl\\V1";
option ruby_package = "Google::Cloud::AutoML::V1";
-// Input configuration for
-// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action.
+// Input configuration for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action.
//
// The format of input depends on dataset_metadata the Dataset into which
// the import is happening has. As input source the
@@ -126,6 +124,107 @@ option ruby_package = "Google::Cloud::AutoML::V1";
//
//
//
+//
+// AutoML Video Intelligence
+//
+//
+// Classification
+//
+// See [Preparing your training
+// data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
+// more information.
+//
+// CSV file(s) with each line in format:
+//
+// ML_USE,GCS_FILE_PATH
+//
+// For `ML_USE`, do not use `VALIDATE`.
+//
+// `GCS_FILE_PATH` is the path to another .csv file that describes training
+// examples for a given `ML_USE`, using the following row format:
+//
+// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
+//
+// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
+// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+//
+// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
+// length of the video, and the end time must be after the start time. Any
+// segment of a video which has one or more labels on it is considered a
+// hard negative for all other labels. Any segment with no labels on
+// it is considered to be unknown. If a whole video is unknown, then
+// it should be mentioned just once with ",," in place of `LABEL,
+// TIME_SEGMENT_START,TIME_SEGMENT_END`.
+//
+// Sample top level CSV file:
+//
+// TRAIN,gs://folder/train_videos.csv
+// TEST,gs://folder/test_videos.csv
+// UNASSIGNED,gs://folder/other_videos.csv
+//
+// Sample rows of a CSV file for a particular ML_USE:
+//
+// gs://folder/video1.avi,car,120,180.000021
+// gs://folder/video1.avi,bike,150,180.000021
+// gs://folder/vid2.avi,car,0,60.5
+// gs://folder/vid3.avi,,,
+//
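The segment rules above (start and end within the video, end after start, ",," for a wholly unknown video) are easy to violate when generating these CSVs. A hypothetical TypeScript check for one row of a per-`ML_USE` file, not part of the library:

    // Validates GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
    function validateVideoRow(row: string, videoLengthSec: number): void {
      const [path, label, start, end] = row.split(',');
      if (!path || !path.startsWith('gs://')) {
        throw new Error(`bad GCS_FILE_PATH in row: ${row}`);
      }
      if (label === '' && start === '' && end === '') {
        return; // ",," form: the whole video is unknown
      }
      const s = Number(start);
      const e = Number(end);
      if (!(s >= 0 && e <= videoLengthSec && e > s)) {
        throw new Error(`segment [${start},${end}] invalid in row: ${row}`);
      }
    }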
+//
+//
+// Object Tracking
+//
+// See [Preparing your training
+// data](/video-intelligence/automl/object-tracking/docs/prepare) for more
+// information.
+//
+// CSV file(s) with each line in format:
+//
+// ML_USE,GCS_FILE_PATH
+//
+// For `ML_USE`, do not use `VALIDATE`.
+//
+// `GCS_FILE_PATH` is the path to another .csv file that describes training
+// examples for a given `ML_USE`, using the following row format:
+//
+// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
+//
+// or
+//
+// GCS_FILE_PATH,,,,,,,,,,
+//
+// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
+// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+// Providing `INSTANCE_ID`s can help to obtain a better model. When
+// a specific labeled entity leaves the video frame and shows up
+// again later, it is preferable, though not required, that the same
+// `INSTANCE_ID` is given to it.
+//
+// `TIMESTAMP` must be within the length of the video, and the
+// `BOUNDING_BOX` is assumed to be drawn on the video frame closest
+// to the `TIMESTAMP`. Any frame mentioned by a `TIMESTAMP` is expected
+// to be exhaustively labeled, and no more than 500 `BOUNDING_BOX`-es per
+// frame are allowed. If a whole video is unknown, then it should be
+// mentioned just once with ",,,,,,,,,," in place of `LABEL,
+// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`.
+//
+// Sample top level CSV file:
+//
+// TRAIN,gs://folder/train_videos.csv
+// TEST,gs://folder/test_videos.csv
+// UNASSIGNED,gs://folder/other_videos.csv
+//
+// Seven sample rows of a CSV file for a particular ML_USE:
+//
+// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
+// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
+// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
+// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
+// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
+// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
+// gs://folder/video2.avi,,,,,,,,,,,
+//
+//
+//
+//
// AutoML Natural Language
//
//
@@ -223,9 +322,11 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// **JSONL files that reference documents**
//
// .JSONL files contain, per line, a JSON document that wraps a
-// `input_config` that contains the path to a source PDF document.
+// `input_config` that contains the path to a source document.
// Multiple JSON documents can be separated using line breaks (\n).
//
+// Supported document extensions: .PDF, .TIF, .TIFF
+//
// For example:
//
// {
@@ -239,19 +340,19 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// {
// "document": {
// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
// }
// }
// }
// }
//
-// **In-line JSONL files with PDF layout information**
+// **In-line JSONL files with document layout information**
//
-// **Note:** You can only annotate PDF files using the UI. The format described
-// below applies to annotated PDF files exported using the UI or `exportData`.
+// **Note:** You can only annotate documents using the UI. The format described
+// below applies to annotated documents exported using the UI or `exportData`.
//
-// In-line .JSONL files for PDF documents contain, per line, a JSON document
-// that wraps a `document` field that provides the textual content of the PDF
+// In-line .JSONL files for documents contain, per line, a JSON document
+// that wraps a `document` field that provides the textual content of the
// document and the layout information.
//
// For example:
@@ -342,8 +443,9 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// 10MB or less in size.
//
// For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
+//
// The `ML_USE` and `LABEL` columns are optional.
-// Supported file extensions: .TXT, .PDF, .ZIP
+// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
//
// A maximum of 100 unique labels are allowed per CSV row.
//
@@ -388,7 +490,7 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// 128kB or less in size.
//
// The `ML_USE` and `SENTIMENT` columns are optional.
-// Supported file extensions: .TXT, .PDF, .ZIP
+// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
//
// * `SENTIMENT` - An integer between 0 and
// Dataset.text_sentiment_dataset_metadata.sentiment_max
@@ -417,6 +519,54 @@ option ruby_package = "Google::Cloud::AutoML::V1";
//
//
//
+//
+// AutoML Tables
+//
+// See [Preparing your training
+// data](https://cloud.google.com/automl-tables/docs/prepare) for more
+// information.
+//
+// You can use either
+// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
+// [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
+// All input is concatenated into a
+// single
+//
+// [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
+//
+// **For gcs_source:**
+//
+// CSV file(s), where the first row of the first file is the header,
+// containing unique column names. If the first row of a subsequent
+// file is the same as the header, then it is also treated as a
+// header. All other rows contain values for the corresponding
+// columns.
+//
+// Each .CSV file by itself must be 10GB or smaller, and their total
+// size must be 100GB or smaller.
+//
+// First three sample rows of a CSV file:
+//
+// "Id","First Name","Last Name","Dob","Addresses"
+//
+// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+//
+// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
+//
+// **For bigquery_source:**
+//
+// A URI of a BigQuery table. The user data size of the BigQuery
+// table must be 100GB or smaller.
+//
+// An imported table must have between 2 and 1,000 columns, inclusive,
+// and between 1000 and 100,000,000 rows, inclusive. At most 5
+// import data operations can run in parallel.
+//
+//
+//
+//
+//
// **Input field definitions:**
//
// `ML_USE`
@@ -435,6 +585,11 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// For each label an AnnotationSpec is created which display_name
// becomes the label; AnnotationSpecs are given back in predictions.
//
+// `INSTANCE_ID`
+// : A positive integer that identifies a specific instance of a
+//     labeled entity on an example. Used, e.g., to track two cars in
+//     a video while being able to tell them apart.
+//
// `BOUNDING_BOX`
// : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
// A rectangle parallel to the frame of the example (image,
@@ -452,6 +607,23 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
// Point 0,0 is in top left.
//
+// `TIME_SEGMENT_START`
+// : (`TIME_OFFSET`)
+// Expresses a beginning, inclusive, of a time segment
+// within an example that has a time dimension
+// (e.g. video).
+//
+// `TIME_SEGMENT_END`
+// : (`TIME_OFFSET`)
+// Expresses an end, exclusive, of a time segment within
+//     an example that has a time dimension (e.g. video).
+//
+// `TIME_OFFSET`
+// : A number of seconds as measured from the start of an
+// example (e.g. video). Fractions are allowed, up to a
+// microsecond precision. "inf" is allowed, and it means the end
+// of the example.
+//
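`TIME_OFFSET` values are plain seconds, fractional up to microsecond precision, with the literal "inf" meaning the end of the example. A tiny illustrative parser restating the definition above (a sketch, not library code):

    // Parses a TIME_OFFSET: seconds from the start of the example,
    // or "inf" for the end of the example.
    function parseTimeOffset(raw: string): number {
      if (raw.trim() === 'inf') return Number.POSITIVE_INFINITY;
      const seconds = Number(raw);
      if (!Number.isFinite(seconds) || seconds < 0) {
        throw new Error(`invalid TIME_OFFSET: ${raw}`);
      }
      return seconds;
    }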
// `TEXT_SNIPPET`
// : The content of a text snippet, UTF-8 encoded, enclosed within
// double quotes ("").
@@ -473,15 +645,22 @@ message InputConfig {
// The source of the input.
oneof source {
// The Google Cloud Storage location for the input content.
- // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData],
- // `gcs_source` points to a CSV file with a structure described in
- // [InputConfig][google.cloud.automl.v1.InputConfig].
+ // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
+ // a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
GcsSource gcs_source = 1;
}
// Additional domain-specific parameters describing the semantic of the
// imported data, any string must be up to 25000
// characters long.
+ //
+ // AutoML Tables
+ //
+ // `schema_inference_version`
+ // : (integer) This value must be supplied.
+ // The version of the
+ // algorithm to use for the initial inference of the
+ // column data types of the imported table. Allowed values: "1".
map<string, string> params = 2;
}
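For Tables imports, the `params` comment above says `schema_inference_version` must be supplied. Roughly, via the Node.js client (the Tables sections in this file are documentation-only for v1, so treat this strictly as an illustration; IDs and URIs are placeholders):

    import {AutoMlClient} from '@google-cloud/automl';

    async function importTable(): Promise<void> {
      const client = new AutoMlClient();
      const [operation] = await client.importData({
        name: client.datasetPath('my-project', 'us-central1', 'DS123'),
        inputConfig: {
          gcsSource: {inputUris: ['gs://folder/table.csv']},
          // Required for Tables per the comment above; allowed value "1".
          params: {schema_inference_version: '1'},
        },
      });
      await operation.promise(); // import runs as a long-running operation
    }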
@@ -496,6 +675,82 @@ message InputConfig {
// non-terminal symbols defined near the end of this comment. The formats
// are:
//
+// AutoML Vision
+// Classification
+//
+// One or more CSV files where each line is a single column:
+//
+// GCS_FILE_PATH
+//
+// The Google Cloud Storage location of an image of up to
+// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
+// This path is treated as the ID in the batch predict output.
+//
+// Sample rows:
+//
+// gs://folder/image1.jpeg
+// gs://folder/image2.gif
+// gs://folder/image3.png
+//
+// Object Detection
+//
+// One or more CSV files where each line is a single column:
+//
+// GCS_FILE_PATH
+//
+// The Google Cloud Storage location of an image of up to
+// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
+// This path is treated as the ID in the batch predict output.
+//
+// Sample rows:
+//
+// gs://folder/image1.jpeg
+// gs://folder/image2.gif
+// gs://folder/image3.png
+//
+//
+//
+// AutoML Video Intelligence
+// Classification
+//
+// One or more CSV files where each line is a single column:
+//
+// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+//
+// `GCS_FILE_PATH` is the Google Cloud Storage location of a video up to 50GB in
+// size and up to 3h in duration.
+// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+//
+// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
+// length of the video, and the end time must be after the start time.
+//
+// Sample rows:
+//
+// gs://folder/video1.mp4,10,40
+// gs://folder/video1.mp4,20,60
+// gs://folder/vid2.mov,0,inf
+//
+// Object Tracking
+//
+// One or more CSV files where each line is a single column:
+//
+// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+//
+// `GCS_FILE_PATH` is the Google Cloud Storage location of a video up to 50GB in
+// size and up to 3h in duration.
+// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+//
+// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
+// length of the video, and the end time must be after the start time.
+//
+// Sample rows:
+//
+// gs://folder/video1.mp4,10,40
+// gs://folder/video1.mp4,20,60
+// gs://folder/vid2.mov,0,inf
+//
+//
+//
// AutoML Natural Language
// Classification
//
@@ -504,13 +759,15 @@ message InputConfig {
// GCS_FILE_PATH
//
// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
-// Supported file extensions: .TXT, .PDF
+// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
+//
// Text files can be no larger than 10MB in size.
//
// Sample rows:
//
// gs://folder/text1.txt
// gs://folder/text2.pdf
+// gs://folder/text3.tif
//
// Sentiment Analysis
// One or more CSV files where each line is a single column:
@@ -518,13 +775,15 @@ message InputConfig {
// GCS_FILE_PATH
//
// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
-// Supported file extensions: .TXT, .PDF
+// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
+//
// Text files can be no larger than 128kB in size.
//
// Sample rows:
//
// gs://folder/text1.txt
// gs://folder/text2.pdf
+// gs://folder/text3.tif
//
// Entity Extraction
//
@@ -540,9 +799,10 @@ message InputConfig {
// be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
// unique.
//
-// Each document JSONL file contains, per line, a proto that wraps a
-// Document proto with `input_config` set. Only PDF documents are
-// currently supported, and each PDF document cannot exceed 2MB in size.
+// Each document JSONL file contains, per line, a proto that wraps a Document
+// proto with `input_config` set. Each document cannot exceed 2MB in size.
+//
+// Supported document extensions: .PDF, .TIF, .TIFF
//
// Each JSONL file must not exceed 100MB in size, and no more than 20
// JSONL files may be passed.
@@ -590,7 +850,7 @@ message InputConfig {
// {
// "document": {
// "input_config": {
-// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
// }
// }
// }
@@ -598,12 +858,83 @@ message InputConfig {
//
//
//
+// AutoML Tables
+//
+// See [Preparing your training
+// data](https://cloud.google.com/automl-tables/docs/predict-batch) for more
+// information.
+//
+// You can use either
+// [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source]
+// or
+// [bigquery_source][BatchPredictInputConfig.bigquery_source].
+//
+// **For gcs_source:**
+//
+// CSV file(s), each by itself 10GB or smaller and total size must be
+// 100GB or smaller, where first file must have a header containing
+// column names. If the first row of a subsequent file is the same as
+// the header, then it is also treated as a header. All other rows
+// contain values for the corresponding columns.
+//
+// The column names must contain the model's
+//
+// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
+// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
+// (order doesn't matter). The columns corresponding to the model's
+// input feature column specs must contain values compatible with the
+// column spec's data types. Prediction on all the rows, i.e. the CSV
+// lines, will be attempted.
+//
+//
+// Sample rows from a CSV file:
+//
+// "First Name","Last Name","Dob","Addresses"
+//
+// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+//
+// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
+//
+// **For bigquery_source:**
+//
+// The URI of a BigQuery table. The user data size of the BigQuery
+// table must be 100GB or smaller.
+//
+// The column names must contain the model's
+//
+// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
+// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
+// (order doesn't matter). The columns corresponding to the model's
+// input feature column specs must contain values compatible with the
+// column spec's data types. Prediction on all the rows of the table
+// will be attempted.
+//
+//
+//
// **Input field definitions:**
//
// `GCS_FILE_PATH`
// : The path to a file on Google Cloud Storage. For example,
// "gs://folder/video.avi".
//
+// `TIME_SEGMENT_START`
+// : (`TIME_OFFSET`)
+// Expresses a beginning, inclusive, of a time segment
+// within an example that has a time dimension
+// (e.g. video).
+//
+// `TIME_SEGMENT_END`
+// : (`TIME_OFFSET`)
+// Expresses an end, exclusive, of a time segment within
+//     an example that has a time dimension (e.g. video).
+//
+// `TIME_OFFSET`
+// : A number of seconds as measured from the start of an
+// example (e.g. video). Fractions are allowed, up to a
+// microsecond precision. "inf" is allowed, and it means the end
+// of the example.
+//
// **Errors:**
//
// If any of the provided CSV files can't be parsed or if more than certain
@@ -630,82 +961,43 @@ message DocumentInputConfig {
GcsSource gcs_source = 1;
}
-// Output configuration for ExportData.
-//
-// As destination the
-// [gcs_destination][google.cloud.automl.v1.OutputConfig.gcs_destination]
-// must be set unless specified otherwise for a domain. If gcs_destination is
-// set then in the given directory a new directory is created. Its name
-// will be "export_data-<dataset-display-name>-<timestamp-of-export-call>",
-// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
-// Only ground truth annotations are exported (not approved annotations are
-// not exported).
-//
-// The outputs correspond to how the data was imported, and may be used as
-// input to import data. The output formats are represented as EBNF with literal
-// commas and same non-terminal symbols definitions are these in import data's
-// [InputConfig][google.cloud.automl.v1.InputConfig]:
-//
-// * For Image Classification:
-// CSV file(s) `image_classification_1.csv`,
-// `image_classification_2.csv`,...,`image_classification_N.csv`with
-// each line in format:
-// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
-// where GCS_FILE_PATHs point at the original, source locations of the
-// imported images.
-// For MULTICLASS classification type, there can be at most one LABEL
-// per example.
-//
-// * For Image Object Detection:
-// CSV file(s) `image_object_detection_1.csv`,
-// `image_object_detection_2.csv`,...,`image_object_detection_N.csv`
-// with each line in format:
-// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
-// where GCS_FILE_PATHs point at the original, source locations of the
-// imported images.
-//
-// * For Text Classification:
-// In the created directory CSV file(s) `text_classification_1.csv`,
-// `text_classification_2.csv`, ...,`text_classification_N.csv` will be
-// created where N depends on the total number of examples exported.
-// Each line in the CSV is of the format:
-// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
-// where GCS_FILE_PATHs point at the exported .txt files containing
-// the text content of the imported example. For MULTICLASS
-// classification type, there will be at most one LABEL per example.
-//
-// * For Text Sentiment:
-// In the created directory CSV file(s) `text_sentiment_1.csv`,
-// `text_sentiment_2.csv`, ...,`text_sentiment_N.csv` will be
-// created where N depends on the total number of examples exported.
-// Each line in the CSV is of the format:
-// ML_USE,GCS_FILE_PATH,SENTIMENT
-// where GCS_FILE_PATHs point at the exported .txt files containing
-// the text content of the imported example.
-//
-// * For Text Extraction:
-// CSV file `text_extraction.csv`, with each line in format:
-// ML_USE,GCS_FILE_PATH
-// GCS_FILE_PATH leads to a .JSONL (i.e. JSON Lines) file which
-// contains, per line, a proto that wraps a TextSnippet proto (in json
-// representation) followed by AnnotationPayload protos (called
-// annotations). If initially documents had been imported, the JSONL
-// will point at the original, source locations of the imported
-// documents.
-//
-// * For Translation:
+// * For Translation:
// CSV file `translation.csv`, with each line in format:
// ML_USE,GCS_FILE_PATH
// GCS_FILE_PATH leads to a .TSV file which describes examples that have
// given ML_USE, using the following row format per line:
// TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
// language)
+//
+// * For Tables:
+// Output depends on whether the dataset was imported from Google Cloud
+// Storage or BigQuery.
+// Google Cloud Storage case:
+//
+// [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
+// must be set. Exported are CSV file(s) `tables_1.csv`,
+// `tables_2.csv`,...,`tables_N.csv` with each having as header line
+// the table's column names, and all other lines contain values for
+// the header columns.
+// BigQuery case:
+//
+// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
+// pointing to a BigQuery project must be set. In the given project a
+// new dataset will be created with name
+//
+// `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>`
+// where <automl-dataset-display-name> will be made
+// BigQuery-dataset-name compatible (e.g. most special characters will
+// become underscores), and timestamp will be in
+// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
+// dataset a new table called `primary_table` will be created, and
+// filled with precisely the same data as was obtained on import.
message OutputConfig {
// The destination of the output.
oneof destination {
- // Required. The Google Cloud Storage location where the output is to be
- // written to. For Image Object Detection, Text Extraction in the given
- // directory a new directory will be created with name:
+ // Required. The Google Cloud Storage location where the output is to be written to.
+ // For Image Object Detection, Text Extraction, Video Classification and
+ // Tables, in the given directory a new directory will be created with name:
// export_data-<dataset-display-name>-<timestamp-of-export-call> where
// timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
// output will be written into that directory.
@@ -725,6 +1017,101 @@ message OutputConfig {
// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
// of it depend on the ML problem the predictions are made for.
//
+// * For Image Classification:
+// In the created directory files `image_classification_1.jsonl`,
+// `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
+// will be created, where N may be 1, and depends on the
+// total number of the successfully predicted images and annotations.
+// A single image will be listed only once with all its annotations,
+// and its annotations will never be split across files.
+// Each .JSONL file will contain, per line, a JSON representation of a
+// proto that wraps image's "ID" : "<id_value>" followed by a list of
+// zero or more AnnotationPayload protos (called annotations), which
+// have classification detail populated.
+// If prediction for any image failed (partially or completely), then an
+// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
+// files will be created (N depends on total number of failed
+// predictions). These files will have a JSON representation of a proto
+// that wraps the same "ID" : "<id_value>" but here followed by
+// exactly one
+//
+// [`google.rpc.Status`](https:
+// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+// containing only `code` and `message` fields.
+//
+// * For Image Object Detection:
+// In the created directory files `image_object_detection_1.jsonl`,
+// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
+// will be created, where N may be 1, and depends on the
+// total number of the successfully predicted images and annotations.
+// Each .JSONL file will contain, per line, a JSON representation of a
+// proto that wraps image's "ID" : "<id_value>" followed by a list of
+// zero or more AnnotationPayload protos (called annotations), which
+// have image_object_detection detail populated. A single image will
+// be listed only once with all its annotations, and its annotations
+// will never be split across files.
+// If prediction for any image failed (partially or completely), then
+// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
+// files will be created (N depends on total number of failed
+// predictions). These files will have a JSON representation of a proto
+// that wraps the same "ID" : "<id_value>" but here followed by
+// exactly one
+//
+// [`google.rpc.Status`](https:
+// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+// containing only `code` and `message` fields.
+// * For Video Classification:
+// In the created directory a video_classification.csv file, and a .JSON
+// file per each video classification requested in the input (i.e. each
+// line in given CSV(s)), will be created.
+//
+// The format of video_classification.csv is:
+//
+// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+// where:
+// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+// the prediction input lines (i.e. video_classification.csv has
+// precisely the same number of lines as the prediction input had.)
+// JSON_FILE_NAME = Name of .JSON file in the output directory, which
+// contains prediction responses for the video time segment.
+// STATUS = "OK" if prediction completed successfully, or an error code
+// with message otherwise. If STATUS is not "OK" then the .JSON file
+// for that line may not exist or be empty.
+//
+// Each .JSON file, assuming STATUS is "OK", will contain a list of
+// AnnotationPayload protos in JSON format, which are the predictions
+// for the video time segment the file is assigned to in the
+// video_classification.csv. All AnnotationPayload protos will have
+// video_classification field set, and will be sorted by
+// video_classification.type field (note that the returned types are
+// governed by `classifaction_types` parameter in
+// [PredictService.BatchPredictRequest.params][]).
+//
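Consuming the layout described above means walking `video_classification.csv` and opening only the JSON files whose STATUS is "OK". A hypothetical sketch over a locally downloaded output directory; the naive `split(',')` assumes non-"OK" STATUS values contain no commas:

    import {promises as fs} from 'fs';

    async function readVideoClassificationResults(dir: string): Promise<void> {
      const csv = await fs.readFile(`${dir}/video_classification.csv`, 'utf8');
      for (const line of csv.trim().split('\n')) {
        const [path, start, end, jsonFile, status] = line.split(',');
        if (status !== 'OK') continue; // the .JSON file may be missing or empty
        const payloads = JSON.parse(await fs.readFile(`${dir}/${jsonFile}`, 'utf8'));
        console.log(`${path} [${start},${end}]: ${payloads.length} AnnotationPayloads`);
      }
    }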
+// * For Video Object Tracking:
+// In the created directory a video_object_tracking.csv file will be
+// created, and multiple files video_object_tracking_1.json,
+// video_object_tracking_2.json,..., video_object_tracking_N.json,
+// where N is the number of requests in the input (i.e. the number of
+// lines in given CSV(s)).
+//
+// The format of video_object_tracking.csv is:
+//
+// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+// where:
+// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+// the prediction input lines (i.e. video_object_tracking.csv has
+// precisely the same number of lines as the prediction input had.)
+// JSON_FILE_NAME = Name of .JSON file in the output directory, which
+// contains prediction responses for the video time segment.
+// STATUS = "OK" if prediction completed successfully, or an error
+// code with message otherwise. If STATUS is not "OK" then the .JSON
+// file for that line may not exist or be empty.
+//
+// Each .JSON file, assuming STATUS is "OK", will contain a list of
+// AnnotationPayload protos in JSON format, which are the predictions
+// for each frame of the video time segment the file is assigned to in
+// video_object_tracking.csv. All AnnotationPayload protos will have
+// video_object_tracking field set.
// * For Text Classification:
// In the created directory files `text_classification_1.jsonl`,
// `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
@@ -732,18 +1119,18 @@ message OutputConfig {
// total number of inputs and annotations found.
//
// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps input text (or pdf) file in
+// proto that wraps input text file (or document) in
// the text snippet (or document) proto and a list of
// zero or more AnnotationPayload protos (called annotations), which
-// have classification detail populated. A single text (or pdf) file
-// will be listed only once with all its annotations, and its
+// have classification detail populated. A single text file (or
+// document) will be listed only once with all its annotations, and its
// annotations will never be split across files.
//
-// If prediction for any text (or pdf) file failed (partially or
+// If prediction for any input file (or document) failed (partially or
// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
// `errors_N.jsonl` files will be created (N depends on total number of
// failed predictions). These files will have a JSON representation of a
-// proto that wraps input text (or pdf) file followed by exactly one
+// proto that wraps input file followed by exactly one
//
// [`google.rpc.Status`](https:
// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
@@ -756,18 +1143,18 @@ message OutputConfig {
// total number of inputs and annotations found.
//
// Each .JSONL file will contain, per line, a JSON representation of a
-// proto that wraps input text (or pdf) file in
+// proto that wraps input text file (or document) in
// the text snippet (or document) proto and a list of
// zero or more AnnotationPayload protos (called annotations), which
-// have text_sentiment detail populated. A single text (or pdf) file
-// will be listed only once with all its annotations, and its
+// have text_sentiment detail populated. A single text file (or
+// document) will be listed only once with all its annotations, and its
// annotations will never be split across files.
//
-// If prediction for any text (or pdf) file failed (partially or
+// If prediction for any input file (or document) failed (partially or
// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
// `errors_N.jsonl` files will be created (N depends on total number of
// failed predictions). These files will have a JSON representation of a
-// proto that wraps input text (or pdf) file followed by exactly one
+// proto that wraps input file followed by exactly one
//
// [`google.rpc.Status`](https:
// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
@@ -803,14 +1190,108 @@ message OutputConfig {
// failed predictions). These files will have a JSON representation of a
// proto that wraps either the "id" : "<id_value>" (in case of inline)
// or the document proto (in case of document) but here followed by
-// exactly one [`google.rpc.Status`](https:
+// exactly one
+//
+// [`google.rpc.Status`](https:
// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// containing only `code` and `message`.
+//
+// * For Tables:
+// Output depends on whether
+//
+// [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
+// or
+//
+// [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
+// is set (either is allowed).
+// Google Cloud Storage case:
+// In the created directory files `tables_1.csv`, `tables_2.csv`,...,
+// `tables_N.csv` will be created, where N may be 1, and depends on
+// the total number of the successfully predicted rows.
+// For all CLASSIFICATION
+//
+// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
+// Each .csv file will contain a header, listing all columns'
+//
+// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
+// given on input followed by M target column names in the format of
+//
+// "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+//
+// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>_<target_value>_score" where M is the number of distinct target values,
+// i.e. number of distinct values in the target column of the table
+// used to train the model. Subsequent lines will contain the
+// respective values of successfully predicted rows, with the last,
+// i.e. the target, columns having the corresponding prediction
+// [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score].
+// For REGRESSION and FORECASTING
+//
+// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
+// Each .csv file will contain a header, listing all columns'
+// [display_name-s][google.cloud.automl.v1p1beta.display_name]
+// given on input followed by the predicted target column with name
+// in the format of
+//
+// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+//
+// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
+// Subsequent lines will contain the respective values of
+// successfully predicted rows, with the last, i.e. the target,
+// column having the predicted target value.
+// If prediction for any rows failed, then an additional
+// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be
+// created (N depends on total number of failed rows). These files
+// will have analogous format as `tables_*.csv`, but always with a
+// single target column having
+//
+// [`google.rpc.Status`](https:
+// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+// represented as a JSON string, and containing only `code` and
+// `message`.
+// BigQuery case:
+//
+// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
+// pointing to a BigQuery project must be set. In the given project a
+// new dataset will be created with name
+// `prediction_<model-display-name>_<timestamp-of-prediction-call>`
+// where <model-display-name> will be made
+// BigQuery-dataset-name compatible (e.g. most special characters will
+// become underscores), and timestamp will be in
+// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
+// two tables will be created, `predictions`, and `errors`.
+// The `predictions` table's column names will be the input columns'
+//
+// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
+// followed by the target column with name in the format of
+//
+// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+//
+// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
+// The input feature columns will contain the respective values of
+// successfully predicted rows, with the target column having an
+// ARRAY of
+//
+// [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
+// represented as STRUCT-s, containing
+// [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation].
+// The `errors` table contains rows for which the prediction has
+//        failed; it has analogous input columns while the target column name
+// is in the format of
+//
+// "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+//
+// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
+// and as a value has
+//
+// [`google.rpc.Status`](https:
+//        //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+// represented as a STRUCT, and containing only `code` and `message`.
message BatchPredictOutputConfig {
// The destination of the output.
oneof destination {
- // Required. The Google Cloud Storage location of the directory where the
- // output is to be written to.
+ // Required. The Google Cloud Storage location of the directory where the output is to
+    // be written.
GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED];
}
}
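To make the Google Cloud Storage case concrete, here is a minimal sketch of wiring this output config into a batch prediction with this package's Node.js client; the project, model ID, and bucket paths are placeholders:

```ts
import {v1} from '@google-cloud/automl';

const client = new v1.PredictionServiceClient();

// Sketch: batch prediction whose results land under a GCS directory.
// 'my-project', 'TBL0000' and the bucket names are placeholders.
async function batchPredictToGcs(): Promise<void> {
  const [operation] = await client.batchPredict({
    name: client.modelPath('my-project', 'us-central1', 'TBL0000'),
    inputConfig: {
      gcsSource: {inputUris: ['gs://my-bucket/batch-input.csv']},
    },
    outputConfig: {
      // tables_1.csv ... tables_N.csv (and errors_*.csv for failed rows)
      // are created under a new timestamped directory below this prefix.
      gcsDestination: {outputUriPrefix: 'gs://my-bucket/batch-output/'},
    },
  });
  await operation.promise(); // resolves once the long-running operation completes
}
```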
@@ -819,9 +1300,8 @@ message BatchPredictOutputConfig {
message ModelExportOutputConfig {
// The destination of the output.
oneof destination {
- // Required. The Google Cloud Storage location where the model is to be
- // written to. This location may only be set for the following model
- // formats:
+    // Required. The Google Cloud Storage location where the model is to be written.
+ // This location may only be set for the following model formats:
// "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
//
// Under the directory given as the destination a new one with name
@@ -839,7 +1319,8 @@ message ModelExportOutputConfig {
//
// * For Image Classification mobile-low-latency-1, mobile-versatile-1,
// mobile-high-accuracy-1:
- // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js".
+ // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
+ // "docker".
//
// * For Image Classification mobile-core-ml-low-latency-1,
// mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
@@ -855,13 +1336,24 @@ message ModelExportOutputConfig {
// devices.
// * tf_saved_model - A tensorflow model in SavedModel format.
// * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
- // be used in the browser and in Node.js using JavaScript.x`
+ // be used in the browser and in Node.js using JavaScript.
+ // * docker - Used for Docker containers. Use the params field to customize
+ // the container. The container is verified to work correctly on
+  //             the Ubuntu 16.04 operating system. See more at the
+  //             [containers quickstart](https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart).
// * core_ml - Used for iOS mobile devices.
string model_format = 4;
// Additional model-type and format specific parameters describing the
// requirements for the to be exported model files, any string must be up to
// 25000 characters long.
+ //
+ // * For `docker` format:
+ // `cpu_architecture` - (string) "x86_64" (default).
+ // `gpu_architecture` - (string) "none" (default), "nvidia".
  map<string, string> params = 2;
}
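A hedged sketch of requesting the "docker" format together with the params described above, again via this package's Node.js client (IDs and bucket are placeholders):

```ts
import {v1} from '@google-cloud/automl';

const client = new v1.AutoMlClient();

// Sketch: export a model in "docker" format; cpu_architecture and
// gpu_architecture take the string values documented above.
async function exportDockerModel(): Promise<void> {
  const [operation] = await client.exportModel({
    name: client.modelPath('my-project', 'us-central1', 'ICN0000'),
    outputConfig: {
      gcsDestination: {outputUriPrefix: 'gs://my-bucket/model-export/'},
      modelFormat: 'docker',
      params: {cpu_architecture: 'x86_64', gpu_architecture: 'none'},
    },
  });
  await operation.promise();
}
```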
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/model.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/model.proto
index ee080684d59..f53689377d2 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/model.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/model.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -35,7 +34,7 @@ option ruby_package = "Google::Cloud::AutoML::V1";
message Model {
option (google.api.resource) = {
type: "automl.googleapis.com/Model"
- pattern: "projects/{project_id}/locations/{location_id}/models/{model_id}"
+ pattern: "projects/{project}/locations/{location}/models/{model}"
};
// Deployment state of the model.
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/model_evaluation.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/model_evaluation.proto
index 8c768adc3be..601389f7af5 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/model_evaluation.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/model_evaluation.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,13 +11,11 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/api/resource.proto";
import "google/cloud/automl/v1/classification.proto";
import "google/cloud/automl/v1/detection.proto";
@@ -25,6 +23,7 @@ import "google/cloud/automl/v1/text_extraction.proto";
import "google/cloud/automl/v1/text_sentiment.proto";
import "google/cloud/automl/v1/translation.proto";
import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -35,17 +34,24 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// Evaluation results of a model.
message ModelEvaluation {
+ option (google.api.resource) = {
+ type: "automl.googleapis.com/ModelEvaluation"
+ pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}"
+ };
+
// Output only. Problem type specific evaluation metrics.
oneof metrics {
- // Model evaluation metrics for image, text classification.
+ // Model evaluation metrics for image, text, video and tables
+ // classification.
+    // A Tables problem is considered classification when the target column
+    // is of CATEGORY DataType.
ClassificationEvaluationMetrics classification_evaluation_metrics = 8;
// Model evaluation metrics for translation.
TranslationEvaluationMetrics translation_evaluation_metrics = 9;
// Model evaluation metrics for image object detection.
- ImageObjectDetectionEvaluationMetrics
- image_object_detection_evaluation_metrics = 12;
+ ImageObjectDetectionEvaluationMetrics image_object_detection_evaluation_metrics = 12;
// Evaluation metrics for text sentiment models.
TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11;
@@ -60,8 +66,15 @@ message ModelEvaluation {
// `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`
string name = 1;
- // Output only. The ID of the annotation spec that the model evaluation
- // applies to. The The ID is empty for the overall model evaluation.
+  // Output only. The ID of the annotation spec that the model evaluation applies to.
+  // The ID is empty for the overall model evaluation.
+  // For Tables, annotation specs do not exist in the dataset and this ID is
+  // never set, but for CLASSIFICATION
+ //
+ // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
+ // the
+ // [display_name][google.cloud.automl.v1.ModelEvaluation.display_name]
+ // field is used.
string annotation_spec_id = 2;
// Output only. The value of
@@ -69,7 +82,12 @@ message ModelEvaluation {
// at the moment when the model was trained. Because this field returns a
// value at model training time, for different models trained from the same
  // dataset, the values may differ, since display names could have been changed
- // between the two model's trainings.
+ // between the two model's trainings. For Tables CLASSIFICATION
+ //
+ // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
+ // distinct values of the target column at the moment of the model evaluation
+ // are populated here.
+ // The display_name is empty for the overall model evaluation.
string display_name = 15;
// Output only. Timestamp when this model evaluation was created.
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/operations.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/operations.proto
index dfe20eb4520..d6a99870808 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/operations.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/operations.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/prediction_service.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/prediction_service.proto
index d38008d7c98..d12f9276c21 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/prediction_service.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/prediction_service.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -19,6 +18,7 @@ package google.cloud.automl.v1;
import "google/api/annotations.proto";
import "google/api/client.proto";
+import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/automl/v1/annotation_payload.proto";
import "google/cloud/automl/v1/data_items.proto";
@@ -40,168 +40,288 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// snake_case or kebab-case, either of those cases is accepted.
service PredictionService {
option (google.api.default_host) = "automl.googleapis.com";
- option (google.api.oauth_scopes) =
- "https://www.googleapis.com/auth/cloud-platform";
+ option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
- // Perform an online prediction. The prediction result will be directly
+ // Perform an online prediction. The prediction result is directly
// returned in the response.
- // Available for following ML problems, and their expected request payloads:
- // * Image Classification - Image in .JPEG, .GIF or .PNG format, image_bytes
- // up to 30MB.
- // * Image Object Detection - Image in .JPEG, .GIF or .PNG format, image_bytes
- // up to 30MB.
- // * Text Classification - TextSnippet, content up to 60,000 characters,
- // UTF-8 encoded.
- // * Text Extraction - TextSnippet, content up to 30,000 characters,
- // UTF-8 NFC encoded.
- // * Translation - TextSnippet, content up to 25,000 characters, UTF-8
- // encoded.
- // * Text Sentiment - TextSnippet, content up 500 characters, UTF-8
- // encoded.
+  // Available for the following ML scenarios, and their expected request
+  // payloads:
+  //
+  // * AutoML Vision Classification - An image in .JPEG, .GIF or .PNG format,
+  //   image_bytes up to 30MB.
+  // * AutoML Vision Object Detection - An image in .JPEG, .GIF or .PNG
+  //   format, image_bytes up to 30MB.
+  // * AutoML Natural Language Classification - A TextSnippet up to 60,000
+  //   characters, UTF-8 encoded or a document in .PDF, .TIF or .TIFF format
+  //   with size up to 2MB.
+  // * AutoML Natural Language Entity Extraction - A TextSnippet up to 10,000
+  //   characters, UTF-8 NFC encoded or a document in .PDF, .TIF or .TIFF
+  //   format with size up to 20MB.
+  // * AutoML Natural Language Sentiment Analysis - A TextSnippet up to 60,000
+  //   characters, UTF-8 encoded or a document in .PDF, .TIF or .TIFF format
+  //   with size up to 2MB.
+  // * AutoML Translation - A TextSnippet up to 25,000 characters, UTF-8
+  //   encoded.
+  // * AutoML Tables - A row with column values matching the columns of the
+  //   model, up to 5MB. Not available for FORECASTING `prediction_type`.
rpc Predict(PredictRequest) returns (PredictResponse) {
option (google.api.http) = {
post: "/v1/{name=projects/*/locations/*/models/*}:predict"
body: "*"
};
+ option (google.api.method_signature) = "name,payload,params";
}
- // Perform a batch prediction. Unlike the online
- // [Predict][google.cloud.automl.v1.PredictionService.Predict], batch
+ // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], batch
// prediction result won't be immediately available in the response. Instead,
  // a long running operation object is returned. The user can poll the operation
// result via [GetOperation][google.longrunning.Operations.GetOperation]
- // method. Once the operation is done,
- // [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned
- // in the [response][google.longrunning.Operation.response] field. Available
- // for following ML problems:
- // * Image Classification
- // * Image Object Detection
- // * Text Extraction
+ // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
+ // the [response][google.longrunning.Operation.response] field.
+ // Available for following ML scenarios:
+ //
+ // * AutoML Vision Classification
+ // * AutoML Vision Object Detection
+ // * AutoML Video Intelligence Classification
+  // * AutoML Video Intelligence Object Tracking
+  // * AutoML Natural Language Classification
+ // * AutoML Natural Language Entity Extraction
+ // * AutoML Natural Language Sentiment Analysis
+ // * AutoML Tables
rpc BatchPredict(BatchPredictRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{name=projects/*/locations/*/models/*}:batchPredict"
body: "*"
};
+ option (google.api.method_signature) = "name,input_config,output_config,params";
+ option (google.longrunning.operation_info) = {
+ response_type: "BatchPredictResult"
+ metadata_type: "OperationMetadata"
+ };
}
}
-// Request message for
-// [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
+// Request message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
message PredictRequest {
- // Name of the model requested to serve the prediction.
- string name = 1;
+ // Required. Name of the model requested to serve the prediction.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
// Required. Payload to perform a prediction on. The payload must match the
// problem type that the model was trained to solve.
- ExamplePayload payload = 2;
+ ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED];
// Additional domain-specific parameters, any string must be up to 25000
// characters long.
//
- // * For Image Classification:
+ // AutoML Vision Classification
+ //
+ // `score_threshold`
+ // : (float) A value from 0.0 to 1.0. When the model
+ // makes predictions for an image, it will only produce results that have
+ // at least this confidence score. The default is 0.5.
+ //
+ // AutoML Vision Object Detection
+ //
+ // `score_threshold`
+ // : (float) When Model detects objects on the image,
+ // it will only produce bounding boxes which have at least this
+ // confidence score. Value in 0 to 1 range, default is 0.5.
+ //
+ // `max_bounding_box_count`
+ // : (int64) The maximum number of bounding
+ // boxes returned. The default is 100. The
+ // number of returned bounding boxes might be limited by the server.
//
- // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- // makes predictions for an image, it will only produce results that have
- // at least this confidence score. The default is 0.5.
+ // AutoML Tables
//
- // * For Image Object Detection:
- // `score_threshold` - (float) When Model detects objects on the image,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- // `max_bounding_box_count` - (int64) No more than this number of bounding
- // boxes will be returned in the response. Default is 100, the
- // requested value may be limited by server.
+ // `feature_importance`
+ // : (boolean) Whether
+ //
+ // [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
+ // is populated in the returned list of
+ // [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
+ // objects. The default is false.
  map<string, string> params = 3;
}
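As an illustration of how these fields surface in the generated Node.js client, a minimal online-prediction sketch (project, location, model ID, and file name are placeholders; note that `params` values are strings):

```ts
import {readFileSync} from 'fs';
import {v1} from '@google-cloud/automl';

const client = new v1.PredictionServiceClient();

// Sketch: online image classification with a score_threshold param.
async function predictImage(): Promise<void> {
  const [response] = await client.predict({
    name: client.modelPath('my-project', 'us-central1', 'ICN0000'),
    payload: {image: {imageBytes: readFileSync('photo.jpg')}},
    // params is a map<string, string>, so the float is passed as a string.
    params: {score_threshold: '0.8'},
  });
  console.log(response.payload);
}
```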
-// Response message for
-// [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
+// Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
message PredictResponse {
// Prediction result.
- // Translation and Text Sentiment will return precisely one payload.
+ // AutoML Translation and AutoML Natural Language Sentiment Analysis
+ // return precisely one payload.
repeated AnnotationPayload payload = 1;
// The preprocessed example that AutoML actually makes prediction on.
// Empty if AutoML does not preprocess the input example.
- // * For Text Extraction:
- // If the input is a .pdf file, the OCR'ed text will be provided in
- // [document_text][google.cloud.automl.v1.Document.document_text].
- //
- // * For Text Classification:
- // If the input is a .pdf file, the OCR'ed trucated text will be provided in
- // [document_text][google.cloud.automl.v1.Document.document_text].
//
- // * For Text Sentiment:
- // If the input is a .pdf file, the OCR'ed trucated text will be provided in
- // [document_text][google.cloud.automl.v1.Document.document_text].
+ // For AutoML Natural Language (Classification, Entity Extraction, and
+ // Sentiment Analysis), if the input is a document, the recognized text is
+ // returned in the
+ // [document_text][google.cloud.automl.v1.Document.document_text]
+ // property.
ExamplePayload preprocessed_input = 3;
// Additional domain-specific prediction response metadata.
//
- // * For Image Object Detection:
- // `max_bounding_box_count` - (int64) At most that many bounding boxes per
- // image could have been returned.
- //
- // * For Text Sentiment:
- // `sentiment_score` - (float, deprecated) A value between -1 and 1,
- // -1 maps to least positive sentiment, while 1 maps to the most positive
- // one and the higher the score, the more positive the sentiment in the
- // document is. Yet these values are relative to the training data, so
- // e.g. if all data was positive then -1 will be also positive (though
- // the least).
- // The sentiment_score shouldn't be confused with "score" or "magnitude"
- // from the previous Natural Language Sentiment Analysis API.
+ // AutoML Vision Object Detection
+ //
+ // `max_bounding_box_count`
+ // : (int64) The maximum number of bounding boxes to return per image.
+ //
+ // AutoML Natural Language Sentiment Analysis
+ //
+ // `sentiment_score`
+ // : (float, deprecated) A value between -1 and 1,
+ // -1 maps to least positive sentiment, while 1 maps to the most positive
+ // one and the higher the score, the more positive the sentiment in the
+ // document is. Yet these values are relative to the training data, so
+  //   e.g. if all the training data was positive then -1 is also positive
+  //   (though the least positive).
+ // `sentiment_score` is not the same as "score" and "magnitude"
+ // from Sentiment Analysis in the Natural Language API.
  map<string, string> metadata = 2;
}
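Reading the response back might look like the following sketch over the generated types; it assumes a Natural Language Sentiment Analysis model:

```ts
import {protos} from '@google-cloud/automl';

type PredictResponse = protos.google.cloud.automl.v1.IPredictResponse;

// Sketch: inspect a sentiment-analysis PredictResponse.
function logSentiment(response: PredictResponse): void {
  // Sentiment Analysis returns exactly one payload.
  for (const annotation of response.payload ?? []) {
    console.log(annotation.displayName, annotation.textSentiment?.sentiment);
  }
  // metadata is a plain map<string, string>; sentiment_score is deprecated.
  console.log('sentiment_score:', response.metadata?.['sentiment_score']);
}
```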
-// Request message for
-// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
+// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
message BatchPredictRequest {
- // Name of the model requested to serve the batch prediction.
- string name = 1;
+ // Required. Name of the model requested to serve the batch prediction.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
// Required. The input configuration for batch prediction.
- BatchPredictInputConfig input_config = 3;
+ BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
  // Required. The configuration specifying where output predictions should
// be written.
- BatchPredictOutputConfig output_config = 4;
+ BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED];
// Additional domain-specific parameters for the predictions, any string must
// be up to 25000 characters long.
//
- // * For Text Classification:
+ // AutoML Natural Language Classification
+ //
+ // `score_threshold`
+ // : (float) A value from 0.0 to 1.0. When the model
+ // makes predictions for a text snippet, it will only produce results
+ // that have at least this confidence score. The default is 0.5.
+ //
//
- // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- // makes predictions for a text snippet, it will only produce results
- // that have at least this confidence score. The default is 0.5.
+ // AutoML Vision Classification
//
- // * For Image Classification:
+ // `score_threshold`
+ // : (float) A value from 0.0 to 1.0. When the model
+ // makes predictions for an image, it will only produce results that
+ // have at least this confidence score. The default is 0.5.
//
- // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- // makes predictions for an image, it will only produce results that
- // have at least this confidence score. The default is 0.5.
+ // AutoML Vision Object Detection
//
- // * For Image Object Detection:
+ // `score_threshold`
+ // : (float) When Model detects objects on the image,
+ // it will only produce bounding boxes which have at least this
+ // confidence score. Value in 0 to 1 range, default is 0.5.
+ //
+ // `max_bounding_box_count`
+ // : (int64) The maximum number of bounding
+  //    boxes returned per image. The default is 100; the
+  //    number of bounding boxes returned might be limited by the server.
+  //
+  //  AutoML Video Intelligence Classification
+ //
+ // `score_threshold`
+ // : (float) A value from 0.0 to 1.0. When the model
+ // makes predictions for a video, it will only produce results that
+ // have at least this confidence score. The default is 0.5.
+ //
+ // `segment_classification`
+ // : (boolean) Set to true to request
+ // segment-level classification. AutoML Video Intelligence returns
+ // labels and their confidence scores for the entire segment of the
+  //    video that the user specified in the request configuration.
+ // The default is true.
+ //
+ // `shot_classification`
+ // : (boolean) Set to true to request shot-level
+ // classification. AutoML Video Intelligence determines the boundaries
+  //    for each camera shot in the entire segment of the video that the user
+ // specified in the request configuration. AutoML Video Intelligence
+ // then returns labels and their confidence scores for each detected
+ // shot, along with the start and end time of the shot.
+ // The default is false.
+ //
+  //    WARNING: Model evaluation is not done for this classification type;
+  //    its quality depends on the training data, but there are no metrics
+  //    provided to describe that quality.
+ //
+ // `1s_interval_classification`
+ // : (boolean) Set to true to request
+ // classification for a video at one-second intervals. AutoML Video
+ // Intelligence returns labels and their confidence scores for each
+  //    second of the entire segment of the video that the user specified in the
+ // request configuration. The default is false.
+ //
+  //    WARNING: Model evaluation is not done for this classification
+  //    type; its quality depends on the training data, but there are no
+  //    metrics provided to describe that quality.
+ //
+ // AutoML Video Intelligence Object Tracking
+ //
+ // `score_threshold`
+ // : (float) When Model detects objects on video frames,
+ // it will only produce bounding boxes which have at least this
+ // confidence score. Value in 0 to 1 range, default is 0.5.
+ //
+ // `max_bounding_box_count`
+ // : (int64) The maximum number of bounding
+  //    boxes returned per frame. The default is 100; the
+ // number of bounding boxes returned might be limited by the server.
+ //
+ // `min_bounding_box_size`
+  //  : (float) Only bounding boxes whose shortest edge is at least this
+  //    long, as a relative value of the video frame size, are
+  //    returned. Value in 0 to 1 range. Default is 0.
//
- // `score_threshold` - (float) When Model detects objects on the image,
- // it will only produce bounding boxes which have at least this
- // confidence score. Value in 0 to 1 range, default is 0.5.
- // `max_bounding_box_count` - (int64) No more than this number of bounding
- // boxes will be produced per image. Default is 100, the
- // requested value may be limited by server.
  map<string, string> params = 5;
}
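Because `params` is a plain string map, the video classification knobs above are all passed as strings. A hedged sketch (model ID and URIs are placeholders):

```ts
import {v1} from '@google-cloud/automl';

const client = new v1.PredictionServiceClient();

// Sketch: batch video classification with the params documented above.
async function batchClassifyVideos(): Promise<void> {
  const [operation] = await client.batchPredict({
    name: client.modelPath('my-project', 'us-central1', 'VCN0000'),
    inputConfig: {gcsSource: {inputUris: ['gs://my-bucket/videos.csv']}},
    outputConfig: {gcsDestination: {outputUriPrefix: 'gs://my-bucket/out/'}},
    params: {
      score_threshold: '0.6',
      segment_classification: 'true',
      shot_classification: 'true',
      '1s_interval_classification': 'false',
    },
  });
  await operation.promise();
}
```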
// Result of the Batch Predict. This message is returned in
// [response][google.longrunning.Operation.response] of the operation returned
-// by the
-// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
+// by the [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
message BatchPredictResult {
// Additional domain-specific prediction response metadata.
//
- // * For Image Object Detection:
- // `max_bounding_box_count` - (int64) At most that many bounding boxes per
- // image could have been returned.
+ // AutoML Vision Object Detection
+ //
+ // `max_bounding_box_count`
+ // : (int64) The maximum number of bounding boxes returned per image.
+ //
+ // AutoML Video Intelligence Object Tracking
+ //
+ // `max_bounding_box_count`
+ // : (int64) The maximum number of bounding boxes returned per frame.
  map<string, string> metadata = 1;
}
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/service.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/service.proto
index bf51bdb15f2..7c73243ccfe 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/service.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/service.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -19,6 +18,7 @@ package google.cloud.automl.v1;
import "google/api/annotations.proto";
import "google/api/client.proto";
+import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/automl/v1/annotation_payload.proto";
import "google/cloud/automl/v1/annotation_spec.proto";
@@ -55,16 +55,19 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// snake_case or kebab-case, either of those cases is accepted.
service AutoMl {
option (google.api.default_host) = "automl.googleapis.com";
- option (google.api.oauth_scopes) =
- "https://www.googleapis.com/auth/cloud-platform";
+ option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
// Creates a dataset.
- rpc CreateDataset(CreateDatasetRequest)
- returns (google.longrunning.Operation) {
+ rpc CreateDataset(CreateDatasetRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{parent=projects/*/locations/*}/datasets"
body: "dataset"
};
+ option (google.api.method_signature) = "parent,dataset";
+ option (google.longrunning.operation_info) = {
+ response_type: "Dataset"
+ metadata_type: "OperationMetadata"
+ };
}
// Gets a dataset.
@@ -72,6 +75,7 @@ service AutoMl {
option (google.api.http) = {
get: "/v1/{name=projects/*/locations/*/datasets/*}"
};
+ option (google.api.method_signature) = "name";
}
// Lists datasets in a project.
@@ -79,6 +83,7 @@ service AutoMl {
option (google.api.http) = {
get: "/v1/{parent=projects/*/locations/*}/datasets"
};
+ option (google.api.method_signature) = "parent";
}
// Updates a dataset.
@@ -87,6 +92,7 @@ service AutoMl {
patch: "/v1/{dataset.name=projects/*/locations/*/datasets/*}"
body: "dataset"
};
+ option (google.api.method_signature) = "dataset,update_mask";
}
// Deletes a dataset and all of its contents.
@@ -94,19 +100,36 @@ service AutoMl {
// [response][google.longrunning.Operation.response] field when it completes,
// and `delete_details` in the
// [metadata][google.longrunning.Operation.metadata] field.
- rpc DeleteDataset(DeleteDatasetRequest)
- returns (google.longrunning.Operation) {
+ rpc DeleteDataset(DeleteDatasetRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1/{name=projects/*/locations/*/datasets/*}"
};
+ option (google.api.method_signature) = "name";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "OperationMetadata"
+ };
}
// Imports data into a dataset.
+  // For Tables, this method can only be called on an empty Dataset.
+ //
+ // For Tables:
+ // * A
+ // [schema_inference_version][google.cloud.automl.v1.InputConfig.params]
+ // parameter must be explicitly set.
+ // Returns an empty response in the
+ // [response][google.longrunning.Operation.response] field when it completes.
rpc ImportData(ImportDataRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{name=projects/*/locations/*/datasets/*}:importData"
body: "*"
};
+ option (google.api.method_signature) = "name,input_config";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "OperationMetadata"
+ };
}
// Exports dataset's data to the provided output location.
@@ -117,14 +140,19 @@ service AutoMl {
post: "/v1/{name=projects/*/locations/*/datasets/*}:exportData"
body: "*"
};
+ option (google.api.method_signature) = "name,output_config";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "OperationMetadata"
+ };
}
// Gets an annotation spec.
- rpc GetAnnotationSpec(GetAnnotationSpecRequest)
- returns (google.cloud.automl.v1.AnnotationSpec) {
+ rpc GetAnnotationSpec(GetAnnotationSpecRequest) returns (AnnotationSpec) {
option (google.api.http) = {
get: "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}"
};
+ option (google.api.method_signature) = "name";
}
// Creates a model.
@@ -137,6 +165,11 @@ service AutoMl {
post: "/v1/{parent=projects/*/locations/*}/models"
body: "model"
};
+ option (google.api.method_signature) = "parent,model";
+ option (google.longrunning.operation_info) = {
+ response_type: "Model"
+ metadata_type: "OperationMetadata"
+ };
}
// Gets a model.
@@ -144,6 +177,7 @@ service AutoMl {
option (google.api.http) = {
get: "/v1/{name=projects/*/locations/*/models/*}"
};
+ option (google.api.method_signature) = "name";
}
// Lists models.
@@ -151,6 +185,7 @@ service AutoMl {
option (google.api.http) = {
get: "/v1/{parent=projects/*/locations/*}/models"
};
+ option (google.api.method_signature) = "parent";
}
// Deletes a model.
@@ -162,6 +197,11 @@ service AutoMl {
option (google.api.http) = {
delete: "/v1/{name=projects/*/locations/*/models/*}"
};
+ option (google.api.method_signature) = "name";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "OperationMetadata"
+ };
}
// Updates a model.
@@ -170,17 +210,18 @@ service AutoMl {
patch: "/v1/{model.name=projects/*/locations/*/models/*}"
body: "model"
};
+ option (google.api.method_signature) = "model,update_mask";
}
// Deploys a model. If a model is already deployed, deploying it with the
  // same parameters has no effect. Deploying with different parameters
  // (e.g. changing
//
- // [node_number][google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+ // [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
// will reset the deployment state without pausing the model's availability.
//
- // Only applicable for Text Classification, Image Object Detection; all other
- // domains manage deployment automatically.
+  // Only applicable for Text Classification, Image Object Detection, Tables,
+  // and Image Segmentation; all other domains manage
+ // deployment automatically.
//
// Returns an empty response in the
// [response][google.longrunning.Operation.response] field when it completes.
@@ -189,21 +230,30 @@ service AutoMl {
post: "/v1/{name=projects/*/locations/*/models/*}:deploy"
body: "*"
};
+ option (google.api.method_signature) = "name";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "OperationMetadata"
+ };
}
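A sketch of such a deploy call, assuming the v1 `node_count` field on the image classification deployment metadata (IDs are placeholders):

```ts
import {v1} from '@google-cloud/automl';

const client = new v1.AutoMlClient();

// Sketch: redeploy an image classification model on two nodes.
async function deployOnTwoNodes(): Promise<void> {
  const [operation] = await client.deployModel({
    name: client.modelPath('my-project', 'us-central1', 'ICN0000'),
    imageClassificationModelDeploymentMetadata: {nodeCount: 2},
  });
  await operation.promise(); // empty response; completion signals success
}
```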
// Undeploys a model. If the model is not deployed this method has no effect.
//
- // Only applicable for Text Classification, Image Object Detection;
+ // Only applicable for Text Classification, Image Object Detection and Tables;
// all other domains manage deployment automatically.
//
// Returns an empty response in the
// [response][google.longrunning.Operation.response] field when it completes.
- rpc UndeployModel(UndeployModelRequest)
- returns (google.longrunning.Operation) {
+ rpc UndeployModel(UndeployModelRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{name=projects/*/locations/*/models/*}:undeploy"
body: "*"
};
+ option (google.api.method_signature) = "name";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "OperationMetadata"
+ };
}
// Exports a trained, "export-able", model to a user specified Google Cloud
@@ -218,6 +268,11 @@ service AutoMl {
post: "/v1/{name=projects/*/locations/*/models/*}:export"
body: "*"
};
+ option (google.api.method_signature) = "name,output_config";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "OperationMetadata"
+ };
}
// Gets a model evaluation.
@@ -225,45 +280,57 @@ service AutoMl {
option (google.api.http) = {
get: "/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}"
};
+ option (google.api.method_signature) = "name";
}
// Lists model evaluations.
- rpc ListModelEvaluations(ListModelEvaluationsRequest)
- returns (ListModelEvaluationsResponse) {
+ rpc ListModelEvaluations(ListModelEvaluationsRequest) returns (ListModelEvaluationsResponse) {
option (google.api.http) = {
get: "/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations"
};
+ option (google.api.method_signature) = "parent,filter";
}
}
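All of the mutating RPCs above share the long-running-operation shape declared by `operation_info`. With this package's Node.js client the pattern looks roughly as follows; the dataset fields and IDs are placeholders:

```ts
import {v1} from '@google-cloud/automl';

const client = new v1.AutoMlClient();

// Sketch of the LRO pattern: the RPC returns an Operation, and
// operation.promise() resolves with the declared response_type
// (a Dataset for CreateDataset).
async function createTranslationDataset(): Promise<void> {
  const [operation] = await client.createDataset({
    parent: client.locationPath('my-project', 'us-central1'),
    dataset: {
      displayName: 'my_translation_dataset',
      translationDatasetMetadata: {
        sourceLanguageCode: 'en',
        targetLanguageCode: 'es',
      },
    },
  });
  const [dataset] = await operation.promise();
  console.log('Created:', dataset.name);
}
```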
-// Request message for
-// [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset].
+// Request message for [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset].
message CreateDatasetRequest {
- // The resource name of the project to create the dataset for.
- string parent = 1;
-
- // The dataset to create.
- Dataset dataset = 2;
+ // Required. The resource name of the project to create the dataset for.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "locations.googleapis.com/Location"
+ }
+ ];
+
+ // Required. The dataset to create.
+ Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED];
}
-// Request message for
-// [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset].
+// Request message for [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset].
message GetDatasetRequest {
- // The resource name of the dataset to retrieve.
- string name = 1;
+ // Required. The resource name of the dataset to retrieve.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Dataset"
+ }
+ ];
}
-// Request message for
-// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
+// Request message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
message ListDatasetsRequest {
- // The resource name of the project from which to list datasets.
- string parent = 1;
+ // Required. The resource name of the project from which to list datasets.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "locations.googleapis.com/Location"
+ }
+ ];
// An expression for filtering the results of the request.
//
// * `dataset_metadata` - for existence of the case (e.g.
- // image_classification_dataset_metadata:*). Some examples of
- // using the filter are:
+ // image_classification_dataset_metadata:*). Some examples of using the filter are:
//
// * `translation_dataset_metadata:*` --> The dataset has
// translation_dataset_metadata.
@@ -275,98 +342,121 @@ message ListDatasetsRequest {
// A token identifying a page of results for the server to return
// Typically obtained via
- // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token]
- // of the previous
+ // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] of the previous
// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] call.
string page_token = 6;
}
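For example, the existence filter described above can be exercised like this (a sketch; project and location are placeholders):

```ts
import {v1} from '@google-cloud/automl';

const client = new v1.AutoMlClient();

// Sketch: list only datasets that have translation metadata.
async function listTranslationDatasets(): Promise<void> {
  const [datasets] = await client.listDatasets({
    parent: client.locationPath('my-project', 'us-central1'),
    filter: 'translation_dataset_metadata:*',
  });
  for (const dataset of datasets) {
    console.log(dataset.name, dataset.displayName);
  }
}
```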
-// Response message for
-// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
+// Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
message ListDatasetsResponse {
// The datasets read.
repeated Dataset datasets = 1;
// A token to retrieve next page of results.
- // Pass to
- // [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token]
- // to obtain that page.
+ // Pass to [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] to obtain that page.
string next_page_token = 2;
}
-// Request message for
-// [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset]
+// Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset]
message UpdateDatasetRequest {
- // The dataset which replaces the resource on the server.
- Dataset dataset = 1;
+ // Required. The dataset which replaces the resource on the server.
+ Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource.
- google.protobuf.FieldMask update_mask = 2;
+ google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
}
-// Request message for
-// [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset].
+// Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset].
message DeleteDatasetRequest {
- // The resource name of the dataset to delete.
- string name = 1;
+ // Required. The resource name of the dataset to delete.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Dataset"
+ }
+ ];
}
-// Request message for
-// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData].
+// Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData].
message ImportDataRequest {
// Required. Dataset name. Dataset must already exist. All imported
// annotations and examples will be added.
- string name = 1;
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Dataset"
+ }
+ ];
// Required. The desired input location and its domain specific semantics,
// if any.
- InputConfig input_config = 3;
+ InputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
}
-// Request message for
-// [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData].
+// Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData].
message ExportDataRequest {
// Required. The resource name of the dataset.
- string name = 1;
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Dataset"
+ }
+ ];
// Required. The desired output location.
- OutputConfig output_config = 3;
+ OutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED];
}
-// Request message for
-// [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec].
+// Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec].
message GetAnnotationSpecRequest {
- // The resource name of the annotation spec to retrieve.
- string name = 1;
+ // Required. The resource name of the annotation spec to retrieve.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/AnnotationSpec"
+ }
+ ];
}
-// Request message for
-// [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel].
+// Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel].
message CreateModelRequest {
- // Resource name of the parent project where the model is being created.
- string parent = 1;
-
- // The model to create.
- Model model = 4;
+ // Required. Resource name of the parent project where the model is being created.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "locations.googleapis.com/Location"
+ }
+ ];
+
+ // Required. The model to create.
+ Model model = 4 [(google.api.field_behavior) = REQUIRED];
}
-// Request message for
-// [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel].
+// Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel].
message GetModelRequest {
- // Resource name of the model.
- string name = 1;
+ // Required. Resource name of the model.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
}
-// Request message for
-// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
+// Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
message ListModelsRequest {
- // Resource name of the project, from which to list the models.
- string parent = 1;
+ // Required. Resource name of the project, from which to list the models.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "locations.googleapis.com/Location"
+ }
+ ];
// An expression for filtering the results of the request.
//
// * `model_metadata` - for existence of the case (e.g.
- // image_classification_model_metadata:*).
+ // video_classification_model_metadata:*).
// * `dataset_id` - for = or !=. Some examples of using the filter are:
//
// * `image_classification_model_metadata:*` --> The model has
@@ -379,94 +469,112 @@ message ListModelsRequest {
// A token identifying a page of results for the server to return
// Typically obtained via
- // [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token]
- // of the previous
+ // [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] of the previous
// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call.
string page_token = 6;
}
-// Response message for
-// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
+// Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
message ListModelsResponse {
// List of models in the requested page.
repeated Model model = 1;
// A token to retrieve next page of results.
- // Pass to
- // [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token]
- // to obtain that page.
+ // Pass to [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] to obtain that page.
string next_page_token = 2;
}
-// Request message for
-// [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel].
+// Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel].
message DeleteModelRequest {
- // Resource name of the model being deleted.
- string name = 1;
+ // Required. Resource name of the model being deleted.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
}
-// Request message for
-// [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel]
+// Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel]
message UpdateModelRequest {
- // The model which replaces the resource on the server.
- Model model = 1;
+ // Required. The model which replaces the resource on the server.
+ Model model = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource.
- google.protobuf.FieldMask update_mask = 2;
+ google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
}
-// Request message for
-// [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
+// Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
message DeployModelRequest {
// The per-domain specific deployment parameters.
oneof model_deployment_metadata {
// Model deployment metadata specific to Image Object Detection.
- ImageObjectDetectionModelDeploymentMetadata
- image_object_detection_model_deployment_metadata = 2;
+ ImageObjectDetectionModelDeploymentMetadata image_object_detection_model_deployment_metadata = 2;
// Model deployment metadata specific to Image Classification.
- ImageClassificationModelDeploymentMetadata
- image_classification_model_deployment_metadata = 4;
+ ImageClassificationModelDeploymentMetadata image_classification_model_deployment_metadata = 4;
}
- // Resource name of the model to deploy.
- string name = 1;
+ // Required. Resource name of the model to deploy.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
}
-// Request message for
-// [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
+// Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
message UndeployModelRequest {
- // Resource name of the model to undeploy.
- string name = 1;
+ // Required. Resource name of the model to undeploy.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
}
-// Request message for
-// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models need
-// to be enabled for exporting, otherwise an error code will be returned.
+// Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel].
+// Models need to be enabled for exporting; otherwise an error code will be
+// returned.
message ExportModelRequest {
// Required. The resource name of the model to export.
- string name = 1;
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
// Required. The desired output location and configuration.
- ModelExportOutputConfig output_config = 3;
+ ModelExportOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED];
}
-// Request message for
-// [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
+// Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
message GetModelEvaluationRequest {
- // Resource name for the model evaluation.
- string name = 1;
+ // Required. Resource name for the model evaluation.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/ModelEvaluation"
+ }
+ ];
}
-// Request message for
-// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
+// Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
message ListModelEvaluationsRequest {
- // Resource name of the model to list the model evaluations for.
+ // Required. Resource name of the model to list the model evaluations for.
// If modelId is set as "-", this will list model evaluations from across all
// models of the parent location.
- string parent = 1;
-
- // An expression for filtering the results of the request.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
+
+ // Required. An expression for filtering the results of the request.
//
// * `annotation_spec_id` - for =, != or existence. See example below for
// the last.
@@ -477,31 +585,25 @@ message ListModelEvaluationsRequest {
// annotation spec with ID different than 4.
// * `NOT annotation_spec_id:*` --> The model evaluation was done for
// aggregate of all annotation specs.
- string filter = 3;
+ string filter = 3 [(google.api.field_behavior) = REQUIRED];
// Requested page size.
int32 page_size = 4;
// A token identifying a page of results for the server to return.
// Typically obtained via
- // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token]
- // of the previous
- // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]
- // call.
+ // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] of the previous
+ // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] call.
string page_token = 6;
}
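A sketch of the required filter in practice: `annotation_spec_id:*` selects the per-label evaluations, while `NOT annotation_spec_id:*` selects the aggregate one (model ID is a placeholder):

```ts
import {v1} from '@google-cloud/automl';

const client = new v1.AutoMlClient();

// Sketch: fetch the per-label evaluations for one model.
async function listPerLabelEvaluations(): Promise<void> {
  const [evaluations] = await client.listModelEvaluations({
    parent: client.modelPath('my-project', 'us-central1', 'ICN0000'),
    filter: 'annotation_spec_id:*',
  });
  for (const evaluation of evaluations) {
    console.log(evaluation.displayName, evaluation.evaluatedExampleCount);
  }
}
```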
-// Response message for
-// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
+// Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
message ListModelEvaluationsResponse {
// List of model evaluations in the requested page.
repeated ModelEvaluation model_evaluation = 1;
// A token to retrieve next page of results.
- // Pass to the
- // [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token]
- // field of a new
- // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]
- // request to obtain that page.
+ // Pass to the [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] field of a new
+ // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] request to obtain that page.
string next_page_token = 2;
}
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text.proto
index bffe9634f62..667031b87ef 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,14 +11,13 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/cloud/automl/v1/classification.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -41,20 +40,27 @@ message TextClassificationModelMetadata {
}
// Dataset metadata that is specific to text extraction
-message TextExtractionDatasetMetadata {}
+message TextExtractionDatasetMetadata {
+
+}
// Model metadata that is specific to text extraction.
-message TextExtractionModelMetadata {}
+message TextExtractionModelMetadata {
+
+}
// Dataset metadata for text sentiment.
message TextSentimentDatasetMetadata {
- // Required. A sentiment is expressed as an integer ordinal, where higher
- // value means a more positive sentiment. The range of sentiments that will be
- // used is between 0 and sentiment_max (inclusive on both ends), and all the
- // values in the range must be represented in the dataset before a model can
- // be created. sentiment_max value must be between 1 and 10 (inclusive).
+ // Required. A sentiment is expressed as an integer ordinal, where higher value
+ // means a more positive sentiment. The range of sentiments that will be used
+ // is between 0 and sentiment_max (inclusive on both ends), and all the values
+ // in the range must be represented in the dataset before a model can be
+ // created.
+ // sentiment_max value must be between 1 and 10 (inclusive).
int32 sentiment_max = 1;
}
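For instance, a dataset that rates text on a five-point 0..4 scale would set sentiment_max to 4; a sketch over the generated types:

```ts
import {protos} from '@google-cloud/automl';

// Sketch: every value in [0, sentiment_max] must be represented in the
// imported data before a model can be trained on this dataset.
const dataset: protos.google.cloud.automl.v1.IDataset = {
  displayName: 'reviews_sentiment',
  textSentimentDatasetMetadata: {sentimentMax: 4},
};
```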
// Model metadata that is specific to text sentiment.
-message TextSentimentModelMetadata {}
+message TextSentimentModelMetadata {
+
+}
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_extraction.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_extraction.proto
index 02119f5c3ef..37a31e7155b 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_extraction.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_extraction.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_segment.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_segment.proto
index c24b83fb210..be7eb1543af 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_segment.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_segment.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_sentiment.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_sentiment.proto
index bfd3c3c1100..c68b9ed3d89 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_sentiment.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/text_sentiment.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,14 +11,13 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/cloud/automl/v1/classification.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -31,9 +30,9 @@ option ruby_package = "Google::Cloud::AutoML::V1";
// Contains annotation details specific to text sentiment.
message TextSentimentAnnotation {
// Output only. The sentiment with the semantic, as given to the
- // [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] when
- // populating the dataset from which the model used for the prediction had
- // been trained. The sentiment values are between 0 and
+ // [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] when populating the dataset from which the model used
+ // for the prediction had been trained.
+ // The sentiment values are between 0 and
// Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive),
// with higher value meaning more positive sentiment. They are completely
// relative, i.e. 0 means least positive sentiment and sentiment_max means
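Since the sentiment values are purely relative, consumers typically report them against sentiment_max. A hedged sketch of reading them from a prediction, with payload field names assumed from the generated AnnotationPayload type:

```js
// Sketch: interpreting TextSentimentAnnotation values from a PredictResponse.
// Field names assumed from the generated v1 types.
function logSentiments(response, sentimentMax) {
  for (const payload of response.payload) {
    if (!payload.textSentiment) continue;
    const s = payload.textSentiment.sentiment; // integer in [0, sentimentMax]
    // Values are relative: 0 is the least positive sentiment seen in
    // training, sentimentMax the most positive.
    console.log(`${payload.displayName}: ${s} / ${sentimentMax}`);
  }
}
```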
diff --git a/packages/google-cloud-automl/protos/google/cloud/automl/v1/translation.proto b/packages/google-cloud-automl/protos/google/cloud/automl/v1/translation.proto
index 0c75619e905..642894e85c6 100644
--- a/packages/google-cloud-automl/protos/google/cloud/automl/v1/translation.proto
+++ b/packages/google-cloud-automl/protos/google/cloud/automl/v1/translation.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,15 +11,14 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1;
-import "google/api/annotations.proto";
import "google/api/field_behavior.proto";
import "google/cloud/automl/v1/data_items.proto";
+import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
@@ -56,11 +55,11 @@ message TranslationModelMetadata {
string base_model = 1;
// Output only. Inferred from the dataset.
- // The source languge (The BCP-47 language code) that is used for training.
+ // The source language (The BCP-47 language code) that is used for training.
string source_language_code = 2;
- // Output only. The target languge (The BCP-47 language code) that is used for
- // training.
+ // Output only. The target language (The BCP-47 language code) that is used
+ // for training.
string target_language_code = 3;
}
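The two output-only language codes above are inferred from the dataset, which declares them at creation time. A sketch, with field names assumed from the generated client:

```js
// Sketch: the model's source/target language codes are inferred from the
// dataset's translation metadata, declared (as BCP-47 codes) at creation.
const automl = require('@google-cloud/automl');
const client = new automl.v1.AutoMlClient();

async function createTranslationDataset(projectId) {
  const [operation] = await client.createDataset({
    parent: client.locationPath(projectId, 'us-central1'),
    dataset: {
      displayName: 'en_to_es',
      translationDatasetMetadata: {
        sourceLanguageCode: 'en',
        targetLanguageCode: 'es',
      },
    },
  });
  return operation.promise();
}
```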
diff --git a/packages/google-cloud-automl/protos/protos.json b/packages/google-cloud-automl/protos/protos.json
index 58ac0b5a18b..09d65eaf32b 100644
--- a/packages/google-cloud-automl/protos/protos.json
+++ b/packages/google-cloud-automl/protos/protos.json
@@ -180,6 +180,10 @@
}
},
"Dataset": {
+ "options": {
+ "(google.api.resource).type": "automl.googleapis.com/Dataset",
+ "(google.api.resource).pattern": "projects/{project}/locations/{location}/datasets/{dataset}"
+ },
"oneofs": {
"datasetMetadata": {
"oneof": [
@@ -336,6 +340,10 @@
}
},
"AnnotationSpec": {
+ "options": {
+ "(google.api.resource).type": "automl.googleapis.com/AnnotationSpec",
+ "(google.api.resource).pattern": "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}"
+ },
"fields": {
"name": {
"type": "string",
@@ -938,6 +946,10 @@
}
},
"ModelEvaluation": {
+ "options": {
+ "(google.api.resource).type": "automl.googleapis.com/ModelEvaluation",
+ "(google.api.resource).pattern": "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}"
+ },
"oneofs": {
"metrics": {
"oneof": [
@@ -1125,7 +1137,8 @@
"responseType": "PredictResponse",
"options": {
"(google.api.http).post": "/v1/{name=projects/*/locations/*/models/*}:predict",
- "(google.api.http).body": "*"
+ "(google.api.http).body": "*",
+ "(google.api.method_signature)": "name,payload,params"
}
},
"BatchPredict": {
@@ -1133,7 +1146,10 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{name=projects/*/locations/*/models/*}:batchPredict",
- "(google.api.http).body": "*"
+ "(google.api.http).body": "*",
+ "(google.api.method_signature)": "name,input_config,output_config,params",
+ "(google.longrunning.operation_info).response_type": "BatchPredictResult",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
}
}
@@ -1142,11 +1158,18 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
},
"payload": {
"type": "ExamplePayload",
- "id": 2
+ "id": 2,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
},
"params": {
"keyType": "string",
@@ -1177,15 +1200,25 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
},
"inputConfig": {
"type": "BatchPredictInputConfig",
- "id": 3
+ "id": 3,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
},
"outputConfig": {
"type": "BatchPredictOutputConfig",
- "id": 4
+ "id": 4,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
},
"params": {
"keyType": "string",
@@ -1249,7 +1282,7 @@
"Model": {
"options": {
"(google.api.resource).type": "automl.googleapis.com/Model",
- "(google.api.resource).pattern": "projects/{project_id}/locations/{location_id}/models/{model_id}"
+ "(google.api.resource).pattern": "projects/{project}/locations/{location}/models/{model}"
},
"oneofs": {
"modelMetadata": {
@@ -1343,21 +1376,26 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{parent=projects/*/locations/*}/datasets",
- "(google.api.http).body": "dataset"
+ "(google.api.http).body": "dataset",
+ "(google.api.method_signature)": "parent,dataset",
+ "(google.longrunning.operation_info).response_type": "Dataset",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"GetDataset": {
"requestType": "GetDatasetRequest",
"responseType": "Dataset",
"options": {
- "(google.api.http).get": "/v1/{name=projects/*/locations/*/datasets/*}"
+ "(google.api.http).get": "/v1/{name=projects/*/locations/*/datasets/*}",
+ "(google.api.method_signature)": "name"
}
},
"ListDatasets": {
"requestType": "ListDatasetsRequest",
"responseType": "ListDatasetsResponse",
"options": {
- "(google.api.http).get": "/v1/{parent=projects/*/locations/*}/datasets"
+ "(google.api.http).get": "/v1/{parent=projects/*/locations/*}/datasets",
+ "(google.api.method_signature)": "parent"
}
},
"UpdateDataset": {
@@ -1365,14 +1403,18 @@
"responseType": "Dataset",
"options": {
"(google.api.http).patch": "/v1/{dataset.name=projects/*/locations/*/datasets/*}",
- "(google.api.http).body": "dataset"
+ "(google.api.http).body": "dataset",
+ "(google.api.method_signature)": "dataset,update_mask"
}
},
"DeleteDataset": {
"requestType": "DeleteDatasetRequest",
"responseType": "google.longrunning.Operation",
"options": {
- "(google.api.http).delete": "/v1/{name=projects/*/locations/*/datasets/*}"
+ "(google.api.http).delete": "/v1/{name=projects/*/locations/*/datasets/*}",
+ "(google.api.method_signature)": "name",
+ "(google.longrunning.operation_info).response_type": "google.protobuf.Empty",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"ImportData": {
@@ -1380,7 +1422,10 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{name=projects/*/locations/*/datasets/*}:importData",
- "(google.api.http).body": "*"
+ "(google.api.http).body": "*",
+ "(google.api.method_signature)": "name,input_config",
+ "(google.longrunning.operation_info).response_type": "google.protobuf.Empty",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"ExportData": {
@@ -1388,14 +1433,18 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{name=projects/*/locations/*/datasets/*}:exportData",
- "(google.api.http).body": "*"
+ "(google.api.http).body": "*",
+ "(google.api.method_signature)": "name,output_config",
+ "(google.longrunning.operation_info).response_type": "google.protobuf.Empty",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"GetAnnotationSpec": {
"requestType": "GetAnnotationSpecRequest",
- "responseType": "google.cloud.automl.v1.AnnotationSpec",
+ "responseType": "AnnotationSpec",
"options": {
- "(google.api.http).get": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}"
+ "(google.api.http).get": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}",
+ "(google.api.method_signature)": "name"
}
},
"CreateModel": {
@@ -1403,28 +1452,36 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{parent=projects/*/locations/*}/models",
- "(google.api.http).body": "model"
+ "(google.api.http).body": "model",
+ "(google.api.method_signature)": "parent,model",
+ "(google.longrunning.operation_info).response_type": "Model",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"GetModel": {
"requestType": "GetModelRequest",
"responseType": "Model",
"options": {
- "(google.api.http).get": "/v1/{name=projects/*/locations/*/models/*}"
+ "(google.api.http).get": "/v1/{name=projects/*/locations/*/models/*}",
+ "(google.api.method_signature)": "name"
}
},
"ListModels": {
"requestType": "ListModelsRequest",
"responseType": "ListModelsResponse",
"options": {
- "(google.api.http).get": "/v1/{parent=projects/*/locations/*}/models"
+ "(google.api.http).get": "/v1/{parent=projects/*/locations/*}/models",
+ "(google.api.method_signature)": "parent"
}
},
"DeleteModel": {
"requestType": "DeleteModelRequest",
"responseType": "google.longrunning.Operation",
"options": {
- "(google.api.http).delete": "/v1/{name=projects/*/locations/*/models/*}"
+ "(google.api.http).delete": "/v1/{name=projects/*/locations/*/models/*}",
+ "(google.api.method_signature)": "name",
+ "(google.longrunning.operation_info).response_type": "google.protobuf.Empty",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"UpdateModel": {
@@ -1432,7 +1489,8 @@
"responseType": "Model",
"options": {
"(google.api.http).patch": "/v1/{model.name=projects/*/locations/*/models/*}",
- "(google.api.http).body": "model"
+ "(google.api.http).body": "model",
+ "(google.api.method_signature)": "model,update_mask"
}
},
"DeployModel": {
@@ -1440,7 +1498,10 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{name=projects/*/locations/*/models/*}:deploy",
- "(google.api.http).body": "*"
+ "(google.api.http).body": "*",
+ "(google.api.method_signature)": "name",
+ "(google.longrunning.operation_info).response_type": "google.protobuf.Empty",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"UndeployModel": {
@@ -1448,7 +1509,10 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{name=projects/*/locations/*/models/*}:undeploy",
- "(google.api.http).body": "*"
+ "(google.api.http).body": "*",
+ "(google.api.method_signature)": "name",
+ "(google.longrunning.operation_info).response_type": "google.protobuf.Empty",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"ExportModel": {
@@ -1456,21 +1520,26 @@
"responseType": "google.longrunning.Operation",
"options": {
"(google.api.http).post": "/v1/{name=projects/*/locations/*/models/*}:export",
- "(google.api.http).body": "*"
+ "(google.api.http).body": "*",
+ "(google.api.method_signature)": "name,output_config",
+ "(google.longrunning.operation_info).response_type": "google.protobuf.Empty",
+ "(google.longrunning.operation_info).metadata_type": "OperationMetadata"
}
},
"GetModelEvaluation": {
"requestType": "GetModelEvaluationRequest",
"responseType": "ModelEvaluation",
"options": {
- "(google.api.http).get": "/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}"
+ "(google.api.http).get": "/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}",
+ "(google.api.method_signature)": "name"
}
},
"ListModelEvaluations": {
"requestType": "ListModelEvaluationsRequest",
"responseType": "ListModelEvaluationsResponse",
"options": {
- "(google.api.http).get": "/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations"
+ "(google.api.http).get": "/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations",
+ "(google.api.method_signature)": "parent,filter"
}
}
}
@@ -1479,11 +1548,18 @@
"fields": {
"parent": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "locations.googleapis.com/Location"
+ }
},
"dataset": {
"type": "Dataset",
- "id": 2
+ "id": 2,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
}
}
},
@@ -1491,7 +1567,11 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Dataset"
+ }
}
}
},
@@ -1499,7 +1579,11 @@
"fields": {
"parent": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "locations.googleapis.com/Location"
+ }
},
"filter": {
"type": "string",
@@ -1532,11 +1616,17 @@
"fields": {
"dataset": {
"type": "Dataset",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
},
"updateMask": {
"type": "google.protobuf.FieldMask",
- "id": 2
+ "id": 2,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
}
}
},
@@ -1544,7 +1634,11 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Dataset"
+ }
}
}
},
@@ -1552,11 +1646,18 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Dataset"
+ }
},
"inputConfig": {
"type": "InputConfig",
- "id": 3
+ "id": 3,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
}
}
},
@@ -1564,11 +1665,18 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Dataset"
+ }
},
"outputConfig": {
"type": "OutputConfig",
- "id": 3
+ "id": 3,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
}
}
},
@@ -1576,7 +1684,11 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/AnnotationSpec"
+ }
}
}
},
@@ -1584,11 +1696,18 @@
"fields": {
"parent": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "locations.googleapis.com/Location"
+ }
},
"model": {
"type": "Model",
- "id": 4
+ "id": 4,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
}
}
},
@@ -1596,7 +1715,11 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
}
}
},
@@ -1604,7 +1727,11 @@
"fields": {
"parent": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "locations.googleapis.com/Location"
+ }
},
"filter": {
"type": "string",
@@ -1637,7 +1764,11 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
}
}
},
@@ -1645,11 +1776,17 @@
"fields": {
"model": {
"type": "Model",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
},
"updateMask": {
"type": "google.protobuf.FieldMask",
- "id": 2
+ "id": 2,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
}
}
},
@@ -1673,7 +1810,11 @@
},
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
}
}
},
@@ -1681,7 +1822,11 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
}
}
},
@@ -1689,11 +1834,18 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
},
"outputConfig": {
"type": "ModelExportOutputConfig",
- "id": 3
+ "id": 3,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
}
}
},
@@ -1701,7 +1853,11 @@
"fields": {
"name": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/ModelEvaluation"
+ }
}
}
},
@@ -1709,11 +1865,18 @@
"fields": {
"parent": {
"type": "string",
- "id": 1
+ "id": 1,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED",
+ "(google.api.resource_reference).type": "automl.googleapis.com/Model"
+ }
},
"filter": {
"type": "string",
- "id": 3
+ "id": 3,
+ "options": {
+ "(google.api.field_behavior)": "REQUIRED"
+ }
},
"pageSize": {
"type": "int32",
diff --git a/packages/google-cloud-automl/src/v1/auto_ml_client.js b/packages/google-cloud-automl/src/v1/auto_ml_client.js
index 6f01eeb16b3..e01cce35b06 100644
--- a/packages/google-cloud-automl/src/v1/auto_ml_client.js
+++ b/packages/google-cloud-automl/src/v1/auto_ml_client.js
@@ -392,9 +392,9 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * The resource name of the project to create the dataset for.
+ * Required. The resource name of the project to create the dataset for.
* @param {Object} request.dataset
- * The dataset to create.
+ * Required. The dataset to create.
*
* This object should have the same structure as [Dataset]{@link google.cloud.automl.v1.Dataset}
* @param {Object} [options]
@@ -509,7 +509,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {Object} request.dataset
- * The dataset which replaces the resource on the server.
+ * Required. The dataset which replaces the resource on the server.
*
* This object should have the same structure as [Dataset]{@link google.cloud.automl.v1.Dataset}
* @param {Object} request.updateMask
@@ -574,7 +574,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * The resource name of the dataset to retrieve.
+ * Required. The resource name of the dataset to retrieve.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
@@ -628,13 +628,12 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * The resource name of the project from which to list datasets.
+ * Required. The resource name of the project from which to list datasets.
* @param {string} [request.filter]
* An expression for filtering the results of the request.
*
* * `dataset_metadata` - for existence of the case (e.g.
- * image_classification_dataset_metadata:*). Some examples of
- * using the filter are:
+ * image_classification_dataset_metadata:*). Some examples of using the filter are:
*
* * `translation_dataset_metadata:*` --> The dataset has
* translation_dataset_metadata.
@@ -749,13 +748,12 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * The resource name of the project from which to list datasets.
+ * Required. The resource name of the project from which to list datasets.
* @param {string} [request.filter]
* An expression for filtering the results of the request.
*
* * `dataset_metadata` - for existence of the case (e.g.
- * image_classification_dataset_metadata:*). Some examples of
- * using the filter are:
+ * image_classification_dataset_metadata:*). Some examples of using the filter are:
*
* * `translation_dataset_metadata:*` --> The dataset has
* translation_dataset_metadata.
@@ -807,7 +805,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * The resource name of the dataset to delete.
+ * Required. The resource name of the dataset to delete.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
@@ -901,6 +899,14 @@ class AutoMlClient {
/**
* Imports data into a dataset.
+ * For Tables, this method can only be called on an empty Dataset.
+ *
+ * For Tables:
+ * * A schema_inference_version parameter must be explicitly set.
+ * Returns an empty response in the
+ * response field when it completes.
*
* @param {Object} request
* The request object that will be sent.
@@ -1143,7 +1149,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * The resource name of the annotation spec to retrieve.
+ * Required. The resource name of the annotation spec to retrieve.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
@@ -1201,9 +1207,9 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * Resource name of the parent project where the model is being created.
+ * Required. Resource name of the parent project where the model is being created.
* @param {Object} request.model
- * The model to create.
+ * Required. The model to create.
*
* This object should have the same structure as [Model]{@link google.cloud.automl.v1.Model}
* @param {Object} [options]
@@ -1318,7 +1324,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * Resource name of the model.
+ * Required. Resource name of the model.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
@@ -1372,7 +1378,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {Object} request.model
- * The model which replaces the resource on the server.
+ * Required. The model which replaces the resource on the server.
*
* This object should have the same structure as [Model]{@link google.cloud.automl.v1.Model}
* @param {Object} request.updateMask
@@ -1437,12 +1443,12 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * Resource name of the project, from which to list the models.
+ * Required. Resource name of the project, from which to list the models.
* @param {string} [request.filter]
* An expression for filtering the results of the request.
*
* * `model_metadata` - for existence of the case (e.g.
- * image_classification_model_metadata:*).
+ * video_classification_model_metadata:*).
* * `dataset_id` - for = or !=. Some examples of using the filter are:
*
* * `image_classification_model_metadata:*` --> The model has
@@ -1559,12 +1565,12 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * Resource name of the project, from which to list the models.
+ * Required. Resource name of the project, from which to list the models.
* @param {string} [request.filter]
* An expression for filtering the results of the request.
*
* * `model_metadata` - for existence of the case (e.g.
- * image_classification_model_metadata:*).
+ * video_classification_model_metadata:*).
* * `dataset_id` - for = or !=. Some examples of using the filter are:
*
* * `image_classification_model_metadata:*` --> The model has
@@ -1618,7 +1624,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * Resource name of the model being deleted.
+ * Required. Resource name of the model being deleted.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
@@ -1718,8 +1724,8 @@ class AutoMlClient {
* node_number)
* will reset the deployment state without pausing the model's availability.
*
- * Only applicable for Text Classification, Image Object Detection; all other
- * domains manage deployment automatically.
+ * Only applicable for Text Classification, Image Object Detection, Tables, and Image Segmentation; all other domains manage
+ * deployment automatically.
*
* Returns an empty response in the
* response field when it completes.
@@ -1727,7 +1733,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * Resource name of the model to deploy.
+ * Required. Resource name of the model to deploy.
* @param {Object} [request.imageObjectDetectionModelDeploymentMetadata]
* Model deployment metadata specific to Image Object Detection.
*
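A minimal deploy call matching the documentation above; the metadata field is only meaningful for Image Object Detection models, and the names are assumed from the generated types:

```js
// Sketch: deploying an Image Object Detection model with an explicit
// node count. For other domains the metadata field is simply omitted.
async function deploy(client, projectId, modelId) {
  const [operation] = await client.deployModel({
    name: client.modelPath(projectId, 'us-central1', modelId),
    imageObjectDetectionModelDeploymentMetadata: {nodeCount: 2},
  });
  await operation.promise(); // resolves with an empty response
}
```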
@@ -1830,7 +1836,7 @@ class AutoMlClient {
/**
* Undeploys a model. If the model is not deployed this method has no effect.
*
- * Only applicable for Text Classification, Image Object Detection;
+ * Only applicable for Text Classification, Image Object Detection and Tables;
* all other domains manage deployment automatically.
*
* Returns an empty response in the
@@ -1839,7 +1845,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * Resource name of the model to undeploy.
+ * Required. Resource name of the model to undeploy.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
@@ -2060,7 +2066,7 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * Resource name for the model evaluation.
+ * Required. Resource name for the model evaluation.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
@@ -2114,11 +2120,11 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * Resource name of the model to list the model evaluations for.
+ * Required. Resource name of the model to list the model evaluations for.
* If modelId is set as "-", this will list model evaluations from across all
* models of the parent location.
* @param {string} request.filter
- * An expression for filtering the results of the request.
+ * Required. An expression for filtering the results of the request.
*
* * `annotation_spec_id` - for =, != or existence. See example below for
* the last.
@@ -2250,11 +2256,11 @@ class AutoMlClient {
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
- * Resource name of the model to list the model evaluations for.
+ * Required. Resource name of the model to list the model evaluations for.
* If modelId is set as "-", this will list model evaluations from across all
* models of the parent location.
* @param {string} request.filter
- * An expression for filtering the results of the request.
+ * Required. An expression for filtering the results of the request.
*
* * `annotation_spec_id` - for =, != or existence. See example below for
* the last.
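With `filter` now documented as Required, callers have to pass it explicitly. A sketch using only the documented =/!= operators on `annotation_spec_id` (the concrete filter value is illustrative):

```js
// Sketch: listing model evaluations now that `filter` is required.
async function listEvaluations(client, projectId, modelId) {
  const [evaluations] = await client.listModelEvaluations({
    parent: client.modelPath(projectId, 'us-central1', modelId),
    // Hypothetical filter: skip the per-label evaluation for spec 4.
    filter: 'annotation_spec_id!=4',
  });
  return evaluations;
}
```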
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_annotation_spec.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_annotation_spec.js
index 40f0ca3a7e7..e919675dac3 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_annotation_spec.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_annotation_spec.js
@@ -25,10 +25,8 @@
* 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}'
*
* @property {string} displayName
- * Required.
- * The name of the annotation spec to show in the interface. The name can be
+ * Required. The name of the annotation spec to show in the interface. The name can be
* up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`.
- * (_), and ASCII digits 0-9.
*
* @property {number} exampleCount
* Output only. The number of examples in the parent dataset
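The display_name rule above (up to 32 characters, matching `[a-zA-Z0-9_]+`) is simple enough to pre-validate client-side; a sketch:

```js
// Client-side sanity check mirroring the documented display_name rules;
// the service enforces the same constraints server-side.
function isValidAnnotationSpecDisplayName(name) {
  return /^[a-zA-Z0-9_]{1,32}$/.test(name);
}

isValidAnnotationSpecDisplayName('high_risk'); // true
isValidAnnotationSpecDisplayName('bad name!'); // false (space and '!')
```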
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_classification.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_classification.js
index 8d5122c7e29..f580cb77b22 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_classification.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_classification.js
@@ -35,6 +35,8 @@ const ClassificationAnnotation = {
/**
* Model evaluation metrics for classification problems.
+ * Note: For Video Classification these metrics only describe the quality of
+ * Video Classification predictions of "segment_classification" type.
*
* @property {number} auPrc
* Output only. The Area Under Precision-Recall Curve metric. Micro-averaged
@@ -117,10 +119,7 @@ const ClassificationEvaluationMetrics = {
* for each example.
*
* @property {number} f1ScoreAt1
- * Output only. The harmonic mean of
- * recall_at1
- * and
- * precision_at1.
+ * Output only. The harmonic mean of recall_at1 and precision_at1.
*
* @property {number} truePositiveCount
* Output only. The number of model created labels that match a ground truth
@@ -151,10 +150,19 @@ const ClassificationEvaluationMetrics = {
*
* @property {string[]} annotationSpecId
* Output only. IDs of the annotation specs used in the confusion matrix.
+ * For Tables CLASSIFICATION prediction_type, only the list of
+ * annotation_spec_display_name-s is populated.
*
* @property {string[]} displayName
* Output only. Display name of the annotation specs used in the confusion
- * matrix, as they were at the moment of the evaluation.
+ * matrix, as they were at the moment of the evaluation. For Tables
+ * CLASSIFICATION prediction_type-s, distinct values of the target
+ * column at the moment of the model evaluation are populated here.
*
* @property {Object[]} row
* Output only. Rows in the confusion matrix. The number of rows is equal to
@@ -179,9 +187,7 @@ const ClassificationEvaluationMetrics = {
* Output only. Value of the specific cell in the confusion matrix.
* The number of values each row has (i.e. the length of the row) is equal
* to the length of the `annotation_spec_id` field or, if that one is not
- * populated, length of the
- * display_name
- * field.
+ * populated, length of the display_name field.
*
* @typedef Row
* @memberof google.cloud.automl.v1
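The row/column layout described above pairs each row's `exampleCount` entries with the annotation spec ordering of the matrix. A hedged reading sketch, with field names per the generated types (camelCased in JS):

```js
// Sketch: printing a classification confusion matrix.
// `metrics` is a ClassificationEvaluationMetrics from a ModelEvaluation.
function printConfusionMatrix(metrics) {
  const m = metrics.confusionMatrix;
  m.row.forEach((row, i) => {
    // Labels come from displayName (always populated for Tables) or
    // annotationSpecId; both arrays share the matrix's ordering.
    const label = (m.displayName && m.displayName[i]) || m.annotationSpecId[i];
    // row.exampleCount[j]: examples whose ground truth is label i and
    // which the model annotated as label j.
    console.log(label, row.exampleCount.join(' '));
  });
}
```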
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_data_items.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_data_items.js
index 316bce06a44..0da53a1fd33 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_data_items.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_data_items.js
@@ -43,11 +43,9 @@ const Image = {
* characters long.
*
* @property {string} mimeType
- * Optional. The format of
- * content. Currently the only
- * two allowed values are "text/html" and "text/plain". If left blank, the
- * format is automatically determined from the type of the uploaded
- * content.
+ * Optional. The format of content. Currently the only two allowed
+ * values are "text/html" and "text/plain". If left blank, the format is
+ * automatically determined from the type of the uploaded content.
*
* @property {string} contentUri
* Output only. HTTP URI where you can download the content.
@@ -146,9 +144,7 @@ const Document = {
// This is for documentation. Actual contents will be loaded by gRPC.
/**
- * Describes the layout information of a
- * text_segment in the
- * document.
+ * Describes the layout information of a text_segment in the document.
*
* @property {Object} textSegment
* Text Segment that represents a segment in
@@ -157,14 +153,12 @@ const Document = {
* This object should have the same structure as [TextSegment]{@link google.cloud.automl.v1.TextSegment}
*
* @property {number} pageNumber
- * Page number of the
- * text_segment in
- * the original document, starts from 1.
+ * Page number of the text_segment in the original document, starts
+ * from 1.
*
* @property {Object} boundingPoly
- * The position of the
- * text_segment in
- * the page. Contains exactly 4
+ * The position of the text_segment in the page.
+ * Contains exactly 4
*
* normalized_vertices
* and they are connected by edges in the order provided, which will
@@ -176,9 +170,7 @@ const Document = {
* This object should have the same structure as [BoundingPoly]{@link google.cloud.automl.v1.BoundingPoly}
*
* @property {number} textSegmentType
- * The type of the
- * text_segment in
- * document.
+ * The type of the text_segment in document.
*
* The number should be among the values of [TextSegmentType]{@link google.cloud.automl.v1.TextSegmentType}
*
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_detection.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_detection.js
index 9f4e0b7fef3..c3a34e56311 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_detection.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_detection.js
@@ -24,8 +24,8 @@
* This object should have the same structure as [BoundingPoly]{@link google.cloud.automl.v1.BoundingPoly}
*
* @property {number} score
- * Output only. The confidence that this annotation is positive for the parent
- * example, value in [0, 1], higher means higher positivity confidence.
+ * Output only. The confidence that this annotation is positive for the parent example,
+ * value in [0, 1], higher means higher positivity confidence.
*
* @typedef ImageObjectDetectionAnnotation
* @memberof google.cloud.automl.v1
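Because `score` is a positivity confidence in [0, 1], a common pattern is to threshold detections after prediction. A sketch, with payload field names assumed from the generated AnnotationPayload:

```js
// Sketch: keeping only confident object detection annotations.
function confidentDetections(response, threshold = 0.6) {
  return response.payload.filter(
    p => p.imageObjectDetection && p.imageObjectDetection.score >= threshold
  );
}
```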
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_image.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_image.js
index 5b7f0fa8593..0ef7fd89239 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_image.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_image.js
@@ -82,38 +82,34 @@ const ImageObjectDetectionDatasetMetadata = {
* This is the default value.
* * `mobile-low-latency-1` - A model that, in addition to providing
* prediction via AutoML API, can also be exported (see
- * AutoMl.ExportModel)
- * and used on a mobile or edge device with TensorFlow
- * afterwards. Expected to have low latency, but may have lower
- * prediction quality than other models.
+ * AutoMl.ExportModel) and used on a mobile or edge device
+ * with TensorFlow afterwards. Expected to have low latency, but
+ * may have lower prediction quality than other models.
* * `mobile-versatile-1` - A model that, in addition to providing
* prediction via AutoML API, can also be exported (see
- * AutoMl.ExportModel)
- * and used on a mobile or edge device with TensorFlow
- * afterwards.
+ * AutoMl.ExportModel) and used on a mobile or edge device
+ * with TensorFlow afterwards.
* * `mobile-high-accuracy-1` - A model that, in addition to providing
* prediction via AutoML API, can also be exported (see
- * AutoMl.ExportModel)
- * and used on a mobile or edge device with TensorFlow
- * afterwards. Expected to have a higher latency, but should
- * also have a higher prediction quality than other models.
+ * AutoMl.ExportModel) and used on a mobile or edge device
+ * with TensorFlow afterwards. Expected to have a higher
+ * latency, but should also have a higher prediction quality
+ * than other models.
* * `mobile-core-ml-low-latency-1` - A model that, in addition to providing
* prediction via AutoML API, can also be exported (see
- * AutoMl.ExportModel)
- * and used on a mobile device with Core ML afterwards. Expected
- * to have low latency, but may have lower prediction quality
- * than other models.
+ * AutoMl.ExportModel) and used on a mobile device with Core
+ * ML afterwards. Expected to have low latency, but may have
+ * lower prediction quality than other models.
* * `mobile-core-ml-versatile-1` - A model that, in addition to providing
* prediction via AutoML API, can also be exported (see
- * AutoMl.ExportModel)
- * and used on a mobile device with Core ML afterwards.
+ * AutoMl.ExportModel) and used on a mobile device with Core
+ * ML afterwards.
* * `mobile-core-ml-high-accuracy-1` - A model that, in addition to
* providing prediction via AutoML API, can also be exported
- * (see
- * AutoMl.ExportModel)
- * and used on a mobile device with Core ML afterwards. Expected
- * to have a higher latency, but should also have a higher
- * prediction quality than other models.
+ * (see AutoMl.ExportModel) and used on a mobile device with
+ * Core ML afterwards. Expected to have a higher latency, but
+ * should also have a higher prediction quality than other
+ * models.
*
* @property {number} nodeQps
* Output only. An approximate number of online prediction QPS that can
@@ -144,6 +140,21 @@ const ImageClassificationModelMetadata = {
* * `cloud-low-latency-1` - A model to be used via prediction
* calls to AutoML API. Expected to have low latency, but may
* have lower prediction quality than other models.
+ * * `mobile-low-latency-1` - A model that, in addition to providing
+ * prediction via AutoML API, can also be exported (see
+ * AutoMl.ExportModel) and used on a mobile or edge device
+ * with TensorFlow afterwards. Expected to have low latency, but
+ * may have lower prediction quality than other models.
+ * * `mobile-versatile-1` - A model that, in addition to providing
+ * prediction via AutoML API, can also be exported (see
+ * AutoMl.ExportModel) and used on a mobile or edge device
+ * with TensorFlow afterwards.
+ * * `mobile-high-accuracy-1` - A model that, in addition to providing
+ * prediction via AutoML API, can also be exported (see
+ * AutoMl.ExportModel) and used on a mobile or edge device
+ * with TensorFlow afterwards. Expected to have a higher
+ * latency, but should also have a higher prediction quality
+ * than other models.
*
* @property {number} nodeCount
* Output only. The number of nodes this model is deployed on. A node is an
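The model-type values enumerated above are chosen at model-creation time. A hedged sketch of requesting an exportable mobile model (field names assumed from the generated v1 types):

```js
// Sketch: creating an image classification model that can later be
// exported with AutoMl.ExportModel and run on-device.
async function createEdgeModel(client, projectId, datasetId) {
  const [operation] = await client.createModel({
    parent: client.locationPath(projectId, 'us-central1'),
    model: {
      displayName: 'flowers_edge',
      datasetId: datasetId,
      imageClassificationModelMetadata: {
        // One of the documented types; mobile-* variants are exportable.
        modelType: 'mobile-low-latency-1',
      },
    },
  });
  return operation.promise(); // resolves with the created Model
}
```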
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_io.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_io.js
index c4313d26f5d..8d03bb159b6 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_io.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_io.js
@@ -16,8 +16,7 @@
// to be loaded as the JS file.
/**
- * Input configuration for
- * AutoMl.ImportData action.
+ * Input configuration for AutoMl.ImportData action.
*
* The format of input depends on dataset_metadata the Dataset into which
* the import is happening has. As input source the
@@ -115,6 +114,107 @@
*
*
*
+ * AutoML Video Intelligence
+ *
+ *
+ * Classification
+ *
+ * See [Preparing your training
+ * data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
+ * more information.
+ *
+ * CSV file(s) with each line in format:
+ *
+ * ML_USE,GCS_FILE_PATH
+ *
+ * For `ML_USE`, do not use `VALIDATE`.
+ *
+ * `GCS_FILE_PATH` is the path to another .csv file that describes training
+ * examples for a given `ML_USE`, using the following row format:
+ *
+ * GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
+ *
+ * Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
+ * to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ *
+ * `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
+ * length of the video, and the end time must be after the start time. Any
+ * segment of a video which has one or more labels on it is considered a
+ * hard negative for all other labels. Any segment with no labels on
+ * it is considered to be unknown. If a whole video is unknown, then
+ * it should be mentioned just once with ",," in place of `LABEL,
+ * TIME_SEGMENT_START,TIME_SEGMENT_END`.
+ *
+ * Sample top level CSV file:
+ *
+ * TRAIN,gs://folder/train_videos.csv
+ * TEST,gs://folder/test_videos.csv
+ * UNASSIGNED,gs://folder/other_videos.csv
+ *
+ * Sample rows of a CSV file for a particular ML_USE:
+ *
+ * gs://folder/video1.avi,car,120,180.000021
+ * gs://folder/video1.avi,bike,150,180.000021
+ * gs://folder/vid2.avi,car,0,60.5
+ * gs://folder/vid3.avi,,,
+ *
+ *
+ *
+ * Object Tracking
+ *
+ * See [Preparing your training
+ * data](https://cloud.google.com/video-intelligence/automl/object-tracking/docs/prepare) for more
+ * information.
+ *
+ * CSV file(s) with each line in format:
+ *
+ * ML_USE,GCS_FILE_PATH
+ *
+ * For `ML_USE`, do not use `VALIDATE`.
+ *
+ * `GCS_FILE_PATH` is the path to another .csv file that describes training
+ * examples for a given `ML_USE`, using the following row format:
+ *
+ * GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
+ *
+ * or
+ *
+ * GCS_FILE_PATH,,,,,,,,,,
+ *
+ * Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
+ * to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ * Providing `INSTANCE_ID`s can help to obtain a better model. When
+ * a specific labeled entity leaves the video frame and shows up again
+ * later, it is preferable, though not required, that the same
+ * `INSTANCE_ID` be given to it.
+ *
+ * `TIMESTAMP` must be within the length of the video, and the
+ * `BOUNDING_BOX` is assumed to be drawn on the video frame closest
+ * to the `TIMESTAMP`. Any frame referenced by a `TIMESTAMP` is expected
+ * to be exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per
+ * frame are allowed. If a whole video is unknown, then it should be
+ * mentioned just once with ",,,,,,,,,," in place of `LABEL,
+ * [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`.
+ *
+ * Sample top level CSV file:
+ *
+ * TRAIN,gs://folder/train_videos.csv
+ * TEST,gs://folder/test_videos.csv
+ * UNASSIGNED,gs://folder/other_videos.csv
+ *
+ * Seven sample rows of a CSV file for a particular ML_USE:
+ *
+ * gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
+ * gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
+ * gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
+ * gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
+ * gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
+ * gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
+ * gs://folder/video2.avi,,,,,,,,,,,
+ *
+ *
+ *
+ *
* AutoML Natural Language
*
*
@@ -212,9 +312,11 @@
* **JSONL files that reference documents**
*
* .JSONL files contain, per line, a JSON document that wraps a
- * `input_config` that contains the path to a source PDF document.
+ * `input_config` that contains the path to a source document.
* Multiple JSON documents can be separated using line breaks (\n).
*
+ * Supported document extensions: .PDF, .TIF, .TIFF
+ *
* For example:
*
* {
@@ -228,19 +330,19 @@
* {
* "document": {
* "input_config": {
- * "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+ * "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
* }
* }
* }
* }
*
- * **In-line JSONL files with PDF layout information**
+ * **In-line JSONL files with document layout information**
*
- * **Note:** You can only annotate PDF files using the UI. The format described
- * below applies to annotated PDF files exported using the UI or `exportData`.
+ * **Note:** You can only annotate documents using the UI. The format described
+ * below applies to annotated documents exported using the UI or `exportData`.
*
- * In-line .JSONL files for PDF documents contain, per line, a JSON document
- * that wraps a `document` field that provides the textual content of the PDF
+ * In-line .JSONL files for documents contain, per line, a JSON document
+ * that wraps a `document` field that provides the textual content of the
* document and the layout information.
*
* For example:
@@ -331,8 +433,9 @@
* 10MB or less in size.
*
* For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
+ *
* The `ML_USE` and `LABEL` columns are optional.
- * Supported file extensions: .TXT, .PDF, .ZIP
+ * Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
*
* A maximum of 100 unique labels are allowed per CSV row.
*
@@ -377,7 +480,7 @@
* 128kB or less in size.
*
* The `ML_USE` and `SENTIMENT` columns are optional.
- * Supported file extensions: .TXT, .PDF, .ZIP
+ * Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
*
* * `SENTIMENT` - An integer between 0 and
* Dataset.text_sentiment_dataset_metadata.sentiment_max
@@ -406,6 +509,54 @@
*
*
*
+ *
+ * AutoML Tables
+ *
+ * See [Preparing your training
+ * data](https://cloud.google.com/automl-tables/docs/prepare) for more
+ * information.
+ *
+ * You can use either
+ * gcs_source or
+ * bigquery_source.
+ * All input is concatenated into a single
+ * primary_table_spec_id.
+ *
+ * **For gcs_source:**
+ *
+ * CSV file(s), where the first row of the first file is the header,
+ * containing unique column names. If the first row of a subsequent
+ * file is the same as the header, then it is also treated as a
+ * header. All other rows contain values for the corresponding
+ * columns.
+ *
+ * Each .CSV file by itself must be 10GB or smaller, and their total
+ * size must be 100GB or smaller.
+ *
+ * First three sample rows of a CSV file:
+ *
+ * "Id","First Name","Last Name","Dob","Addresses"
+ *
+ * "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+ *
+ * "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+ *
+ * **For bigquery_source:**
+ *
+ * A URI of a BigQuery table. The user data size of the BigQuery
+ * table must be 100GB or smaller.
+ *
+ * An imported table must have between 2 and 1,000 columns, inclusive,
+ * and between 1,000 and 100,000,000 rows, inclusive. At most 5
+ * import data operations can run in parallel.
+ *
+ *
+ *
+ *
+ *
* **Input field definitions:**
*
* `ML_USE`
@@ -424,6 +575,11 @@
* For each label an AnnotationSpec is created which display_name
* becomes the label; AnnotationSpecs are given back in predictions.
*
+ * `INSTANCE_ID`
+ * : A positive integer that identifies a specific instance of a
+ * labeled entity on an example. Used e.g. to track two cars on
+ * a video while being able to tell which one is which.
+ *
* `BOUNDING_BOX`
* : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
* A rectangle parallel to the frame of the example (image,
@@ -441,6 +597,23 @@
* leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
* Point 0,0 is in top left.
*
+ * `TIME_SEGMENT_START`
+ * : (`TIME_OFFSET`)
+ * Expresses a beginning, inclusive, of a time segment
+ * within an example that has a time dimension
+ * (e.g. video).
+ *
+ * `TIME_SEGMENT_END`
+ * : (`TIME_OFFSET`)
+ * Expresses an end, exclusive, of a time segment within
+ * an example that has a time dimension (e.g. video).
+ *
+ * `TIME_OFFSET`
+ * : A number of seconds as measured from the start of an
+ * example (e.g. video). Fractions are allowed, up to a
+ * microsecond precision. "inf" is allowed, and it means the end
+ * of the example.
+ *
* `TEXT_SNIPPET`
* : The content of a text snippet, UTF-8 encoded, enclosed within
* double quotes ("").
@@ -460,9 +633,8 @@
*
* @property {Object} gcsSource
* The Google Cloud Storage location for the input content.
- * For AutoMl.ImportData,
- * `gcs_source` points to a CSV file with a structure described in
- * InputConfig.
+ * For AutoMl.ImportData, `gcs_source` points to a CSV file with
+ * a structure described in InputConfig.
*
* This object should have the same structure as [GcsSource]{@link google.cloud.automl.v1.GcsSource}
*
@@ -471,6 +643,14 @@
* imported data, any string must be up to 25000
* characters long.
*
+ * AutoML Tables
+ *
+ * `schema_inference_version`
+ * : (integer) This value must be supplied.
+ * The version of the algorithm to use for the initial inference
+ * of the column data types of the imported table.
+ * Allowed values: "1".
+ *
* @typedef InputConfig
* @memberof google.cloud.automl.v1
* @see [google.cloud.automl.v1.InputConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/automl/v1/io.proto}
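Putting the Tables-specific `schema_inference_version` requirement together with the CSV description above, an import call looks roughly like the sketch below. Map keys stay snake_case in the JS `params` map; everything else follows the generated client's camelCase conventions.

```js
// Sketch: importing Tables data from Cloud Storage. For Tables the
// schema_inference_version param must be set explicitly.
async function importTablesData(client, projectId, datasetId) {
  const [operation] = await client.importData({
    name: client.datasetPath(projectId, 'us-central1', datasetId),
    inputConfig: {
      gcsSource: {inputUris: ['gs://my-bucket/table.csv']},
      params: {schema_inference_version: '1'}, // only allowed value: "1"
    },
  });
  await operation.promise(); // ImportData resolves with an empty response
}
```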
@@ -491,6 +671,82 @@ const InputConfig = {
* non-terminal symbols defined near the end of this comment. The formats
* are:
*
+ * AutoML Vision
+ * Classification
+ *
+ * One or more CSV files where each line is a single column:
+ *
+ * GCS_FILE_PATH
+ *
+ * The Google Cloud Storage location of an image of up to
+ * 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
+ * This path is treated as the ID in the batch predict output.
+ *
+ * Sample rows:
+ *
+ * gs://folder/image1.jpeg
+ * gs://folder/image2.gif
+ * gs://folder/image3.png
+ *
+ * Object Detection
+ *
+ * One or more CSV files where each line is a single column:
+ *
+ * GCS_FILE_PATH
+ *
+ * The Google Cloud Storage location of an image of up to
+ * 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
+ * This path is treated as the ID in the batch predict output.
+ *
+ * Sample rows:
+ *
+ * gs://folder/image1.jpeg
+ * gs://folder/image2.gif
+ * gs://folder/image3.png
+ *
+ *
+ *
+ * AutoML Video Intelligence
+ * Classification
+ *
+ * One or more CSV files where each line is a single column:
+ *
+ * GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+ *
+ * `GCS_FILE_PATH` is the Google Cloud Storage location of a video up to
+ * 50GB in size and up to 3h in duration.
+ * Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ *
+ * `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
+ * length of the video, and the end time must be after the start time.
+ *
+ * Sample rows:
+ *
+ * gs://folder/video1.mp4,10,40
+ * gs://folder/video1.mp4,20,60
+ * gs://folder/vid2.mov,0,inf
+ *
+ * Object Tracking
+ *
+ * One or more CSV files where each line is a single column:
+ *
+ * GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+ *
+ * `GCS_FILE_PATH` is the Google Cloud Storage location of a video up to
+ * 50GB in size and up to 3h in duration.
+ * Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ *
+ * `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
+ * length of the video, and the end time must be after the start time.
+ *
+ * Sample rows:
+ *
+ * gs://folder/video1.mp4,10,40
+ * gs://folder/video1.mp4,20,60
+ * gs://folder/vid2.mov,0,inf
+ *
+ *
+ *
* AutoML Natural Language
* Classification
*
@@ -499,13 +755,15 @@ const InputConfig = {
* GCS_FILE_PATH
*
* `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
- * Supported file extensions: .TXT, .PDF
+ * Supported file extensions: .TXT, .PDF, .TIF, .TIFF
+ *
* Text files can be no larger than 10MB in size.
*
* Sample rows:
*
* gs://folder/text1.txt
* gs://folder/text2.pdf
+ * gs://folder/text3.tif
*
* Sentiment Analysis
* One or more CSV files where each line is a single column:
@@ -513,13 +771,15 @@ const InputConfig = {
* GCS_FILE_PATH
*
* `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
- * Supported file extensions: .TXT, .PDF
+ * Supported file extensions: .TXT, .PDF, .TIF, .TIFF
+ *
* Text files can be no larger than 128kB in size.
*
* Sample rows:
*
* gs://folder/text1.txt
* gs://folder/text2.pdf
+ * gs://folder/text3.tif
*
* Entity Extraction
*
@@ -535,9 +795,10 @@ const InputConfig = {
* be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
* unique.
*
- * Each document JSONL file contains, per line, a proto that wraps a
- * Document proto with `input_config` set. Only PDF documents are
- * currently supported, and each PDF document cannot exceed 2MB in size.
+ * Each document JSONL file contains, per line, a proto that wraps a Document
+ * proto with `input_config` set. Each document cannot exceed 2MB in size.
+ *
+ * Supported document extensions: .PDF, .TIF, .TIFF
*
* Each JSONL file must not exceed 100MB in size, and no more than 20
* JSONL files may be passed.
@@ -585,7 +846,7 @@ const InputConfig = {
* {
* "document": {
* "input_config": {
- * "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+ * "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
* }
* }
* }
@@ -593,12 +854,83 @@ const InputConfig = {
*
*
*
+ * AutoML Tables
+ *
+ * See [Preparing your training
+ * data](https://cloud.google.com/automl-tables/docs/predict-batch) for more
+ * information.
+ *
+ * You can use either
+ * gcs_source
+ * or
+ * bigquery_source.
+ *
+ * **For gcs_source:**
+ *
+ * CSV file(s), each by itself 10GB or smaller and total size must be
+ * 100GB or smaller, where first file must have a header containing
+ * column names. If the first row of a subsequent file is the same as
+ * the header, then it is also treated as a header. All other rows
+ * contain values for the corresponding columns.
+ *
+ * The column names must contain the model's
+ * input_feature_column_specs' display_name-s
+ * (order doesn't matter). The columns corresponding to the model's
+ * input feature column specs must contain values compatible with the
+ * column spec's data types. Prediction on all the rows, i.e. the CSV
+ * lines, will be attempted.
+ *
+ *
+ * Sample rows from a CSV file:
+ *
+ * "First Name","Last Name","Dob","Addresses"
+ *
+ * "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+ *
+ * "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+ *
+ * **For bigquery_source:**
+ *
+ * The URI of a BigQuery table. The user data size of the BigQuery
+ * table must be 100GB or smaller.
+ *
+ * The column names must contain the model's
+ * input_feature_column_specs' display_name-s
+ * (order doesn't matter). The columns corresponding to the model's
+ * input feature column specs must contain values compatible with the
+ * column spec's data types. Prediction on all the rows of the table
+ * will be attempted.
+ *
+ *
+ *
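A batch prediction request tying the input and output configs above together; field names are assumed from the generated v1 PredictionServiceClient:

```js
// Sketch: batch prediction over a CSV input, writing results under a
// Cloud Storage prefix. BatchPredict is a long-running operation.
const automl = require('@google-cloud/automl');
const predictionClient = new automl.v1.PredictionServiceClient();

async function batchPredict(projectId, modelId) {
  const [operation] = await predictionClient.batchPredict({
    name: predictionClient.modelPath(projectId, 'us-central1', modelId),
    inputConfig: {gcsSource: {inputUris: ['gs://my-bucket/batch.csv']}},
    outputConfig: {
      gcsDestination: {outputUriPrefix: 'gs://my-bucket/results/'},
    },
  });
  const [result] = await operation.promise(); // BatchPredictResult
  return result;
}
```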
* **Input field definitions:**
*
* `GCS_FILE_PATH`
* : The path to a file on Google Cloud Storage. For example,
* "gs://folder/video.avi".
*
+ * `TIME_SEGMENT_START`
+ * : (`TIME_OFFSET`)
+ * Expresses a beginning, inclusive, of a time segment
+ * within an example that has a time dimension
+ * (e.g. video).
+ *
+ * `TIME_SEGMENT_END`
+ * : (`TIME_OFFSET`)
+ *     Expresses an end, exclusive, of a time segment within
+ *     an example that has a time dimension (e.g. video).
+ *
+ * `TIME_OFFSET`
+ * : A number of seconds as measured from the start of an
+ * example (e.g. video). Fractions are allowed, up to a
+ * microsecond precision. "inf" is allowed, and it means the end
+ * of the example.
+ *
* **Errors:**
*
* If any of the provided CSV files can't be parsed or if more than certain
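
A minimal sketch of wiring the Tables input described above into a batch
prediction call with this Node.js client; the project, model ID, and bucket
names are hypothetical placeholders:

    const automl = require('@google-cloud/automl');
    const client = new automl.v1.PredictionServiceClient();

    async function tablesBatchPredict() {
      const request = {
        name: client.modelPath('my-project', 'us-central1', 'TBL0000000000'),
        // gcs_source: CSV whose header carries the model's
        // input_feature_column_specs' display_name-s.
        inputConfig: {gcsSource: {inputUris: ['gs://my-bucket/batch_input.csv']}},
        outputConfig: {gcsDestination: {outputUriPrefix: 'gs://my-bucket/results/'}},
      };
      const [operation] = await client.batchPredict(request);
      const [response] = await operation.promise();
      console.log('Batch prediction finished:', response);
    }
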
@@ -642,70 +974,7 @@ const DocumentInputConfig = {
};
/**
- * Output configuration for ExportData.
- *
- * As destination the
- * gcs_destination
- * must be set unless specified otherwise for a domain. If gcs_destination is
- * set then in the given directory a new directory is created. Its name
- * will be "export_data--",
- * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
- * Only ground truth annotations are exported (not approved annotations are
- * not exported).
- *
- * The outputs correspond to how the data was imported, and may be used as
- * input to import data. The output formats are represented as EBNF with literal
- * commas and same non-terminal symbols definitions are these in import data's
- * InputConfig:
- *
- * * For Image Classification:
- * CSV file(s) `image_classification_1.csv`,
- * `image_classification_2.csv`,...,`image_classification_N.csv`with
- * each line in format:
- * ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
- * where GCS_FILE_PATHs point at the original, source locations of the
- * imported images.
- * For MULTICLASS classification type, there can be at most one LABEL
- * per example.
- *
- * * For Image Object Detection:
- * CSV file(s) `image_object_detection_1.csv`,
- * `image_object_detection_2.csv`,...,`image_object_detection_N.csv`
- * with each line in format:
- * ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
- * where GCS_FILE_PATHs point at the original, source locations of the
- * imported images.
- *
- * * For Text Classification:
- * In the created directory CSV file(s) `text_classification_1.csv`,
- * `text_classification_2.csv`, ...,`text_classification_N.csv` will be
- * created where N depends on the total number of examples exported.
- * Each line in the CSV is of the format:
- * ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
- * where GCS_FILE_PATHs point at the exported .txt files containing
- * the text content of the imported example. For MULTICLASS
- * classification type, there will be at most one LABEL per example.
- *
- * * For Text Sentiment:
- * In the created directory CSV file(s) `text_sentiment_1.csv`,
- * `text_sentiment_2.csv`, ...,`text_sentiment_N.csv` will be
- * created where N depends on the total number of examples exported.
- * Each line in the CSV is of the format:
- * ML_USE,GCS_FILE_PATH,SENTIMENT
- * where GCS_FILE_PATHs point at the exported .txt files containing
- * the text content of the imported example.
- *
- * * For Text Extraction:
- * CSV file `text_extraction.csv`, with each line in format:
- * ML_USE,GCS_FILE_PATH
- * GCS_FILE_PATH leads to a .JSONL (i.e. JSON Lines) file which
- * contains, per line, a proto that wraps a TextSnippet proto (in json
- * representation) followed by AnnotationPayload protos (called
- * annotations). If initially documents had been imported, the JSONL
- * will point at the original, source locations of the imported
- * documents.
- *
- * * For Translation:
+ * * For Translation:
* CSV file `translation.csv`, with each line in format:
* ML_USE,GCS_FILE_PATH
* GCS_FILE_PATH leads to a .TSV file which describes examples that have
@@ -713,10 +982,34 @@ const DocumentInputConfig = {
* TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
* language)
*
+ * * For Tables:
+ * Output depends on whether the dataset was imported from Google Cloud
+ * Storage or BigQuery.
+ * Google Cloud Storage case:
+ *
+ * gcs_destination
+ *     must be set. CSV file(s) `tables_1.csv`,
+ *     `tables_2.csv`,...,`tables_N.csv` are exported, each having the
+ *     table's column names as a header line; all other lines contain
+ *     values for the header columns.
+ * BigQuery case:
+ *
+ * bigquery_destination
+ * pointing to a BigQuery project must be set. In the given project a
+ * new dataset will be created with name
+ *
+ *     `export_data_<dataset-display-name>_<timestamp>`
+ *     where <dataset-display-name> will be made
+ * BigQuery-dataset-name compatible (e.g. most special characters will
+ * become underscores), and timestamp will be in
+ * YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
+ * dataset a new table called `primary_table` will be created, and
+ *     filled with precisely the same data as was obtained on import.
+ *
* @property {Object} gcsDestination
- * Required. The Google Cloud Storage location where the output is to be
- * written to. For Image Object Detection, Text Extraction in the given
- * directory a new directory will be created with name:
+ * Required. The Google Cloud Storage location where the output is to be written to.
+ * For Image Object Detection, Text Extraction, Video Classification and
+ * Tables, in the given directory a new directory will be created with name:
* export_data-<dataset-display-name>-<timestamp> where
* timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
* output will be written into that directory.
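
As a sketch, the ExportData flow described above can be driven like this
(dataset ID and bucket are hypothetical placeholders):

    const automl = require('@google-cloud/automl');
    const client = new automl.v1.AutoMlClient();

    async function exportDataset() {
      const [operation] = await client.exportData({
        name: client.datasetPath('my-project', 'us-central1', 'TCN0000000000'),
        // A new export_data-<dataset-display-name>-<timestamp> directory is
        // created under this prefix.
        outputConfig: {gcsDestination: {outputUriPrefix: 'gs://my-bucket/export/'}},
      });
      await operation.promise();
    }
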
@@ -744,6 +1037,101 @@ const OutputConfig = {
* where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
* of it depends on the ML problem the predictions are made for.
*
+ * * For Image Classification:
+ * In the created directory files `image_classification_1.jsonl`,
+ * `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
+ * will be created, where N may be 1, and depends on the
+ * total number of the successfully predicted images and annotations.
+ * A single image will be listed only once with all its annotations,
+ * and its annotations will never be split across files.
+ * Each .JSONL file will contain, per line, a JSON representation of a
+ *     proto that wraps image's "ID" : "<id_value>" followed by a list of
+ * zero or more AnnotationPayload protos (called annotations), which
+ * have classification detail populated.
+ *     If prediction for any image failed (partially or completely), then
+ * additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
+ * files will be created (N depends on total number of failed
+ * predictions). These files will have a JSON representation of a proto
+ *     that wraps the same "ID" : "<id_value>" but here followed by
+ * exactly one
+ *
+ *     [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ *     containing only `code` and `message` fields.
+ *
+ * * For Image Object Detection:
+ * In the created directory files `image_object_detection_1.jsonl`,
+ * `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
+ * will be created, where N may be 1, and depends on the
+ * total number of the successfully predicted images and annotations.
+ * Each .JSONL file will contain, per line, a JSON representation of a
+ *     proto that wraps image's "ID" : "<id_value>" followed by a list of
+ * zero or more AnnotationPayload protos (called annotations), which
+ * have image_object_detection detail populated. A single image will
+ * be listed only once with all its annotations, and its annotations
+ * will never be split across files.
+ * If prediction for any image failed (partially or completely), then
+ * additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
+ * files will be created (N depends on total number of failed
+ * predictions). These files will have a JSON representation of a proto
+ *     that wraps the same "ID" : "<id_value>" but here followed by
+ * exactly one
+ *
+ *     [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ *     containing only `code` and `message` fields.
+ * * For Video Classification:
+ * In the created directory a video_classification.csv file, and a .JSON
+ * file per each video classification requested in the input (i.e. each
+ * line in given CSV(s)), will be created.
+ *
+ * The format of video_classification.csv is:
+ *
+ * GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+ * where:
+ * GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+ * the prediction input lines (i.e. video_classification.csv has
+ * precisely the same number of lines as the prediction input had.)
+ * JSON_FILE_NAME = Name of .JSON file in the output directory, which
+ * contains prediction responses for the video time segment.
+ * STATUS = "OK" if prediction completed successfully, or an error code
+ * with message otherwise. If STATUS is not "OK" then the .JSON file
+ * for that line may not exist or be empty.
+ *
+ * Each .JSON file, assuming STATUS is "OK", will contain a list of
+ * AnnotationPayload protos in JSON format, which are the predictions
+ * for the video time segment the file is assigned to in the
+ * video_classification.csv. All AnnotationPayload protos will have
+ * video_classification field set, and will be sorted by
+ * video_classification.type field (note that the returned types are
+ *     governed by `classification_types` parameter in
+ * PredictService.BatchPredictRequest.params).
+ *
+ * * For Video Object Tracking:
+ * In the created directory a video_object_tracking.csv file will be
+ *     created, and multiple files video_object_tracking_1.json,
+ *     video_object_tracking_2.json,..., video_object_tracking_N.json,
+ * where N is the number of requests in the input (i.e. the number of
+ * lines in given CSV(s)).
+ *
+ * The format of video_object_tracking.csv is:
+ *
+ * GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+ * where:
+ * GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+ * the prediction input lines (i.e. video_object_tracking.csv has
+ * precisely the same number of lines as the prediction input had.)
+ * JSON_FILE_NAME = Name of .JSON file in the output directory, which
+ * contains prediction responses for the video time segment.
+ * STATUS = "OK" if prediction completed successfully, or an error
+ * code with message otherwise. If STATUS is not "OK" then the .JSON
+ * file for that line may not exist or be empty.
+ *
+ * Each .JSON file, assuming STATUS is "OK", will contain a list of
+ * AnnotationPayload protos in JSON format, which are the predictions
+ * for each frame of the video time segment the file is assigned to in
+ * video_object_tracking.csv. All AnnotationPayload protos will have
+ * video_object_tracking field set.
* * For Text Classification:
* In the created directory files `text_classification_1.jsonl`,
* `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
@@ -751,18 +1139,18 @@ const OutputConfig = {
* total number of inputs and annotations found.
*
* Each .JSONL file will contain, per line, a JSON representation of a
- * proto that wraps input text (or pdf) file in
+ * proto that wraps input text file (or document) in
* the text snippet (or document) proto and a list of
* zero or more AnnotationPayload protos (called annotations), which
- * have classification detail populated. A single text (or pdf) file
- * will be listed only once with all its annotations, and its
+ * have classification detail populated. A single text file (or
+ * document) will be listed only once with all its annotations, and its
* annotations will never be split across files.
*
- * If prediction for any text (or pdf) file failed (partially or
+ * If prediction for any input file (or document) failed (partially or
* completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
* `errors_N.jsonl` files will be created (N depends on total number of
* failed predictions). These files will have a JSON representation of a
- * proto that wraps input text (or pdf) file followed by exactly one
+ * proto that wraps input file followed by exactly one
*
* [`google.rpc.Status`](https:
* //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
@@ -775,18 +1163,18 @@ const OutputConfig = {
* total number of inputs and annotations found.
*
* Each .JSONL file will contain, per line, a JSON representation of a
- * proto that wraps input text (or pdf) file in
+ * proto that wraps input text file (or document) in
* the text snippet (or document) proto and a list of
* zero or more AnnotationPayload protos (called annotations), which
- * have text_sentiment detail populated. A single text (or pdf) file
- * will be listed only once with all its annotations, and its
+ * have text_sentiment detail populated. A single text file (or
+ * document) will be listed only once with all its annotations, and its
* annotations will never be split across files.
*
- * If prediction for any text (or pdf) file failed (partially or
+ * If prediction for any input file (or document) failed (partially or
* completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
* `errors_N.jsonl` files will be created (N depends on total number of
* failed predictions). These files will have a JSON representation of a
- * proto that wraps input text (or pdf) file followed by exactly one
+ * proto that wraps input file followed by exactly one
*
* [`google.rpc.Status`](https:
* //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
@@ -822,13 +1210,107 @@ const OutputConfig = {
* failed predictions). These files will have a JSON representation of a
* proto that wraps either the "id" : "<id_value>" (in case of inline)
* or the document proto (in case of document) but here followed by
- * exactly one [`google.rpc.Status`](https:
+ * exactly one
+ *
+ * [`google.rpc.Status`](https:
* //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
* containing only `code` and `message`.
*
+ * * For Tables:
+ * Output depends on whether
+ *
+ * gcs_destination
+ * or
+ *
+ * bigquery_destination
+ * is set (either is allowed).
+ * Google Cloud Storage case:
+ * In the created directory files `tables_1.csv`, `tables_2.csv`,...,
+ * `tables_N.csv` will be created, where N may be 1, and depends on
+ * the total number of the successfully predicted rows.
+ * For all CLASSIFICATION
+ *
+ * prediction_type-s:
+ * Each .csv file will contain a header, listing all columns'
+ *
+ * display_name-s
+ * given on input followed by M target column names in the format of
+ *
+ *       "<target_column_display_name>_<target_value>_score" where M is
+ *       the number of distinct target values,
+ * i.e. number of distinct values in the target column of the table
+ * used to train the model. Subsequent lines will contain the
+ * respective values of successfully predicted rows, with the last,
+ * i.e. the target, columns having the corresponding prediction
+ * scores.
+ * For REGRESSION and FORECASTING
+ *
+ * prediction_type-s:
+ * Each .csv file will contain a header, listing all columns'
+ * display_name-s
+ * given on input followed by the predicted target column with name
+ * in the format of
+ *
+ *       "predicted_<target_column_display_name>"
+ * Subsequent lines will contain the respective values of
+ * successfully predicted rows, with the last, i.e. the target,
+ * column having the predicted target value.
+ * If prediction for any rows failed, then an additional
+ * `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be
+ * created (N depends on total number of failed rows). These files
+ *       will have an analogous format to `tables_*.csv`, but always with a
+ * single target column having
+ *
+ *       [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ * represented as a JSON string, and containing only `code` and
+ * `message`.
+ * BigQuery case:
+ *
+ * bigquery_destination
+ * pointing to a BigQuery project must be set. In the given project a
+ * new dataset will be created with name
+ *     `prediction_<model-display-name>_<timestamp-of-prediction-call>`
+ *     where <model-display-name> will be made
+ * BigQuery-dataset-name compatible (e.g. most special characters will
+ * become underscores), and timestamp will be in
+ * YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
+ * two tables will be created, `predictions`, and `errors`.
+ * The `predictions` table's column names will be the input columns'
+ *
+ * display_name-s
+ * followed by the target column with name in the format of
+ *
+ *     "predicted_<target_column_display_name>"
+ * The input feature columns will contain the respective values of
+ * successfully predicted rows, with the target column having an
+ * ARRAY of
+ *
+ * AnnotationPayloads,
+ * represented as STRUCT-s, containing
+ * TablesAnnotation.
+ * The `errors` table contains rows for which the prediction has
+ *     failed; it has analogous input columns while the target column name
+ * is in the format of
+ *
+ *     "errors_<target_column_display_name>",
+ * and as a value has
+ *
+ *     [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ * represented as a STRUCT, and containing only `code` and `message`.
+ *
* @property {Object} gcsDestination
- * Required. The Google Cloud Storage location of the directory where the
- * output is to be written to.
+ * Required. The Google Cloud Storage location of the directory where the output is to
+ * be written to.
*
* This object should have the same structure as [GcsDestination]{@link google.cloud.automl.v1.GcsDestination}
*
@@ -844,9 +1326,8 @@ const BatchPredictOutputConfig = {
* Output configuration for ModelExport Action.
*
* @property {Object} gcsDestination
- * Required. The Google Cloud Storage location where the model is to be
- * written to. This location may only be set for the following model
- * formats:
+ * Required. The Google Cloud Storage location where the model is to be written to.
+ * This location may only be set for the following model formats:
* "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
*
* Under the directory given as the destination a new one with name
@@ -865,7 +1346,8 @@ const BatchPredictOutputConfig = {
*
* * For Image Classification mobile-low-latency-1, mobile-versatile-1,
* mobile-high-accuracy-1:
- * "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js".
+ * "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
+ * "docker".
*
* * For Image Classification mobile-core-ml-low-latency-1,
* mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
@@ -881,7 +1363,14 @@ const BatchPredictOutputConfig = {
* devices.
* * tf_saved_model - A tensorflow model in SavedModel format.
* * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
- * be used in the browser and in Node.js using JavaScript.x`
+ * be used in the browser and in Node.js using JavaScript.
+ * * docker - Used for Docker containers. Use the params field to customize
+ *     the container. The container is verified to work correctly on the
+ *     Ubuntu 16.04 operating system. See more at the
+ *     [containers quickstart](https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart).
* * core_ml - Used for iOS mobile devices.
*
* @property {Object.} params
@@ -889,6 +1378,10 @@ const BatchPredictOutputConfig = {
* requirements for the to be exported model files, any string must be up to
* 25000 characters long.
*
+ * * For `docker` format:
+ * `cpu_architecture` - (string) "x86_64" (default).
+ * `gpu_architecture` - (string) "none" (default), "nvidia".
+ *
* @typedef ModelExportOutputConfig
* @memberof google.cloud.automl.v1
* @see [google.cloud.automl.v1.ModelExportOutputConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/automl/v1/io.proto}
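
A minimal sketch of requesting a "docker" export with the params above
(project, model ID, and bucket are hypothetical placeholders):

    const automl = require('@google-cloud/automl');
    const client = new automl.v1.AutoMlClient();

    async function exportDockerModel() {
      const [operation] = await client.exportModel({
        name: client.modelPath('my-project', 'us-central1', 'ICN0000000000'),
        outputConfig: {
          modelFormat: 'docker',
          // params is a map<string, string>; keys mirror the doc above.
          params: {cpu_architecture: 'x86_64', gpu_architecture: 'none'},
          gcsDestination: {outputUriPrefix: 'gs://my-bucket/model-export/'},
        },
      });
      await operation.promise();
    }
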
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_model_evaluation.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_model_evaluation.js
index 4a163693b7f..c53dc4fa34d 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_model_evaluation.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_model_evaluation.js
@@ -19,7 +19,10 @@
* Evaluation results of a model.
*
* @property {Object} classificationEvaluationMetrics
- * Model evaluation metrics for image, text classification.
+ * Model evaluation metrics for image, text, video and tables
+ * classification.
+ * A Tables problem is considered a classification problem when the target
+ * column has CATEGORY DataType.
*
* This object should have the same structure as [ClassificationEvaluationMetrics]{@link google.cloud.automl.v1.ClassificationEvaluationMetrics}
*
@@ -50,8 +53,15 @@
* `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`
*
* @property {string} annotationSpecId
- * Output only. The ID of the annotation spec that the model evaluation
- * applies to. The The ID is empty for the overall model evaluation.
+ * Output only. The ID of the annotation spec that the model evaluation applies to.
+ * The ID is empty for the overall model evaluation.
+ * For Tables, annotation specs do not exist in the dataset and this ID is
+ * never set, but for CLASSIFICATION
+ *
+ * prediction_type-s
+ * the
+ * display_name
+ * field is used.
*
* @property {string} displayName
* Output only. The value of
@@ -59,7 +69,12 @@
* at the moment when the model was trained. Because this field returns a
* value at model training time, for different models trained from the same
* dataset, the values may differ, since display names could have been changed
- * between the two model's trainings.
+ * between the two models' trainings. For Tables CLASSIFICATION
+ *
+ * prediction_type-s,
+ * distinct values of the target column at the moment of the model evaluation
+ * are populated here.
+ * The display_name is empty for the overall model evaluation.
*
* @property {Object} createTime
* Output only. Timestamp when this model evaluation was created.
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_prediction_service.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_prediction_service.js
index 84dcb285668..bdd84cd2c08 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_prediction_service.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_prediction_service.js
@@ -16,11 +16,10 @@
// to be loaded as the JS file.
/**
- * Request message for
- * PredictionService.Predict.
+ * Request message for PredictionService.Predict.
*
* @property {string} name
- * Name of the model requested to serve the prediction.
+ * Required. Name of the model requested to serve the prediction.
*
* @property {Object} payload
* Required. Payload to perform a prediction on. The payload must match the
@@ -32,19 +31,34 @@
* Additional domain-specific parameters, any string must be up to 25000
* characters long.
*
- * * For Image Classification:
+ * AutoML Vision Classification
*
- * `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- * makes predictions for an image, it will only produce results that have
- * at least this confidence score. The default is 0.5.
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for an image, it will only produce results that have
+ * at least this confidence score. The default is 0.5.
*
- * * For Image Object Detection:
- * `score_threshold` - (float) When Model detects objects on the image,
- * it will only produce bounding boxes which have at least this
- * confidence score. Value in 0 to 1 range, default is 0.5.
- * `max_bounding_box_count` - (int64) No more than this number of bounding
- * boxes will be returned in the response. Default is 100, the
- * requested value may be limited by server.
+ * AutoML Vision Object Detection
+ *
+ * `score_threshold`
+ * : (float) When Model detects objects on the image,
+ * it will only produce bounding boxes which have at least this
+ * confidence score. Value in 0 to 1 range, default is 0.5.
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding
+ * boxes returned. The default is 100. The
+ * number of returned bounding boxes might be limited by the server.
+ *
+ * AutoML Tables
+ *
+ * `feature_importance`
+ * : (boolean) Whether
+ *
+ * feature_importance
+ * is populated in the returned list of
+ * TablesAnnotation
+ * objects. The default is false.
*
* @typedef PredictRequest
* @memberof google.cloud.automl.v1
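
A sketch of an Object Detection request shaped as documented above; the model
path and image file are hypothetical placeholders, and every value in the
params map is passed as a string:

    const fs = require('fs');

    const request = {
      name: 'projects/my-project/locations/us-central1/models/IOD0000000000',
      payload: {image: {imageBytes: fs.readFileSync('./image.jpg')}},
      // map<string, string>: numeric thresholds are sent as strings.
      params: {score_threshold: '0.6', max_bounding_box_count: '50'},
    };
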
@@ -55,48 +69,46 @@ const PredictRequest = {
};
/**
- * Response message for
- * PredictionService.Predict.
+ * Response message for PredictionService.Predict.
*
* @property {Object[]} payload
* Prediction result.
- * Translation and Text Sentiment will return precisely one payload.
+ * AutoML Translation and AutoML Natural Language Sentiment Analysis
+ * return precisely one payload.
*
* This object should have the same structure as [AnnotationPayload]{@link google.cloud.automl.v1.AnnotationPayload}
*
* @property {Object} preprocessedInput
* The preprocessed example that AutoML actually makes prediction on.
* Empty if AutoML does not preprocess the input example.
- * * For Text Extraction:
- * If the input is a .pdf file, the OCR'ed text will be provided in
- * document_text.
- *
- * * For Text Classification:
- * If the input is a .pdf file, the OCR'ed trucated text will be provided in
- * document_text.
*
- * * For Text Sentiment:
- * If the input is a .pdf file, the OCR'ed trucated text will be provided in
- * document_text.
+ * For AutoML Natural Language (Classification, Entity Extraction, and
+ * Sentiment Analysis), if the input is a document, the recognized text is
+ * returned in the
+ * document_text
+ * property.
*
* This object should have the same structure as [ExamplePayload]{@link google.cloud.automl.v1.ExamplePayload}
*
* @property {Object.} metadata
* Additional domain-specific prediction response metadata.
*
- * * For Image Object Detection:
- * `max_bounding_box_count` - (int64) At most that many bounding boxes per
- * image could have been returned.
+ * AutoML Vision Object Detection
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding boxes to return per image.
+ *
+ * AutoML Natural Language Sentiment Analysis
*
- * * For Text Sentiment:
- * `sentiment_score` - (float, deprecated) A value between -1 and 1,
- * -1 maps to least positive sentiment, while 1 maps to the most positive
- * one and the higher the score, the more positive the sentiment in the
- * document is. Yet these values are relative to the training data, so
- * e.g. if all data was positive then -1 will be also positive (though
- * the least).
- * The sentiment_score shouldn't be confused with "score" or "magnitude"
- * from the previous Natural Language Sentiment Analysis API.
+ * `sentiment_score`
+ * : (float, deprecated) A value between -1 and 1, where
+ *   -1 maps to the least positive sentiment and 1 maps to the most positive
+ *   one; the higher the score, the more positive the sentiment in the
+ *   document is. Note that these values are relative to the training data, so
+ * e.g. if all data was positive then -1 is also positive (though
+ * the least).
+ * `sentiment_score` is not the same as "score" and "magnitude"
+ * from Sentiment Analysis in the Natural Language API.
*
* @typedef PredictResponse
* @memberof google.cloud.automl.v1
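
For instance, the per-payload results and the deprecated `sentiment_score`
metadata entry can be read like this (assuming a `client` and a
sentiment-model `request` built as described above):

    async function showSentiment(client, request) {
      const [response] = await client.predict(request);
      for (const annotation of response.payload) {
        console.log(annotation.textSentiment.sentiment);
      }
      // metadata is a plain map<string, string>.
      console.log(response.metadata['sentiment_score']);
    }
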
@@ -107,11 +119,10 @@ const PredictResponse = {
};
/**
- * Request message for
- * PredictionService.BatchPredict.
+ * Request message for PredictionService.BatchPredict.
*
* @property {string} name
- * Name of the model requested to serve the batch prediction.
+ * Required. Name of the model requested to serve the batch prediction.
*
* @property {Object} inputConfig
* Required. The input configuration for batch prediction.
@@ -128,26 +139,86 @@ const PredictResponse = {
* Additional domain-specific parameters for the predictions, any string must
* be up to 25000 characters long.
*
- * * For Text Classification:
- *
- * `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- * makes predictions for a text snippet, it will only produce results
- * that have at least this confidence score. The default is 0.5.
- *
- * * For Image Classification:
- *
- * `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- * makes predictions for an image, it will only produce results that
- * have at least this confidence score. The default is 0.5.
- *
- * * For Image Object Detection:
- *
- * `score_threshold` - (float) When Model detects objects on the image,
- * it will only produce bounding boxes which have at least this
- * confidence score. Value in 0 to 1 range, default is 0.5.
- * `max_bounding_box_count` - (int64) No more than this number of bounding
- * boxes will be produced per image. Default is 100, the
- * requested value may be limited by server.
+ * AutoML Natural Language Classification
+ *
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for a text snippet, it will only produce results
+ * that have at least this confidence score. The default is 0.5.
+ *
+ *
+ * AutoML Vision Classification
+ *
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for an image, it will only produce results that
+ * have at least this confidence score. The default is 0.5.
+ *
+ * AutoML Vision Object Detection
+ *
+ * `score_threshold`
+ * : (float) When Model detects objects on the image,
+ * it will only produce bounding boxes which have at least this
+ * confidence score. Value in 0 to 1 range, default is 0.5.
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding
+ *   boxes returned per image. The default is 100; the
+ *   number of bounding boxes returned might be limited by the server.
+ *
+ * AutoML Video Intelligence Classification
+ *
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for a video, it will only produce results that
+ * have at least this confidence score. The default is 0.5.
+ *
+ * `segment_classification`
+ * : (boolean) Set to true to request
+ * segment-level classification. AutoML Video Intelligence returns
+ * labels and their confidence scores for the entire segment of the
+ *   video that the user specified in the request configuration.
+ * The default is true.
+ *
+ * `shot_classification`
+ * : (boolean) Set to true to request shot-level
+ * classification. AutoML Video Intelligence determines the boundaries
+ *   for each camera shot in the entire segment of the video that the user
+ * specified in the request configuration. AutoML Video Intelligence
+ * then returns labels and their confidence scores for each detected
+ * shot, along with the start and end time of the shot.
+ * The default is false.
+ *
+ *   WARNING: Model evaluation is not done for this classification type;
+ *   its quality depends on training data, but there are no metrics
+ * provided to describe that quality.
+ *
+ * `1s_interval_classification`
+ * : (boolean) Set to true to request
+ * classification for a video at one-second intervals. AutoML Video
+ * Intelligence returns labels and their confidence scores for each
+ *   second of the entire segment of the video that the user specified in the
+ * request configuration. The default is false.
+ *
+ *   WARNING: Model evaluation is not done for this classification
+ *   type; its quality depends on training data, but there are no
+ * metrics provided to describe that quality.
+ *
+ * AutoML Video Intelligence Object Tracking
+ *
+ * `score_threshold`
+ * : (float) When Model detects objects on video frames,
+ * it will only produce bounding boxes which have at least this
+ * confidence score. Value in 0 to 1 range, default is 0.5.
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding
+ *   boxes returned per image. The default is 100; the
+ * number of bounding boxes returned might be limited by the server.
+ *
+ * `min_bounding_box_size`
+ * : (float) Only bounding boxes whose shortest edge is
+ *   at least this long, as a relative value of the video frame size, are
+ * returned. Value in 0 to 1 range. Default is 0.
*
* @typedef BatchPredictRequest
* @memberof google.cloud.automl.v1
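
A sketch of a video batch prediction request using the params above; the model
path and bucket are hypothetical placeholders, and the boolean switches are
sent as strings because params is a map<string, string>:

    const request = {
      name: 'projects/my-project/locations/us-central1/models/VCN0000000000',
      inputConfig: {gcsSource: {inputUris: ['gs://my-bucket/video_batch.csv']}},
      outputConfig: {gcsDestination: {outputUriPrefix: 'gs://my-bucket/video_out/'}},
      params: {
        score_threshold: '0.8',
        segment_classification: 'true',
        shot_classification: 'true',
        '1s_interval_classification': 'false',
      },
    };
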
@@ -160,15 +231,20 @@ const BatchPredictRequest = {
/**
* Result of the Batch Predict. This message is returned in
* response of the operation returned
- * by the
- * PredictionService.BatchPredict.
+ * by the PredictionService.BatchPredict.
*
* @property {Object.} metadata
* Additional domain-specific prediction response metadata.
*
- * * For Image Object Detection:
- * `max_bounding_box_count` - (int64) At most that many bounding boxes per
- * image could have been returned.
+ * AutoML Vision Object Detection
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding boxes returned per image.
+ *
+ * AutoML Video Intelligence Object Tracking
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding boxes returned per frame.
*
* @typedef BatchPredictResult
* @memberof google.cloud.automl.v1
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_service.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_service.js
index 7e9ebeb1d8f..17be4f96c14 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_service.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_service.js
@@ -16,14 +16,13 @@
// to be loaded as the JS file.
/**
- * Request message for
- * AutoMl.CreateDataset.
+ * Request message for AutoMl.CreateDataset.
*
* @property {string} parent
- * The resource name of the project to create the dataset for.
+ * Required. The resource name of the project to create the dataset for.
*
* @property {Object} dataset
- * The dataset to create.
+ * Required. The dataset to create.
*
* This object should have the same structure as [Dataset]{@link google.cloud.automl.v1.Dataset}
*
@@ -36,11 +35,10 @@ const CreateDatasetRequest = {
};
/**
- * Request message for
- * AutoMl.GetDataset.
+ * Request message for AutoMl.GetDataset.
*
* @property {string} name
- * The resource name of the dataset to retrieve.
+ * Required. The resource name of the dataset to retrieve.
*
* @typedef GetDatasetRequest
* @memberof google.cloud.automl.v1
@@ -51,18 +49,16 @@ const GetDatasetRequest = {
};
/**
- * Request message for
- * AutoMl.ListDatasets.
+ * Request message for AutoMl.ListDatasets.
*
* @property {string} parent
- * The resource name of the project from which to list datasets.
+ * Required. The resource name of the project from which to list datasets.
*
* @property {string} filter
* An expression for filtering the results of the request.
*
* * `dataset_metadata` - for existence of the case (e.g.
- * image_classification_dataset_metadata:*). Some examples of
- * using the filter are:
+ * image_classification_dataset_metadata:*). Some examples of using the filter are:
*
* * `translation_dataset_metadata:*` --> The dataset has
* translation_dataset_metadata.
@@ -74,8 +70,7 @@ const GetDatasetRequest = {
* @property {string} pageToken
* A token identifying a page of results for the server to return
* Typically obtained via
- * ListDatasetsResponse.next_page_token
- * of the previous
+ * ListDatasetsResponse.next_page_token of the previous
* AutoMl.ListDatasets call.
*
* @typedef ListDatasetsRequest
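
A minimal sketch of the filter syntax above in a ListDatasets call (project
and location are hypothetical placeholders):

    const automl = require('@google-cloud/automl');
    const client = new automl.v1.AutoMlClient();

    async function listTranslationDatasets() {
      const [datasets] = await client.listDatasets({
        parent: client.locationPath('my-project', 'us-central1'),
        // Only datasets that carry translation metadata.
        filter: 'translation_dataset_metadata:*',
      });
      for (const dataset of datasets) {
        console.log(dataset.name, dataset.displayName);
      }
    }
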
@@ -87,8 +82,7 @@ const ListDatasetsRequest = {
};
/**
- * Response message for
- * AutoMl.ListDatasets.
+ * Response message for AutoMl.ListDatasets.
*
* @property {Object[]} datasets
* The datasets read.
@@ -97,9 +91,7 @@ const ListDatasetsRequest = {
*
* @property {string} nextPageToken
* A token to retrieve next page of results.
- * Pass to
- * ListDatasetsRequest.page_token
- * to obtain that page.
+ * Pass to ListDatasetsRequest.page_token to obtain that page.
*
* @typedef ListDatasetsResponse
* @memberof google.cloud.automl.v1
@@ -110,11 +102,10 @@ const ListDatasetsResponse = {
};
/**
- * Request message for
- * AutoMl.UpdateDataset
+ * Request message for AutoMl.UpdateDataset
*
* @property {Object} dataset
- * The dataset which replaces the resource on the server.
+ * Required. The dataset which replaces the resource on the server.
*
* This object should have the same structure as [Dataset]{@link google.cloud.automl.v1.Dataset}
*
@@ -132,11 +123,10 @@ const UpdateDatasetRequest = {
};
/**
- * Request message for
- * AutoMl.DeleteDataset.
+ * Request message for AutoMl.DeleteDataset.
*
* @property {string} name
- * The resource name of the dataset to delete.
+ * Required. The resource name of the dataset to delete.
*
* @typedef DeleteDatasetRequest
* @memberof google.cloud.automl.v1
@@ -147,8 +137,7 @@ const DeleteDatasetRequest = {
};
/**
- * Request message for
- * AutoMl.ImportData.
+ * Request message for AutoMl.ImportData.
*
* @property {string} name
* Required. Dataset name. Dataset must already exist. All imported
@@ -169,8 +158,7 @@ const ImportDataRequest = {
};
/**
- * Request message for
- * AutoMl.ExportData.
+ * Request message for AutoMl.ExportData.
*
* @property {string} name
* Required. The resource name of the dataset.
@@ -189,11 +177,10 @@ const ExportDataRequest = {
};
/**
- * Request message for
- * AutoMl.GetAnnotationSpec.
+ * Request message for AutoMl.GetAnnotationSpec.
*
* @property {string} name
- * The resource name of the annotation spec to retrieve.
+ * Required. The resource name of the annotation spec to retrieve.
*
* @typedef GetAnnotationSpecRequest
* @memberof google.cloud.automl.v1
@@ -204,14 +191,13 @@ const GetAnnotationSpecRequest = {
};
/**
- * Request message for
- * AutoMl.CreateModel.
+ * Request message for AutoMl.CreateModel.
*
* @property {string} parent
- * Resource name of the parent project where the model is being created.
+ * Required. Resource name of the parent project where the model is being created.
*
* @property {Object} model
- * The model to create.
+ * Required. The model to create.
*
* This object should have the same structure as [Model]{@link google.cloud.automl.v1.Model}
*
@@ -224,11 +210,10 @@ const CreateModelRequest = {
};
/**
- * Request message for
- * AutoMl.GetModel.
+ * Request message for AutoMl.GetModel.
*
* @property {string} name
- * Resource name of the model.
+ * Required. Resource name of the model.
*
* @typedef GetModelRequest
* @memberof google.cloud.automl.v1
@@ -239,17 +224,16 @@ const GetModelRequest = {
};
/**
- * Request message for
- * AutoMl.ListModels.
+ * Request message for AutoMl.ListModels.
*
* @property {string} parent
- * Resource name of the project, from which to list the models.
+ * Required. Resource name of the project, from which to list the models.
*
* @property {string} filter
* An expression for filtering the results of the request.
*
* * `model_metadata` - for existence of the case (e.g.
- * image_classification_model_metadata:*).
+ * video_classification_model_metadata:*).
* * `dataset_id` - for = or !=. Some examples of using the filter are:
*
* * `image_classification_model_metadata:*` --> The model has
@@ -262,8 +246,7 @@ const GetModelRequest = {
* @property {string} pageToken
* A token identifying a page of results for the server to return
* Typically obtained via
- * ListModelsResponse.next_page_token
- * of the previous
+ * ListModelsResponse.next_page_token of the previous
* AutoMl.ListModels call.
*
* @typedef ListModelsRequest
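
A minimal sketch of the model filter above in a ListModels call (project and
location are hypothetical placeholders):

    const automl = require('@google-cloud/automl');
    const client = new automl.v1.AutoMlClient();

    async function listImageClassificationModels() {
      const [models] = await client.listModels({
        parent: client.locationPath('my-project', 'us-central1'),
        filter: 'image_classification_model_metadata:*',
      });
      for (const model of models) {
        console.log(model.name, model.displayName);
      }
    }
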
@@ -275,8 +258,7 @@ const ListModelsRequest = {
};
/**
- * Response message for
- * AutoMl.ListModels.
+ * Response message for AutoMl.ListModels.
*
* @property {Object[]} model
* List of models in the requested page.
@@ -285,9 +267,7 @@ const ListModelsRequest = {
*
* @property {string} nextPageToken
* A token to retrieve next page of results.
- * Pass to
- * ListModelsRequest.page_token
- * to obtain that page.
+ * Pass to ListModelsRequest.page_token to obtain that page.
*
* @typedef ListModelsResponse
* @memberof google.cloud.automl.v1
@@ -298,11 +278,10 @@ const ListModelsResponse = {
};
/**
- * Request message for
- * AutoMl.DeleteModel.
+ * Request message for AutoMl.DeleteModel.
*
* @property {string} name
- * Resource name of the model being deleted.
+ * Required. Resource name of the model being deleted.
*
* @typedef DeleteModelRequest
* @memberof google.cloud.automl.v1
@@ -313,11 +292,10 @@ const DeleteModelRequest = {
};
/**
- * Request message for
- * AutoMl.UpdateModel
+ * Request message for AutoMl.UpdateModel
*
* @property {Object} model
- * The model which replaces the resource on the server.
+ * Required. The model which replaces the resource on the server.
*
* This object should have the same structure as [Model]{@link google.cloud.automl.v1.Model}
*
@@ -335,8 +313,7 @@ const UpdateModelRequest = {
};
/**
- * Request message for
- * AutoMl.DeployModel.
+ * Request message for AutoMl.DeployModel.
*
* @property {Object} imageObjectDetectionModelDeploymentMetadata
* Model deployment metadata specific to Image Object Detection.
@@ -349,7 +326,7 @@ const UpdateModelRequest = {
* This object should have the same structure as [ImageClassificationModelDeploymentMetadata]{@link google.cloud.automl.v1.ImageClassificationModelDeploymentMetadata}
*
* @property {string} name
- * Resource name of the model to deploy.
+ * Required. Resource name of the model to deploy.
*
* @typedef DeployModelRequest
* @memberof google.cloud.automl.v1
@@ -360,11 +337,10 @@ const DeployModelRequest = {
};
/**
- * Request message for
- * AutoMl.UndeployModel.
+ * Request message for AutoMl.UndeployModel.
*
* @property {string} name
- * Resource name of the model to undeploy.
+ * Required. Resource name of the model to undeploy.
*
* @typedef UndeployModelRequest
* @memberof google.cloud.automl.v1
@@ -375,9 +351,9 @@ const UndeployModelRequest = {
};
/**
- * Request message for
- * AutoMl.ExportModel. Models need
- * to be enabled for exporting, otherwise an error code will be returned.
+ * Request message for AutoMl.ExportModel.
+ * Models need to be enabled for exporting, otherwise an error code will be
+ * returned.
*
* @property {string} name
* Required. The resource name of the model to export.
@@ -396,11 +372,10 @@ const ExportModelRequest = {
};
/**
- * Request message for
- * AutoMl.GetModelEvaluation.
+ * Request message for AutoMl.GetModelEvaluation.
*
* @property {string} name
- * Resource name for the model evaluation.
+ * Required. Resource name for the model evaluation.
*
* @typedef GetModelEvaluationRequest
* @memberof google.cloud.automl.v1
@@ -411,16 +386,15 @@ const GetModelEvaluationRequest = {
};
/**
- * Request message for
- * AutoMl.ListModelEvaluations.
+ * Request message for AutoMl.ListModelEvaluations.
*
* @property {string} parent
- * Resource name of the model to list the model evaluations for.
+ * Required. Resource name of the model to list the model evaluations for.
* If modelId is set as "-", this will list model evaluations from across all
* models of the parent location.
*
* @property {string} filter
- * An expression for filtering the results of the request.
+ * Required. An expression for filtering the results of the request.
*
* * `annotation_spec_id` - for =, != or existence. See example below for
* the last.
@@ -438,10 +412,8 @@ const GetModelEvaluationRequest = {
* @property {string} pageToken
* A token identifying a page of results for the server to return.
* Typically obtained via
- * ListModelEvaluationsResponse.next_page_token
- * of the previous
- * AutoMl.ListModelEvaluations
- * call.
+ * ListModelEvaluationsResponse.next_page_token of the previous
+ * AutoMl.ListModelEvaluations call.
*
* @typedef ListModelEvaluationsRequest
* @memberof google.cloud.automl.v1
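
A minimal sketch of the required filter in a ListModelEvaluations call
(project and model ID are hypothetical placeholders):

    const automl = require('@google-cloud/automl');
    const client = new automl.v1.AutoMlClient();

    async function listPerLabelEvaluations() {
      const [evaluations] = await client.listModelEvaluations({
        parent: client.modelPath('my-project', 'us-central1', 'ICN0000000000'),
        // Only evaluations tied to a specific annotation spec.
        filter: 'annotation_spec_id:*',
      });
      for (const evaluation of evaluations) {
        console.log(evaluation.annotationSpecId, evaluation.displayName);
      }
    }
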
@@ -452,8 +424,7 @@ const ListModelEvaluationsRequest = {
};
/**
- * Response message for
- * AutoMl.ListModelEvaluations.
+ * Response message for AutoMl.ListModelEvaluations.
*
* @property {Object[]} modelEvaluation
* List of model evaluations in the requested page.
@@ -462,11 +433,8 @@ const ListModelEvaluationsRequest = {
*
* @property {string} nextPageToken
* A token to retrieve next page of results.
- * Pass to the
- * ListModelEvaluationsRequest.page_token
- * field of a new
- * AutoMl.ListModelEvaluations
- * request to obtain that page.
+ * Pass to the ListModelEvaluationsRequest.page_token field of a new
+ * AutoMl.ListModelEvaluations request to obtain that page.
*
* @typedef ListModelEvaluationsResponse
* @memberof google.cloud.automl.v1
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text.js
index d2a8edd569e..6ca136cd227 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text.js
@@ -71,11 +71,12 @@ const TextExtractionModelMetadata = {
* Dataset metadata for text sentiment.
*
* @property {number} sentimentMax
- * Required. A sentiment is expressed as an integer ordinal, where higher
- * value means a more positive sentiment. The range of sentiments that will be
- * used is between 0 and sentiment_max (inclusive on both ends), and all the
- * values in the range must be represented in the dataset before a model can
- * be created. sentiment_max value must be between 1 and 10 (inclusive).
+ * Required. A sentiment is expressed as an integer ordinal, where higher value
+ * means a more positive sentiment. The range of sentiments that will be used
+ * is between 0 and sentiment_max (inclusive on both ends), and all the values
+ * in the range must be represented in the dataset before a model can be
+ * created.
+ * sentiment_max value must be between 1 and 10 (inclusive).
*
* @typedef TextSentimentDatasetMetadata
* @memberof google.cloud.automl.v1
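
A minimal sketch of creating a dataset with this metadata; the project and
display name are hypothetical, and sentimentMax of 4 assumes a 0..4 rating
scale where every value appears in the training data:

    const automl = require('@google-cloud/automl');
    const client = new automl.v1.AutoMlClient();

    async function createSentimentDataset() {
      const [operation] = await client.createDataset({
        parent: client.locationPath('my-project', 'us-central1'),
        dataset: {
          displayName: 'review_sentiment',
          textSentimentDatasetMetadata: {sentimentMax: 4},
        },
      });
      const [dataset] = await operation.promise();
      console.log(dataset.name);
    }
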
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text_sentiment.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text_sentiment.js
index fdfb2c496a5..c9f57e368a8 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text_sentiment.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_text_sentiment.js
@@ -20,9 +20,9 @@
*
* @property {number} sentiment
* Output only. The sentiment with the semantic, as given to the
- * AutoMl.ImportData when
- * populating the dataset from which the model used for the prediction had
- * been trained. The sentiment values are between 0 and
+ * AutoMl.ImportData when populating the dataset from which the model used
+ * for the prediction had been trained.
+ * The sentiment values are between 0 and
* Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive),
* with higher value meaning more positive sentiment. They are completely
* relative, i.e. 0 means least positive sentiment and sentiment_max means
diff --git a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_translation.js b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_translation.js
index 56c2b9218a8..ff24352e6e2 100644
--- a/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_translation.js
+++ b/packages/google-cloud-automl/src/v1/doc/google/cloud/automl/v1/doc_translation.js
@@ -60,11 +60,11 @@ const TranslationEvaluationMetrics = {
*
* @property {string} sourceLanguageCode
* Output only. Inferred from the dataset.
- * The source languge (The BCP-47 language code) that is used for training.
+ * The source language (The BCP-47 language code) that is used for training.
*
* @property {string} targetLanguageCode
- * Output only. The target languge (The BCP-47 language code) that is used for
- * training.
+ * Output only. The target language (The BCP-47 language code) that is used
+ * for training.
*
* @typedef TranslationModelMetadata
* @memberof google.cloud.automl.v1
diff --git a/packages/google-cloud-automl/src/v1/prediction_service_client.js b/packages/google-cloud-automl/src/v1/prediction_service_client.js
index 50095aee0c0..50029e2bb35 100644
--- a/packages/google-cloud-automl/src/v1/prediction_service_client.js
+++ b/packages/google-cloud-automl/src/v1/prediction_service_client.js
@@ -246,26 +246,51 @@ class PredictionServiceClient {
// -------------------
/**
- * Perform an online prediction. The prediction result will be directly
+ * Perform an online prediction. The prediction result is directly
* returned in the response.
- * Available for following ML problems, and their expected request payloads:
- * * Image Classification - Image in .JPEG, .GIF or .PNG format, image_bytes
- * up to 30MB.
- * * Image Object Detection - Image in .JPEG, .GIF or .PNG format, image_bytes
- * up to 30MB.
- * * Text Classification - TextSnippet, content up to 60,000 characters,
- * UTF-8 encoded.
- * * Text Extraction - TextSnippet, content up to 30,000 characters,
- * UTF-8 NFC encoded.
- * * Translation - TextSnippet, content up to 25,000 characters, UTF-8
- * encoded.
- * * Text Sentiment - TextSnippet, content up 500 characters, UTF-8
- * encoded.
+ * Available for the following ML scenarios, and their expected request payloads:
+ *
+ * AutoML Vision Classification
+ * : An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
+ *
+ * AutoML Vision Object Detection
+ * : An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
+ *
+ * AutoML Natural Language Classification
+ * : A TextSnippet up to 60,000 characters, UTF-8 encoded, or a document in
+ *   .PDF, .TIF or .TIFF format with size up to 2MB.
+ *
+ * AutoML Natural Language Entity Extraction
+ * : A TextSnippet up to 10,000 characters, UTF-8 NFC encoded, or a document
+ *   in .PDF, .TIF or .TIFF format with size up to 20MB.
+ *
+ * AutoML Natural Language Sentiment Analysis
+ * : A TextSnippet up to 60,000 characters, UTF-8 encoded, or a document in
+ *   .PDF, .TIF or .TIFF format with size up to 2MB.
+ *
+ * AutoML Translation
+ * : A TextSnippet up to 25,000 characters, UTF-8 encoded.
+ *
+ * AutoML Tables
+ * : A row with column values matching the columns of the model, up to 5MB.
+ *   Not available for FORECASTING `prediction_type`.
+ *
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * Name of the model requested to serve the prediction.
+ * Required. Name of the model requested to serve the prediction.
* @param {Object} request.payload
* Required. Payload to perform a prediction on. The payload must match the
* problem type that the model was trained to solve.
@@ -275,19 +300,34 @@ class PredictionServiceClient {
* Additional domain-specific parameters, any string must be up to 25000
* characters long.
*
- * * For Image Classification:
+ * AutoML Vision Classification
*
- * `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- * makes predictions for an image, it will only produce results that have
- * at least this confidence score. The default is 0.5.
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for an image, it will only produce results that have
+ * at least this confidence score. The default is 0.5.
*
- * * For Image Object Detection:
- * `score_threshold` - (float) When Model detects objects on the image,
- * it will only produce bounding boxes which have at least this
- * confidence score. Value in 0 to 1 range, default is 0.5.
- * `max_bounding_box_count` - (int64) No more than this number of bounding
- * boxes will be returned in the response. Default is 100, the
- * requested value may be limited by server.
+ * AutoML Vision Object Detection
+ *
+ * `score_threshold`
+ * : (float) When Model detects objects on the image,
+ * it will only produce bounding boxes which have at least this
+ * confidence score. Value in 0 to 1 range, default is 0.5.
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding
+ * boxes returned. The default is 100. The
+ * number of returned bounding boxes might be limited by the server.
+ *
+ * AutoML Tables
+ *
+ * `feature_importance`
+ * : (boolean) Whether
+ *
+ * feature_importance
+ * is populated in the returned list of
+ * TablesAnnotation
+ * objects. The default is false.
* @param {Object} [options]
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
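
A minimal usage sketch for this method (project, model ID, and image file are
hypothetical placeholders):

    const fs = require('fs');
    const automl = require('@google-cloud/automl');
    const client = new automl.v1.PredictionServiceClient();

    async function classifyImage() {
      const [response] = await client.predict({
        name: client.modelPath('my-project', 'us-central1', 'ICN0000000000'),
        payload: {image: {imageBytes: fs.readFileSync('./photo.jpg')}},
        params: {score_threshold: '0.6'},
      });
      for (const annotation of response.payload) {
        console.log(`${annotation.displayName}: ${annotation.classification.score}`);
      }
    }
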
@@ -341,23 +381,26 @@ class PredictionServiceClient {
}
/**
- * Perform a batch prediction. Unlike the online
- * Predict, batch
+ * Perform a batch prediction. Unlike the online Predict, batch
* prediction result won't be immediately available in the response. Instead,
* a long running operation object is returned. User can poll the operation
* result via GetOperation
- * method. Once the operation is done,
- * BatchPredictResult is returned
- * in the response field. Available
- * for following ML problems:
- * * Image Classification
- * * Image Object Detection
- * * Text Extraction
+ * method. Once the operation is done, BatchPredictResult is returned in
+ * the response field.
+ * Available for the following ML scenarios:
+ *
+ * * AutoML Vision Classification
+ * * AutoML Vision Object Detection
+ * * AutoML Video Intelligence Classification
+ * * AutoML Video Intelligence Object Tracking
+ * * AutoML Natural Language Classification
+ * * AutoML Natural Language Entity Extraction
+ * * AutoML Natural Language Sentiment Analysis
+ * * AutoML Tables
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
- * Name of the model requested to serve the batch prediction.
+ * Required. Name of the model requested to serve the batch prediction.
* @param {Object} request.inputConfig
* Required. The input configuration for batch prediction.
*
@@ -371,26 +414,86 @@ class PredictionServiceClient {
* Additional domain-specific parameters for the predictions, any string must
* be up to 25000 characters long.
*
- * * For Text Classification:
- *
- * `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- * makes predictions for a text snippet, it will only produce results
- * that have at least this confidence score. The default is 0.5.
- *
- * * For Image Classification:
- *
- * `score_threshold` - (float) A value from 0.0 to 1.0. When the model
- * makes predictions for an image, it will only produce results that
- * have at least this confidence score. The default is 0.5.
- *
- * * For Image Object Detection:
- *
- * `score_threshold` - (float) When Model detects objects on the image,
- * it will only produce bounding boxes which have at least this
- * confidence score. Value in 0 to 1 range, default is 0.5.
- * `max_bounding_box_count` - (int64) No more than this number of bounding
- * boxes will be produced per image. Default is 100, the
- * requested value may be limited by server.
+ * AutoML Natural Language Classification
+ *
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for a text snippet, it will only produce results
+ * that have at least this confidence score. The default is 0.5.
+ *
+ *
+ * AutoML Vision Classification
+ *
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for an image, it will only produce results that
+ * have at least this confidence score. The default is 0.5.
+ *
+ * AutoML Vision Object Detection
+ *
+ * `score_threshold`
+ * : (float) When Model detects objects on the image,
+ * it will only produce bounding boxes which have at least this
+ * confidence score. Value in 0 to 1 range, default is 0.5.
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding
+ *     boxes returned per image. The default is 100; the
+ *     number of bounding boxes returned might be limited by the server.
+ *
+ * AutoML Video Intelligence Classification
+ *
+ * `score_threshold`
+ * : (float) A value from 0.0 to 1.0. When the model
+ * makes predictions for a video, it will only produce results that
+ * have at least this confidence score. The default is 0.5.
+ *
+ * `segment_classification`
+ * : (boolean) Set to true to request
+ * segment-level classification. AutoML Video Intelligence returns
+ * labels and their confidence scores for the entire segment of the
+ *     video that the user specified in the request configuration.
+ * The default is true.
+ *
+ * `shot_classification`
+ * : (boolean) Set to true to request shot-level
+ * classification. AutoML Video Intelligence determines the boundaries
+ *     for each camera shot in the entire segment of the video that the user
+ * specified in the request configuration. AutoML Video Intelligence
+ * then returns labels and their confidence scores for each detected
+ * shot, along with the start and end time of the shot.
+ * The default is false.
+ *
+ * WARNING: Model evaluation is not done for this classification type;
+ * its quality depends on the training data, but no metrics are
+ * provided to describe that quality.
+ *
+ * `1s_interval_classification`
+ * : (boolean) Set to true to request
+ * classification for a video at one-second intervals. AutoML Video
+ * Intelligence returns labels and their confidence scores for each
+ * second of the entire segment of the video that the user specified in the
+ * request configuration. The default is false.
+ *
+ * WARNING: Model evaluation is not done for this classification
+ * type; its quality depends on the training data, but no metrics
+ * are provided to describe that quality.
+ *
+ * AutoML Video Intelligence Object Tracking
+ *
+ * `score_threshold`
+ * : (float) When the model detects objects in video frames,
+ * it will only produce bounding boxes that have at least this
+ * confidence score. Value in 0 to 1 range; default is 0.5.
+ *
+ * `max_bounding_box_count`
+ * : (int64) The maximum number of bounding
+ * boxes returned per video frame. The default is 100; the
+ * number of bounding boxes actually returned might be limited by the server.
+ *
+ * `min_bounding_box_size`
+ * : (float) Only bounding boxes whose shortest edge is
+ * at least this long, as a relative value of the video frame size,
+ * are returned. Value in 0 to 1 range. Default is 0.
* @param {Object} [options]
 * Optional parameters. You can override the default settings for this call, e.g., timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
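
For context on how the documented `params` map is consumed, the following is a minimal Node.js sketch (not part of this diff) of a `batchPredict` call that sets the AutoML Vision Object Detection options described in the hunk above. The project, location, model ID, and Cloud Storage URIs are placeholders, and every `params` value is passed as a string, matching the generated client's map<string, string> field.

// Minimal sketch of a batchPredict call using the params documented above.
// All identifiers below (project, model, bucket paths) are placeholders.
const automl = require('@google-cloud/automl');

const client = new automl.v1.PredictionServiceClient();

async function runBatchPredict() {
  const request = {
    // Required. Name of the model requested to serve the batch prediction.
    name: client.modelPath('my-project', 'us-central1', 'my-model-id'),
    inputConfig: {
      gcsSource: {inputUris: ['gs://my-bucket/batch-input.csv']},
    },
    outputConfig: {
      gcsDestination: {outputUriPrefix: 'gs://my-bucket/results/'},
    },
    // Domain-specific parameters; values are strings.
    params: {
      score_threshold: '0.8',        // only return boxes scored >= 0.8
      max_bounding_box_count: '50',  // cap boxes returned per image
    },
  };

  // batchPredict starts a long-running operation; wait for it to finish.
  const [operation] = await client.batchPredict(request);
  const [response] = await operation.promise();
  console.log('Batch prediction finished:', response);
}

runBatchPredict().catch(console.error);

For video models the call shape is identical; only the keys change, for example `segment_classification: 'true'` for classification or `min_bounding_box_size: '0.1'` for object tracking.
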
diff --git a/packages/google-cloud-automl/synth.metadata b/packages/google-cloud-automl/synth.metadata
index 136913d2e52..3ee31e3564b 100644
--- a/packages/google-cloud-automl/synth.metadata
+++ b/packages/google-cloud-automl/synth.metadata
@@ -1,20 +1,20 @@
{
- "updateTime": "2020-03-03T12:14:22.541902Z",
+ "updateTime": "2020-03-05T12:13:17.034526Z",
"sources": [
{
"generator": {
"name": "artman",
- "version": "0.47.0",
- "dockerImage": "googleapis/artman@sha256:b3e50d6b8de03920b9f065bbc3d210e2ca93a043446f1fa16cdf567393c09678"
+ "version": "1.0.0",
+ "dockerImage": "googleapis/artman@sha256:f37f2464788cb551299209b4fcab4eb323533154488c2ef9ec0c75d7c2b4b482"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "4a180bfff8a21645b3a935c2756e8d6ab18a74e0",
- "internalRef": "298484782",
- "log": "4a180bfff8a21645b3a935c2756e8d6ab18a74e0\nautoml/v1beta1 publish proto updates\n\nPiperOrigin-RevId: 298484782\n\n6de6e938b7df1cd62396563a067334abeedb9676\nchore: use the latest gapic-generator and protoc-java-resource-name-plugin in Bazel workspace.\n\nPiperOrigin-RevId: 298474513\n\n244ab2b83a82076a1fa7be63b7e0671af73f5c02\nAdds service config definition for bigqueryreservation v1\n\nPiperOrigin-RevId: 298455048\n\n83c6f84035ee0f80eaa44d8b688a010461cc4080\nUpdate google/api/auth.proto to make AuthProvider to have JwtLocation\n\nPiperOrigin-RevId: 297918498\n\ne9e90a787703ec5d388902e2cb796aaed3a385b4\nDialogflow weekly v2/v2beta1 library update:\n - adding get validation result\n - adding field mask override control for output audio config\nImportant updates are also posted at:\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 297671458\n\n1a2b05cc3541a5f7714529c665aecc3ea042c646\nAdding .yaml and .json config files.\n\nPiperOrigin-RevId: 297570622\n\ndfe1cf7be44dee31d78f78e485d8c95430981d6e\nPublish `QueryOptions` proto.\n\nIntroduced a `query_options` input in `ExecuteSqlRequest`.\n\nPiperOrigin-RevId: 297497710\n\ndafc905f71e5d46f500b41ed715aad585be062c3\npubsub: revert pull init_rpc_timeout & max_rpc_timeout back to 25 seconds and reset multiplier to 1.0\n\nPiperOrigin-RevId: 297486523\n\nf077632ba7fee588922d9e8717ee272039be126d\nfirestore: add update_transform\n\nPiperOrigin-RevId: 297405063\n\n0aba1900ffef672ec5f0da677cf590ee5686e13b\ncluster: use square brace for cross-reference\n\nPiperOrigin-RevId: 297204568\n\n5dac2da18f6325cbaed54603c43f0667ecd50247\nRestore retry params in gapic config because securitycenter has non-standard default retry params.\nRestore a few retry codes for some idempotent methods.\n\nPiperOrigin-RevId: 297196720\n\n1eb61455530252bba8b2c8d4bc9832960e5a56f6\npubsub: v1 replace IAM HTTP rules\n\nPiperOrigin-RevId: 297188590\n\n80b2d25f8d43d9d47024ff06ead7f7166548a7ba\nDialogflow weekly v2/v2beta1 library update:\n - updates to mega agent api\n - adding field mask override control for output audio config\nImportant updates are also posted at:\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 297187629\n\n0b1876b35e98f560f9c9ca9797955f020238a092\nUse an older version of protoc-docs-plugin that is compatible with the specified gapic-generator and protobuf versions.\n\nprotoc-docs-plugin >=0.4.0 (see commit https://github.com/googleapis/protoc-docs-plugin/commit/979f03ede6678c487337f3d7e88bae58df5207af) is incompatible with protobuf 3.9.1.\n\nPiperOrigin-RevId: 296986742\n\n1e47e676cddbbd8d93f19ba0665af15b5532417e\nFix: Restore a method signature for UpdateCluster\n\nPiperOrigin-RevId: 296901854\n\n7f910bcc4fc4704947ccfd3ceed015d16b9e00c2\nUpdate Dataproc v1beta2 client.\n\nPiperOrigin-RevId: 296451205\n\nde287524405a3dce124d301634731584fc0432d7\nFix: Reinstate method signatures that had been missed off some RPCs\nFix: Correct resource types for two fields\n\nPiperOrigin-RevId: 296435091\n\ne5bc9566ae057fb4c92f8b7e047f1c8958235b53\nDeprecate the endpoint_uris field, as it is unused.\n\nPiperOrigin-RevId: 296357191\n\n8c12e2b4dca94e12bff9f538bdac29524ff7ef7a\nUpdate Dataproc v1 client.\n\nPiperOrigin-RevId: 296336662\n\n17567c4a1ef0a9b50faa87024d66f8acbb561089\nRemoving erroneous comment, a la https://github.com/googleapis/java-speech/pull/103\n\nPiperOrigin-RevId: 296332968\n\n3eaaaf8626ce5b0c0bc7eee05e143beffa373b01\nAdd BUILD.bazel for v1 
secretmanager.googleapis.com\n\nPiperOrigin-RevId: 296274723\n\ne76149c3d992337f85eeb45643106aacae7ede82\nMove securitycenter v1 to use generate from annotations.\n\nPiperOrigin-RevId: 296266862\n\n203740c78ac69ee07c3bf6be7408048751f618f8\nAdd StackdriverLoggingConfig field to Cloud Tasks v2 API.\n\nPiperOrigin-RevId: 296256388\n\ne4117d5e9ed8bbca28da4a60a94947ca51cb2083\nCreate a Bazel BUILD file for the google.actions.type export.\n\nPiperOrigin-RevId: 296212567\n\na9639a0a9854fd6e1be08bba1ac3897f4f16cb2f\nAdd secretmanager.googleapis.com v1 protos\n\nPiperOrigin-RevId: 295983266\n\nce4f4c21d9dd2bfab18873a80449b9d9851efde8\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295861722\n\ncb61d6c2d070b589980c779b68ffca617f789116\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295855449\n\nab2685d8d3a0e191dc8aef83df36773c07cb3d06\nfix: Dataproc v1 - AutoscalingPolicy annotation\n\nThis adds the second resource name pattern to the\nAutoscalingPolicy resource.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 295738415\n\n8a1020bf6828f6e3c84c3014f2c51cb62b739140\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 295286165\n\n5cfa105206e77670369e4b2225597386aba32985\nAdd service control related proto build rule.\n\nPiperOrigin-RevId: 295262088\n\nee4dddf805072004ab19ac94df2ce669046eec26\nmonitoring v3: Add prefix \"https://cloud.google.com/\" into the link for global access\ncl 295167522, get ride of synth.py hacks\n\nPiperOrigin-RevId: 295238095\n\nd9835e922ea79eed8497db270d2f9f85099a519c\nUpdate some minor docs changes about user event proto\n\nPiperOrigin-RevId: 295185610\n\n5f311e416e69c170243de722023b22f3df89ec1c\nfix: use correct PHP package name in gapic configuration\n\nPiperOrigin-RevId: 295161330\n\n6cdd74dcdb071694da6a6b5a206e3a320b62dd11\npubsub: v1 add client config annotations and retry config\n\nPiperOrigin-RevId: 295158776\n\n5169f46d9f792e2934d9fa25c36d0515b4fd0024\nAdded cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 295026522\n\n56b55aa8818cd0a532a7d779f6ef337ba809ccbd\nFix: Resource annotations for CreateTimeSeriesRequest and ListTimeSeriesRequest should refer to valid resources. TimeSeries is not a named resource.\n\nPiperOrigin-RevId: 294931650\n\n0646bc775203077226c2c34d3e4d50cc4ec53660\nRemove unnecessary languages from bigquery-related artman configuration files.\n\nPiperOrigin-RevId: 294809380\n\n8b78aa04382e3d4147112ad6d344666771bb1909\nUpdate backend.proto for schemes and protocol\n\nPiperOrigin-RevId: 294788800\n\n80b8f8b3de2359831295e24e5238641a38d8488f\nAdds artman config files for bigquerystorage endpoints v1beta2, v1alpha2, v1\n\nPiperOrigin-RevId: 294763931\n\n2c17ac33b226194041155bb5340c3f34733f1b3a\nAdd parameter to sample generated for UpdateInstance. 
Related to https://github.com/googleapis/python-redis/issues/4\n\nPiperOrigin-RevId: 294734008\n\nd5e8a8953f2acdfe96fb15e85eb2f33739623957\nMove bigquery datatransfer to gapic v2.\n\nPiperOrigin-RevId: 294703703\n\nefd36705972cfcd7d00ab4c6dfa1135bafacd4ae\nfix: Add two annotations that we missed.\n\nPiperOrigin-RevId: 294664231\n\n8a36b928873ff9c05b43859b9d4ea14cd205df57\nFix: Define the \"bigquery.googleapis.com/Table\" resource in the BigQuery Storage API (v1beta2).\n\nPiperOrigin-RevId: 294459768\n\nc7a3caa2c40c49f034a3c11079dd90eb24987047\nFix: Define the \"bigquery.googleapis.com/Table\" resource in the BigQuery Storage API (v1).\n\nPiperOrigin-RevId: 294456889\n\n5006247aa157e59118833658084345ee59af7c09\nFix: Make deprecated fields optional\nFix: Deprecate SetLoggingServiceRequest.zone in line with the comments\nFeature: Add resource name method signatures where appropriate\n\nPiperOrigin-RevId: 294383128\n\neabba40dac05c5cbe0fca3a35761b17e372036c4\nFix: C# and PHP package/namespace capitalization for BigQuery Storage v1.\n\nPiperOrigin-RevId: 294382444\n\nf8d9a858a7a55eba8009a23aa3f5cc5fe5e88dde\nfix: artman configuration file for bigtable-admin\n\nPiperOrigin-RevId: 294322616\n\n0f29555d1cfcf96add5c0b16b089235afbe9b1a9\nAPI definition for (not-yet-launched) GCS gRPC.\n\nPiperOrigin-RevId: 294321472\n\nfcc86bee0e84dc11e9abbff8d7c3529c0626f390\nfix: Bigtable Admin v2\n\nChange LRO metadata from PartialUpdateInstanceMetadata\nto UpdateInstanceMetadata. (Otherwise, it will not build.)\n\nPiperOrigin-RevId: 294264582\n\n6d9361eae2ebb3f42d8c7ce5baf4bab966fee7c0\nrefactor: Add annotations to Bigtable Admin v2.\n\nPiperOrigin-RevId: 294243406\n\nad7616f3fc8e123451c8b3a7987bc91cea9e6913\nFix: Resource type in CreateLogMetricRequest should use logging.googleapis.com.\nFix: ListLogEntries should have a method signature for convenience of calling it.\n\nPiperOrigin-RevId: 294222165\n\n63796fcbb08712676069e20a3e455c9f7aa21026\nFix: Remove extraneous resource definition for cloudkms.googleapis.com/CryptoKey.\n\nPiperOrigin-RevId: 294176658\n\ne7d8a694f4559201e6913f6610069cb08b39274e\nDepend on the latest gapic-generator and resource names plugin.\n\nThis fixes the very old an very annoying bug: https://github.com/googleapis/gapic-generator/pull/3087\n\nPiperOrigin-RevId: 293903652\n\n806b2854a966d55374ee26bb0cef4e30eda17b58\nfix: correct capitalization of Ruby namespaces in SecurityCenter V1p1beta1\n\nPiperOrigin-RevId: 293903613\n\n1b83c92462b14d67a7644e2980f723112472e03a\nPublish annotations and grpc service config for Logging API.\n\nPiperOrigin-RevId: 293893514\n\ne46f761cd6ec15a9e3d5ed4ff321a4bcba8e8585\nGenerate the Bazel build file for recommendengine public api\n\nPiperOrigin-RevId: 293710856\n\n68477017c4173c98addac0373950c6aa9d7b375f\nMake `language_code` optional for UpdateIntentRequest and BatchUpdateIntentsRequest.\n\nThe comments and proto annotations describe this parameter as optional.\n\nPiperOrigin-RevId: 293703548\n\n16f823f578bca4e845a19b88bb9bc5870ea71ab2\nAdd BUILD.bazel files for managedidentities API\n\nPiperOrigin-RevId: 293698246\n\n2f53fd8178c9a9de4ad10fae8dd17a7ba36133f2\nAdd v1p1beta1 config file\n\nPiperOrigin-RevId: 293696729\n\n052b274138fce2be80f97b6dcb83ab343c7c8812\nAdd source field for user event and add field behavior annotations\n\nPiperOrigin-RevId: 293693115\n\n1e89732b2d69151b1b3418fff3d4cc0434f0dded\ndatacatalog: v1beta1 add three new RPCs to gapic v1beta1 config\n\nPiperOrigin-RevId: 
293692823\n\n9c8bd09bbdc7c4160a44f1fbab279b73cd7a2337\nchange the name of AccessApproval service to AccessApprovalAdmin\n\nPiperOrigin-RevId: 293690934\n\n2e23b8fbc45f5d9e200572ca662fe1271bcd6760\nAdd ListEntryGroups method, add http bindings to support entry group tagging, and update some comments.\n\nPiperOrigin-RevId: 293666452\n\n0275e38a4ca03a13d3f47a9613aac8c8b0d3f1f2\nAdd proto_package field to managedidentities API. It is needed for APIs that still depend on artman generation.\n\nPiperOrigin-RevId: 293643323\n\n4cdfe8278cb6f308106580d70648001c9146e759\nRegenerating public protos for Data Catalog to add new Custom Type Entry feature.\n\nPiperOrigin-RevId: 293614782\n\n45d2a569ab526a1fad3720f95eefb1c7330eaada\nEnable client generation for v1 ManagedIdentities API.\n\nPiperOrigin-RevId: 293515675\n\n2c17086b77e6f3bcf04a1f65758dfb0c3da1568f\nAdd the Actions on Google common types (//google/actions/type/*).\n\nPiperOrigin-RevId: 293478245\n\n781aadb932e64a12fb6ead7cd842698d99588433\nDialogflow weekly v2/v2beta1 library update:\n- Documentation updates\nImportant updates are also posted at\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 293443396\n\ne2602608c9138c2fca24162720e67f9307c30b95\nDialogflow weekly v2/v2beta1 library update:\n- Documentation updates\nImportant updates are also posted at\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 293442964\n\nc8aef82028d06b7992278fa9294c18570dc86c3d\nAdd cc_proto_library and cc_grpc_library targets for Bigtable protos.\n\nAlso fix indentation of cc_grpc_library targets in Spanner and IAM protos.\n\nPiperOrigin-RevId: 293440538\n\ne2faab04f4cb7f9755072330866689b1943a16e9\ncloudtasks: v2 replace non-standard retry params in gapic config v2\n\nPiperOrigin-RevId: 293424055\n\ndfb4097ea628a8470292c6590a4313aee0c675bd\nerrorreporting: v1beta1 add legacy artman config for php\n\nPiperOrigin-RevId: 293423790\n\nb18aed55b45bfe5b62476292c72759e6c3e573c6\nasset: v1p1beta1 updated comment for `page_size` limit.\n\nPiperOrigin-RevId: 293421386\n\nc9ef36b7956d9859a2fc86ad35fcaa16958ab44f\nbazel: Refactor CI build scripts\n\nPiperOrigin-RevId: 293387911\n\na8ed9d921fdddc61d8467bfd7c1668f0ad90435c\nfix: set Ruby module name for OrgPolicy\n\nPiperOrigin-RevId: 293257997\n\n6c7d28509bd8315de8af0889688ee20099594269\nredis: v1beta1 add UpgradeInstance and connect_mode field to Instance\n\nPiperOrigin-RevId: 293242878\n\nae0abed4fcb4c21f5cb67a82349a049524c4ef68\nredis: v1 add connect_mode field to Instance\n\nPiperOrigin-RevId: 293241914\n\n3f7a0d29b28ee9365771da2b66edf7fa2b4e9c56\nAdds service config definition for bigqueryreservation v1beta1\n\nPiperOrigin-RevId: 293234418\n\n0c88168d5ed6fe353a8cf8cbdc6bf084f6bb66a5\naddition of BUILD & configuration for accessapproval v1\n\nPiperOrigin-RevId: 293219198\n\n39bedc2e30f4778ce81193f6ba1fec56107bcfc4\naccessapproval: v1 publish protos\n\nPiperOrigin-RevId: 293167048\n\n69d9945330a5721cd679f17331a78850e2618226\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080182\n\nf6a1a6b417f39694275ca286110bc3c1ca4db0dc\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080178\n\n29d40b78e3dc1579b0b209463fbcb76e5767f72a\nExpose managedidentities/v1beta1/ API for client library usage.\n\nPiperOrigin-RevId: 292979741\n\na22129a1fb6e18056d576dfb7717aef74b63734a\nExpose managedidentities/v1/ API for client library usage.\n\nPiperOrigin-RevId: 292968186\n\nb5cbe4a4ba64ab19e6627573ff52057a1657773d\nSecurityCenter v1p1beta1: move 
file-level option on top to workaround protobuf.js bug.\n\nPiperOrigin-RevId: 292647187\n\nb224b317bf20c6a4fbc5030b4a969c3147f27ad3\nAdds API definitions for bigqueryreservation v1beta1.\n\nPiperOrigin-RevId: 292634722\n\nc1468702f9b17e20dd59007c0804a089b83197d2\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 292626173\n\nffdfa4f55ab2f0afc11d0eb68f125ccbd5e404bd\nvision: v1p3beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292605599\n\n78f61482cd028fc1d9892aa5d89d768666a954cd\nvision: v1p1beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292605125\n\n60bb5a294a604fd1778c7ec87b265d13a7106171\nvision: v1p2beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292604980\n\n3bcf7aa79d45eb9ec29ab9036e9359ea325a7fc3\nvision: v1p4beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292604656\n\n2717b8a1c762b26911b45ecc2e4ee01d98401b28\nFix dataproc artman client library generation.\n\nPiperOrigin-RevId: 292555664\n\n7ac66d9be8a7d7de4f13566d8663978c9ee9dcd7\nAdd Dataproc Autoscaling API to V1.\n\nPiperOrigin-RevId: 292450564\n\n5d932b2c1be3a6ef487d094e3cf5c0673d0241dd\n- Improve documentation\n- Add a client_id field to StreamingPullRequest\n\nPiperOrigin-RevId: 292434036\n\neaff9fa8edec3e914995ce832b087039c5417ea7\nmonitoring: v3 publish annotations and client retry config\n\nPiperOrigin-RevId: 292425288\n\n70958bab8c5353870d31a23fb2c40305b050d3fe\nBigQuery Storage Read API v1 clients.\n\nPiperOrigin-RevId: 292407644\n\n7a15e7fe78ff4b6d5c9606a3264559e5bde341d1\nUpdate backend proto for Google Cloud Endpoints\n\nPiperOrigin-RevId: 292391607\n\n3ca2c014e24eb5111c8e7248b1e1eb833977c83d\nbazel: Add --flaky_test_attempts=3 argument to prevent CI failures caused by flaky tests\n\nPiperOrigin-RevId: 292382559\n\n9933347c1f677e81e19a844c2ef95bfceaf694fe\nbazel:Integrate latest protoc-java-resource-names-plugin changes (fix for PyYAML dependency in bazel rules)\n\nPiperOrigin-RevId: 292376626\n\nb835ab9d2f62c88561392aa26074c0b849fb0bd3\nasset: v1p2beta1 add client config annotations\n\n* remove unintentionally exposed RPCs\n* remove messages relevant to removed RPCs\n\nPiperOrigin-RevId: 292369593\n\nc1246a29e22b0f98e800a536b5b0da2d933a55f2\nUpdating v1 protos with the latest inline documentation (in comments) and config options. 
Also adding a per-service .yaml file.\n\nPiperOrigin-RevId: 292310790\n\nb491d07cadaae7cde5608321f913e5ca1459b32d\nRevert accidental local_repository change\n\nPiperOrigin-RevId: 292245373\n\naf3400a8cb6110025198b59a0f7d018ae3cda700\nUpdate gapic-generator dependency (prebuilt PHP binary support).\n\nPiperOrigin-RevId: 292243997\n\n341fd5690fae36f36cf626ef048fbcf4bbe7cee6\ngrafeas: v1 add resource_definition for the grafeas.io/Project and change references for Project.\n\nPiperOrigin-RevId: 292221998\n\n42e915ec2ece1cd37a590fbcd10aa2c0fb0e5b06\nUpdate the gapic-generator, protoc-java-resource-name-plugin and protoc-docs-plugin to the latest commit.\n\nPiperOrigin-RevId: 292182368\n\nf035f47250675d31492a09f4a7586cfa395520a7\nFix grafeas build and update build.sh script to include gerafeas.\n\nPiperOrigin-RevId: 292168753\n\n26ccb214b7bc4a716032a6266bcb0a9ca55d6dbb\nasset: v1p1beta1 add client config annotations and retry config\n\nPiperOrigin-RevId: 292154210\n\n974ee5c0b5d03e81a50dafcedf41e0efebb5b749\nasset: v1beta1 add client config annotations\n\nPiperOrigin-RevId: 292152573\n\ncf3b61102ed5f36b827bc82ec39be09525f018c8\n Fix to protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 292034635\n\n4e1cfaa7c0fede9e65d64213ca3da1b1255816c0\nUpdate the public proto to support UTF-8 encoded id for CatalogService API, increase the ListCatalogItems deadline to 300s and some minor documentation change\n\nPiperOrigin-RevId: 292030970\n\n9c483584f8fd5a1b862ae07973f4cc7bb3e46648\nasset: add annotations to v1p1beta1\n\nPiperOrigin-RevId: 292009868\n\ne19209fac29731d0baf6d9ac23da1164f7bdca24\nAdd the google.rpc.context.AttributeContext message to the open source\ndirectories.\n\nPiperOrigin-RevId: 291999930\n\nae5662960573f279502bf98a108a35ba1175e782\noslogin API: move file level option on top of the file to avoid protobuf.js bug.\n\nPiperOrigin-RevId: 291990506\n\neba3897fff7c49ed85d3c47fc96fe96e47f6f684\nAdd cc_proto_library and cc_grpc_library targets for Spanner and IAM protos.\n\nPiperOrigin-RevId: 291988651\n\n"
+ "sha": "638253bf86d1ce1c314108a089b7351440c2f0bf",
+ "internalRef": "298971070",
+ "log": "638253bf86d1ce1c314108a089b7351440c2f0bf\nfix: add java_multiple_files option for automl text_sentiment.proto\n\nPiperOrigin-RevId: 298971070\n\n373d655703bf914fb8b0b1cc4071d772bac0e0d1\nUpdate Recs AI Beta public bazel file\n\nPiperOrigin-RevId: 298961623\n\ndcc5d00fc8a8d8b56f16194d7c682027b2c66a3b\nfix: add java_multiple_files option for automl classification.proto\n\nPiperOrigin-RevId: 298953301\n\na3f791827266f3496a6a5201d58adc4bb265c2a3\nchore: automl/v1 publish annotations and retry config\n\nPiperOrigin-RevId: 298942178\n\n01c681586d8d6dbd60155289b587aee678530bd9\nMark return_immediately in PullRequest deprecated.\n\nPiperOrigin-RevId: 298893281\n\nc9f5e9c4bfed54bbd09227e990e7bded5f90f31c\nRemove out of date documentation for predicate support on the Storage API\n\nPiperOrigin-RevId: 298883309\n\nfd5b3b8238d783b04692a113ffe07c0363f5de0f\ngenerate webrisk v1 proto\n\nPiperOrigin-RevId: 298847934\n\n541b1ded4abadcc38e8178680b0677f65594ea6f\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 298686266\n\nc0d171acecb4f5b0bfd2c4ca34fc54716574e300\n Updated to include the Notification v1 API.\n\nPiperOrigin-RevId: 298652775\n\n2346a9186c0bff2c9cc439f2459d558068637e05\nAdd Service Directory v1beta1 protos and configs\n\nPiperOrigin-RevId: 298625638\n\na78ed801b82a5c6d9c5368e24b1412212e541bb7\nPublishing v3 protos and configs.\n\nPiperOrigin-RevId: 298607357\n\n"
}
},
{