diff --git a/CHANGELOG.md b/CHANGELOG.md index b6515d250cc..5c7269bce3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.35.19 (2020-10-30) +=== + +### Service Client Updates +* `service/braket`: Updates service API and documentation +* `service/dms`: Updates service API and documentation + * Adding DocDbSettings to support DocumentDB as a source. +* `service/elasticache`: Updates service documentation + * Documentation updates for AWS ElastiCache +* `service/imagebuilder`: Updates service API and documentation +* `service/macie2`: Updates service API and documentation +* `service/medialive`: Updates service API and documentation + * Support for HLS discontinuity tags in the child manifests. Support for incomplete segment behavior in the media output. Support for automatic input failover condition settings. +* `service/sns`: Updates service documentation + * Documentation updates for Amazon SNS + Release v1.35.18 (2020-10-29) === diff --git a/aws/version.go b/aws/version.go index 44d02a08c6d..3857a0e668d 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.18" +const SDKVersion = "1.35.19" diff --git a/models/apis/braket/2019-09-01/api-2.json b/models/apis/braket/2019-09-01/api-2.json index f03eda762e9..2d7ff5d4d20 100644 --- a/models/apis/braket/2019-09-01/api-2.json +++ b/models/apis/braket/2019-09-01/api-2.json @@ -83,6 +83,21 @@ {"shape":"ValidationException"} ] }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ] + }, "SearchDevices":{ "name":"SearchDevices", "http":{ @@ -114,6 +129,37 @@ 
{"shape":"InternalServiceException"}, {"shape":"ValidationException"} ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "idempotent":true } }, "shapes":{ @@ -160,8 +206,8 @@ "CancellationStatus":{ "type":"string", "enum":[ - "CANCELLED", - "CANCELLING" + "CANCELLING", + "CANCELLED" ] }, "ConflictException":{ @@ -196,30 +242,31 @@ }, "deviceArn":{"shape":"DeviceArn"}, "deviceParameters":{ - "shape":"CreateQuantumTaskRequestdeviceParametersJsonValue", + "shape":"CreateQuantumTaskRequestDeviceParametersString", "jsonvalue":true }, - "outputS3Bucket":{"shape":"CreateQuantumTaskRequestoutputS3BucketString"}, - "outputS3KeyPrefix":{"shape":"CreateQuantumTaskRequestoutputS3KeyPrefixString"}, - "shots":{"shape":"CreateQuantumTaskRequestshotsLong"} + "outputS3Bucket":{"shape":"CreateQuantumTaskRequestOutputS3BucketString"}, + "outputS3KeyPrefix":{"shape":"CreateQuantumTaskRequestOutputS3KeyPrefixString"}, + "shots":{"shape":"CreateQuantumTaskRequestShotsLong"}, + "tags":{"shape":"TagsMap"} } }, - "CreateQuantumTaskRequestdeviceParametersJsonValue":{ + "CreateQuantumTaskRequestDeviceParametersString":{ "type":"string", "max":2048, "min":1 }, - "CreateQuantumTaskRequestoutputS3BucketString":{ + "CreateQuantumTaskRequestOutputS3BucketString":{ "type":"string", "max":63, "min":3 }, - 
"CreateQuantumTaskRequestoutputS3KeyPrefixString":{ + "CreateQuantumTaskRequestOutputS3KeyPrefixString":{ "type":"string", "max":1024, "min":1 }, - "CreateQuantumTaskRequestshotsLong":{ + "CreateQuantumTaskRequestShotsLong":{ "type":"long", "box":true, "min":0 @@ -250,8 +297,8 @@ "DeviceStatus":{ "type":"string", "enum":[ - "OFFLINE", - "ONLINE" + "ONLINE", + "OFFLINE" ] }, "DeviceSummary":{ @@ -351,7 +398,8 @@ "outputS3Directory":{"shape":"String"}, "quantumTaskArn":{"shape":"QuantumTaskArn"}, "shots":{"shape":"Long"}, - "status":{"shape":"QuantumTaskStatus"} + "status":{"shape":"QuantumTaskStatus"}, + "tags":{"shape":"TagsMap"} } }, "InternalServiceException":{ @@ -364,6 +412,23 @@ "fault":true }, "JsonValue":{"type":"string"}, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{"shape":"TagsMap"} + } + }, "Long":{ "type":"long", "box":true @@ -376,13 +441,13 @@ "QuantumTaskStatus":{ "type":"string", "enum":[ - "CANCELLED", - "CANCELLING", - "COMPLETED", "CREATED", - "FAILED", "QUEUED", - "RUNNING" + "RUNNING", + "COMPLETED", + "FAILED", + "CANCELLING", + "CANCELLED" ] }, "QuantumTaskSummary":{ @@ -404,7 +469,8 @@ "outputS3Directory":{"shape":"String"}, "quantumTaskArn":{"shape":"QuantumTaskArn"}, "shots":{"shape":"Long"}, - "status":{"shape":"QuantumTaskStatus"} + "status":{"shape":"QuantumTaskStatus"}, + "tags":{"shape":"TagsMap"} } }, "QuantumTaskSummaryList":{ @@ -429,16 +495,16 @@ "values" ], "members":{ - "name":{"shape":"SearchDevicesFilternameString"}, - "values":{"shape":"SearchDevicesFiltervaluesString256List"} + "name":{"shape":"SearchDevicesFilterNameString"}, + "values":{"shape":"SearchDevicesFilterValuesList"} } }, - "SearchDevicesFilternameString":{ + "SearchDevicesFilterNameString":{ "type":"string", "max":64, "min":1 }, - 
"SearchDevicesFiltervaluesString256List":{ + "SearchDevicesFilterValuesList":{ "type":"list", "member":{"shape":"String256"}, "max":10, @@ -448,18 +514,18 @@ "type":"structure", "required":["filters"], "members":{ - "filters":{"shape":"SearchDevicesRequestfiltersSearchDevicesFilterList"}, - "maxResults":{"shape":"SearchDevicesRequestmaxResultsInteger"}, + "filters":{"shape":"SearchDevicesRequestFiltersList"}, + "maxResults":{"shape":"SearchDevicesRequestMaxResultsInteger"}, "nextToken":{"shape":"String"} } }, - "SearchDevicesRequestfiltersSearchDevicesFilterList":{ + "SearchDevicesRequestFiltersList":{ "type":"list", "member":{"shape":"SearchDevicesFilter"}, "max":10, "min":0 }, - "SearchDevicesRequestmaxResultsInteger":{ + "SearchDevicesRequestMaxResultsInteger":{ "type":"integer", "box":true, "max":100, @@ -483,21 +549,21 @@ "members":{ "name":{"shape":"String64"}, "operator":{"shape":"SearchQuantumTasksFilterOperator"}, - "values":{"shape":"SearchQuantumTasksFiltervaluesString256List"} + "values":{"shape":"SearchQuantumTasksFilterValuesList"} } }, "SearchQuantumTasksFilterOperator":{ "type":"string", "enum":[ - "BETWEEN", + "LT", + "LTE", "EQUAL", "GT", "GTE", - "LT", - "LTE" + "BETWEEN" ] }, - "SearchQuantumTasksFiltervaluesString256List":{ + "SearchQuantumTasksFilterValuesList":{ "type":"list", "member":{"shape":"String256"}, "max":10, @@ -507,18 +573,18 @@ "type":"structure", "required":["filters"], "members":{ - "filters":{"shape":"SearchQuantumTasksRequestfiltersSearchQuantumTasksFilterList"}, - "maxResults":{"shape":"SearchQuantumTasksRequestmaxResultsInteger"}, + "filters":{"shape":"SearchQuantumTasksRequestFiltersList"}, + "maxResults":{"shape":"SearchQuantumTasksRequestMaxResultsInteger"}, "nextToken":{"shape":"String"} } }, - "SearchQuantumTasksRequestfiltersSearchQuantumTasksFilterList":{ + "SearchQuantumTasksRequestFiltersList":{ "type":"list", "member":{"shape":"SearchQuantumTasksFilter"}, "max":10, "min":0 }, - 
"SearchQuantumTasksRequestmaxResultsInteger":{ + "SearchQuantumTasksRequestMaxResultsInteger":{ "type":"integer", "box":true, "max":100, @@ -558,6 +624,35 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{"shape":"TagsMap"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -569,6 +664,30 @@ }, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/models/apis/braket/2019-09-01/docs-2.json b/models/apis/braket/2019-09-01/docs-2.json index 950abd15758..fe712c6e05a 100644 --- a/models/apis/braket/2019-09-01/docs-2.json +++ b/models/apis/braket/2019-09-01/docs-2.json @@ -6,8 +6,11 @@ "CreateQuantumTask": "

Creates a quantum task.

", "GetDevice": "

Retrieves the devices available in Amazon Braket.

", "GetQuantumTask": "

Retrieves the specified quantum task.

", + "ListTagsForResource": "

Shows the tags associated with this resource.

", "SearchDevices": "

Searches for devices using the specified filters.

", - "SearchQuantumTasks": "

Searches for tasks that match the specified filter values.

" + "SearchQuantumTasks": "

Searches for tasks that match the specified filter values.

", + "TagResource": "

Add a tag to the specified resource.

", + "UntagResource": "

Remove tags from a resource.

" }, "shapes": { "AccessDeniedException": { @@ -41,25 +44,25 @@ "refs": { } }, - "CreateQuantumTaskRequestdeviceParametersJsonValue": { + "CreateQuantumTaskRequestDeviceParametersString": { "base": null, "refs": { "CreateQuantumTaskRequest$deviceParameters": "

The parameters for the device to run the task on.

" } }, - "CreateQuantumTaskRequestoutputS3BucketString": { + "CreateQuantumTaskRequestOutputS3BucketString": { "base": null, "refs": { "CreateQuantumTaskRequest$outputS3Bucket": "

The S3 bucket to store task result files in.

" } }, - "CreateQuantumTaskRequestoutputS3KeyPrefixString": { + "CreateQuantumTaskRequestOutputS3KeyPrefixString": { "base": null, "refs": { "CreateQuantumTaskRequest$outputS3KeyPrefix": "

The key prefix for the location in the S3 bucket to store task results in.

" } }, - "CreateQuantumTaskRequestshotsLong": { + "CreateQuantumTaskRequestShotsLong": { "base": null, "refs": { "CreateQuantumTaskRequest$shots": "

The number of shots to use for the task.

" @@ -133,7 +136,7 @@ } }, "InternalServiceException": { - "base": "

The request processing has failed because of an unknown error, exception or failure.

", + "base": "

The request processing has failed because of an unknown error, exception, or failure.

", "refs": { } }, @@ -145,6 +148,16 @@ "GetQuantumTaskResponse$deviceParameters": "

The parameters for the device on which the task ran.

" } }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, "Long": { "base": null, "refs": { @@ -190,16 +203,16 @@ "SearchDevicesFilter": { "base": "

The filter to use for searching devices.

", "refs": { - "SearchDevicesRequestfiltersSearchDevicesFilterList$member": null + "SearchDevicesRequestFiltersList$member": null } }, - "SearchDevicesFilternameString": { + "SearchDevicesFilterNameString": { "base": null, "refs": { "SearchDevicesFilter$name": "

The name to use to filter results.

" } }, - "SearchDevicesFiltervaluesString256List": { + "SearchDevicesFilterValuesList": { "base": null, "refs": { "SearchDevicesFilter$values": "

The values to use to filter results.

" @@ -210,13 +223,13 @@ "refs": { } }, - "SearchDevicesRequestfiltersSearchDevicesFilterList": { + "SearchDevicesRequestFiltersList": { "base": null, "refs": { "SearchDevicesRequest$filters": "

The filter values to use to search for a device.

" } }, - "SearchDevicesRequestmaxResultsInteger": { + "SearchDevicesRequestMaxResultsInteger": { "base": null, "refs": { "SearchDevicesRequest$maxResults": "

The maximum number of results to return in the response.

" @@ -230,7 +243,7 @@ "SearchQuantumTasksFilter": { "base": "

A filter to use to search for tasks.

", "refs": { - "SearchQuantumTasksRequestfiltersSearchQuantumTasksFilterList$member": null + "SearchQuantumTasksRequestFiltersList$member": null } }, "SearchQuantumTasksFilterOperator": { @@ -239,7 +252,7 @@ "SearchQuantumTasksFilter$operator": "

An operator to use in the filter.

" } }, - "SearchQuantumTasksFiltervaluesString256List": { + "SearchQuantumTasksFilterValuesList": { "base": null, "refs": { "SearchQuantumTasksFilter$values": "

The values to use for the filter.

" @@ -250,13 +263,13 @@ "refs": { } }, - "SearchQuantumTasksRequestfiltersSearchQuantumTasksFilterList": { + "SearchQuantumTasksRequestFiltersList": { "base": null, "refs": { "SearchQuantumTasksRequest$filters": "

Array of SearchQuantumTasksFilter objects.

" } }, - "SearchQuantumTasksRequestmaxResultsInteger": { + "SearchQuantumTasksRequestMaxResultsInteger": { "base": null, "refs": { "SearchQuantumTasksRequest$maxResults": "

Maximum number of results to return in the response.

" @@ -268,7 +281,7 @@ } }, "ServiceQuotaExceededException": { - "base": "

The request failed because a service quota is met.

", + "base": "

The request failed because a service quota is exceeded.

", "refs": { } }, @@ -286,6 +299,7 @@ "GetQuantumTaskResponse$outputS3Bucket": "

The S3 bucket where task results are stored.

", "GetQuantumTaskResponse$outputS3Directory": "

The folder in the S3 bucket where task results are stored.

", "InternalServiceException$message": null, + "ListTagsForResourceRequest$resourceArn": "

Specify the resourceArn for the resource whose tags to display.

", "QuantumTaskSummary$outputS3Bucket": "

The S3 bucket where the task result file is stored..

", "QuantumTaskSummary$outputS3Directory": "

The folder in the S3 bucket where the task result file is stored.

", "ResourceNotFoundException$message": null, @@ -294,15 +308,20 @@ "SearchQuantumTasksRequest$nextToken": "

A token used for pagination of results returned in the response. Use the token returned from the previous request continue results where the previous request ended.

", "SearchQuantumTasksResponse$nextToken": "

A token used for pagination of results, or null if there are no additional results. Use the token value in a subsequent request to continue results where the previous request ended.

", "ServiceQuotaExceededException$message": null, + "TagKeys$member": null, + "TagResourceRequest$resourceArn": "

Specify the resourceArn of the resource to which a tag will be added.

", + "TagsMap$key": null, + "TagsMap$value": null, "ThrottlingException$message": null, + "UntagResourceRequest$resourceArn": "

Specify the resourceArn for the resource from which to remove the tags.

", "ValidationException$message": null } }, "String256": { "base": null, "refs": { - "SearchDevicesFiltervaluesString256List$member": null, - "SearchQuantumTasksFiltervaluesString256List$member": null + "SearchDevicesFilterValuesList$member": null, + "SearchQuantumTasksFilterValuesList$member": null } }, "String64": { @@ -322,11 +341,47 @@ "QuantumTaskSummary$endedAt": "

The time at which the task finished.

" } }, + "TagKeys": { + "base": null, + "refs": { + "UntagResourceRequest$tagKeys": "

Specify the keys for the tags to remove from the resource.

" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagsMap": { + "base": null, + "refs": { + "CreateQuantumTaskRequest$tags": "

Tags to be added to the quantum task you're creating.

", + "GetQuantumTaskResponse$tags": "

The tags that belong to this task.

", + "ListTagsForResourceResponse$tags": "

Displays the key, value pairs of tags associated with this resource.

", + "QuantumTaskSummary$tags": "

Displays the key, value pairs of tags associated with this quantum task.

", + "TagResourceRequest$tags": "

Specify the tags to add to the resource.

" + } + }, "ThrottlingException": { "base": "

The throttling rate limit is met.

", "refs": { } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, "ValidationException": { "base": "

The input fails to satisfy the constraints specified by an AWS service.

", "refs": { diff --git a/models/apis/dms/2016-01-01/api-2.json b/models/apis/dms/2016-01-01/api-2.json index 21b6fd77175..50b9382231d 100644 --- a/models/apis/dms/2016-01-01/api-2.json +++ b/models/apis/dms/2016-01-01/api-2.json @@ -919,7 +919,8 @@ "SybaseSettings":{"shape":"SybaseSettings"}, "MicrosoftSQLServerSettings":{"shape":"MicrosoftSQLServerSettings"}, "IBMDb2Settings":{"shape":"IBMDb2Settings"}, - "ResourceIdentifier":{"shape":"String"} + "ResourceIdentifier":{"shape":"String"}, + "DocDbSettings":{"shape":"DocDbSettings"} } }, "CreateEndpointResponse":{ @@ -1508,6 +1509,20 @@ "BucketName":{"shape":"String"} } }, + "DocDbSettings":{ + "type":"structure", + "members":{ + "Username":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"}, + "NestingLevel":{"shape":"NestingLevelValue"}, + "ExtractDocId":{"shape":"BooleanOptional"}, + "DocsToInvestigate":{"shape":"IntegerOptional"}, + "KmsKeyId":{"shape":"String"} + } + }, "DynamoDbSettings":{ "type":"structure", "required":["ServiceAccessRoleArn"], @@ -1577,7 +1592,8 @@ "OracleSettings":{"shape":"OracleSettings"}, "SybaseSettings":{"shape":"SybaseSettings"}, "MicrosoftSQLServerSettings":{"shape":"MicrosoftSQLServerSettings"}, - "IBMDb2Settings":{"shape":"IBMDb2Settings"} + "IBMDb2Settings":{"shape":"IBMDb2Settings"}, + "DocDbSettings":{"shape":"DocDbSettings"} } }, "EndpointList":{ @@ -1880,7 +1896,8 @@ "OracleSettings":{"shape":"OracleSettings"}, "SybaseSettings":{"shape":"SybaseSettings"}, "MicrosoftSQLServerSettings":{"shape":"MicrosoftSQLServerSettings"}, - "IBMDb2Settings":{"shape":"IBMDb2Settings"} + "IBMDb2Settings":{"shape":"IBMDb2Settings"}, + "DocDbSettings":{"shape":"DocDbSettings"} } }, "ModifyEndpointResponse":{ diff --git a/models/apis/dms/2016-01-01/docs-2.json b/models/apis/dms/2016-01-01/docs-2.json index d79810f5790..bfc8133cfb4 100644 --- a/models/apis/dms/2016-01-01/docs-2.json 
+++ b/models/apis/dms/2016-01-01/docs-2.json @@ -138,6 +138,7 @@ "CreateReplicationInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance window. This parameter defaults to true.

Default: true

", "CreateReplicationInstanceMessage$PubliclyAccessible": "

Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true.

", "DescribeReplicationTasksMessage$WithoutSettings": "

An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. To use this option, choose true; otherwise, choose false (the default).

", + "DocDbSettings$ExtractDocId": "

Specifies the document ID. Use this setting when NestingLevel is set to \"none\".

Default value is \"false\".

", "IBMDb2Settings$SetDataCaptureChanges": "

Enables ongoing replication (CDC) as a BOOLEAN value. The default is true.

", "KafkaSettings$IncludeTransactionDetails": "

Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). The default is false.

", "KafkaSettings$IncludePartitionValue": "

Shows the partition value within the Kafka message output, unless the partition type is schema-table-type. The default is false.

", @@ -185,7 +186,7 @@ "S3Settings$CdcInsertsOnly": "

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide..

AWS DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

", "S3Settings$ParquetTimestampInMillisecond": "

A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file in .parquet format.

AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.

When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.

Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data with Athena or AWS Glue.

AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.

Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is inserted by setting the TimestampColumnName parameter.

", "S3Settings$CdcInsertsAndUpdates": "

A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.

For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide..

AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

", - "S3Settings$DatePartitionEnabled": "

When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false. For more information about date-based folder partitoning, see Using date-based folder partitioning

", + "S3Settings$DatePartitionEnabled": "

When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false. For more information about date-based folder partitioning, see Using date-based folder partitioning.

", "TableStatistics$FullLoadReloaded": "

A value that indicates if the table was reloaded (true) or loaded as part of a new full load operation (false).

" } }, @@ -620,6 +621,14 @@ "ModifyEndpointMessage$DmsTransferSettings": "

The settings in JSON format for the DMS transfer type of source endpoint.

Attributes include the following:

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" } }, + "DocDbSettings": { + "base": "

Provides information that defines a DocumentDB endpoint.

", + "refs": { + "CreateEndpointMessage$DocDbSettings": null, + "Endpoint$DocDbSettings": null, + "ModifyEndpointMessage$DocDbSettings": "

Settings in JSON format for the source DocumentDB endpoint. For more information about the available settings, see the configuration properties section in Using DocumentDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + } + }, "DynamoDbSettings": { "base": "

Provides the Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role used to define an Amazon DynamoDB target endpoint.

", "refs": { @@ -856,6 +865,8 @@ "DescribeReplicationTasksMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeSchemasMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeTableStatisticsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 500.

", + "DocDbSettings$Port": "

The port value for the DocumentDB source endpoint.

", + "DocDbSettings$DocsToInvestigate": "

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to \"one\".

Must be a positive value greater than 0. Default value is 1000.

", "ElasticsearchSettings$FullLoadErrorPercentage": "

The maximum percentage of records that can fail to be written before a full load operation stops.

To avoid early failure, this counter is only effective after 1000 records are transferred. Elasticsearch also has the concept of error monitoring during the last 10 minutes of an Observation Window. If transfer of all records fail in the last 10 minutes, the full load operation stops.

", "ElasticsearchSettings$ErrorRetryDuration": "

The maximum number of seconds for which DMS retries failed API requests to the Elasticsearch cluster.

", "Endpoint$Port": "

The port value used to access the endpoint.

", @@ -1099,6 +1110,7 @@ "NestingLevelValue": { "base": null, "refs": { + "DocDbSettings$NestingLevel": "

Specifies either document or table mode.

Default value is \"none\". Specify \"none\" to use document mode. Specify \"one\" to use table mode.

", "MongoDbSettings$NestingLevel": "

Specifies either document or table mode.

Default value is \"none\". Specify \"none\" to use document mode. Specify \"one\" to use table mode.

" } }, @@ -1443,6 +1455,7 @@ "base": null, "refs": { "CreateEndpointMessage$Password": "

The password to be used to log in to the endpoint database.

", + "DocDbSettings$Password": "

The password for the user account you use to access the DocumentDB source endpoint.

", "IBMDb2Settings$Password": "

Endpoint connection password.

", "MicrosoftSQLServerSettings$Password": "

Endpoint connection password.

", "ModifyEndpointMessage$Password": "

The password to be used to login to the endpoint database.

", @@ -1642,6 +1655,10 @@ "DescribeTableStatisticsResponse$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DmsTransferSettings$ServiceAccessRoleArn": "

The IAM role that has permission to access the Amazon S3 bucket.

", "DmsTransferSettings$BucketName": "

The name of the S3 bucket to use.

", + "DocDbSettings$Username": "

The user name you use to access the DocumentDB source endpoint.

", + "DocDbSettings$ServerName": "

The name of the server on the DocumentDB source endpoint.

", + "DocDbSettings$DatabaseName": "

The database name on the DocumentDB source endpoint.

", + "DocDbSettings$KmsKeyId": "

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

", "DynamoDbSettings$ServiceAccessRoleArn": "

The Amazon Resource Name (ARN) used by the service access IAM role.

", "ElasticsearchSettings$ServiceAccessRoleArn": "

The Amazon Resource Name (ARN) used by service to access the IAM role.

", "ElasticsearchSettings$EndpointUri": "

The endpoint for the Elasticsearch cluster. AWS DMS uses HTTPS if a transport protocol (http/https) is not specified.

", diff --git a/models/apis/elasticache/2015-02-02/docs-2.json b/models/apis/elasticache/2015-02-02/docs-2.json index cf3e8301fe8..2cda2468edc 100644 --- a/models/apis/elasticache/2015-02-02/docs-2.json +++ b/models/apis/elasticache/2015-02-02/docs-2.json @@ -15,8 +15,8 @@ "CreateGlobalReplicationGroup": "

Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.

", "CreateReplicationGroup": "

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global Datastore.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' scaling. For more information, see Scaling ElastiCache for Redis Clusters in the ElastiCache User Guide.

This operation is valid for Redis only.

", "CreateSnapshot": "

Creates a copy of an entire cluster or replication group at a specific moment in time.

This operation is valid for Redis only.

", - "CreateUser": "

For Redis engine version 6.04 onwards: Creates a Redis user. For more information, see Using Role Based Access Control (RBAC).

", - "CreateUserGroup": "

For Redis engine version 6.04 onwards: Creates a Redis user group. For more information, see Using Role Based Access Control (RBAC)

", + "CreateUser": "

For Redis engine version 6.x onwards: Creates a Redis user. For more information, see Using Role Based Access Control (RBAC).

", + "CreateUserGroup": "

For Redis engine version 6.x onwards: Creates a Redis user group. For more information, see Using Role Based Access Control (RBAC)

", "DecreaseNodeGroupsInGlobalReplicationGroup": "

Decreases the number of node groups in a Global Datastore

", "DecreaseReplicaCount": "

Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.

", "DeleteCacheCluster": "

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation.

This operation is not valid for:

", @@ -66,7 +66,7 @@ "ResetCacheParameterGroup": "

Modifies the parameters of a cache parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire cache parameter group, specify the ResetAllParameters and CacheParameterGroupName parameters.

", "RevokeCacheSecurityGroupIngress": "

Revokes ingress from a cache security group. Use this operation to disallow access from an Amazon EC2 security group that had been previously authorized.

", "StartMigration": "

Start the migration of data.

", - "TestFailover": "

Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" + "TestFailover": "

Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" }, "shapes": { "APICallRateForCustomerExceededFault": { @@ -312,7 +312,7 @@ } }, "CacheNode": { - "base": "

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "base": "

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "refs": { "CacheNodeList$member": null } @@ -1901,7 +1901,7 @@ "BatchStopUpdateActionMessage$ServiceUpdateName": "

The unique ID of the service update

", "CacheCluster$CacheClusterId": "

The user-supplied identifier of the cluster. This identifier is a unique key that identifies a cluster.

", "CacheCluster$ClientDownloadLandingPage": "

The URL of the web page where you can download the latest ElastiCache client library.

", - "CacheCluster$CacheNodeType": "

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "CacheCluster$CacheNodeType": "

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "CacheCluster$Engine": "

The name of the cache engine (memcached or redis) to be used for this cluster.

", "CacheCluster$EngineVersion": "

The version of the cache engine that is used in this cluster.

", "CacheCluster$CacheClusterStatus": "

The current state of this cluster, one of the following values: available, creating, deleted, deleting, incompatible-network, modifying, rebooting cluster nodes, restore-failed, or snapshotting.

", @@ -1916,7 +1916,7 @@ "CacheClusterMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", "CacheEngineVersion$Engine": "

The name of the cache engine.

", "CacheEngineVersion$EngineVersion": "

The version number of the cache engine.

", - "CacheEngineVersion$CacheParameterGroupFamily": "

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

", + "CacheEngineVersion$CacheParameterGroupFamily": "

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 |

", "CacheEngineVersion$CacheEngineDescription": "

The description of the cache engine.

", "CacheEngineVersion$CacheEngineVersionDescription": "

The description of the cache engine version.

", "CacheEngineVersionMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", @@ -1937,7 +1937,7 @@ "CacheNodeTypeSpecificValue$Value": "

The value for the cache node type.

", "CacheNodeUpdateStatus$CacheNodeId": "

The node ID of the cache cluster

", "CacheParameterGroup$CacheParameterGroupName": "

The name of the cache parameter group.

", - "CacheParameterGroup$CacheParameterGroupFamily": "

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

", + "CacheParameterGroup$CacheParameterGroupFamily": "

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 |

", "CacheParameterGroup$Description": "

The description for this cache parameter group.

", "CacheParameterGroup$ARN": "

The ARN (Amazon Resource Name) of the cache parameter group.

", "CacheParameterGroupDetails$Marker": "

Provides an identifier to allow retrieval of paginated results.

", @@ -1962,12 +1962,12 @@ "CompleteMigrationMessage$ReplicationGroupId": "

The ID of the replication group to which data is being migrated.

", "CopySnapshotMessage$SourceSnapshotName": "

The name of an existing snapshot from which to make a copy.

", "CopySnapshotMessage$TargetSnapshotName": "

A name for the snapshot copy. ElastiCache does not permit overwriting a snapshot, therefore this name must be unique within its context - ElastiCache or an Amazon S3 bucket if exporting.

", - "CopySnapshotMessage$TargetBucket": "

The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.

When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.

", + "CopySnapshotMessage$TargetBucket": "

The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.

When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.

", "CopySnapshotMessage$KmsKeyId": "

The ID of the KMS key used to encrypt the target snapshot.

", "CreateCacheClusterMessage$CacheClusterId": "

The node group (shard) identifier. This parameter is stored as a lowercase string.

Constraints:

", "CreateCacheClusterMessage$ReplicationGroupId": "

The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.

If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.

This parameter is only valid if the Engine parameter is redis.

", "CreateCacheClusterMessage$PreferredAvailabilityZone": "

The EC2 Availability Zone in which the cluster is created.

All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones.

Default: System chosen Availability Zone.

", - "CreateCacheClusterMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "CreateCacheClusterMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "CreateCacheClusterMessage$Engine": "

The name of the cache engine to be used for this cluster.

Valid values for this parameter are: memcached | redis

", "CreateCacheClusterMessage$EngineVersion": "

The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

", "CreateCacheClusterMessage$CacheParameterGroupName": "

The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.

", @@ -1979,7 +1979,7 @@ "CreateCacheClusterMessage$AuthToken": "

Reserved parameter. The password used to access a password protected server.

Password constraints:

For more information, see AUTH password at http://redis.io/commands/AUTH.

", "CreateCacheClusterMessage$PreferredOutpostArn": "

The outpost ARN in which the cache cluster is created.

", "CreateCacheParameterGroupMessage$CacheParameterGroupName": "

A user-specified name for the cache parameter group.

", - "CreateCacheParameterGroupMessage$CacheParameterGroupFamily": "

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

", + "CreateCacheParameterGroupMessage$CacheParameterGroupFamily": "

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 |

", "CreateCacheParameterGroupMessage$Description": "

A user-specified description for the cache parameter group.

", "CreateCacheSecurityGroupMessage$CacheSecurityGroupName": "

A name for the cache security group. This value is stored as a lowercase string.

Constraints: Must contain no more than 255 alphanumeric characters. Cannot be the word \"Default\".

Example: mysecuritygroup

", "CreateCacheSecurityGroupMessage$Description": "

A description for the cache security group.

", @@ -1992,7 +1992,7 @@ "CreateReplicationGroupMessage$ReplicationGroupDescription": "

A user-created description for the replication group.

", "CreateReplicationGroupMessage$GlobalReplicationGroupId": "

The name of the Global Datastore

", "CreateReplicationGroupMessage$PrimaryClusterId": "

The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of available.

This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified.

", - "CreateReplicationGroupMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "CreateReplicationGroupMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "CreateReplicationGroupMessage$Engine": "

The name of the cache engine to be used for the clusters in this replication group.

", "CreateReplicationGroupMessage$EngineVersion": "

The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

", "CreateReplicationGroupMessage$CacheParameterGroupName": "

The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

If you are restoring to an engine version that is different than the original, you must specify the default version of that version. For example, CacheParameterGroupName=default.redis4.0.

If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.

", @@ -2025,7 +2025,7 @@ "DescribeCacheClustersMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeCacheEngineVersionsMessage$Engine": "

The cache engine to return. Valid values: memcached | redis

", "DescribeCacheEngineVersionsMessage$EngineVersion": "

The cache engine version to return.

Example: 1.4.14

", - "DescribeCacheEngineVersionsMessage$CacheParameterGroupFamily": "

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

Constraints:

", + "DescribeCacheEngineVersionsMessage$CacheParameterGroupFamily": "

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 |

Constraints:

", "DescribeCacheEngineVersionsMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeCacheParameterGroupsMessage$CacheParameterGroupName": "

The name of a specific cache parameter group to return details for.

", "DescribeCacheParameterGroupsMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", @@ -2036,7 +2036,7 @@ "DescribeCacheSecurityGroupsMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeCacheSubnetGroupsMessage$CacheSubnetGroupName": "

The name of the cache subnet group to return details for.

", "DescribeCacheSubnetGroupsMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", - "DescribeEngineDefaultParametersMessage$CacheParameterGroupFamily": "

The name of the cache parameter group family.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

", + "DescribeEngineDefaultParametersMessage$CacheParameterGroupFamily": "

The name of the cache parameter group family.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 |

", "DescribeEngineDefaultParametersMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeEventsMessage$SourceIdentifier": "

The identifier of the event source for which events are returned. If not specified, all sources are included in the response.

", "DescribeEventsMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", @@ -2047,13 +2047,13 @@ "DescribeReplicationGroupsMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeReservedCacheNodesMessage$ReservedCacheNodeId": "

The reserved cache node identifier filter value. Use this parameter to show only the reservation that matches the specified reservation ID.

", "DescribeReservedCacheNodesMessage$ReservedCacheNodesOfferingId": "

The offering identifier filter value. Use this parameter to show only purchased reservations matching the specified offering identifier.

", - "DescribeReservedCacheNodesMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "DescribeReservedCacheNodesMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "DescribeReservedCacheNodesMessage$Duration": "

The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration.

Valid Values: 1 | 3 | 31536000 | 94608000

", "DescribeReservedCacheNodesMessage$ProductDescription": "

The product description filter value. Use this parameter to show only those reservations matching the specified product description.

", - "DescribeReservedCacheNodesMessage$OfferingType": "

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

", + "DescribeReservedCacheNodesMessage$OfferingType": "

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"|\"All Upfront\"|\"Partial Upfront\"| \"No Upfront\"

", "DescribeReservedCacheNodesMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeReservedCacheNodesOfferingsMessage$ReservedCacheNodesOfferingId": "

The offering identifier filter value. Use this parameter to show only the available offering that matches the specified reservation identifier.

Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

", - "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "DescribeReservedCacheNodesOfferingsMessage$Duration": "

Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration.

Valid Values: 1 | 3 | 31536000 | 94608000

", "DescribeReservedCacheNodesOfferingsMessage$ProductDescription": "

The product description filter value. Use this parameter to show only the available offerings matching the specified product description.

", "DescribeReservedCacheNodesOfferingsMessage$OfferingType": "

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid Values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

", @@ -2081,7 +2081,7 @@ "EC2SecurityGroup$EC2SecurityGroupName": "

The name of the Amazon EC2 security group.

", "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

The AWS account ID of the Amazon EC2 security group owner.

", "Endpoint$Address": "

The DNS hostname of the cache node.

", - "EngineDefaults$CacheParameterGroupFamily": "

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

", + "EngineDefaults$CacheParameterGroupFamily": "

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 |

", "EngineDefaults$Marker": "

Provides an identifier to allow retrieval of paginated results.

", "Event$SourceIdentifier": "

The identifier for the source of the event. For example, if the event occurred at the cluster level, the identifier would be the name of the cluster.

", "Event$Message": "

The text of the event.

", @@ -2097,7 +2097,7 @@ "GlobalReplicationGroup$Status": "

The status of the Global Datastore

", "GlobalReplicationGroup$CacheNodeType": "

The cache node type of the Global Datastore

", "GlobalReplicationGroup$Engine": "

The Elasticache engine. For Redis only.

", - "GlobalReplicationGroup$EngineVersion": "

The Elasticache Redis engine version. For preview, it is Redis version 5.0.5 only.

", + "GlobalReplicationGroup$EngineVersion": "

The Elasticache Redis engine version.

", "GlobalReplicationGroup$ARN": "

The ARN (Amazon Resource Name) of the global replication group.

", "GlobalReplicationGroupInfo$GlobalReplicationGroupId": "

The name of the Global Datastore

", "GlobalReplicationGroupInfo$GlobalReplicationGroupMemberRole": "

The role of the replication group in a Global Datastore. Can be primary or secondary.

", @@ -2204,14 +2204,14 @@ "ReplicationGroupPendingModifiedValues$PrimaryClusterId": "

The primary cluster ID that is applied immediately (if --apply-immediately was specified), or during the next maintenance window.

", "ReservedCacheNode$ReservedCacheNodeId": "

The unique identifier for the reservation.

", "ReservedCacheNode$ReservedCacheNodesOfferingId": "

The offering identifier.

", - "ReservedCacheNode$CacheNodeType": "

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "ReservedCacheNode$CacheNodeType": "

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "ReservedCacheNode$ProductDescription": "

The description of the reserved cache node.

", "ReservedCacheNode$OfferingType": "

The offering type of this reserved cache node.

", "ReservedCacheNode$State": "

The state of the reserved cache node.

", "ReservedCacheNode$ReservationARN": "

The Amazon Resource Name (ARN) of the reserved cache node.

Example: arn:aws:elasticache:us-east-1:123456789012:reserved-instance:ri-2017-03-27-08-33-25-582

", "ReservedCacheNodeMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", "ReservedCacheNodesOffering$ReservedCacheNodesOfferingId": "

A unique identifier for the reserved cache node offering.

", - "ReservedCacheNodesOffering$CacheNodeType": "

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "ReservedCacheNodesOffering$CacheNodeType": "

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "ReservedCacheNodesOffering$ProductDescription": "

The cache engine used by the offering.

", "ReservedCacheNodesOffering$OfferingType": "

The offering type.

", "ReservedCacheNodesOfferingMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", @@ -2234,7 +2234,7 @@ "Snapshot$CacheClusterId": "

The user-supplied identifier of the source cluster.

", "Snapshot$SnapshotStatus": "

The status of the snapshot. Valid values: creating | available | restoring | copying | deleting.

", "Snapshot$SnapshotSource": "

Indicates whether the snapshot is from an automatic backup (automated) or was created manually (manual).

", - "Snapshot$CacheNodeType": "

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "Snapshot$CacheNodeType": "

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "Snapshot$Engine": "

The name of the cache engine (memcached or redis) used by the source cluster.

", "Snapshot$EngineVersion": "

The version of the cache engine version that is used by the source cluster.

", "Snapshot$PreferredAvailabilityZone": "

The name of the Availability Zone in which the source cluster is located.

", diff --git a/models/apis/imagebuilder/2019-12-02/api-2.json b/models/apis/imagebuilder/2019-12-02/api-2.json index 1d98b94f4f3..147b3e594ae 100644 --- a/models/apis/imagebuilder/2019-12-02/api-2.json +++ b/models/apis/imagebuilder/2019-12-02/api-2.json @@ -791,7 +791,7 @@ "AccountList":{ "type":"list", "member":{"shape":"AccountId"}, - "max":50, + "max":1536, "min":1 }, "Ami":{ @@ -1899,7 +1899,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListComponentBuildVersionsResponse":{ @@ -1907,7 +1907,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "componentSummaryList":{"shape":"ComponentSummaryList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListComponentsRequest":{ @@ -1919,7 +1919,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListComponentsResponse":{ @@ -1927,7 +1927,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "componentVersionList":{"shape":"ComponentVersionList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListDistributionConfigurationsRequest":{ @@ -1938,7 +1938,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListDistributionConfigurationsResponse":{ @@ -1946,7 +1946,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "distributionConfigurationSummaryList":{"shape":"DistributionConfigurationSummaryList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImageBuildVersionsRequest":{ @@ -1959,7 +1959,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImageBuildVersionsResponse":{ @@ -1967,7 +1967,7 @@ "members":{ 
"requestId":{"shape":"NonEmptyString"}, "imageSummaryList":{"shape":"ImageSummaryList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImagePipelineImagesRequest":{ @@ -1980,7 +1980,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImagePipelineImagesResponse":{ @@ -1988,7 +1988,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "imageSummaryList":{"shape":"ImageSummaryList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImagePipelinesRequest":{ @@ -1999,7 +1999,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImagePipelinesResponse":{ @@ -2007,7 +2007,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "imagePipelineList":{"shape":"ImagePipelineList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImageRecipesRequest":{ @@ -2019,7 +2019,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImageRecipesResponse":{ @@ -2027,7 +2027,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "imageRecipeSummaryList":{"shape":"ImageRecipeSummaryList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImagesRequest":{ @@ -2039,7 +2039,7 @@ "shape":"RestrictedInteger", "box":true }, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListImagesResponse":{ @@ -2047,7 +2047,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "imageVersionList":{"shape":"ImageVersionList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListInfrastructureConfigurationsRequest":{ @@ -2058,7 +2058,7 @@ "shape":"RestrictedInteger", "box":true }, - 
"nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListInfrastructureConfigurationsResponse":{ @@ -2066,7 +2066,7 @@ "members":{ "requestId":{"shape":"NonEmptyString"}, "infrastructureConfigurationSummaryList":{"shape":"InfrastructureConfigurationSummaryList"}, - "nextToken":{"shape":"NonEmptyString"} + "nextToken":{"shape":"PaginationToken"} } }, "ListTagsForResourceRequest":{ @@ -2122,6 +2122,11 @@ "Amazon" ] }, + "PaginationToken":{ + "type":"string", + "max":65535, + "min":1 + }, "PipelineExecutionStartCondition":{ "type":"string", "enum":[ diff --git a/models/apis/imagebuilder/2019-12-02/docs-2.json b/models/apis/imagebuilder/2019-12-02/docs-2.json index c2faff90aa2..91df316e221 100644 --- a/models/apis/imagebuilder/2019-12-02/docs-2.json +++ b/models/apis/imagebuilder/2019-12-02/docs-2.json @@ -1023,9 +1023,9 @@ "Ami$region": "

The AWS Region of the EC2 AMI.

", "Ami$image": "

The AMI ID of the EC2 AMI.

", "Ami$name": "

The name of the EC2 AMI.

", - "Ami$description": "

The description of the EC2 AMI.

", + "Ami$description": "

The description of the EC2 AMI. Minimum and maximum length are in characters.

", "Ami$accountId": "

The account ID of the owner of the AMI.

", - "AmiDistributionConfiguration$description": "

The description of the distribution configuration.

", + "AmiDistributionConfiguration$description": "

The description of the distribution configuration. Minimum and maximum length are in characters.

", "AmiDistributionConfiguration$kmsKeyId": "

The KMS key identifier used to encrypt the distributed image.

", "CancelImageCreationResponse$requestId": "

The request ID that uniquely identifies this request.

", "Component$description": "

The description of the component.

", @@ -1098,33 +1098,15 @@ "InfrastructureConfigurationSummary$description": "

The description of the infrastructure configuration.

", "InstanceBlockDeviceMapping$deviceName": "

The device to which these mappings apply.

", "InstanceBlockDeviceMapping$virtualName": "

Use to manage instance ephemeral devices.

", - "ListComponentBuildVersionsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListComponentBuildVersionsResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListComponentBuildVersionsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListComponentsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListComponentsResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListComponentsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListDistributionConfigurationsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListDistributionConfigurationsResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListDistributionConfigurationsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListImageBuildVersionsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListImageBuildVersionsResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListImageBuildVersionsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListImagePipelineImagesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListImagePipelineImagesResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListImagePipelineImagesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListImagePipelinesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListImagePipelinesResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListImagePipelinesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListImageRecipesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListImageRecipesResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListImageRecipesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListImagesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListImagesResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListImagesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", - "ListInfrastructureConfigurationsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", "ListInfrastructureConfigurationsResponse$requestId": "

The request ID that uniquely identifies this request.

", - "ListInfrastructureConfigurationsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", "PutComponentPolicyResponse$requestId": "

The request ID that uniquely identifies this request.

", "PutImagePolicyResponse$requestId": "

The request ID that uniquely identifies this request.

", "PutImageRecipePolicyResponse$requestId": "

The request ID that uniquely identifies this request.

", @@ -1195,6 +1177,29 @@ "ListImagesRequest$owner": "

The owner defines which images you want to list. By default, this request will only show images owned by your account. You can use this field to specify if you want to view images owned by yourself, by Amazon, or those images that have been shared with you by other customers.

" } }, + "PaginationToken": { + "base": null, + "refs": { + "ListComponentBuildVersionsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListComponentBuildVersionsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListComponentsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListComponentsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListDistributionConfigurationsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListDistributionConfigurationsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListImageBuildVersionsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListImageBuildVersionsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListImagePipelineImagesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListImagePipelineImagesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListImagePipelinesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListImagePipelinesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListImageRecipesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListImageRecipesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListImagesRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListImagesResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

", + "ListInfrastructureConfigurationsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "ListInfrastructureConfigurationsResponse$nextToken": "

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + } + }, "PipelineExecutionStartCondition": { "base": null, "refs": { diff --git a/models/apis/macie2/2020-01-01/api-2.json b/models/apis/macie2/2020-01-01/api-2.json index be7fc8abc40..c2cca817862 100644 --- a/models/apis/macie2/2020-01-01/api-2.json +++ b/models/apis/macie2/2020-01-01/api-2.json @@ -2677,6 +2677,10 @@ "shape": "__listOf__string", "locationName": "eq" }, + "eqExactMatch": { + "shape": "__listOf__string", + "locationName": "eqExactMatch" + }, "gt": { "shape": "__long", "locationName": "gt" diff --git a/models/apis/macie2/2020-01-01/docs-2.json b/models/apis/macie2/2020-01-01/docs-2.json index fb6c3b14b9d..30186ff112e 100644 --- a/models/apis/macie2/2020-01-01/docs-2.json +++ b/models/apis/macie2/2020-01-01/docs-2.json @@ -1438,6 +1438,7 @@ "CreateCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters. Keywords aren't case sensitive.

", "CreateInvitationsRequest$AccountIds" : "

An array that lists AWS account IDs, one for each account to send the invitation to.

", "CriterionAdditionalProperties$Eq" : "

An equal to condition to apply to a specified property value for findings.

", + "CriterionAdditionalProperties$EqExactMatch" : "

A condition that requires an array field on a finding to exactly match the specified property values. You can use this operator with the following properties:

", "CriterionAdditionalProperties$Neq" : "

A not equal to condition to apply to a specified property value for findings.

", "DeclineInvitationsRequest$AccountIds" : "

An array that lists AWS account IDs, one for each account that sent an invitation to decline.

", "DeleteInvitationsRequest$AccountIds" : "

An array that lists AWS account IDs, one for each account that sent an invitation to delete.

", diff --git a/models/apis/medialive/2017-10-14/api-2.json b/models/apis/medialive/2017-10-14/api-2.json index 003ef6acd10..3519dc5344c 100644 --- a/models/apis/medialive/2017-10-14/api-2.json +++ b/models/apis/medialive/2017-10-14/api-2.json @@ -2644,6 +2644,14 @@ "AutomaticInputFailoverSettings": { "type": "structure", "members": { + "ErrorClearTimeMsec": { + "shape": "__integerMin1", + "locationName": "errorClearTimeMsec" + }, + "FailoverConditions": { + "shape": "__listOfFailoverCondition", + "locationName": "failoverConditions" + }, "InputPreference": { "shape": "InputPreference", "locationName": "inputPreference" @@ -5514,6 +5522,24 @@ "TimecodeConfig" ] }, + "FailoverCondition": { + "type": "structure", + "members": { + "FailoverConditionSettings": { + "shape": "FailoverConditionSettings", + "locationName": "failoverConditionSettings" + } + } + }, + "FailoverConditionSettings": { + "type": "structure", + "members": { + "InputLossSettings": { + "shape": "InputLossFailoverSettings", + "locationName": "inputLossSettings" + } + } + }, "FeatureActivations": { "type": "structure", "members": { @@ -6528,6 +6554,13 @@ "SUBDIRECTORY_PER_STREAM" ] }, + "HlsDiscontinuityTags": { + "type": "string", + "enum": [ + "INSERT", + "NEVER_INSERT" + ] + }, "HlsEncryptionType": { "type": "string", "enum": [ @@ -6586,6 +6619,10 @@ "shape": "HlsDirectoryStructure", "locationName": "directoryStructure" }, + "DiscontinuityTags": { + "shape": "HlsDiscontinuityTags", + "locationName": "discontinuityTags" + }, "EncryptionType": { "shape": "HlsEncryptionType", "locationName": "encryptionType" @@ -6602,6 +6639,10 @@ "shape": "IFrameOnlyPlaylistType", "locationName": "iFrameOnlyPlaylists" }, + "IncompleteSegmentBehavior": { + "shape": "HlsIncompleteSegmentBehavior", + "locationName": "incompleteSegmentBehavior" + }, "IndexNSegments": { "shape": "__integerMin3", "locationName": "indexNSegments" @@ -6729,6 +6770,13 @@ "ENABLED" ] }, + "HlsIncompleteSegmentBehavior": { + "type": "string", + 
"enum": [ + "AUTO", + "SUPPRESS" + ] + }, "HlsInputSettings": { "type": "structure", "members": { @@ -7493,6 +7541,15 @@ } } }, + "InputLossFailoverSettings": { + "type": "structure", + "members": { + "InputLossThresholdMsec": { + "shape": "__integerMin100", + "locationName": "inputLossThresholdMsec" + } + } + }, "InputLossImageType": { "type": "string", "enum": [ @@ -12155,6 +12212,10 @@ "type": "integer", "min": 1 }, + "__integerMin100": { + "type": "integer", + "min": 100 + }, "__integerMin1000": { "type": "integer", "min": 1000 @@ -12363,6 +12424,12 @@ "shape": "ChannelSummary" } }, + "__listOfFailoverCondition": { + "type": "list", + "member": { + "shape": "FailoverCondition" + } + }, "__listOfHlsAdMarkers": { "type": "list", "member": { diff --git a/models/apis/medialive/2017-10-14/docs-2.json b/models/apis/medialive/2017-10-14/docs-2.json index a74fe003b9b..d97a8bc35bc 100644 --- a/models/apis/medialive/2017-10-14/docs-2.json +++ b/models/apis/medialive/2017-10-14/docs-2.json @@ -861,6 +861,18 @@ "UpdateChannel$EncoderSettings": "The encoder settings for this channel." } }, + "FailoverCondition": { + "base": "Failover Condition settings. There can be multiple failover conditions inside AutomaticInputFailoverSettings.", + "refs": { + "__listOfFailoverCondition$member": null + } + }, + "FailoverConditionSettings": { + "base": "Settings for one failover condition.", + "refs": { + "FailoverCondition$FailoverConditionSettings": "Failover condition type-specific settings." + } + }, "FeatureActivations": { "base": "Feature Activations", "refs": { @@ -1293,6 +1305,12 @@ "HlsGroupSettings$DirectoryStructure": "Place segments in subdirectories." 
} }, + "HlsDiscontinuityTags": { + "base": "Hls Discontinuity Tags", + "refs": { + "HlsGroupSettings$DiscontinuityTags": "Specifies whether to insert EXT-X-DISCONTINUITY tags in the HLS child manifests for this output group.\nTypically, choose Insert because these tags are required in the manifest (according to the HLS specification) and serve an important purpose.\nChoose Never Insert only if the downstream system is doing real-time failover (without using the MediaLive automatic failover feature) and only if that downstream system has advised you to exclude the tags." + } + }, "HlsEncryptionType": { "base": "Hls Encryption Type", "refs": { @@ -1323,6 +1341,12 @@ "HlsGroupSettings$HlsId3SegmentTagging": "State of HLS ID3 Segment Tagging" } }, + "HlsIncompleteSegmentBehavior": { + "base": "Hls Incomplete Segment Behavior", + "refs": { + "HlsGroupSettings$IncompleteSegmentBehavior": "Specifies whether to include the final (incomplete) segment in the media output when the pipeline stops producing output because of a channel stop, a channel pause or a loss of input to the pipeline.\nAuto means that MediaLive decides whether to include the final segment, depending on the channel class and the types of output groups.\nSuppress means to never include the incomplete segment. We recommend you choose Auto and let MediaLive control the behavior." + } + }, "HlsInputSettings": { "base": "Hls Input Settings", "refs": { @@ -1673,6 +1697,12 @@ "GlobalConfiguration$InputLossBehavior": "Settings for system actions when input is lost." } }, + "InputLossFailoverSettings": { + "base": "MediaLive will perform a failover if content is not detected in this input for the specified period.", + "refs": { + "FailoverConditionSettings$InputLossSettings": "MediaLive will perform a failover if content is not detected in this input for the specified period." 
+ } + }, "InputLossImageType": { "base": "Input Loss Image Type", "refs": { @@ -3381,6 +3411,7 @@ "refs": { "ArchiveGroupSettings$RolloverInterval": "Number of seconds to write to archive file before closing and starting a new one.", "AudioTrack$Track": "1-based integer value that maps to a specific audio track", + "AutomaticInputFailoverSettings$ErrorClearTimeMsec": "This clear time defines the requirement a recovered input must meet to be considered healthy. The input must have no failover conditions for this length of time. Enter a time in milliseconds. This value is particularly important if the input_preference for the failover pair is set to PRIMARY_INPUT_PREFERRED, because after this time, MediaLive will switch back to the primary input.", "DvbSubSourceSettings$Pid": "When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, regardless of selectors.", "H264Settings$FramerateDenominator": "Framerate denominator.", "H264Settings$FramerateNumerator": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps.", @@ -3402,6 +3433,12 @@ "StaticImageActivateScheduleActionSettings$Width": "The width of the image when inserted into the video, in pixels. The overlay will be scaled up or down to the specified width. Leave blank to use the native width of the overlay." } }, + "__integerMin100": { + "base": null, + "refs": { + "InputLossFailoverSettings$InputLossThresholdMsec": "The amount of time (in milliseconds) that no input is detected. After that time, an input failover will occur." + } + }, "__integerMin1000": { "base": null, "refs": { @@ -3661,6 +3698,12 @@ "ListChannelsResultModel$Channels": null } }, + "__listOfFailoverCondition": { + "base": null, + "refs": { + "AutomaticInputFailoverSettings$FailoverConditions": "A list of failover conditions. If any of these conditions occur, MediaLive will perform a failover to the other input." 
+ } + }, "__listOfHlsAdMarkers": { "base": null, "refs": { diff --git a/models/apis/sns/2010-03-31/docs-2.json b/models/apis/sns/2010-03-31/docs-2.json index 58c404a3c19..40a14bbb52e 100644 --- a/models/apis/sns/2010-03-31/docs-2.json +++ b/models/apis/sns/2010-03-31/docs-2.json @@ -28,7 +28,7 @@ "RemovePermission": "

Removes a statement from a topic's access control policy.

", "SetEndpointAttributes": "

Sets the attributes for an endpoint for a device on one of the supported push notification services, such as GCM (Firebase Cloud Messaging) and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

", "SetPlatformApplicationAttributes": "

Sets the attributes of the platform application object for the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging). For more information, see Using Amazon SNS Mobile Push Notifications. For information on configuring attributes for message delivery status, see Using Amazon SNS Application Attributes for Message Delivery Status.

", - "SetSMSAttributes": "

Use this request to set the default settings for sending SMS messages and receiving daily SMS usage reports.

You can override some of these settings for a single message when you use the Publish action with the MessageAttributes.entry.N parameter. For more information, see Sending an SMS Message in the Amazon SNS Developer Guide.

", + "SetSMSAttributes": "

Use this request to set the default settings for sending SMS messages and receiving daily SMS usage reports.

You can override some of these settings for a single message when you use the Publish action with the MessageAttributes.entry.N parameter. For more information, see Publishing to a mobile phone in the Amazon SNS Developer Guide.

", "SetSubscriptionAttributes": "

Allows a subscription owner to set an attribute of the subscription to a new value.

", "SetTopicAttributes": "

Allows a topic owner to set an attribute of the topic to a new value.

", "Subscribe": "

Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same AWS account, the endpoint owner must the ConfirmSubscription action to confirm the subscription.

You call the ConfirmSubscription action with the token from the subscription response. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).

", @@ -369,7 +369,7 @@ } }, "MessageAttributeValue": { - "base": "

The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see Publish.

Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes). For more information, see Using Amazon SNS Message Attributes.

", + "base": "

The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see Publish.

Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes). For more information, see Amazon SNS message attributes and Publishing to a mobile phone in the Amazon SNS Developer Guide.

", "refs": { "MessageAttributeMap$value": null } @@ -607,8 +607,8 @@ "TopicAttributesMap": { "base": null, "refs": { - "CreateTopicInput$Attributes": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

The following attribute applies only to server-side-encryption:

The following attribute applies only to FIFO topics:

", - "GetTopicAttributesResponse$Attributes": "

A map of the topic's attributes. Attributes in this map include the following:

The following attribute applies only to server-side-encryption:

" + "CreateTopicInput$Attributes": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

The following attribute applies only to server-side-encryption:

The following attributes apply only to FIFO topics:

", + "GetTopicAttributesResponse$Attributes": "

A map of the topic's attributes. Attributes in this map include the following:

The following attribute applies only to server-side-encryption:

The following attributes apply only to FIFO topics:

" } }, "TopicLimitExceededException": { @@ -653,7 +653,7 @@ "base": null, "refs": { "SetSubscriptionAttributesInput$AttributeName": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

", - "SetTopicAttributesInput$AttributeName": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

The following attribute applies only to server-side-encryption:

The following attribute applies only to FIFO topics:

", + "SetTopicAttributesInput$AttributeName": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

The following attribute applies only to server-side-encryption:

The following attribute applies only to FIFO topics:

", "SubscriptionAttributesMap$key": null, "TopicAttributesMap$key": null } @@ -677,7 +677,7 @@ "base": null, "refs": { "CheckIfPhoneNumberIsOptedOutResponse$isOptedOut": "

Indicates whether the phone number is opted out:

", - "SubscribeInput$ReturnSubscriptionArn": "

Sets whether the response from the Subscribe request includes the subscription ARN, even if the subscription is not yet confirmed.

The default value is false.

" + "SubscribeInput$ReturnSubscriptionArn": "

Sets whether the response from the Subscribe request includes the subscription ARN, even if the subscription is not yet confirmed.

If you set this parameter to true, the response includes the ARN in all cases, even if the subscription is not yet confirmed. In addition to the ARN for confirmed subscriptions, the response also includes the pending subscription ARN value for subscriptions that aren't yet confirmed. A subscription becomes confirmed when the subscriber calls the ConfirmSubscription action with a confirmation token.

The default value is false.

" } }, "delegate": { diff --git a/service/braket/api.go b/service/braket/api.go index de599a73cb9..4342e5a5652 100644 --- a/service/braket/api.go +++ b/service/braket/api.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" ) const opCancelQuantumTask = "CancelQuantumTask" @@ -79,7 +80,7 @@ func (c *Braket) CancelQuantumTaskRequest(input *CancelQuantumTaskInput) (req *r // The throttling rate limit is met. // // * InternalServiceException -// The request processing has failed because of an unknown error, exception +// The request processing has failed because of an unknown error, exception, // or failure. // // * ValidationException @@ -171,11 +172,11 @@ func (c *Braket) CreateQuantumTaskRequest(input *CreateQuantumTaskInput) (req *r // The specified device is currently offline. // // * InternalServiceException -// The request processing has failed because of an unknown error, exception +// The request processing has failed because of an unknown error, exception, // or failure. // // * ServiceQuotaExceededException -// The request failed because a service quota is met. +// The request failed because a service quota is exceeded. // // * ValidationException // The input fails to satisfy the constraints specified by an AWS service. @@ -266,7 +267,7 @@ func (c *Braket) GetDeviceRequest(input *GetDeviceInput) (req *request.Request, // The throttling rate limit is met. // // * InternalServiceException -// The request processing has failed because of an unknown error, exception +// The request processing has failed because of an unknown error, exception, // or failure. // // * ValidationException @@ -358,7 +359,7 @@ func (c *Braket) GetQuantumTaskRequest(input *GetQuantumTaskInput) (req *request // The throttling rate limit is met. 
// // * InternalServiceException -// The request processing has failed because of an unknown error, exception +// The request processing has failed because of an unknown error, exception, // or failure. // // * ValidationException @@ -386,6 +387,92 @@ func (c *Braket) GetQuantumTaskWithContext(ctx aws.Context, input *GetQuantumTas return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/ListTagsForResource +func (c *Braket) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Braket. +// +// Shows the tags associated with this resource. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Braket's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InternalServiceException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/ListTagsForResource +func (c *Braket) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Braket) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSearchDevices = "SearchDevices" // SearchDevicesRequest generates a "aws/request.Request" representing the @@ -453,7 +540,7 @@ func (c *Braket) SearchDevicesRequest(input *SearchDevicesInput) (req *request.R // The throttling rate limit is met. 
// // * InternalServiceException -// The request processing has failed because of an unknown error, exception +// The request processing has failed because of an unknown error, exception, // or failure. // // * ValidationException @@ -600,7 +687,7 @@ func (c *Braket) SearchQuantumTasksRequest(input *SearchQuantumTasksInput) (req // The throttling rate limit is met. // // * InternalServiceException -// The request processing has failed because of an unknown error, exception +// The request processing has failed because of an unknown error, exception, // or failure. // // * ValidationException @@ -680,6 +767,180 @@ func (c *Braket) SearchQuantumTasksPagesWithContext(ctx aws.Context, input *Sear return p.Err() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/TagResource +func (c *Braket) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Braket. +// +// Add a tag to the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Braket's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InternalServiceException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/TagResource +func (c *Braket) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Braket) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/UntagResource +func (c *Braket) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Braket. +// +// Remove tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Braket's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InternalServiceException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/UntagResource +func (c *Braket) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Braket) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // You do not have sufficient access to perform this action. type AccessDeniedException struct { _ struct{} `type:"structure"` @@ -914,6 +1175,9 @@ type CreateQuantumTaskInput struct { // // Shots is a required field Shots *int64 `locationName:"shots" type:"long" required:"true"` + + // Tags to be added to the quantum task you're creating. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -1005,6 +1269,12 @@ func (s *CreateQuantumTaskInput) SetShots(v int64) *CreateQuantumTaskInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateQuantumTaskInput) SetTags(v map[string]*string) *CreateQuantumTaskInput { + s.Tags = v + return s +} + type CreateQuantumTaskOutput struct { _ struct{} `type:"structure"` @@ -1366,6 +1636,9 @@ type GetQuantumTaskOutput struct { // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"QuantumTaskStatus"` + + // The tags that belong to this task. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -1438,7 +1711,13 @@ func (s *GetQuantumTaskOutput) SetStatus(v string) *GetQuantumTaskOutput { return s } -// The request processing has failed because of an unknown error, exception +// SetTags sets the Tags field's value. 
+func (s *GetQuantumTaskOutput) SetTags(v map[string]*string) *GetQuantumTaskOutput { + s.Tags = v + return s +} + +// The request processing has failed because of an unknown error, exception, // or failure. type InternalServiceException struct { _ struct{} `type:"structure"` @@ -1495,6 +1774,70 @@ func (s *InternalServiceException) RequestID() string { return s.RespMetadata.RequestID } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // Specify the resourceArn for the resource whose tags to display. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Displays the key, value pairs of tags associated with this resource. 
+ Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // Includes information about a quantum task. type QuantumTaskSummary struct { _ struct{} `type:"structure"` @@ -1536,6 +1879,9 @@ type QuantumTaskSummary struct { // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"QuantumTaskStatus"` + + // Displays the key, value pairs of tags associated with this quantum task. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -1596,6 +1942,12 @@ func (s *QuantumTaskSummary) SetStatus(v string) *QuantumTaskSummary { return s } +// SetTags sets the Tags field's value. +func (s *QuantumTaskSummary) SetTags(v map[string]*string) *QuantumTaskSummary { + s.Tags = v + return s +} + // The specified resource was not found. type ResourceNotFoundException struct { _ struct{} `type:"structure"` @@ -2000,7 +2352,7 @@ func (s *SearchQuantumTasksOutput) SetQuantumTasks(v []*QuantumTaskSummary) *Sea return s } -// The request failed because a service quota is met. +// The request failed because a service quota is exceeded. type ServiceQuotaExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -2056,6 +2408,75 @@ func (s *ServiceQuotaExceededException) RequestID() string { return s.RespMetadata.RequestID } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // Specify the resourceArn of the resource to which a tag will be added. 
+ // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // Specify the tags to add to the resource. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // The throttling rate limit is met. 
type ThrottlingException struct { _ struct{} `type:"structure"` @@ -2112,6 +2533,75 @@ func (s *ThrottlingException) RequestID() string { return s.RespMetadata.RequestID } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // Specify the resourceArn for the resource from which to remove the tags. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // Specify the keys for the tags to remove from the resource. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // The input fails to satisfy the constraints specified by an AWS service. type ValidationException struct { _ struct{} `type:"structure"` @@ -2169,34 +2659,34 @@ func (s *ValidationException) RequestID() string { } const ( - // CancellationStatusCancelled is a CancellationStatus enum value - CancellationStatusCancelled = "CANCELLED" - // CancellationStatusCancelling is a CancellationStatus enum value CancellationStatusCancelling = "CANCELLING" + + // CancellationStatusCancelled is a CancellationStatus enum value + CancellationStatusCancelled = "CANCELLED" ) // CancellationStatus_Values returns all elements of the CancellationStatus enum func CancellationStatus_Values() []string { return []string{ - CancellationStatusCancelled, CancellationStatusCancelling, + CancellationStatusCancelled, } } const ( - // DeviceStatusOffline is a DeviceStatus enum value - DeviceStatusOffline = "OFFLINE" - // DeviceStatusOnline is a DeviceStatus enum value DeviceStatusOnline = "ONLINE" + + // DeviceStatusOffline is a DeviceStatus enum value + DeviceStatusOffline = "OFFLINE" ) // DeviceStatus_Values returns all elements of the DeviceStatus enum func DeviceStatus_Values() []string { return []string{ - DeviceStatusOffline, DeviceStatusOnline, + DeviceStatusOffline, } } @@ -2217,44 +2707,47 @@ func DeviceType_Values() []string { } const ( - // QuantumTaskStatusCancelled is a QuantumTaskStatus enum value - QuantumTaskStatusCancelled = "CANCELLED" + // QuantumTaskStatusCreated is a QuantumTaskStatus enum value + QuantumTaskStatusCreated = "CREATED" - // 
QuantumTaskStatusCancelling is a QuantumTaskStatus enum value - QuantumTaskStatusCancelling = "CANCELLING" + // QuantumTaskStatusQueued is a QuantumTaskStatus enum value + QuantumTaskStatusQueued = "QUEUED" + + // QuantumTaskStatusRunning is a QuantumTaskStatus enum value + QuantumTaskStatusRunning = "RUNNING" // QuantumTaskStatusCompleted is a QuantumTaskStatus enum value QuantumTaskStatusCompleted = "COMPLETED" - // QuantumTaskStatusCreated is a QuantumTaskStatus enum value - QuantumTaskStatusCreated = "CREATED" - // QuantumTaskStatusFailed is a QuantumTaskStatus enum value QuantumTaskStatusFailed = "FAILED" - // QuantumTaskStatusQueued is a QuantumTaskStatus enum value - QuantumTaskStatusQueued = "QUEUED" + // QuantumTaskStatusCancelling is a QuantumTaskStatus enum value + QuantumTaskStatusCancelling = "CANCELLING" - // QuantumTaskStatusRunning is a QuantumTaskStatus enum value - QuantumTaskStatusRunning = "RUNNING" + // QuantumTaskStatusCancelled is a QuantumTaskStatus enum value + QuantumTaskStatusCancelled = "CANCELLED" ) // QuantumTaskStatus_Values returns all elements of the QuantumTaskStatus enum func QuantumTaskStatus_Values() []string { return []string{ - QuantumTaskStatusCancelled, - QuantumTaskStatusCancelling, - QuantumTaskStatusCompleted, QuantumTaskStatusCreated, - QuantumTaskStatusFailed, QuantumTaskStatusQueued, QuantumTaskStatusRunning, + QuantumTaskStatusCompleted, + QuantumTaskStatusFailed, + QuantumTaskStatusCancelling, + QuantumTaskStatusCancelled, } } const ( - // SearchQuantumTasksFilterOperatorBetween is a SearchQuantumTasksFilterOperator enum value - SearchQuantumTasksFilterOperatorBetween = "BETWEEN" + // SearchQuantumTasksFilterOperatorLt is a SearchQuantumTasksFilterOperator enum value + SearchQuantumTasksFilterOperatorLt = "LT" + + // SearchQuantumTasksFilterOperatorLte is a SearchQuantumTasksFilterOperator enum value + SearchQuantumTasksFilterOperatorLte = "LTE" // SearchQuantumTasksFilterOperatorEqual is a 
SearchQuantumTasksFilterOperator enum value SearchQuantumTasksFilterOperatorEqual = "EQUAL" @@ -2265,21 +2758,18 @@ const ( // SearchQuantumTasksFilterOperatorGte is a SearchQuantumTasksFilterOperator enum value SearchQuantumTasksFilterOperatorGte = "GTE" - // SearchQuantumTasksFilterOperatorLt is a SearchQuantumTasksFilterOperator enum value - SearchQuantumTasksFilterOperatorLt = "LT" - - // SearchQuantumTasksFilterOperatorLte is a SearchQuantumTasksFilterOperator enum value - SearchQuantumTasksFilterOperatorLte = "LTE" + // SearchQuantumTasksFilterOperatorBetween is a SearchQuantumTasksFilterOperator enum value + SearchQuantumTasksFilterOperatorBetween = "BETWEEN" ) // SearchQuantumTasksFilterOperator_Values returns all elements of the SearchQuantumTasksFilterOperator enum func SearchQuantumTasksFilterOperator_Values() []string { return []string{ - SearchQuantumTasksFilterOperatorBetween, + SearchQuantumTasksFilterOperatorLt, + SearchQuantumTasksFilterOperatorLte, SearchQuantumTasksFilterOperatorEqual, SearchQuantumTasksFilterOperatorGt, SearchQuantumTasksFilterOperatorGte, - SearchQuantumTasksFilterOperatorLt, - SearchQuantumTasksFilterOperatorLte, + SearchQuantumTasksFilterOperatorBetween, } } diff --git a/service/braket/braketiface/interface.go b/service/braket/braketiface/interface.go index 22c4d93028b..c89d7d1fc4d 100644 --- a/service/braket/braketiface/interface.go +++ b/service/braket/braketiface/interface.go @@ -76,6 +76,10 @@ type BraketAPI interface { GetQuantumTaskWithContext(aws.Context, *braket.GetQuantumTaskInput, ...request.Option) (*braket.GetQuantumTaskOutput, error) GetQuantumTaskRequest(*braket.GetQuantumTaskInput) (*request.Request, *braket.GetQuantumTaskOutput) + ListTagsForResource(*braket.ListTagsForResourceInput) (*braket.ListTagsForResourceOutput, error) + ListTagsForResourceWithContext(aws.Context, *braket.ListTagsForResourceInput, ...request.Option) (*braket.ListTagsForResourceOutput, error) + 
ListTagsForResourceRequest(*braket.ListTagsForResourceInput) (*request.Request, *braket.ListTagsForResourceOutput) + SearchDevices(*braket.SearchDevicesInput) (*braket.SearchDevicesOutput, error) SearchDevicesWithContext(aws.Context, *braket.SearchDevicesInput, ...request.Option) (*braket.SearchDevicesOutput, error) SearchDevicesRequest(*braket.SearchDevicesInput) (*request.Request, *braket.SearchDevicesOutput) @@ -89,6 +93,14 @@ type BraketAPI interface { SearchQuantumTasksPages(*braket.SearchQuantumTasksInput, func(*braket.SearchQuantumTasksOutput, bool) bool) error SearchQuantumTasksPagesWithContext(aws.Context, *braket.SearchQuantumTasksInput, func(*braket.SearchQuantumTasksOutput, bool) bool, ...request.Option) error + + TagResource(*braket.TagResourceInput) (*braket.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *braket.TagResourceInput, ...request.Option) (*braket.TagResourceOutput, error) + TagResourceRequest(*braket.TagResourceInput) (*request.Request, *braket.TagResourceOutput) + + UntagResource(*braket.UntagResourceInput) (*braket.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *braket.UntagResourceInput, ...request.Option) (*braket.UntagResourceOutput, error) + UntagResourceRequest(*braket.UntagResourceInput) (*request.Request, *braket.UntagResourceOutput) } var _ BraketAPI = (*braket.Braket)(nil) diff --git a/service/braket/errors.go b/service/braket/errors.go index c6cd7dc8d98..d01eca58193 100644 --- a/service/braket/errors.go +++ b/service/braket/errors.go @@ -29,7 +29,7 @@ const ( // ErrCodeInternalServiceException for service response error code // "InternalServiceException". // - // The request processing has failed because of an unknown error, exception + // The request processing has failed because of an unknown error, exception, // or failure. 
ErrCodeInternalServiceException = "InternalServiceException" @@ -42,7 +42,7 @@ const ( // ErrCodeServiceQuotaExceededException for service response error code // "ServiceQuotaExceededException". // - // The request failed because a service quota is met. + // The request failed because a service quota is exceeded. ErrCodeServiceQuotaExceededException = "ServiceQuotaExceededException" // ErrCodeThrottlingException for service response error code diff --git a/service/databasemigrationservice/api.go b/service/databasemigrationservice/api.go index 6bc40dc27eb..0ee07cc7784 100644 --- a/service/databasemigrationservice/api.go +++ b/service/databasemigrationservice/api.go @@ -6291,6 +6291,9 @@ type CreateEndpointInput struct { // "BucketName": "string", "CompressionType": "none"|"gzip" } DmsTransferSettings *DmsTransferSettings `type:"structure"` + // Provides information that defines a DocumentDB endpoint. + DocDbSettings *DocDbSettings `type:"structure"` + // Settings in JSON format for the target Amazon DynamoDB endpoint. For information // about other available settings, see Using Object Mapping to Migrate Data // to DynamoDB (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html) @@ -6519,6 +6522,12 @@ func (s *CreateEndpointInput) SetDmsTransferSettings(v *DmsTransferSettings) *Cr return s } +// SetDocDbSettings sets the DocDbSettings field's value. +func (s *CreateEndpointInput) SetDocDbSettings(v *DocDbSettings) *CreateEndpointInput { + s.DocDbSettings = v + return s +} + // SetDynamoDbSettings sets the DynamoDbSettings field's value. func (s *CreateEndpointInput) SetDynamoDbSettings(v *DynamoDbSettings) *CreateEndpointInput { s.DynamoDbSettings = v @@ -10129,6 +10138,115 @@ func (s *DmsTransferSettings) SetServiceAccessRoleArn(v string) *DmsTransferSett return s } +// Provides information that defines a DocumentDB endpoint. +type DocDbSettings struct { + _ struct{} `type:"structure"` + + // The database name on the DocumentDB source endpoint. 
+ DatabaseName *string `type:"string"` + + // Indicates the number of documents to preview to determine the document organization. + // Use this setting when NestingLevel is set to "one". + // + // Must be a positive value greater than 0. Default value is 1000. + DocsToInvestigate *int64 `type:"integer"` + + // Specifies the document ID. Use this setting when NestingLevel is set to "none". + // + // Default value is "false". + ExtractDocId *bool `type:"boolean"` + + // The AWS KMS key identifier that is used to encrypt the content on the replication + // instance. If you don't specify a value for the KmsKeyId parameter, then AWS + // DMS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. + KmsKeyId *string `type:"string"` + + // Specifies either document or table mode. + // + // Default value is "none". Specify "none" to use document mode. Specify "one" + // to use table mode. + NestingLevel *string `type:"string" enum:"NestingLevelValue"` + + // The password for the user account you use to access the DocumentDB source + // endpoint. + Password *string `type:"string" sensitive:"true"` + + // The port value for the DocumentDB source endpoint. + Port *int64 `type:"integer"` + + // The name of the server on the DocumentDB source endpoint. + ServerName *string `type:"string"` + + // The user name you use to access the DocumentDB source endpoint. + Username *string `type:"string"` +} + +// String returns the string representation +func (s DocDbSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocDbSettings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. 
+func (s *DocDbSettings) SetDatabaseName(v string) *DocDbSettings { + s.DatabaseName = &v + return s +} + +// SetDocsToInvestigate sets the DocsToInvestigate field's value. +func (s *DocDbSettings) SetDocsToInvestigate(v int64) *DocDbSettings { + s.DocsToInvestigate = &v + return s +} + +// SetExtractDocId sets the ExtractDocId field's value. +func (s *DocDbSettings) SetExtractDocId(v bool) *DocDbSettings { + s.ExtractDocId = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *DocDbSettings) SetKmsKeyId(v string) *DocDbSettings { + s.KmsKeyId = &v + return s +} + +// SetNestingLevel sets the NestingLevel field's value. +func (s *DocDbSettings) SetNestingLevel(v string) *DocDbSettings { + s.NestingLevel = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *DocDbSettings) SetPassword(v string) *DocDbSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *DocDbSettings) SetPort(v int64) *DocDbSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *DocDbSettings) SetServerName(v string) *DocDbSettings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *DocDbSettings) SetUsername(v string) *DocDbSettings { + s.Username = &v + return s +} + // Provides the Amazon Resource Name (ARN) of the AWS Identity and Access Management // (IAM) role used to define an Amazon DynamoDB target endpoint. type DynamoDbSettings struct { @@ -10286,6 +10404,9 @@ type Endpoint struct { // "BucketName": "string", "CompressionType": "none"|"gzip" } DmsTransferSettings *DmsTransferSettings `type:"structure"` + // Provides information that defines a DocumentDB endpoint. + DocDbSettings *DocDbSettings `type:"structure"` + // The settings for the DynamoDB target endpoint. For more information, see // the DynamoDBSettings structure. 
DynamoDbSettings *DynamoDbSettings `type:"structure"` @@ -10430,6 +10551,12 @@ func (s *Endpoint) SetDmsTransferSettings(v *DmsTransferSettings) *Endpoint { return s } +// SetDocDbSettings sets the DocDbSettings field's value. +func (s *Endpoint) SetDocDbSettings(v *DocDbSettings) *Endpoint { + s.DocDbSettings = v + return s +} + // SetDynamoDbSettings sets the DynamoDbSettings field's value. func (s *Endpoint) SetDynamoDbSettings(v *DynamoDbSettings) *Endpoint { s.DynamoDbSettings = v @@ -12131,6 +12258,12 @@ type ModifyEndpointInput struct { // "BucketName": "string", "CompressionType": "none"|"gzip" } DmsTransferSettings *DmsTransferSettings `type:"structure"` + // Settings in JSON format for the source DocumentDB endpoint. For more information + // about the available settings, see the configuration properties section in + // Using DocumentDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DocumentDB.html) + // in the AWS Database Migration Service User Guide. + DocDbSettings *DocDbSettings `type:"structure"` + // Settings in JSON format for the target Amazon DynamoDB endpoint. For information // about other available settings, see Using Object Mapping to Migrate Data // to DynamoDB (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html) @@ -12324,6 +12457,12 @@ func (s *ModifyEndpointInput) SetDmsTransferSettings(v *DmsTransferSettings) *Mo return s } +// SetDocDbSettings sets the DocDbSettings field's value. +func (s *ModifyEndpointInput) SetDocDbSettings(v *DocDbSettings) *ModifyEndpointInput { + s.DocDbSettings = v + return s +} + // SetDynamoDbSettings sets the DynamoDbSettings field's value. func (s *ModifyEndpointInput) SetDynamoDbSettings(v *DynamoDbSettings) *ModifyEndpointInput { s.DynamoDbSettings = v @@ -16498,7 +16637,7 @@ type S3Settings struct { // When set to true, this parameter partitions S3 bucket folders based on transaction // commit dates. 
The default value is false. For more information about date-based - // folder partitoning, see Using date-based folder partitioning (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib) + // folder partitoning, see Using date-based folder partitioning (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning). DatePartitionEnabled *bool `type:"boolean"` // Identifies the sequence of the date format to use during folder partitioning. diff --git a/service/elasticache/api.go b/service/elasticache/api.go index bb9e651e4b3..2a836bbc435 100644 --- a/service/elasticache/api.go +++ b/service/elasticache/api.go @@ -1472,7 +1472,7 @@ func (c *ElastiCache) CreateUserRequest(input *CreateUserInput) (req *request.Re // CreateUser API operation for Amazon ElastiCache. // -// For Redis engine version 6.04 onwards: Creates a Redis user. For more information, +// For Redis engine version 6.x onwards: Creates a Redis user. For more information, // see Using Role Based Access Control (RBAC) (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1564,7 +1564,7 @@ func (c *ElastiCache) CreateUserGroupRequest(input *CreateUserGroupInput) (req * // CreateUserGroup API operation for Amazon ElastiCache. // -// For Redis engine version 6.04 onwards: Creates a Redis user group. For more +// For Redis engine version 6.x onwards: Creates a Redis user group. For more // information, see Using Role Based Access Control (RBAC) (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7259,8 +7259,8 @@ func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *reques // API. 
Look for the following automatic failover related events, listed // here in order of occurrance: Replication group message: Test Failover // API called for node group Cache cluster message: Failover -// from master node to replica node completed -// Replication group message: Failover from master node +// from primary node to replica node completed +// Replication group message: Failover from primary node // to replica node completed Cache cluster message: Recovering // cache nodes Cache cluster message: Finished recovery for cache // nodes For more information see: Viewing ElastiCache Events (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ECEvents.Viewing.html) @@ -7769,25 +7769,36 @@ type CacheCluster struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). 
cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). 
cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -8100,7 +8111,7 @@ type CacheEngineVersion struct { // The name of the cache parameter group family associated with this cache engine. // // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | + // | redis4.0 | redis5.0 | redis6.0 | CacheParameterGroupFamily *string `type:"string"` // The name of the cache engine. @@ -8158,25 +8169,36 @@ func (s *CacheEngineVersion) SetEngineVersion(v string) *CacheEngineVersion { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// -// * General purpose: Current generation: M5 node types: cache.m5.large, -// cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, -// cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, -// cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, -// cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium -// Previous generation: (not recommended) T1 node types: cache.t1.micro M1 -// node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge -// M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge +// * General purpose: Current generation: M6g node types (available only +// for Redis engine version 5.0.6 onward and for Memcached engine version +// 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, +// cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge +// At this time, M6g node types are available in the following regions: us-east-1, +// us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
M5 node +// types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, +// cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, +// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, +// cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, +// cache.t2.medium Previous generation: (not recommended) T1 node types: +// cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, +// cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, +// cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // -// * Memory optimized: Current generation: R5 node types: cache.r5.large, -// cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, -// cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, -// cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: -// (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge -// R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, -// cache.r3.8xlarge +// * Memory optimized: Current generation: R6g node types (available only +// for Redis engine version 5.0.6 onward and for Memcached engine version +// 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, +// cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge +// At this time, R6g node types are available in the following regions: us-east-1, +// us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
R5 node +// types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, +// cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, +// cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge +// Previous generation: (not recommended) M2 node types: cache.m2.xlarge, +// cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, +// cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -8513,7 +8535,7 @@ type CacheParameterGroup struct { // is compatible with. // // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | + // | redis4.0 | redis5.0 | redis6.0 | CacheParameterGroupFamily *string `type:"string"` // The name of the cache parameter group. @@ -8985,7 +9007,7 @@ type CopySnapshotInput struct { // // When using this parameter to export a snapshot, be sure Amazon ElastiCache // has the needed permissions to this S3 bucket. For more information, see Step - // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) + // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) // in the Amazon ElastiCache User Guide. // // For more information, see Exporting a Snapshot (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html) @@ -9127,25 +9149,36 @@ type CreateCacheClusterInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -9549,7 +9582,7 @@ type CreateCacheParameterGroupInput struct { // can be used with. // // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | + // | redis4.0 | redis5.0 | redis6.0 | // // CacheParameterGroupFamily is a required field CacheParameterGroupFamily *string `type:"string" required:"true"` @@ -9977,25 +10010,36 @@ type CreateReplicationGroupInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). 
cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). 
cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -12034,7 +12078,7 @@ type DescribeCacheEngineVersionsInput struct { // The name of a specific cache parameter group family to return details for. // // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | + // | redis4.0 | redis5.0 | redis6.0 | // // Constraints: // @@ -12530,7 +12574,7 @@ type DescribeEngineDefaultParametersInput struct { // The name of the cache parameter group family. // // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | + // | redis4.0 | redis5.0 | redis6.0 | // // CacheParameterGroupFamily is a required field CacheParameterGroupFamily *string `type:"string" required:"true"` @@ -12930,25 +12974,36 @@ type DescribeReservedCacheNodesInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -12985,7 +13040,8 @@ type DescribeReservedCacheNodesInput struct { // The offering type filter value. Use this parameter to show only the available // offerings matching the specified offering type. // - // Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" + // Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization"|"All + // Upfront"|"Partial Upfront"| "No Upfront" OfferingType *string `type:"string"` // The product description filter value. Use this parameter to show only those @@ -13070,25 +13126,36 @@ type DescribeReservedCacheNodesOfferingsInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -14001,7 +14068,7 @@ type EngineDefaults struct { // default parameters apply. // // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | + // | redis4.0 | redis5.0 | redis6.0 | CacheParameterGroupFamily *string `type:"string"` // Provides an identifier to allow retrieval of paginated results. @@ -14322,8 +14389,7 @@ type GlobalReplicationGroup struct { // The Elasticache engine. For Redis only. Engine *string `type:"string"` - // The Elasticache Redis engine version. For preview, it is Redis version 5.0.5 - // only. + // The Elasticache Redis engine version. EngineVersion *string `type:"string"` // Indicates the slot configuration and global identifier for each slice group. @@ -17879,25 +17945,36 @@ type ReservedCacheNode struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -18045,25 +18122,36 @@ type ReservedCacheNodesOffering struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // @@ -18603,25 +18691,36 @@ type Snapshot struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, - // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 - // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge + // At this time, M6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
M5 node + // types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, + // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge + // * Memory optimized: Current generation: R6g node types (available only + // for Redis engine version 5.0.6 onward and for Memcached engine version + // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, + // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge + // At this time, R6g node types are available in the following regions: us-east-1, + // us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1. 
R5 node + // types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, + // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // Previous generation: (not recommended) M2 node types: cache.m2.xlarge, + // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + // cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge // // Additional node type info // diff --git a/service/imagebuilder/api.go b/service/imagebuilder/api.go index 6c57feba8cf..34775d297a5 100644 --- a/service/imagebuilder/api.go +++ b/service/imagebuilder/api.go @@ -4796,7 +4796,7 @@ type Ami struct { // The account ID of the owner of the AMI. AccountId *string `locationName:"accountId" min:"1" type:"string"` - // The description of the EC2 AMI. + // The description of the EC2 AMI. Minimum and maximum length are in characters. Description *string `locationName:"description" min:"1" type:"string"` // The AMI ID of the EC2 AMI. @@ -4865,7 +4865,8 @@ type AmiDistributionConfiguration struct { // The tags to apply to AMIs distributed to this Region. AmiTags map[string]*string `locationName:"amiTags" min:"1" type:"map"` - // The description of the distribution configuration. + // The description of the distribution configuration. Minimum and maximum length + // are in characters. Description *string `locationName:"description" min:"1" type:"string"` // The KMS key identifier used to encrypt the distributed image. 
diff --git a/service/macie2/api.go b/service/macie2/api.go index f8ac29e847c..d2a84ab639f 100644 --- a/service/macie2/api.go +++ b/service/macie2/api.go @@ -7864,6 +7864,8 @@ type CriterionAdditionalProperties struct { Eq []*string `locationName:"eq" type:"list"` + EqExactMatch []*string `locationName:"eqExactMatch" type:"list"` + Gt *int64 `locationName:"gt" type:"long"` Gte *int64 `locationName:"gte" type:"long"` @@ -7891,6 +7893,12 @@ func (s *CriterionAdditionalProperties) SetEq(v []*string) *CriterionAdditionalP return s } +// SetEqExactMatch sets the EqExactMatch field's value. +func (s *CriterionAdditionalProperties) SetEqExactMatch(v []*string) *CriterionAdditionalProperties { + s.EqExactMatch = v + return s +} + // SetGt sets the Gt field's value. func (s *CriterionAdditionalProperties) SetGt(v int64) *CriterionAdditionalProperties { s.Gt = &v diff --git a/service/medialive/api.go b/service/medialive/api.go index afb64dcd612..98fa3eed02c 100644 --- a/service/medialive/api.go +++ b/service/medialive/api.go @@ -6823,6 +6823,17 @@ func (s *AudioTrackSelection) SetTracks(v []*AudioTrack) *AudioTrackSelection { type AutomaticInputFailoverSettings struct { _ struct{} `type:"structure"` + // This clear time defines the requirement a recovered input must meet to be + // considered healthy. The input must have no failover conditions for this length + // of time. Enter a time in milliseconds. This value is particularly important + // if the input_preference for the failover pair is set to PRIMARY_INPUT_PREFERRED, + // because after this time, MediaLive will switch back to the primary input. + ErrorClearTimeMsec *int64 `locationName:"errorClearTimeMsec" min:"1" type:"integer"` + + // A list of failover conditions. If any of these conditions occur, MediaLive + // will perform a failover to the other input. 
+ FailoverConditions []*FailoverCondition `locationName:"failoverConditions" type:"list"` + // Input preference when deciding which input to make active when a previously // failed input has recovered. InputPreference *string `locationName:"inputPreference" type:"string" enum:"InputPreference"` @@ -6846,9 +6857,22 @@ func (s AutomaticInputFailoverSettings) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *AutomaticInputFailoverSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "AutomaticInputFailoverSettings"} + if s.ErrorClearTimeMsec != nil && *s.ErrorClearTimeMsec < 1 { + invalidParams.Add(request.NewErrParamMinValue("ErrorClearTimeMsec", 1)) + } if s.SecondaryInputId == nil { invalidParams.Add(request.NewErrParamRequired("SecondaryInputId")) } + if s.FailoverConditions != nil { + for i, v := range s.FailoverConditions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FailoverConditions", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6856,6 +6880,18 @@ func (s *AutomaticInputFailoverSettings) Validate() error { return nil } +// SetErrorClearTimeMsec sets the ErrorClearTimeMsec field's value. +func (s *AutomaticInputFailoverSettings) SetErrorClearTimeMsec(v int64) *AutomaticInputFailoverSettings { + s.ErrorClearTimeMsec = &v + return s +} + +// SetFailoverConditions sets the FailoverConditions field's value. +func (s *AutomaticInputFailoverSettings) SetFailoverConditions(v []*FailoverCondition) *AutomaticInputFailoverSettings { + s.FailoverConditions = v + return s +} + // SetInputPreference sets the InputPreference field's value. 
func (s *AutomaticInputFailoverSettings) SetInputPreference(v string) *AutomaticInputFailoverSettings { s.InputPreference = &v @@ -12867,6 +12903,86 @@ func (s *EncoderSettings) SetVideoDescriptions(v []*VideoDescription) *EncoderSe return s } +// Failover Condition settings. There can be multiple failover conditions inside +// AutomaticInputFailoverSettings. +type FailoverCondition struct { + _ struct{} `type:"structure"` + + // Failover condition type-specific settings. + FailoverConditionSettings *FailoverConditionSettings `locationName:"failoverConditionSettings" type:"structure"` +} + +// String returns the string representation +func (s FailoverCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverCondition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FailoverCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FailoverCondition"} + if s.FailoverConditionSettings != nil { + if err := s.FailoverConditionSettings.Validate(); err != nil { + invalidParams.AddNested("FailoverConditionSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFailoverConditionSettings sets the FailoverConditionSettings field's value. +func (s *FailoverCondition) SetFailoverConditionSettings(v *FailoverConditionSettings) *FailoverCondition { + s.FailoverConditionSettings = v + return s +} + +// Settings for one failover condition. +type FailoverConditionSettings struct { + _ struct{} `type:"structure"` + + // MediaLive will perform a failover if content is not detected in this input + // for the specified period. 
+ InputLossSettings *InputLossFailoverSettings `locationName:"inputLossSettings" type:"structure"` +} + +// String returns the string representation +func (s FailoverConditionSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverConditionSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FailoverConditionSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FailoverConditionSettings"} + if s.InputLossSettings != nil { + if err := s.InputLossSettings.Validate(); err != nil { + invalidParams.AddNested("InputLossSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputLossSettings sets the InputLossSettings field's value. +func (s *FailoverConditionSettings) SetInputLossSettings(v *InputLossFailoverSettings) *FailoverConditionSettings { + s.InputLossSettings = v + return s +} + // Feature Activations type FeatureActivations struct { _ struct{} `type:"structure"` @@ -14721,6 +14837,14 @@ type HlsGroupSettings struct { // Place segments in subdirectories. DirectoryStructure *string `locationName:"directoryStructure" type:"string" enum:"HlsDirectoryStructure"` + // Specifies whether to insert EXT-X-DISCONTINUITY tags in the HLS child manifests + // for this output group.Typically, choose Insert because these tags are required + // in the manifest (according to the HLS specification) and serve an important + // purpose.Choose Never Insert only if the downstream system is doing real-time + // failover (without using the MediaLive automatic failover feature) and only + // if that downstream system has advised you to exclude the tags. + DiscontinuityTags *string `locationName:"discontinuityTags" type:"string" enum:"HlsDiscontinuityTags"` + // Encrypts the segments with the given encryption scheme. 
Exclude this parameter // if no encryption is desired. EncryptionType *string `locationName:"encryptionType" type:"string" enum:"HlsEncryptionType"` @@ -14740,6 +14864,14 @@ type HlsGroupSettings struct { // For example, #EXT-X-BYTERANGE:160364@1461888" IFrameOnlyPlaylists *string `locationName:"iFrameOnlyPlaylists" type:"string" enum:"IFrameOnlyPlaylistType"` + // Specifies whether to include the final (incomplete) segment in the media + // output when the pipeline stops producing output because of a channel stop, + // a channel pause or a loss of input to the pipeline.Auto means that MediaLive + // decides whether to include the final segment, depending on the channel class + // and the types of output groups.Suppress means to never include the incomplete + // segment. We recommend you choose Auto and let MediaLive control the behavior. + IncompleteSegmentBehavior *string `locationName:"incompleteSegmentBehavior" type:"string" enum:"HlsIncompleteSegmentBehavior"` + // Applies only if Mode field is LIVE.Specifies the maximum number of segments // in the media manifest file. After this maximum, older segments are removed // from the media manifest. This number must be smaller than the number in the @@ -14992,6 +15124,12 @@ func (s *HlsGroupSettings) SetDirectoryStructure(v string) *HlsGroupSettings { return s } +// SetDiscontinuityTags sets the DiscontinuityTags field's value. +func (s *HlsGroupSettings) SetDiscontinuityTags(v string) *HlsGroupSettings { + s.DiscontinuityTags = &v + return s +} + // SetEncryptionType sets the EncryptionType field's value. func (s *HlsGroupSettings) SetEncryptionType(v string) *HlsGroupSettings { s.EncryptionType = &v @@ -15016,6 +15154,12 @@ func (s *HlsGroupSettings) SetIFrameOnlyPlaylists(v string) *HlsGroupSettings { return s } +// SetIncompleteSegmentBehavior sets the IncompleteSegmentBehavior field's value. 
+func (s *HlsGroupSettings) SetIncompleteSegmentBehavior(v string) *HlsGroupSettings { + s.IncompleteSegmentBehavior = &v + return s +} + // SetIndexNSegments sets the IndexNSegments field's value. func (s *HlsGroupSettings) SetIndexNSegments(v int64) *HlsGroupSettings { s.IndexNSegments = &v @@ -16526,6 +16670,45 @@ func (s *InputLossBehavior) SetRepeatFrameMsec(v int64) *InputLossBehavior { return s } +// MediaLive will perform a failover if content is not detected in this input +// for the specified period. +type InputLossFailoverSettings struct { + _ struct{} `type:"structure"` + + // The amount of time (in milliseconds) that no input is detected. After that + // time, an input failover will occur. + InputLossThresholdMsec *int64 `locationName:"inputLossThresholdMsec" min:"100" type:"integer"` +} + +// String returns the string representation +func (s InputLossFailoverSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputLossFailoverSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputLossFailoverSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputLossFailoverSettings"} + if s.InputLossThresholdMsec != nil && *s.InputLossThresholdMsec < 100 { + invalidParams.Add(request.NewErrParamMinValue("InputLossThresholdMsec", 100)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputLossThresholdMsec sets the InputLossThresholdMsec field's value. +func (s *InputLossFailoverSettings) SetInputLossThresholdMsec(v int64) *InputLossFailoverSettings { + s.InputLossThresholdMsec = &v + return s +} + // Action to prepare an input for a future immediate input switch. 
type InputPrepareScheduleActionSettings struct { _ struct{} `type:"structure"` @@ -28418,6 +28601,23 @@ func HlsDirectoryStructure_Values() []string { } } +// Hls Discontinuity Tags +const ( + // HlsDiscontinuityTagsInsert is a HlsDiscontinuityTags enum value + HlsDiscontinuityTagsInsert = "INSERT" + + // HlsDiscontinuityTagsNeverInsert is a HlsDiscontinuityTags enum value + HlsDiscontinuityTagsNeverInsert = "NEVER_INSERT" +) + +// HlsDiscontinuityTags_Values returns all elements of the HlsDiscontinuityTags enum +func HlsDiscontinuityTags_Values() []string { + return []string{ + HlsDiscontinuityTagsInsert, + HlsDiscontinuityTagsNeverInsert, + } +} + // Hls Encryption Type const ( // HlsEncryptionTypeAes128 is a HlsEncryptionType enum value @@ -28469,6 +28669,23 @@ func HlsId3SegmentTaggingState_Values() []string { } } +// Hls Incomplete Segment Behavior +const ( + // HlsIncompleteSegmentBehaviorAuto is a HlsIncompleteSegmentBehavior enum value + HlsIncompleteSegmentBehaviorAuto = "AUTO" + + // HlsIncompleteSegmentBehaviorSuppress is a HlsIncompleteSegmentBehavior enum value + HlsIncompleteSegmentBehaviorSuppress = "SUPPRESS" +) + +// HlsIncompleteSegmentBehavior_Values returns all elements of the HlsIncompleteSegmentBehavior enum +func HlsIncompleteSegmentBehavior_Values() []string { + return []string{ + HlsIncompleteSegmentBehaviorAuto, + HlsIncompleteSegmentBehaviorSuppress, + } +} + // Hls Iv In Manifest const ( // HlsIvInManifestExclude is a HlsIvInManifest enum value diff --git a/service/sns/api.go b/service/sns/api.go index a8a408ab2a7..4fc7edc98b8 100644 --- a/service/sns/api.go +++ b/service/sns/api.go @@ -2862,7 +2862,7 @@ func (c *SNS) SetSMSAttributesRequest(input *SetSMSAttributesInput) (req *reques // // You can override some of these settings for a single message when you use // the Publish action with the MessageAttributes.entry.N parameter. 
For more -// information, see Sending an SMS Message (https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html) +// information, see Publishing to a mobile phone (https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html) // in the Amazon SNS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4004,18 +4004,19 @@ type CreateTopicInput struct { // For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // The following attribute applies only to FIFO topics: + // The following attributes apply only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): // - // * ContentBasedDeduplication – Enables content-based deduplication. Amazon - // SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the - // body of the message (but not the attributes of the message). + // * FifoTopic – When this is set to true, a FIFO topic is created. // - // * When ContentBasedDeduplication is in effect, messages with identical - // content sent within the deduplication interval are treated as duplicates - // and only one copy of the message is delivered. - // - // * If the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. + // * ContentBasedDeduplication – Enables content-based deduplication for + // FIFO topics. By default, ContentBasedDeduplication is set to false. If + // you create a FIFO topic and this attribute is false, you must specify + // a value for the MessageDeduplicationId parameter for the Publish (https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) + // action. When you set ContentBasedDeduplication to true, Amazon SNS uses + // a SHA-256 hash to generate the MessageDeduplicationId using the body of + // the message (but not the attributes of the message). 
(Optional) To override + // the generated value, you can specify a value for the the MessageDeduplicationId + // parameter for the Publish action. Attributes map[string]*string `type:"map"` // The name of the topic you want to create. @@ -4681,6 +4682,20 @@ type GetTopicAttributesOutput struct { // for Amazon SNS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms). // For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. + // + // The following attributes apply only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): + // + // * FifoTopic – When this is set to true, a FIFO topic is created. + // + // * ContentBasedDeduplication – Enables content-based deduplication for + // FIFO topics. By default, ContentBasedDeduplication is set to false. If + // you create a FIFO topic and this attribute is false, you must specify + // a value for the MessageDeduplicationId parameter for the Publish (https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) + // action. When you set ContentBasedDeduplication to true, Amazon SNS uses + // a SHA-256 hash to generate the MessageDeduplicationId using the body of + // the message (but not the attributes of the message). (Optional) To override + // the generated value, you can specify a value for the the MessageDeduplicationId + // parameter for the Publish action. Attributes map[string]*string `type:"map"` } @@ -5172,8 +5187,10 @@ func (s *ListTopicsOutput) SetTopics(v []*Topic) *ListTopicsOutput { // Name, type, and value must not be empty or null. In addition, the message // body should not be empty or null. All parts of the message attribute, including // name, type, and value, are included in the message size restriction, which -// is currently 256 KB (262,144 bytes). 
For more information, see Using Amazon -// SNS Message Attributes (https://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html). +// is currently 256 KB (262,144 bytes). For more information, see Amazon SNS +// message attributes (https://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html) +// and Publishing to a mobile phone (https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html) +// in the Amazon SNS Developer Guide. type MessageAttributeValue struct { _ struct{} `type:"structure"` @@ -6067,18 +6084,17 @@ type SetTopicAttributesInput struct { // For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // The following attribute applies only to FIFO topics: - // - // * ContentBasedDeduplication – Enables content-based deduplication. Amazon - // SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the - // body of the message (but not the attributes of the message). - // - // * When ContentBasedDeduplication is in effect, messages with identical - // content sent within the deduplication interval are treated as duplicates - // and only one copy of the message is delivered. + // The following attribute applies only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): // - // * If the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. + // * ContentBasedDeduplication – Enables content-based deduplication for + // FIFO topics. By default, ContentBasedDeduplication is set to false. If + // you create a FIFO topic and this attribute is false, you must specify + // a value for the MessageDeduplicationId parameter for the Publish (https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) + // action. 
When you set ContentBasedDeduplication to true, Amazon SNS uses + // a SHA-256 hash to generate the MessageDeduplicationId using the body of + // the message (but not the attributes of the message). (Optional) To override + // the generated value, you can specify a value for the the MessageDeduplicationId + // parameter for the Publish action. // // AttributeName is a required field AttributeName *string `type:"string" required:"true"` @@ -6228,12 +6244,12 @@ type SubscribeInput struct { // Sets whether the response from the Subscribe request includes the subscription // ARN, even if the subscription is not yet confirmed. // - // * If you set this parameter to true, the response includes the ARN in - // all cases, even if the subscription is not yet confirmed. In addition - // to the ARN for confirmed subscriptions, the response also includes the - // pending subscription ARN value for subscriptions that aren't yet confirmed. - // A subscription becomes confirmed when the subscriber calls the ConfirmSubscription - // action with a confirmation token. + // If you set this parameter to true, the response includes the ARN in all cases, + // even if the subscription is not yet confirmed. In addition to the ARN for + // confirmed subscriptions, the response also includes the pending subscription + // ARN value for subscriptions that aren't yet confirmed. A subscription becomes + // confirmed when the subscriber calls the ConfirmSubscription action with a + // confirmation token. // // The default value is false. ReturnSubscriptionArn *bool `type:"boolean"`